1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ipr.c -- driver for IBM Power Linux RAID adapters
4  *
5  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) 2003, 2004 IBM Corporation
8  */
9 
10 /*
11  * Notes:
12  *
13  * This driver is used to control the following SCSI adapters:
14  *
15  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
16  *
17  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
19  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
20  *              Embedded SCSI adapter on p615 and p655 systems
21  *
22  * Supported Hardware Features:
23  *	- Ultra 320 SCSI controller
24  *	- PCI-X host interface
25  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26  *	- Non-Volatile Write Cache
27  *	- Supports attachment of non-RAID disks, tape, and optical devices
28  *	- RAID Levels 0, 5, 10
29  *	- Hot spare
30  *	- Background Parity Checking
31  *	- Background Data Scrubbing
32  *	- Ability to increase the capacity of an existing RAID 5 disk array
33  *		by adding disks
34  *
35  * Driver Features:
36  *	- Tagged command queuing
37  *	- Adapter microcode download
38  *	- PCI hot plug
39  *	- SCSI device hot plug
40  *
41  */
42 
43 #include <linux/fs.h>
44 #include <linux/init.h>
45 #include <linux/types.h>
46 #include <linux/errno.h>
47 #include <linux/kernel.h>
48 #include <linux/slab.h>
49 #include <linux/vmalloc.h>
50 #include <linux/ioport.h>
51 #include <linux/delay.h>
52 #include <linux/pci.h>
53 #include <linux/wait.h>
54 #include <linux/spinlock.h>
55 #include <linux/sched.h>
56 #include <linux/interrupt.h>
57 #include <linux/blkdev.h>
58 #include <linux/firmware.h>
59 #include <linux/module.h>
60 #include <linux/moduleparam.h>
61 #include <linux/hdreg.h>
62 #include <linux/reboot.h>
63 #include <linux/stringify.h>
64 #include <asm/io.h>
65 #include <asm/irq.h>
66 #include <asm/processor.h>
67 #include <scsi/scsi.h>
68 #include <scsi/scsi_host.h>
69 #include <scsi/scsi_tcq.h>
70 #include <scsi/scsi_eh.h>
71 #include <scsi/scsi_cmnd.h>
72 #include "ipr.h"
73 
74 /*
75  *   Global Data
76  */
77 static LIST_HEAD(ipr_ioa_head);
78 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
79 static unsigned int ipr_max_speed = 1;
80 static int ipr_testmode = 0;
81 static unsigned int ipr_fastfail = 0;
82 static unsigned int ipr_transop_timeout = 0;
83 static unsigned int ipr_debug = 0;
84 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
85 static unsigned int ipr_dual_ioa_raid = 1;
86 static unsigned int ipr_number_of_msix = 16;
87 static unsigned int ipr_fast_reboot;
88 static DEFINE_SPINLOCK(ipr_driver_lock);
89 
90 /* This table describes the differences between DMA controller chips */
91 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
92 	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
93 		.mailbox = 0x0042C,
94 		.max_cmds = 100,
95 		.cache_line_size = 0x20,
96 		.clear_isr = 1,
97 		.iopoll_weight = 0,
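		/* MMIO register offsets for this chip family */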
98 		{
99 			.set_interrupt_mask_reg = 0x0022C,
100 			.clr_interrupt_mask_reg = 0x00230,
101 			.clr_interrupt_mask_reg32 = 0x00230,
102 			.sense_interrupt_mask_reg = 0x0022C,
103 			.sense_interrupt_mask_reg32 = 0x0022C,
104 			.clr_interrupt_reg = 0x00228,
105 			.clr_interrupt_reg32 = 0x00228,
106 			.sense_interrupt_reg = 0x00224,
107 			.sense_interrupt_reg32 = 0x00224,
108 			.ioarrin_reg = 0x00404,
109 			.sense_uproc_interrupt_reg = 0x00214,
110 			.sense_uproc_interrupt_reg32 = 0x00214,
111 			.set_uproc_interrupt_reg = 0x00214,
112 			.set_uproc_interrupt_reg32 = 0x00214,
113 			.clr_uproc_interrupt_reg = 0x00218,
114 			.clr_uproc_interrupt_reg32 = 0x00218
115 		}
116 	},
117 	{ /* Snipe and Scamp */
118 		.mailbox = 0x0052C,
119 		.max_cmds = 100,
120 		.cache_line_size = 0x20,
121 		.clear_isr = 1,
122 		.iopoll_weight = 0,
123 		{
124 			.set_interrupt_mask_reg = 0x00288,
125 			.clr_interrupt_mask_reg = 0x0028C,
126 			.clr_interrupt_mask_reg32 = 0x0028C,
127 			.sense_interrupt_mask_reg = 0x00288,
128 			.sense_interrupt_mask_reg32 = 0x00288,
129 			.clr_interrupt_reg = 0x00284,
130 			.clr_interrupt_reg32 = 0x00284,
131 			.sense_interrupt_reg = 0x00280,
132 			.sense_interrupt_reg32 = 0x00280,
133 			.ioarrin_reg = 0x00504,
134 			.sense_uproc_interrupt_reg = 0x00290,
135 			.sense_uproc_interrupt_reg32 = 0x00290,
136 			.set_uproc_interrupt_reg = 0x00290,
137 			.set_uproc_interrupt_reg32 = 0x00290,
138 			.clr_uproc_interrupt_reg = 0x00294,
139 			.clr_uproc_interrupt_reg32 = 0x00294
140 		}
141 	},
142 	{ /* CRoC */
143 		.mailbox = 0x00044,
144 		.max_cmds = 1000,
145 		.cache_line_size = 0x20,
146 		.clear_isr = 0,
147 		.iopoll_weight = 64,
148 		{
149 			.set_interrupt_mask_reg = 0x00010,
150 			.clr_interrupt_mask_reg = 0x00018,
151 			.clr_interrupt_mask_reg32 = 0x0001C,
152 			.sense_interrupt_mask_reg = 0x00010,
153 			.sense_interrupt_mask_reg32 = 0x00014,
154 			.clr_interrupt_reg = 0x00008,
155 			.clr_interrupt_reg32 = 0x0000C,
156 			.sense_interrupt_reg = 0x00000,
157 			.sense_interrupt_reg32 = 0x00004,
158 			.ioarrin_reg = 0x00070,
159 			.sense_uproc_interrupt_reg = 0x00020,
160 			.sense_uproc_interrupt_reg32 = 0x00024,
161 			.set_uproc_interrupt_reg = 0x00020,
162 			.set_uproc_interrupt_reg32 = 0x00024,
163 			.clr_uproc_interrupt_reg = 0x00028,
164 			.clr_uproc_interrupt_reg32 = 0x0002C,
165 			.init_feedback_reg = 0x0005C,
166 			.dump_addr_reg = 0x00064,
167 			.dump_data_reg = 0x00068,
168 			.endian_swap_reg = 0x00084
169 		}
170 	},
171 };
172 
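/* Map each supported PCI vendor/device ID to one of the chip configurations above */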
173 static const struct ipr_chip_t ipr_chip[] = {
174 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
175 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
176 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
177 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
178 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
179 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
180 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
181 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
182 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
183 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
184 };
185 
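/* Indexed by the max_speed module parameter */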
186 static int ipr_max_bus_speeds[] = {
187 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
188 };
189 
190 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
191 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
192 module_param_named(max_speed, ipr_max_speed, uint, 0);
193 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
194 module_param_named(log_level, ipr_log_level, uint, 0);
195 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
196 module_param_named(testmode, ipr_testmode, int, 0);
197 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
198 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
199 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
200 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
201 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to become operational (default: 300)");
202 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
203 MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
204 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
205 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
206 module_param_named(max_devs, ipr_max_devs, int, 0);
207 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
208 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
209 module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
210 MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
211 module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
212 MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
213 MODULE_LICENSE("GPL");
214 MODULE_VERSION(IPR_DRIVER_VERSION);
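/* Example (illustrative only): modprobe ipr log_level=2 number_of_msix=8 fast_reboot=1 */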
215 
216 /*  A constant array of IOASCs/URCs/Error Messages */
217 static const
218 struct ipr_error_table_t ipr_error_table[] = {
219 	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
220 	"8155: An unknown error was received"},
221 	{0x00330000, 0, 0,
222 	"Soft underlength error"},
223 	{0x005A0000, 0, 0,
224 	"Command to be cancelled not found"},
225 	{0x00808000, 0, 0,
226 	"Qualified success"},
227 	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
228 	"FFFE: Soft device bus error recovered by the IOA"},
229 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
230 	"4101: Soft device bus fabric error"},
231 	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
232 	"FFFC: Logical block guard error recovered by the device"},
233 	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
234 	"FFFC: Logical block reference tag error recovered by the device"},
235 	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
236 	"4171: Recovered scatter list tag / sequence number error"},
237 	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
238 	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
239 	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
240 	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
241 	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
242 	"FFFD: Recovered logical block reference tag error detected by the IOA"},
243 	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
244 	"FFFD: Logical block guard error recovered by the IOA"},
245 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
246 	"FFF9: Device sector reassign successful"},
247 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
248 	"FFF7: Media error recovered by device rewrite procedures"},
249 	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
250 	"7001: IOA sector reassignment successful"},
251 	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
252 	"FFF9: Soft media error. Sector reassignment recommended"},
253 	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
254 	"FFF7: Media error recovered by IOA rewrite procedures"},
255 	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
256 	"FF3D: Soft PCI bus error recovered by the IOA"},
257 	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
258 	"FFF6: Device hardware error recovered by the IOA"},
259 	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
260 	"FFF6: Device hardware error recovered by the device"},
261 	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
262 	"FF3D: Soft IOA error recovered by the IOA"},
263 	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
264 	"FFFA: Undefined device response recovered by the IOA"},
265 	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
266 	"FFF6: Device bus error, message or command phase"},
267 	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
268 	"FFFE: Task Management Function failed"},
269 	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
270 	"FFF6: Failure prediction threshold exceeded"},
271 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
272 	"8009: Impending cache battery pack failure"},
273 	{0x02040100, 0, 0,
274 	"Logical Unit in process of becoming ready"},
275 	{0x02040200, 0, 0,
276 	"Initializing command required"},
277 	{0x02040400, 0, 0,
278 	"34FF: Disk device format in progress"},
279 	{0x02040C00, 0, 0,
280 	"Logical unit not accessible, target port in unavailable state"},
281 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
282 	"9070: IOA requested reset"},
283 	{0x023F0000, 0, 0,
284 	"Synchronization required"},
285 	{0x02408500, 0, 0,
286 	"IOA microcode download required"},
287 	{0x02408600, 0, 0,
288 	"Device bus connection is prohibited by host"},
289 	{0x024E0000, 0, 0,
290 	"Not ready, IOA shutdown"},
291 	{0x025A0000, 0, 0,
292 	"Not ready, IOA has been shutdown"},
293 	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
294 	"3020: Storage subsystem configuration error"},
295 	{0x03110B00, 0, 0,
296 	"FFF5: Medium error, data unreadable, recommend reassign"},
297 	{0x03110C00, 0, 0,
298 	"7000: Medium error, data unreadable, do not reassign"},
299 	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
300 	"FFF3: Disk media format bad"},
301 	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
302 	"3002: Addressed device failed to respond to selection"},
303 	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
304 	"3100: Device bus error"},
305 	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
306 	"3109: IOA timed out a device command"},
307 	{0x04088000, 0, 0,
308 	"3120: SCSI bus is not operational"},
309 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
310 	"4100: Hard device bus fabric error"},
311 	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
312 	"310C: Logical block guard error detected by the device"},
313 	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
314 	"310C: Logical block reference tag error detected by the device"},
315 	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
316 	"4170: Scatter list tag / sequence number error"},
317 	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
318 	"8150: Logical block CRC error on IOA to Host transfer"},
319 	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
320 	"4170: Logical block sequence number error on IOA to Host transfer"},
321 	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
322 	"310D: Logical block reference tag error detected by the IOA"},
323 	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
324 	"310D: Logical block guard error detected by the IOA"},
325 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
326 	"9000: IOA reserved area data check"},
327 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
328 	"9001: IOA reserved area invalid data pattern"},
329 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
330 	"9002: IOA reserved area LRC error"},
331 	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
332 	"Hardware Error, IOA metadata access error"},
333 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
334 	"102E: Out of alternate sectors for disk storage"},
335 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
336 	"FFF4: Data transfer underlength error"},
337 	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
338 	"FFF4: Data transfer overlength error"},
339 	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
340 	"3400: Logical unit failure"},
341 	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
342 	"FFF4: Device microcode is corrupt"},
343 	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
344 	"8150: PCI bus error"},
345 	{0x04430000, 1, 0,
346 	"Unsupported device bus message received"},
347 	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
348 	"FFF4: Disk device problem"},
349 	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
350 	"8150: Permanent IOA failure"},
351 	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
352 	"3010: Disk device returned wrong response to IOA"},
353 	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
354 	"8151: IOA microcode error"},
355 	{0x04448500, 0, 0,
356 	"Device bus status error"},
357 	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
358 	"8157: IOA error requiring IOA reset to recover"},
359 	{0x04448700, 0, 0,
360 	"ATA device status error"},
361 	{0x04490000, 0, 0,
362 	"Message reject received from the device"},
363 	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
364 	"8008: A permanent cache battery pack failure occurred"},
365 	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
366 	"9090: Disk unit has been modified after the last known status"},
367 	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
368 	"9081: IOA detected device error"},
369 	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
370 	"9082: IOA detected device error"},
371 	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
372 	"3110: Device bus error, message or command phase"},
373 	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
374 	"3110: SAS Command / Task Management Function failed"},
375 	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
376 	"9091: Incorrect hardware configuration change has been detected"},
377 	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
378 	"9073: Invalid multi-adapter configuration"},
379 	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
380 	"4010: Incorrect connection between cascaded expanders"},
381 	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
382 	"4020: Connections exceed IOA design limits"},
383 	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
384 	"4030: Incorrect multipath connection"},
385 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
386 	"4110: Unsupported enclosure function"},
387 	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
388 	"4120: SAS cable VPD cannot be read"},
389 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
390 	"FFF4: Command to logical unit failed"},
391 	{0x05240000, 1, 0,
392 	"Illegal request, invalid request type or request packet"},
393 	{0x05250000, 0, 0,
394 	"Illegal request, invalid resource handle"},
395 	{0x05258000, 0, 0,
396 	"Illegal request, commands not allowed to this device"},
397 	{0x05258100, 0, 0,
398 	"Illegal request, command not allowed to a secondary adapter"},
399 	{0x05258200, 0, 0,
400 	"Illegal request, command not allowed to a non-optimized resource"},
401 	{0x05260000, 0, 0,
402 	"Illegal request, invalid field in parameter list"},
403 	{0x05260100, 0, 0,
404 	"Illegal request, parameter not supported"},
405 	{0x05260200, 0, 0,
406 	"Illegal request, parameter value invalid"},
407 	{0x052C0000, 0, 0,
408 	"Illegal request, command sequence error"},
409 	{0x052C8000, 1, 0,
410 	"Illegal request, dual adapter support not enabled"},
411 	{0x052C8100, 1, 0,
412 	"Illegal request, another cable connector was physically disabled"},
413 	{0x054E8000, 1, 0,
414 	"Illegal request, inconsistent group id/group count"},
415 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
416 	"9031: Array protection temporarily suspended, protection resuming"},
417 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
418 	"9040: Array protection temporarily suspended, protection resuming"},
419 	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
420 	"4080: IOA exceeded maximum operating temperature"},
421 	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
422 	"4085: Service required"},
423 	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
424 	"4086: SAS Adapter Hardware Configuration Error"},
425 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
426 	"3140: Device bus not ready to ready transition"},
427 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
428 	"FFFB: SCSI bus was reset"},
429 	{0x06290500, 0, 0,
430 	"FFFE: SCSI bus transition to single ended"},
431 	{0x06290600, 0, 0,
432 	"FFFE: SCSI bus transition to LVD"},
433 	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
434 	"FFFB: SCSI bus was reset by another initiator"},
435 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
436 	"3029: A device replacement has occurred"},
437 	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
438 	"4102: Device bus fabric performance degradation"},
439 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
440 	"9051: IOA cache data exists for a missing or failed device"},
441 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
442 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
443 	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
444 	"9025: Disk unit is not supported at its physical location"},
445 	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
446 	"3020: IOA detected a SCSI bus configuration error"},
447 	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
448 	"3150: SCSI bus configuration error"},
449 	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
450 	"9074: Asymmetric advanced function disk configuration"},
451 	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
452 	"4040: Incomplete multipath connection between IOA and enclosure"},
453 	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
454 	"4041: Incomplete multipath connection between enclosure and device"},
455 	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
456 	"9075: Incomplete multipath connection between IOA and remote IOA"},
457 	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
458 	"9076: Configuration error, missing remote IOA"},
459 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
460 	"4050: Enclosure does not support a required multipath function"},
461 	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
462 	"4121: Configuration error, required cable is missing"},
463 	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
464 	"4122: Cable is not plugged into the correct location on remote IOA"},
465 	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
466 	"4123: Configuration error, invalid cable vital product data"},
467 	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
468 	"4124: Configuration error, both cable ends are plugged into the same IOA"},
469 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
470 	"4070: Logically bad block written on device"},
471 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
472 	"9041: Array protection temporarily suspended"},
473 	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
474 	"9042: Corrupt array parity detected on specified device"},
475 	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
476 	"9030: Array no longer protected due to missing or failed disk unit"},
477 	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
478 	"9071: Link operational transition"},
479 	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
480 	"9072: Link not operational transition"},
481 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
482 	"9032: Array exposed but still protected"},
483 	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
484 	"70DD: Device forced failed by disrupt device command"},
485 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
486 	"4061: Multipath redundancy level got better"},
487 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
488 	"4060: Multipath redundancy level got worse"},
489 	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
490 	"9083: Device raw mode enabled"},
491 	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
492 	"9084: Device raw mode disabled"},
493 	{0x07270000, 0, 0,
494 	"Failure due to other device"},
495 	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
496 	"9008: IOA does not support functions expected by devices"},
497 	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
498 	"9010: Cache data associated with attached devices cannot be found"},
499 	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
500 	"9011: Cache data belongs to devices other than those attached"},
501 	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
502 	"9020: Array missing 2 or more devices with only 1 device present"},
503 	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
504 	"9021: Array missing 2 or more devices with 2 or more devices present"},
505 	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
506 	"9022: Exposed array is missing a required device"},
507 	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
508 	"9023: Array member(s) not at required physical locations"},
509 	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
510 	"9024: Array not functional due to present hardware configuration"},
511 	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
512 	"9026: Array not functional due to present hardware configuration"},
513 	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
514 	"9027: Array is missing a device and parity is out of sync"},
515 	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
516 	"9028: Maximum number of arrays already exist"},
517 	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
518 	"9050: Required cache data cannot be located for a disk unit"},
519 	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
520 	"9052: Cache data exists for a device that has been modified"},
521 	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
522 	"9054: IOA resources not available due to previous problems"},
523 	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
524 	"9092: Disk unit requires initialization before use"},
525 	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
526 	"9029: Incorrect hardware configuration change has been detected"},
527 	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
528 	"9060: One or more disk pairs are missing from an array"},
529 	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
530 	"9061: One or more disks are missing from an array"},
531 	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
532 	"9062: One or more disks are missing from an array"},
533 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
534 	"9063: Maximum number of functional arrays has been exceeded"},
535 	{0x07279A00, 0, 0,
536 	"Data protect, other volume set problem"},
537 	{0x0B260000, 0, 0,
538 	"Aborted command, invalid descriptor"},
539 	{0x0B3F9000, 0, 0,
540 	"Target operating conditions have changed, dual adapter takeover"},
541 	{0x0B530200, 0, 0,
542 	"Aborted command, medium removal prevented"},
543 	{0x0B5A0000, 0, 0,
544 	"Command terminated by host"},
545 	{0x0B5B8000, 0, 0,
546 	"Aborted command, command terminated by host"}
547 };
548 
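/*
 * Enclosure product IDs, a per-byte compare mask ('X' means the byte must
 * match), and the maximum bus speed allowed when that enclosure is attached.
 */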
549 static const struct ipr_ses_table_entry ipr_ses_table[] = {
550 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
551 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
552 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
553 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
554 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
555 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
556 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
557 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
558 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
559 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
560 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
561 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
562 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
563 };
564 
565 /*
566  *  Function Prototypes
567  */
568 static int ipr_reset_alert(struct ipr_cmnd *);
569 static void ipr_process_ccn(struct ipr_cmnd *);
570 static void ipr_process_error(struct ipr_cmnd *);
571 static void ipr_reset_ioa_job(struct ipr_cmnd *);
572 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
573 				   enum ipr_shutdown_type);
574 
575 #ifdef CONFIG_SCSI_IPR_TRACE
576 /**
577  * ipr_trc_hook - Add a trace entry to the driver trace
578  * @ipr_cmd:	ipr command struct
579  * @type:		trace type
580  * @add_data:	additional data
581  *
582  * Return value:
583  * 	none
584  **/
585 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
586 			 u8 type, u32 add_data)
587 {
588 	struct ipr_trace_entry *trace_entry;
589 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
590 	unsigned int trace_index;
591 
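	/* Claim the next slot in the circular trace buffer; the mask wraps the index */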
592 	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
593 	trace_entry = &ioa_cfg->trace[trace_index];
594 	trace_entry->time = jiffies;
595 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
596 	trace_entry->type = type;
597 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
598 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
599 	trace_entry->u.add_data = add_data;
600 	wmb();	/* order the trace entry stores before anything that follows */
601 }
602 #else
603 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
604 #endif
605 
606 /**
607  * ipr_lock_and_done - Acquire lock and complete command
608  * @ipr_cmd:	ipr command struct
609  *
610  * Return value:
611  *	none
612  **/
613 static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
614 {
615 	unsigned long lock_flags;
616 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
617 
618 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
619 	ipr_cmd->done(ipr_cmd);
620 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
621 }
622 
623 /**
624  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
625  * @ipr_cmd:	ipr command struct
626  *
627  * Return value:
628  * 	none
629  **/
630 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
631 {
632 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
633 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
634 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
635 	int hrrq_id;
636 
637 	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
638 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
639 	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
640 	ioarcb->data_transfer_length = 0;
641 	ioarcb->read_data_transfer_length = 0;
642 	ioarcb->ioadl_len = 0;
643 	ioarcb->read_ioadl_len = 0;
644 
645 	if (ipr_cmd->ioa_cfg->sis64) {
646 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
647 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
648 	} else {
649 		ioarcb->write_ioadl_addr =
650 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
651 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
652 	}
653 
654 	ioasa->hdr.ioasc = 0;
655 	ioasa->hdr.residual_data_len = 0;
656 	ipr_cmd->scsi_cmd = NULL;
657 	ipr_cmd->sense_buffer[0] = 0;
658 	ipr_cmd->dma_use_sg = 0;
659 }
660 
661 /**
662  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
663  * @ipr_cmd:	ipr command struct
664  * @fast_done:	fast done function call-back
665  *
666  * Return value:
667  * 	none
668  **/
669 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
670 			      void (*fast_done) (struct ipr_cmnd *))
671 {
672 	ipr_reinit_ipr_cmnd(ipr_cmd);
673 	ipr_cmd->u.scratch = 0;
674 	ipr_cmd->sibling = NULL;
675 	ipr_cmd->eh_comp = NULL;
676 	ipr_cmd->fast_done = fast_done;
677 	timer_setup(&ipr_cmd->timer, NULL, 0);
678 }
679 
680 /**
681  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
682  * @hrrq:	hrr queue
683  *
684  * Return value:
685  * 	pointer to ipr command struct
686  **/
687 static
688 struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
689 {
690 	struct ipr_cmnd *ipr_cmd = NULL;
691 
692 	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
693 		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
694 			struct ipr_cmnd, queue);
695 		list_del(&ipr_cmd->queue);
696 	}
697 
698 
699 	return ipr_cmd;
700 }
701 
702 /**
703  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
704  * @ioa_cfg:	ioa config struct
705  *
706  * Return value:
707  *	pointer to ipr command struct
708  **/
709 static
710 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
711 {
712 	struct ipr_cmnd *ipr_cmd =
713 		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
714 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
715 	return ipr_cmd;
716 }
717 
718 /**
719  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
720  * @ioa_cfg:	ioa config struct
721  * @clr_ints:     interrupts to clear
722  *
723  * This function masks all interrupts on the adapter, then clears the
724  * interrupts specified in the mask
725  *
726  * Return value:
727  * 	none
728  **/
729 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
730 					  u32 clr_ints)
731 {
732 	int i;
733 
734 	/* Stop new interrupts */
735 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
736 		spin_lock(&ioa_cfg->hrrq[i]._lock);
737 		ioa_cfg->hrrq[i].allow_interrupts = 0;
738 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
739 	}
740 
741 	/* Set interrupt mask to stop all new interrupts */
742 	if (ioa_cfg->sis64)
743 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
744 	else
745 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
746 
747 	/* Clear any pending interrupts */
748 	if (ioa_cfg->sis64)
749 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
750 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
751 	readl(ioa_cfg->regs.sense_interrupt_reg);
752 }
753 
754 /**
755  * ipr_save_pcix_cmd_reg - Save PCI-X command register
756  * @ioa_cfg:	ioa config struct
757  *
758  * Return value:
759  * 	0 on success / -EIO on failure
760  **/
761 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
762 {
763 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
764 
765 	if (pcix_cmd_reg == 0)
766 		return 0;
767 
768 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
769 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
770 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
771 		return -EIO;
772 	}
773 
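	/* Keep data parity error recovery and relaxed ordering enabled when the saved value is restored */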
774 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
775 	return 0;
776 }
777 
778 /**
779  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
780  * @ioa_cfg:	ioa config struct
781  *
782  * Return value:
783  * 	0 on success / -EIO on failure
784  **/
785 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
786 {
787 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
788 
789 	if (pcix_cmd_reg) {
790 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
792 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
793 			return -EIO;
794 		}
795 	}
796 
797 	return 0;
798 }
799 
800 
801 /**
802  * __ipr_scsi_eh_done - mid-layer done function for aborted ops
803  * @ipr_cmd:	ipr command struct
804  *
805  * This function is invoked by the interrupt handler for
806  * ops generated by the SCSI mid-layer which are being aborted.
807  *
808  * Return value:
809  * 	none
810  **/
811 static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
812 {
813 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
814 
815 	scsi_cmd->result |= (DID_ERROR << 16);
816 
817 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
818 	scsi_done(scsi_cmd);
819 	if (ipr_cmd->eh_comp)
820 		complete(ipr_cmd->eh_comp);
821 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
822 }
823 
824 /**
825  * ipr_scsi_eh_done - mid-layer done function for aborted ops
826  * @ipr_cmd:	ipr command struct
827  *
828  * This function is invoked by the interrupt handler for
829  * ops generated by the SCSI mid-layer which are being aborted.
830  *
831  * Return value:
832  * 	none
833  **/
834 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
835 {
836 	unsigned long hrrq_flags;
837 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
838 
839 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
840 	__ipr_scsi_eh_done(ipr_cmd);
841 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
842 }
843 
844 /**
845  * ipr_fail_all_ops - Fails all outstanding ops.
846  * @ioa_cfg:	ioa config struct
847  *
848  * This function fails all outstanding ops.
849  *
850  * Return value:
851  * 	none
852  **/
853 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
854 {
855 	struct ipr_cmnd *ipr_cmd, *temp;
856 	struct ipr_hrr_queue *hrrq;
857 
858 	ENTER;
859 	for_each_hrrq(hrrq, ioa_cfg) {
860 		spin_lock(&hrrq->_lock);
861 		list_for_each_entry_safe(ipr_cmd,
862 					temp, &hrrq->hrrq_pending_q, queue) {
863 			list_del(&ipr_cmd->queue);
864 
865 			ipr_cmd->s.ioasa.hdr.ioasc =
866 				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
867 			ipr_cmd->s.ioasa.hdr.ilid =
868 				cpu_to_be32(IPR_DRIVER_ILID);
869 
870 			if (ipr_cmd->scsi_cmd)
871 				ipr_cmd->done = __ipr_scsi_eh_done;
872 
873 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
874 				     IPR_IOASC_IOA_WAS_RESET);
875 			del_timer(&ipr_cmd->timer);
876 			ipr_cmd->done(ipr_cmd);
877 		}
878 		spin_unlock(&hrrq->_lock);
879 	}
880 	LEAVE;
881 }
882 
883 /**
884  * ipr_send_command -  Send driver initiated requests.
885  * @ipr_cmd:		ipr command struct
886  *
887  * This function sends a command to the adapter using the correct write call.
888  * In the case of sis64, calculate the ioarcb size required. Then or in the
889  * For sis64 adapters, the required IOARCB size is calculated and encoded in the
890  * low-order bits of the address written to the IOARRIN register.
891  * Return value:
892  * 	none
893  **/
894 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
895 {
896 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
897 	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
898 
899 	if (ioa_cfg->sis64) {
900 		/* The default size is 256 bytes */
901 		send_dma_addr |= 0x1;
902 
903 		/* If the number of ioadls * size of ioadl > 128 bytes,
904 		   then use a 512 byte ioarcb */
905 		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
906 			send_dma_addr |= 0x4;
907 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
908 	} else
909 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
910 }
911 
912 /**
913  * ipr_do_req -  Send driver initiated requests.
914  * @ipr_cmd:		ipr command struct
915  * @done:			done function
916  * @timeout_func:	timeout function
917  * @timeout:		timeout value
918  *
919  * This function sends the specified command to the adapter with the
920  * timeout given. The done function is invoked on command completion.
921  *
922  * Return value:
923  * 	none
924  **/
925 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
926 		       void (*done) (struct ipr_cmnd *),
927 		       void (*timeout_func) (struct timer_list *), u32 timeout)
928 {
929 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
930 
931 	ipr_cmd->done = done;
932 
933 	ipr_cmd->timer.expires = jiffies + timeout;
934 	ipr_cmd->timer.function = timeout_func;
935 
936 	add_timer(&ipr_cmd->timer);
937 
938 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
939 
940 	ipr_send_command(ipr_cmd);
941 }
942 
943 /**
944  * ipr_internal_cmd_done - Op done function for an internally generated op.
945  * @ipr_cmd:	ipr command struct
946  *
947  * This function is the op done function for an internally generated,
948  * blocking op. It simply wakes the sleeping thread.
949  *
950  * Return value:
951  * 	none
952  **/
953 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
954 {
955 	if (ipr_cmd->sibling)
956 		ipr_cmd->sibling = NULL;
957 	else
958 		complete(&ipr_cmd->completion);
959 }
960 
961 /**
962  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
963  * @ipr_cmd:	ipr command struct
964  * @dma_addr:	dma address
965  * @len:	transfer length
966  * @flags:	ioadl flag value
967  *
968  * This function initializes an ioadl in the case where there is only a single
969  * descriptor.
970  *
971  * Return value:
972  * 	nothing
973  **/
974 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
975 			   u32 len, int flags)
976 {
977 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
978 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
979 
980 	ipr_cmd->dma_use_sg = 1;
981 
982 	if (ipr_cmd->ioa_cfg->sis64) {
983 		ioadl64->flags = cpu_to_be32(flags);
984 		ioadl64->data_len = cpu_to_be32(len);
985 		ioadl64->address = cpu_to_be64(dma_addr);
986 
987 		ipr_cmd->ioarcb.ioadl_len =
988 		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
989 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
990 	} else {
991 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
992 		ioadl->address = cpu_to_be32(dma_addr);
993 
994 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
995 			ipr_cmd->ioarcb.read_ioadl_len =
996 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
997 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
998 		} else {
999 			ipr_cmd->ioarcb.ioadl_len =
1000 			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1001 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1002 		}
1003 	}
1004 }
1005 
1006 /**
1007  * ipr_send_blocking_cmd - Send command and sleep on its completion.
1008  * @ipr_cmd:	ipr command struct
1009  * @timeout_func:	function to invoke if command times out
1010  * @timeout:	timeout
1011  *
1012  * Return value:
1013  * 	none
1014  **/
1015 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1016 				  void (*timeout_func) (struct timer_list *),
1017 				  u32 timeout)
1018 {
1019 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1020 
1021 	init_completion(&ipr_cmd->completion);
1022 	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1023 
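	/* Drop the host lock while sleeping; ipr_internal_cmd_done() wakes us on completion */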
1024 	spin_unlock_irq(ioa_cfg->host->host_lock);
1025 	wait_for_completion(&ipr_cmd->completion);
1026 	spin_lock_irq(ioa_cfg->host->host_lock);
1027 }
1028 
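/**
 * ipr_get_hrrq_index - Select an HRR queue for a new command
 * @ioa_cfg:	ioa config struct
 *
 * HRRQ 0 is reserved for driver-internal commands; when more than one
 * queue is available, commands are round-robined across queues 1..N-1.
 *
 * Return value:
 * 	index of the HRR queue to use
 **/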
1029 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1030 {
1031 	unsigned int hrrq;
1032 
1033 	if (ioa_cfg->hrrq_num == 1)
1034 		hrrq = 0;
1035 	else {
1036 		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1037 		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1038 	}
1039 	return hrrq;
1040 }
1041 
1042 /**
1043  * ipr_send_hcam - Send an HCAM to the adapter.
1044  * @ioa_cfg:	ioa config struct
1045  * @type:		HCAM type
1046  * @hostrcb:	hostrcb struct
1047  *
1048  * This function will send a Host Controlled Async command to the adapter.
1049  * If HCAMs are currently not allowed to be issued to the adapter, it will
1050  * place the hostrcb on the free queue.
1051  *
1052  * Return value:
1053  * 	none
1054  **/
1055 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1056 			  struct ipr_hostrcb *hostrcb)
1057 {
1058 	struct ipr_cmnd *ipr_cmd;
1059 	struct ipr_ioarcb *ioarcb;
1060 
1061 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1062 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1063 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1064 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1065 
1066 		ipr_cmd->u.hostrcb = hostrcb;
1067 		ioarcb = &ipr_cmd->ioarcb;
1068 
1069 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1070 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1071 		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1072 		ioarcb->cmd_pkt.cdb[1] = type;
1073 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1074 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1075 
1076 		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1077 			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1078 
1079 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1080 			ipr_cmd->done = ipr_process_ccn;
1081 		else
1082 			ipr_cmd->done = ipr_process_error;
1083 
1084 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1085 
1086 		ipr_send_command(ipr_cmd);
1087 	} else {
1088 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1089 	}
1090 }
1091 
1092 /**
1093  * ipr_init_res_entry - Initialize a resource entry struct.
1094  * @res:	resource entry struct
1095  * @cfgtew:	config table entry wrapper struct
1096  *
1097  * Return value:
1098  * 	none
1099  **/
1100 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1101 			       struct ipr_config_table_entry_wrapper *cfgtew)
1102 {
1103 	int found = 0;
1104 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1105 	struct ipr_resource_entry *gscsi_res = NULL;
1106 
1107 	res->needs_sync_complete = 0;
1108 	res->in_erp = 0;
1109 	res->add_to_ml = 0;
1110 	res->del_from_ml = 0;
1111 	res->resetting_device = 0;
1112 	res->reset_occurred = 0;
1113 	res->sdev = NULL;
1114 
1115 	if (ioa_cfg->sis64) {
1116 		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1117 		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1118 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1119 		res->type = cfgtew->u.cfgte64->res_type;
1120 
1121 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1122 			sizeof(res->res_path));
1123 
1124 		res->bus = 0;
1125 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1126 			sizeof(res->dev_lun.scsi_lun));
1127 		res->lun = scsilun_to_int(&res->dev_lun);
1128 
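		/*
		 * A resource with the same dev_id is another path to a device that
		 * already owns a target ID, so reuse it; otherwise allocate a fresh
		 * ID from the bitmap.
		 */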
1129 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1130 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1131 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1132 					found = 1;
1133 					res->target = gscsi_res->target;
1134 					break;
1135 				}
1136 			}
1137 			if (!found) {
1138 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1139 								  ioa_cfg->max_devs_supported);
1140 				set_bit(res->target, ioa_cfg->target_ids);
1141 			}
1142 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1143 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1144 			res->target = 0;
1145 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1146 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1147 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1148 							  ioa_cfg->max_devs_supported);
1149 			set_bit(res->target, ioa_cfg->array_ids);
1150 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1151 			res->bus = IPR_VSET_VIRTUAL_BUS;
1152 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1153 							  ioa_cfg->max_devs_supported);
1154 			set_bit(res->target, ioa_cfg->vset_ids);
1155 		} else {
1156 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1157 							  ioa_cfg->max_devs_supported);
1158 			set_bit(res->target, ioa_cfg->target_ids);
1159 		}
1160 	} else {
1161 		res->qmodel = IPR_QUEUEING_MODEL(res);
1162 		res->flags = cfgtew->u.cfgte->flags;
1163 		if (res->flags & IPR_IS_IOA_RESOURCE)
1164 			res->type = IPR_RES_TYPE_IOAFP;
1165 		else
1166 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1167 
1168 		res->bus = cfgtew->u.cfgte->res_addr.bus;
1169 		res->target = cfgtew->u.cfgte->res_addr.target;
1170 		res->lun = cfgtew->u.cfgte->res_addr.lun;
1171 		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1172 	}
1173 }
1174 
1175 /**
1176  * ipr_is_same_device - Determine if two devices are the same.
1177  * @res:	resource entry struct
1178  * @cfgtew:	config table entry wrapper struct
1179  *
1180  * Return value:
1181  * 	1 if the devices are the same / 0 otherwise
1182  **/
1183 static int ipr_is_same_device(struct ipr_resource_entry *res,
1184 			      struct ipr_config_table_entry_wrapper *cfgtew)
1185 {
1186 	if (res->ioa_cfg->sis64) {
1187 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1188 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1189 			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1190 					sizeof(cfgtew->u.cfgte64->lun))) {
1191 			return 1;
1192 		}
1193 	} else {
1194 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1195 		    res->target == cfgtew->u.cfgte->res_addr.target &&
1196 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1197 			return 1;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
1203 /**
1204  * __ipr_format_res_path - Format the resource path for printing.
1205  * @res_path:	resource path
1206  * @buffer:	buffer
1207  * @len:	length of buffer provided
1208  *
1209  * Return value:
1210  * 	pointer to buffer
1211  **/
1212 static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1213 {
1214 	int i;
1215 	char *p = buffer;
1216 
1217 	*p = '\0';
1218 	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1219 	for (i = 1; i < IPR_RES_PATH_BYTES && res_path[i] != 0xff; i++)
1220 		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1221 
1222 	return buffer;
1223 }
1224 
1225 /**
1226  * ipr_format_res_path - Format the resource path for printing.
1227  * @ioa_cfg:	ioa config struct
1228  * @res_path:	resource path
1229  * @buffer:	buffer
1230  * @len:	length of buffer provided
1231  *
1232  * Return value:
1233  *	pointer to buffer
1234  **/
1235 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1236 				 u8 *res_path, char *buffer, int len)
1237 {
1238 	char *p = buffer;
1239 
1240 	*p = '\0';
1241 	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1242 	__ipr_format_res_path(res_path, p, len - (p - buffer));
1243 	return buffer;
1244 }
1245 
1246 /**
1247  * ipr_update_res_entry - Update the resource entry.
1248  * @res:	resource entry struct
1249  * @cfgtew:	config table entry wrapper struct
1250  *
1251  * Return value:
1252  *      none
1253  **/
1254 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1255 				 struct ipr_config_table_entry_wrapper *cfgtew)
1256 {
1257 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1258 	int new_path = 0;
1259 
1260 	if (res->ioa_cfg->sis64) {
1261 		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1262 		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1263 		res->type = cfgtew->u.cfgte64->res_type;
1264 
1265 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1266 			sizeof(struct ipr_std_inq_data));
1267 
1268 		res->qmodel = IPR_QUEUEING_MODEL64(res);
1269 		res->res_handle = cfgtew->u.cfgte64->res_handle;
1270 		res->dev_id = cfgtew->u.cfgte64->dev_id;
1271 
1272 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1273 			sizeof(res->dev_lun.scsi_lun));
1274 
1275 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1276 					sizeof(res->res_path))) {
1277 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1278 				sizeof(res->res_path));
1279 			new_path = 1;
1280 		}
1281 
1282 		if (res->sdev && new_path)
1283 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1284 				    ipr_format_res_path(res->ioa_cfg,
1285 					res->res_path, buffer, sizeof(buffer)));
1286 	} else {
1287 		res->flags = cfgtew->u.cfgte->flags;
1288 		if (res->flags & IPR_IS_IOA_RESOURCE)
1289 			res->type = IPR_RES_TYPE_IOAFP;
1290 		else
1291 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1292 
1293 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1294 			sizeof(struct ipr_std_inq_data));
1295 
1296 		res->qmodel = IPR_QUEUEING_MODEL(res);
1297 		res->res_handle = cfgtew->u.cfgte->res_handle;
1298 	}
1299 }
1300 
1301 /**
1302  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1303  * 			  for the resource.
1304  * @res:	resource entry struct
1305  *
1306  * Return value:
1307  *      none
1308  **/
1309 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1310 {
1311 	struct ipr_resource_entry *gscsi_res = NULL;
1312 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1313 
1314 	if (!ioa_cfg->sis64)
1315 		return;
1316 
1317 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1318 		clear_bit(res->target, ioa_cfg->array_ids);
1319 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1320 		clear_bit(res->target, ioa_cfg->vset_ids);
1321 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1322 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1323 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1324 				return;
1325 		clear_bit(res->target, ioa_cfg->target_ids);
1326 
1327 	} else if (res->bus == 0)
1328 		clear_bit(res->target, ioa_cfg->target_ids);
1329 }
1330 
1331 /**
1332  * ipr_handle_config_change - Handle a config change from the adapter
1333  * @ioa_cfg:	ioa config struct
1334  * @hostrcb:	hostrcb
1335  *
1336  * Return value:
1337  * 	none
1338  **/
1339 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1340 				     struct ipr_hostrcb *hostrcb)
1341 {
1342 	struct ipr_resource_entry *res = NULL;
1343 	struct ipr_config_table_entry_wrapper cfgtew;
1344 	__be32 cc_res_handle;
1345 
1346 	u32 is_ndn = 1;	/* assume a new device until the handle is found in the used list */
1347 
1348 	if (ioa_cfg->sis64) {
1349 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1350 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1351 	} else {
1352 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1353 		cc_res_handle = cfgtew.u.cfgte->res_handle;
1354 	}
1355 
1356 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1357 		if (res->res_handle == cc_res_handle) {
1358 			is_ndn = 0;
1359 			break;
1360 		}
1361 	}
1362 
1363 	if (is_ndn) {
1364 		if (list_empty(&ioa_cfg->free_res_q)) {
1365 			ipr_send_hcam(ioa_cfg,
1366 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1367 				      hostrcb);
1368 			return;
1369 		}
1370 
1371 		res = list_entry(ioa_cfg->free_res_q.next,
1372 				 struct ipr_resource_entry, queue);
1373 
1374 		list_del(&res->queue);
1375 		ipr_init_res_entry(res, &cfgtew);
1376 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1377 	}
1378 
1379 	ipr_update_res_entry(res, &cfgtew);
1380 
1381 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1382 		if (res->sdev) {
1383 			res->del_from_ml = 1;
1384 			res->res_handle = IPR_INVALID_RES_HANDLE;
1385 			schedule_work(&ioa_cfg->work_q);
1386 		} else {
1387 			ipr_clear_res_target(res);
1388 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1389 		}
1390 	} else if (!res->sdev || res->del_from_ml) {
1391 		res->add_to_ml = 1;
1392 		schedule_work(&ioa_cfg->work_q);
1393 	}
1394 
1395 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1396 }
1397 
1398 /**
1399  * ipr_process_ccn - Op done function for a CCN.
1400  * @ipr_cmd:	ipr command struct
1401  *
1402  * This function is the op done function for a configuration
1403  * change notification host controlled async from the adapter.
1404  *
1405  * Return value:
1406  * 	none
1407  **/
1408 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1409 {
1410 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1411 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1412 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1413 
1414 	list_del_init(&hostrcb->queue);
1415 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1416 
1417 	if (ioasc) {
1418 		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1419 		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1420 			dev_err(&ioa_cfg->pdev->dev,
1421 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1422 
1423 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1424 	} else {
1425 		ipr_handle_config_change(ioa_cfg, hostrcb);
1426 	}
1427 }
1428 
1429 /**
1430  * strip_whitespace - Strip trailing whitespace.
1431  * @i:		size of buffer
1432  * @buf:	string to modify
1433  *
1434  * This function will strip all trailing whitespace and
1435  * NUL terminate the string.
1436  *
1437  **/
1438 static void strip_whitespace(int i, char *buf)
1439 {
1440 	if (i < 1)
1441 		return;
1442 	i--;
1443 	while (i && buf[i] == ' ')
1444 		i--;
1445 	buf[i+1] = '\0';
1446 }
1447 
1448 /**
1449  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1450  * @prefix:		string to print at start of printk
1451  * @hostrcb:	hostrcb pointer
1452  * @vpd:		vendor/product id/sn struct
1453  *
1454  * Return value:
1455  * 	none
1456  **/
1457 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1458 				struct ipr_vpd *vpd)
1459 {
1460 	char vendor_id[IPR_VENDOR_ID_LEN + 1];
1461 	char product_id[IPR_PROD_ID_LEN + 1];
1462 	char sn[IPR_SERIAL_NUM_LEN + 1];
1463 
1464 	memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1465 	strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1466 
1467 	memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1468 	strip_whitespace(IPR_PROD_ID_LEN, product_id);
1469 
1470 	memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1471 	strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1472 
1473 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1474 		     vendor_id, product_id, sn);
1475 }
1476 
1477 /**
1478  * ipr_log_vpd - Log the passed VPD to the error log.
1479  * @vpd:		vendor/product id/sn struct
1480  *
1481  * Return value:
1482  * 	none
1483  **/
1484 static void ipr_log_vpd(struct ipr_vpd *vpd)
1485 {
1486 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1487 		    + IPR_SERIAL_NUM_LEN];
1488 
1489 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1490 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1491 	       IPR_PROD_ID_LEN);
1492 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1493 	ipr_err("Vendor/Product ID: %s\n", buffer);
1494 
1495 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1496 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1497 	ipr_err("    Serial Number: %s\n", buffer);
1498 }
1499 
1500 /**
1501  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1502  * @prefix:		string to print at start of printk
1503  * @hostrcb:	hostrcb pointer
1504  * @vpd:		vendor/product id/sn/wwn struct
1505  *
1506  * Return value:
1507  * 	none
1508  **/
1509 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1510 				    struct ipr_ext_vpd *vpd)
1511 {
1512 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1513 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1514 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1515 }
1516 
1517 /**
1518  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1519  * @vpd:		vendor/product id/sn/wwn struct
1520  *
1521  * Return value:
1522  * 	none
1523  **/
1524 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1525 {
1526 	ipr_log_vpd(&vpd->vpd);
1527 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1528 		be32_to_cpu(vpd->wwid[1]));
1529 }
1530 
1531 /**
1532  * ipr_log_enhanced_cache_error - Log a cache error.
1533  * @ioa_cfg:	ioa config struct
1534  * @hostrcb:	hostrcb struct
1535  *
1536  * Return value:
1537  * 	none
1538  **/
1539 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1540 					 struct ipr_hostrcb *hostrcb)
1541 {
1542 	struct ipr_hostrcb_type_12_error *error;
1543 
1544 	if (ioa_cfg->sis64)
1545 		error = &hostrcb->hcam.u.error64.u.type_12_error;
1546 	else
1547 		error = &hostrcb->hcam.u.error.u.type_12_error;
1548 
1549 	ipr_err("-----Current Configuration-----\n");
1550 	ipr_err("Cache Directory Card Information:\n");
1551 	ipr_log_ext_vpd(&error->ioa_vpd);
1552 	ipr_err("Adapter Card Information:\n");
1553 	ipr_log_ext_vpd(&error->cfc_vpd);
1554 
1555 	ipr_err("-----Expected Configuration-----\n");
1556 	ipr_err("Cache Directory Card Information:\n");
1557 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1558 	ipr_err("Adapter Card Information:\n");
1559 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1560 
1561 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1562 		     be32_to_cpu(error->ioa_data[0]),
1563 		     be32_to_cpu(error->ioa_data[1]),
1564 		     be32_to_cpu(error->ioa_data[2]));
1565 }
1566 
1567 /**
1568  * ipr_log_cache_error - Log a cache error.
1569  * @ioa_cfg:	ioa config struct
1570  * @hostrcb:	hostrcb struct
1571  *
1572  * Return value:
1573  * 	none
1574  **/
1575 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1576 				struct ipr_hostrcb *hostrcb)
1577 {
1578 	struct ipr_hostrcb_type_02_error *error =
1579 		&hostrcb->hcam.u.error.u.type_02_error;
1580 
1581 	ipr_err("-----Current Configuration-----\n");
1582 	ipr_err("Cache Directory Card Information:\n");
1583 	ipr_log_vpd(&error->ioa_vpd);
1584 	ipr_err("Adapter Card Information:\n");
1585 	ipr_log_vpd(&error->cfc_vpd);
1586 
1587 	ipr_err("-----Expected Configuration-----\n");
1588 	ipr_err("Cache Directory Card Information:\n");
1589 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1590 	ipr_err("Adapter Card Information:\n");
1591 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1592 
1593 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1594 		     be32_to_cpu(error->ioa_data[0]),
1595 		     be32_to_cpu(error->ioa_data[1]),
1596 		     be32_to_cpu(error->ioa_data[2]));
1597 }
1598 
1599 /**
1600  * ipr_log_enhanced_config_error - Log a configuration error.
1601  * @ioa_cfg:	ioa config struct
1602  * @hostrcb:	hostrcb struct
1603  *
1604  * Return value:
1605  * 	none
1606  **/
1607 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1608 					  struct ipr_hostrcb *hostrcb)
1609 {
1610 	int errors_logged, i;
1611 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1612 	struct ipr_hostrcb_type_13_error *error;
1613 
1614 	error = &hostrcb->hcam.u.error.u.type_13_error;
1615 	errors_logged = be32_to_cpu(error->errors_logged);
1616 
1617 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1618 		be32_to_cpu(error->errors_detected), errors_logged);
1619 
1620 	dev_entry = error->dev;
1621 
1622 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1623 		ipr_err_separator;
1624 
1625 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1626 		ipr_log_ext_vpd(&dev_entry->vpd);
1627 
1628 		ipr_err("-----New Device Information-----\n");
1629 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1630 
1631 		ipr_err("Cache Directory Card Information:\n");
1632 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1633 
1634 		ipr_err("Adapter Card Information:\n");
1635 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1636 	}
1637 }
1638 
1639 /**
1640  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1641  * @ioa_cfg:	ioa config struct
1642  * @hostrcb:	hostrcb struct
1643  *
1644  * Return value:
1645  * 	none
1646  **/
1647 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1648 				       struct ipr_hostrcb *hostrcb)
1649 {
1650 	int errors_logged, i;
1651 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1652 	struct ipr_hostrcb_type_23_error *error;
1653 	char buffer[IPR_MAX_RES_PATH_LENGTH];
1654 
1655 	error = &hostrcb->hcam.u.error64.u.type_23_error;
1656 	errors_logged = be32_to_cpu(error->errors_logged);
1657 
1658 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1659 		be32_to_cpu(error->errors_detected), errors_logged);
1660 
1661 	dev_entry = error->dev;
1662 
1663 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1664 		ipr_err_separator;
1665 
1666 		ipr_err("Device %d : %s", i + 1,
1667 			__ipr_format_res_path(dev_entry->res_path,
1668 					      buffer, sizeof(buffer)));
1669 		ipr_log_ext_vpd(&dev_entry->vpd);
1670 
1671 		ipr_err("-----New Device Information-----\n");
1672 		ipr_log_ext_vpd(&dev_entry->new_vpd);
1673 
1674 		ipr_err("Cache Directory Card Information:\n");
1675 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1676 
1677 		ipr_err("Adapter Card Information:\n");
1678 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1679 	}
1680 }
1681 
1682 /**
1683  * ipr_log_config_error - Log a configuration error.
1684  * @ioa_cfg:	ioa config struct
1685  * @hostrcb:	hostrcb struct
1686  *
1687  * Return value:
1688  * 	none
1689  **/
1690 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1691 				 struct ipr_hostrcb *hostrcb)
1692 {
1693 	int errors_logged, i;
1694 	struct ipr_hostrcb_device_data_entry *dev_entry;
1695 	struct ipr_hostrcb_type_03_error *error;
1696 
1697 	error = &hostrcb->hcam.u.error.u.type_03_error;
1698 	errors_logged = be32_to_cpu(error->errors_logged);
1699 
1700 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1701 		be32_to_cpu(error->errors_detected), errors_logged);
1702 
1703 	dev_entry = error->dev;
1704 
1705 	for (i = 0; i < errors_logged; i++, dev_entry++) {
1706 		ipr_err_separator;
1707 
1708 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1709 		ipr_log_vpd(&dev_entry->vpd);
1710 
1711 		ipr_err("-----New Device Information-----\n");
1712 		ipr_log_vpd(&dev_entry->new_vpd);
1713 
1714 		ipr_err("Cache Directory Card Information:\n");
1715 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1716 
1717 		ipr_err("Adapter Card Information:\n");
1718 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1719 
1720 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1721 			be32_to_cpu(dev_entry->ioa_data[0]),
1722 			be32_to_cpu(dev_entry->ioa_data[1]),
1723 			be32_to_cpu(dev_entry->ioa_data[2]),
1724 			be32_to_cpu(dev_entry->ioa_data[3]),
1725 			be32_to_cpu(dev_entry->ioa_data[4]));
1726 	}
1727 }
1728 
1729 /**
1730  * ipr_log_enhanced_array_error - Log an array configuration error.
1731  * @ioa_cfg:	ioa config struct
1732  * @hostrcb:	hostrcb struct
1733  *
1734  * Return value:
1735  * 	none
1736  **/
1737 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1738 					 struct ipr_hostrcb *hostrcb)
1739 {
1740 	int i, num_entries;
1741 	struct ipr_hostrcb_type_14_error *error;
1742 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
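	/* A serial number of all '0' characters is treated as an empty array
	 * member slot and skipped in the loop below. */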
1743 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1744 
1745 	error = &hostrcb->hcam.u.error.u.type_14_error;
1746 
1747 	ipr_err_separator;
1748 
1749 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1750 		error->protection_level,
1751 		ioa_cfg->host->host_no,
1752 		error->last_func_vset_res_addr.bus,
1753 		error->last_func_vset_res_addr.target,
1754 		error->last_func_vset_res_addr.lun);
1755 
1756 	ipr_err_separator;
1757 
1758 	array_entry = error->array_member;
1759 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1760 			    ARRAY_SIZE(error->array_member));
1761 
1762 	for (i = 0; i < num_entries; i++, array_entry++) {
1763 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1764 			continue;
1765 
1766 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1767 			ipr_err("Exposed Array Member %d:\n", i);
1768 		else
1769 			ipr_err("Array Member %d:\n", i);
1770 
1771 		ipr_log_ext_vpd(&array_entry->vpd);
1772 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1773 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1774 				 "Expected Location");
1775 
1776 		ipr_err_separator;
1777 	}
1778 }
1779 
1780 /**
1781  * ipr_log_array_error - Log an array configuration error.
1782  * @ioa_cfg:	ioa config struct
1783  * @hostrcb:	hostrcb struct
1784  *
1785  * Return value:
1786  * 	none
1787  **/
1788 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1789 				struct ipr_hostrcb *hostrcb)
1790 {
1791 	int i;
1792 	struct ipr_hostrcb_type_04_error *error;
1793 	struct ipr_hostrcb_array_data_entry *array_entry;
1794 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1795 
1796 	error = &hostrcb->hcam.u.error.u.type_04_error;
1797 
1798 	ipr_err_separator;
1799 
1800 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1801 		error->protection_level,
1802 		ioa_cfg->host->host_no,
1803 		error->last_func_vset_res_addr.bus,
1804 		error->last_func_vset_res_addr.target,
1805 		error->last_func_vset_res_addr.lun);
1806 
1807 	ipr_err_separator;
1808 
1809 	array_entry = error->array_member;
1810 
1811 	for (i = 0; i < 18; i++) {
1812 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1813 			continue;
1814 
1815 		if (be32_to_cpu(error->exposed_mode_adn) == i)
1816 			ipr_err("Exposed Array Member %d:\n", i);
1817 		else
1818 			ipr_err("Array Member %d:\n", i);
1819 
1820 		ipr_log_vpd(&array_entry->vpd);
1821 
1822 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1823 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1824 				 "Expected Location");
1825 
1826 		ipr_err_separator;
1827 
1828 		if (i == 9)
1829 			array_entry = error->array_member2;
1830 		else
1831 			array_entry++;
1832 	}
1833 }
1834 
1835 /**
1836  * ipr_log_hex_data - Log additional hex IOA error data.
1837  * @ioa_cfg:	ioa config struct
1838  * @data:		IOA error data
1839  * @len:		data length
1840  *
1841  * Return value:
1842  * 	none
1843  **/
1844 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1845 {
1846 	int i;
1847 
1848 	if (len == 0)
1849 		return;
1850 
1851 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1852 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1853 
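	/*
	 * len is in bytes and i counts 32-bit words; each row prints four
	 * words prefixed with the byte offset (i * 4), e.g.
	 * "00000010: DEADBEEF 00000000 00000000 00000000" (illustrative only).
	 */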
1854 	for (i = 0; i < len / 4; i += 4) {
1855 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1856 			be32_to_cpu(data[i]),
1857 			be32_to_cpu(data[i+1]),
1858 			be32_to_cpu(data[i+2]),
1859 			be32_to_cpu(data[i+3]));
1860 	}
1861 }
1862 
1863 /**
1864  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1865  * @ioa_cfg:	ioa config struct
1866  * @hostrcb:	hostrcb struct
1867  *
1868  * Return value:
1869  * 	none
1870  **/
1871 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1872 					    struct ipr_hostrcb *hostrcb)
1873 {
1874 	struct ipr_hostrcb_type_17_error *error;
1875 
1876 	if (ioa_cfg->sis64)
1877 		error = &hostrcb->hcam.u.error64.u.type_17_error;
1878 	else
1879 		error = &hostrcb->hcam.u.error.u.type_17_error;
1880 
1881 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1882 	strim(error->failure_reason);
1883 
1884 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1885 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1886 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1887 	ipr_log_hex_data(ioa_cfg, error->data,
1888 			 be32_to_cpu(hostrcb->hcam.length) -
1889 			 (offsetof(struct ipr_hostrcb_error, u) +
1890 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1891 }
1892 
1893 /**
1894  * ipr_log_dual_ioa_error - Log a dual adapter error.
1895  * @ioa_cfg:	ioa config struct
1896  * @hostrcb:	hostrcb struct
1897  *
1898  * Return value:
1899  * 	none
1900  **/
1901 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1902 				   struct ipr_hostrcb *hostrcb)
1903 {
1904 	struct ipr_hostrcb_type_07_error *error;
1905 
1906 	error = &hostrcb->hcam.u.error.u.type_07_error;
1907 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1908 	strim(error->failure_reason);
1909 
1910 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1911 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1912 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1913 	ipr_log_hex_data(ioa_cfg, error->data,
1914 			 be32_to_cpu(hostrcb->hcam.length) -
1915 			 (offsetof(struct ipr_hostrcb_error, u) +
1916 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1917 }
1918 
1919 static const struct {
1920 	u8 active;
1921 	char *desc;
1922 } path_active_desc[] = {
1923 	{ IPR_PATH_NO_INFO, "Path" },
1924 	{ IPR_PATH_ACTIVE, "Active path" },
1925 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1926 };
1927 
1928 static const struct {
1929 	u8 state;
1930 	char *desc;
1931 } path_state_desc[] = {
1932 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1933 	{ IPR_PATH_HEALTHY, "is healthy" },
1934 	{ IPR_PATH_DEGRADED, "is degraded" },
1935 	{ IPR_PATH_FAILED, "is failed" }
1936 };
1937 
1938 /**
1939  * ipr_log_fabric_path - Log a fabric path error
1940  * @hostrcb:	hostrcb struct
1941  * @fabric:		fabric descriptor
1942  *
1943  * Return value:
1944  * 	none
1945  **/
1946 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1947 				struct ipr_hostrcb_fabric_desc *fabric)
1948 {
1949 	int i, j;
1950 	u8 path_state = fabric->path_state;
1951 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1952 	u8 state = path_state & IPR_PATH_STATE_MASK;
1953 
1954 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1955 		if (path_active_desc[i].active != active)
1956 			continue;
1957 
1958 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1959 			if (path_state_desc[j].state != state)
1960 				continue;
1961 
1962 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1963 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1964 					     path_active_desc[i].desc, path_state_desc[j].desc,
1965 					     fabric->ioa_port);
1966 			} else if (fabric->cascaded_expander == 0xff) {
1967 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1968 					     path_active_desc[i].desc, path_state_desc[j].desc,
1969 					     fabric->ioa_port, fabric->phy);
1970 			} else if (fabric->phy == 0xff) {
1971 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1972 					     path_active_desc[i].desc, path_state_desc[j].desc,
1973 					     fabric->ioa_port, fabric->cascaded_expander);
1974 			} else {
1975 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1976 					     path_active_desc[i].desc, path_state_desc[j].desc,
1977 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1978 			}
1979 			return;
1980 		}
1981 	}
1982 
1983 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1984 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1985 }
1986 
1987 /**
1988  * ipr_log64_fabric_path - Log a fabric path error
1989  * @hostrcb:	hostrcb struct
1990  * @fabric:		fabric descriptor
1991  *
1992  * Return value:
1993  * 	none
1994  **/
1995 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1996 				  struct ipr_hostrcb64_fabric_desc *fabric)
1997 {
1998 	int i, j;
1999 	u8 path_state = fabric->path_state;
2000 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2001 	u8 state = path_state & IPR_PATH_STATE_MASK;
2002 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2003 
2004 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2005 		if (path_active_desc[i].active != active)
2006 			continue;
2007 
2008 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2009 			if (path_state_desc[j].state != state)
2010 				continue;
2011 
2012 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2013 				     path_active_desc[i].desc, path_state_desc[j].desc,
2014 				     ipr_format_res_path(hostrcb->ioa_cfg,
2015 						fabric->res_path,
2016 						buffer, sizeof(buffer)));
2017 			return;
2018 		}
2019 	}
2020 
2021 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2022 		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2023 				    buffer, sizeof(buffer)));
2024 }
2025 
2026 static const struct {
2027 	u8 type;
2028 	char *desc;
2029 } path_type_desc[] = {
2030 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
2031 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
2032 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2033 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2034 };
2035 
2036 static const struct {
2037 	u8 status;
2038 	char *desc;
2039 } path_status_desc[] = {
2040 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
2041 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
2042 	{ IPR_PATH_CFG_FAILED, "Failed" },
2043 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
2044 	{ IPR_PATH_NOT_DETECTED, "Missing" },
2045 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2046 };
2047 
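/*
 * Decode table for the low nibble of a config element's link_rate
 * (IPR_PHY_LINK_RATE_MASK). The values appear to follow the SAS negotiated
 * link rate encoding, where 0x8 is 1.5Gbps and 0x9 is 3.0Gbps.
 */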
2048 static const char *link_rate[] = {
2049 	"unknown",
2050 	"disabled",
2051 	"phy reset problem",
2052 	"spinup hold",
2053 	"port selector",
2054 	"unknown",
2055 	"unknown",
2056 	"unknown",
2057 	"1.5Gbps",
2058 	"3.0Gbps",
2059 	"unknown",
2060 	"unknown",
2061 	"unknown",
2062 	"unknown",
2063 	"unknown",
2064 	"unknown"
2065 };
2066 
2067 /**
2068  * ipr_log_path_elem - Log a fabric path element.
2069  * @hostrcb:	hostrcb struct
2070  * @cfg:		fabric path element struct
2071  *
2072  * Return value:
2073  * 	none
2074  **/
2075 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2076 			      struct ipr_hostrcb_config_element *cfg)
2077 {
2078 	int i, j;
2079 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2080 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2081 
2082 	if (type == IPR_PATH_CFG_NOT_EXIST)
2083 		return;
2084 
2085 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2086 		if (path_type_desc[i].type != type)
2087 			continue;
2088 
2089 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2090 			if (path_status_desc[j].status != status)
2091 				continue;
2092 
2093 			if (type == IPR_PATH_CFG_IOA_PORT) {
2094 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2095 					     path_status_desc[j].desc, path_type_desc[i].desc,
2096 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2097 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2098 			} else {
2099 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2100 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2101 						     path_status_desc[j].desc, path_type_desc[i].desc,
2102 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2103 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2104 				} else if (cfg->cascaded_expander == 0xff) {
2105 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2106 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2107 						     path_type_desc[i].desc, cfg->phy,
2108 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2109 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2110 				} else if (cfg->phy == 0xff) {
2111 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2112 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2113 						     path_type_desc[i].desc, cfg->cascaded_expander,
2114 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2115 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2116 				} else {
2117 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2118 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2119 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2120 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2121 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2122 				}
2123 			}
2124 			return;
2125 		}
2126 	}
2127 
2128 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2129 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2130 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2131 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2132 }
2133 
2134 /**
2135  * ipr_log64_path_elem - Log a fabric path element.
2136  * @hostrcb:	hostrcb struct
2137  * @cfg:		fabric path element struct
2138  *
2139  * Return value:
2140  * 	none
2141  **/
2142 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2143 				struct ipr_hostrcb64_config_element *cfg)
2144 {
2145 	int i, j;
2146 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2147 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2148 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2149 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2150 
2151 	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2152 		return;
2153 
2154 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2155 		if (path_type_desc[i].type != type)
2156 			continue;
2157 
2158 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2159 			if (path_status_desc[j].status != status)
2160 				continue;
2161 
2162 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2163 				     path_status_desc[j].desc, path_type_desc[i].desc,
2164 				     ipr_format_res_path(hostrcb->ioa_cfg,
2165 					cfg->res_path, buffer, sizeof(buffer)),
2166 					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2167 					be32_to_cpu(cfg->wwid[0]),
2168 					be32_to_cpu(cfg->wwid[1]));
2169 			return;
2170 		}
2171 	}
2172 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2173 		     "WWN=%08X%08X\n", cfg->type_status,
2174 		     ipr_format_res_path(hostrcb->ioa_cfg,
2175 			cfg->res_path, buffer, sizeof(buffer)),
2176 			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2177 			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2178 }
2179 
2180 /**
2181  * ipr_log_fabric_error - Log a fabric error.
2182  * @ioa_cfg:	ioa config struct
2183  * @hostrcb:	hostrcb struct
2184  *
2185  * Return value:
2186  * 	none
2187  **/
2188 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2189 				 struct ipr_hostrcb *hostrcb)
2190 {
2191 	struct ipr_hostrcb_type_20_error *error;
2192 	struct ipr_hostrcb_fabric_desc *fabric;
2193 	struct ipr_hostrcb_config_element *cfg;
2194 	int i, add_len;
2195 
2196 	error = &hostrcb->hcam.u.error.u.type_20_error;
2197 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2198 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2199 
2200 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2201 		(offsetof(struct ipr_hostrcb_error, u) +
2202 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2203 
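	/*
	 * Each fabric descriptor is immediately followed by its path config
	 * elements; fabric->length advances to the next descriptor, and any
	 * length left over after the last descriptor is dumped as raw hex.
	 */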
2204 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2205 		ipr_log_fabric_path(hostrcb, fabric);
2206 		for_each_fabric_cfg(fabric, cfg)
2207 			ipr_log_path_elem(hostrcb, cfg);
2208 
2209 		add_len -= be16_to_cpu(fabric->length);
2210 		fabric = (struct ipr_hostrcb_fabric_desc *)
2211 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2212 	}
2213 
2214 	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2215 }
2216 
2217 /**
2218  * ipr_log_sis64_array_error - Log a sis64 array error.
2219  * @ioa_cfg:	ioa config struct
2220  * @hostrcb:	hostrcb struct
2221  *
2222  * Return value:
2223  * 	none
2224  **/
2225 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2226 				      struct ipr_hostrcb *hostrcb)
2227 {
2228 	int i, num_entries;
2229 	struct ipr_hostrcb_type_24_error *error;
2230 	struct ipr_hostrcb64_array_data_entry *array_entry;
2231 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2232 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2233 
2234 	error = &hostrcb->hcam.u.error64.u.type_24_error;
2235 
2236 	ipr_err_separator;
2237 
2238 	ipr_err("RAID %s Array Configuration: %s\n",
2239 		error->protection_level,
2240 		ipr_format_res_path(ioa_cfg, error->last_res_path,
2241 			buffer, sizeof(buffer)));
2242 
2243 	ipr_err_separator;
2244 
2245 	array_entry = error->array_member;
2246 	num_entries = min_t(u32, error->num_entries,
2247 			    ARRAY_SIZE(error->array_member));
2248 
2249 	for (i = 0; i < num_entries; i++, array_entry++) {
2250 
2251 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2252 			continue;
2253 
2254 		if (error->exposed_mode_adn == i)
2255 			ipr_err("Exposed Array Member %d:\n", i);
2256 		else
2257 			ipr_err("Array Member %d:\n", i);
2258 
2260 		ipr_log_ext_vpd(&array_entry->vpd);
2261 		ipr_err("Current Location: %s\n",
2262 			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2263 				buffer, sizeof(buffer)));
2264 		ipr_err("Expected Location: %s\n",
2265 			 ipr_format_res_path(ioa_cfg,
2266 				array_entry->expected_res_path,
2267 				buffer, sizeof(buffer)));
2268 
2269 		ipr_err_separator;
2270 	}
2271 }
2272 
2273 /**
2274  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2275  * @ioa_cfg:	ioa config struct
2276  * @hostrcb:	hostrcb struct
2277  *
2278  * Return value:
2279  * 	none
2280  **/
2281 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2282 				       struct ipr_hostrcb *hostrcb)
2283 {
2284 	struct ipr_hostrcb_type_30_error *error;
2285 	struct ipr_hostrcb64_fabric_desc *fabric;
2286 	struct ipr_hostrcb64_config_element *cfg;
2287 	int i, add_len;
2288 
2289 	error = &hostrcb->hcam.u.error64.u.type_30_error;
2290 
2291 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2292 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2293 
2294 	add_len = be32_to_cpu(hostrcb->hcam.length) -
2295 		(offsetof(struct ipr_hostrcb64_error, u) +
2296 		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2297 
2298 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2299 		ipr_log64_fabric_path(hostrcb, fabric);
2300 		for_each_fabric_cfg(fabric, cfg)
2301 			ipr_log64_path_elem(hostrcb, cfg);
2302 
2303 		add_len -= be16_to_cpu(fabric->length);
2304 		fabric = (struct ipr_hostrcb64_fabric_desc *)
2305 			((unsigned long)fabric + be16_to_cpu(fabric->length));
2306 	}
2307 
2308 	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2309 }
2310 
2311 /**
2312  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2313  * @ioa_cfg:    ioa config struct
2314  * @hostrcb:    hostrcb struct
2315  *
2316  * Return value:
2317  *      none
2318  **/
2319 static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2320 				       struct ipr_hostrcb *hostrcb)
2321 {
2322 	struct ipr_hostrcb_type_41_error *error;
2323 
2324 	error = &hostrcb->hcam.u.error64.u.type_41_error;
2325 
2326 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2327 	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2328 	ipr_log_hex_data(ioa_cfg, error->data,
2329 			 be32_to_cpu(hostrcb->hcam.length) -
2330 			 (offsetof(struct ipr_hostrcb_error, u) +
2331 			  offsetof(struct ipr_hostrcb_type_41_error, data)));
2332 }

2333 /**
2334  * ipr_log_generic_error - Log an adapter error.
2335  * @ioa_cfg:	ioa config struct
2336  * @hostrcb:	hostrcb struct
2337  *
2338  * Return value:
2339  * 	none
2340  **/
2341 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2342 				  struct ipr_hostrcb *hostrcb)
2343 {
2344 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2345 			 be32_to_cpu(hostrcb->hcam.length));
2346 }
2347 
2348 /**
2349  * ipr_log_sis64_device_error - Log a sis64 device error.
2350  * @ioa_cfg:	ioa config struct
2351  * @hostrcb:	hostrcb struct
2352  *
2353  * Return value:
2354  * 	none
2355  **/
2356 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2357 					 struct ipr_hostrcb *hostrcb)
2358 {
2359 	struct ipr_hostrcb_type_21_error *error;
2360 	char buffer[IPR_MAX_RES_PATH_LENGTH];
2361 
2362 	error = &hostrcb->hcam.u.error64.u.type_21_error;
2363 
2364 	ipr_err("-----Failing Device Information-----\n");
2365 	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2366 		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2367 		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2368 	ipr_err("Device Resource Path: %s\n",
2369 		__ipr_format_res_path(error->res_path,
2370 				      buffer, sizeof(buffer)));
2371 	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2372 	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2373 	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2374 	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2375 	ipr_err("SCSI Sense Data:\n");
2376 	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2377 	ipr_err("SCSI Command Descriptor Block:\n");
2378 	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2379 
2380 	ipr_err("Additional IOA Data:\n");
2381 	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2382 }
2383 
2384 /**
2385  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2386  * @ioasc:	IOASC
2387  *
2388  * This function will return the index into the ipr_error_table
2389  * for the specified IOASC. If the IOASC is not in the table,
2390  * 0 will be returned, which points to the entry used for unknown errors.
2391  *
2392  * Return value:
2393  * 	index into the ipr_error_table
2394  **/
2395 static u32 ipr_get_error(u32 ioasc)
2396 {
2397 	int i;
2398 
2399 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2400 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2401 			return i;
2402 
2403 	return 0;
2404 }
2405 
2406 /**
2407  * ipr_handle_log_data - Log an adapter error.
2408  * @ioa_cfg:	ioa config struct
2409  * @hostrcb:	hostrcb struct
2410  *
2411  * This function logs an adapter error to the system.
2412  *
2413  * Return value:
2414  * 	none
2415  **/
2416 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2417 				struct ipr_hostrcb *hostrcb)
2418 {
2419 	u32 ioasc;
2420 	int error_index;
2421 	struct ipr_hostrcb_type_21_error *error;
2422 
2423 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2424 		return;
2425 
2426 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2427 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2428 
2429 	if (ioa_cfg->sis64)
2430 		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2431 	else
2432 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2433 
2434 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2435 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2436 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2437 		scsi_report_bus_reset(ioa_cfg->host,
2438 				      hostrcb->hcam.u.error.fd_res_addr.bus);
2439 	}
2440 
2441 	error_index = ipr_get_error(ioasc);
2442 
2443 	if (!ipr_error_table[error_index].log_hcam)
2444 		return;
2445 
2446 	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2447 	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2448 		error = &hostrcb->hcam.u.error64.u.type_21_error;
2449 
2450 		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2451 			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2452 				return;
2453 	}
2454 
2455 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2456 
2457 	/* Set indication we have logged an error */
2458 	ioa_cfg->errors_logged++;
2459 
2460 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2461 		return;
2462 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2463 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2464 
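	/* Dispatch to the overlay-specific logger; unrecognized overlay IDs
	 * fall through to the generic hex dump. */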
2465 	switch (hostrcb->hcam.overlay_id) {
2466 	case IPR_HOST_RCB_OVERLAY_ID_2:
2467 		ipr_log_cache_error(ioa_cfg, hostrcb);
2468 		break;
2469 	case IPR_HOST_RCB_OVERLAY_ID_3:
2470 		ipr_log_config_error(ioa_cfg, hostrcb);
2471 		break;
2472 	case IPR_HOST_RCB_OVERLAY_ID_4:
2473 	case IPR_HOST_RCB_OVERLAY_ID_6:
2474 		ipr_log_array_error(ioa_cfg, hostrcb);
2475 		break;
2476 	case IPR_HOST_RCB_OVERLAY_ID_7:
2477 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2478 		break;
2479 	case IPR_HOST_RCB_OVERLAY_ID_12:
2480 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2481 		break;
2482 	case IPR_HOST_RCB_OVERLAY_ID_13:
2483 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2484 		break;
2485 	case IPR_HOST_RCB_OVERLAY_ID_14:
2486 	case IPR_HOST_RCB_OVERLAY_ID_16:
2487 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2488 		break;
2489 	case IPR_HOST_RCB_OVERLAY_ID_17:
2490 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2491 		break;
2492 	case IPR_HOST_RCB_OVERLAY_ID_20:
2493 		ipr_log_fabric_error(ioa_cfg, hostrcb);
2494 		break;
2495 	case IPR_HOST_RCB_OVERLAY_ID_21:
2496 		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2497 		break;
2498 	case IPR_HOST_RCB_OVERLAY_ID_23:
2499 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2500 		break;
2501 	case IPR_HOST_RCB_OVERLAY_ID_24:
2502 	case IPR_HOST_RCB_OVERLAY_ID_26:
2503 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2504 		break;
2505 	case IPR_HOST_RCB_OVERLAY_ID_30:
2506 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2507 		break;
2508 	case IPR_HOST_RCB_OVERLAY_ID_41:
2509 		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2510 		break;
2511 	case IPR_HOST_RCB_OVERLAY_ID_1:
2512 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2513 	default:
2514 		ipr_log_generic_error(ioa_cfg, hostrcb);
2515 		break;
2516 	}
2517 }
2518 
2519 static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2520 {
2521 	struct ipr_hostrcb *hostrcb;
2522 
2523 	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2524 					struct ipr_hostrcb, queue);
2525 
2526 	if (unlikely(!hostrcb)) {
2527 		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2528 		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2529 						struct ipr_hostrcb, queue);
2530 	}
2531 
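	/* Unlink the buffer from whichever queue it was found on; when the
	 * free list is exhausted the oldest entry on the report queue is
	 * reused. */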
2532 	list_del_init(&hostrcb->queue);
2533 	return hostrcb;
2534 }
2535 
2536 /**
2537  * ipr_process_error - Op done function for an adapter error log.
2538  * @ipr_cmd:	ipr command struct
2539  *
2540  * This function is the op done function for an error log host
2541  * This function is the op done function for an error log host
2542  * controlled async message (HCAM) from the adapter. It will log the error and
2543  *
2544  * Return value:
2545  * 	none
2546  **/
2547 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2548 {
2549 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2550 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2551 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2552 	u32 fd_ioasc;
2553 
2554 	if (ioa_cfg->sis64)
2555 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2556 	else
2557 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2558 
2559 	list_del_init(&hostrcb->queue);
2560 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2561 
2562 	if (!ioasc) {
2563 		ipr_handle_log_data(ioa_cfg, hostrcb);
2564 		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2565 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2566 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2567 		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2568 		dev_err(&ioa_cfg->pdev->dev,
2569 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2570 	}
2571 
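	/* Queue the logged hostrcb for the worker thread, then grab a fresh
	 * buffer and re-issue the log-data HCAM to the adapter. */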
2572 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2573 	schedule_work(&ioa_cfg->work_q);
2574 	hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2575 
2576 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2577 }
2578 
2579 /**
2580  * ipr_timeout -  An internally generated op has timed out.
2581  * @t: Timer context used to fetch ipr command struct
2582  *
2583  * This function blocks host requests and initiates an
2584  * adapter reset.
2585  *
2586  * Return value:
2587  * 	none
2588  **/
2589 static void ipr_timeout(struct timer_list *t)
2590 {
2591 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2592 	unsigned long lock_flags = 0;
2593 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2594 
2595 	ENTER;
2596 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2597 
2598 	ioa_cfg->errors_logged++;
2599 	dev_err(&ioa_cfg->pdev->dev,
2600 		"Adapter being reset due to command timeout.\n");
2601 
2602 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2603 		ioa_cfg->sdt_state = GET_DUMP;
2604 
2605 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2606 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2607 
2608 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2609 	LEAVE;
2610 }
2611 
2612 /**
2613  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2614  * @t: Timer context used to fetch ipr command struct
2615  *
2616  * This function blocks host requests and initiates an
2617  * adapter reset.
2618  *
2619  * Return value:
2620  * 	none
2621  **/
2622 static void ipr_oper_timeout(struct timer_list *t)
2623 {
2624 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2625 	unsigned long lock_flags = 0;
2626 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2627 
2628 	ENTER;
2629 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2630 
2631 	ioa_cfg->errors_logged++;
2632 	dev_err(&ioa_cfg->pdev->dev,
2633 		"Adapter timed out transitioning to operational.\n");
2634 
2635 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2636 		ioa_cfg->sdt_state = GET_DUMP;
2637 
2638 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2639 		if (ipr_fastfail)
2640 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2641 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2642 	}
2643 
2644 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2645 	LEAVE;
2646 }
2647 
2648 /**
2649  * ipr_find_ses_entry - Find matching SES in SES table
2650  * @res:	resource entry struct of SES
2651  *
2652  * Return value:
2653  * 	pointer to SES table entry / NULL on failure
2654  **/
2655 static const struct ipr_ses_table_entry *
2656 ipr_find_ses_entry(struct ipr_resource_entry *res)
2657 {
2658 	int i, j, matches;
2659 	struct ipr_std_inq_vpids *vpids;
2660 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2661 
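	/*
	 * Per the comparison below, only the product ID bytes flagged with
	 * 'X' in compare_product_id_byte must match the device; all other
	 * byte positions count as matches unconditionally.
	 */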
2662 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2663 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2664 			if (ste->compare_product_id_byte[j] == 'X') {
2665 				vpids = &res->std_inq_data.vpids;
2666 				if (vpids->product_id[j] == ste->product_id[j])
2667 					matches++;
2668 				else
2669 					break;
2670 			} else
2671 				matches++;
2672 		}
2673 
2674 		if (matches == IPR_PROD_ID_LEN)
2675 			return ste;
2676 	}
2677 
2678 	return NULL;
2679 }
2680 
2681 /**
2682  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2683  * @ioa_cfg:	ioa config struct
2684  * @bus:		SCSI bus
2685  * @bus_width:	bus width
2686  *
2687  * Return value:
2688  *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2689  *	For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2690  *	twice the bus speed in MHz (e.g. for a wide enabled bus,
2691  *	a max of 160 MHz corresponds to a max of 320 MB/sec).
2692  **/
2693 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2694 {
2695 	struct ipr_resource_entry *res;
2696 	const struct ipr_ses_table_entry *ste;
2697 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2698 
2699 	/* Loop through each config table entry in the config table buffer */
2700 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2701 		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2702 			continue;
2703 
2704 		if (bus != res->bus)
2705 			continue;
2706 
2707 		if (!(ste = ipr_find_ses_entry(res)))
2708 			continue;
2709 
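		/*
		 * Worked example (assuming max_bus_speed_limit is in MB/sec):
		 * a limit of 320 on a 16-bit wide bus gives
		 * (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz in the 100KHz
		 * units described in the function header.
		 */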
2710 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2711 	}
2712 
2713 	return max_xfer_rate;
2714 }
2715 
2716 /**
2717  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2718  * @ioa_cfg:		ioa config struct
2719  * @max_delay:		max delay in micro-seconds to wait
2720  *
2721  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2722  *
2723  * Return value:
2724  * 	0 on success / other on failure
2725  **/
2726 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2727 {
2728 	volatile u32 pcii_reg;
2729 	int delay = 1;
2730 
2731 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2732 	while (delay < max_delay) {
2733 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2734 
2735 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2736 			return 0;
2737 
2738 		/* udelay cannot be used if delay is more than a few milliseconds */
2739 		if ((delay / 1000) > MAX_UDELAY_MS)
2740 			mdelay(delay / 1000);
2741 		else
2742 			udelay(delay);
2743 
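		/* Exponential backoff: the delay doubles each pass
		 * (1, 2, 4, ... microseconds) until max_delay is reached. */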
2744 		delay += delay;
2745 	}
2746 	return -EIO;
2747 }
2748 
2749 /**
2750  * ipr_get_sis64_dump_data_section - Dump IOA memory
2751  * @ioa_cfg:			ioa config struct
2752  * @start_addr:			adapter address to dump
2753  * @dest:			destination kernel buffer
2754  * @length_in_words:		length to dump in 4 byte words
2755  *
2756  * Return value:
2757  * 	0 on success
2758  **/
2759 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2760 					   u32 start_addr,
2761 					   __be32 *dest, u32 length_in_words)
2762 {
2763 	int i;
2764 
2765 	for (i = 0; i < length_in_words; i++) {
2766 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2767 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2768 		dest++;
2769 	}
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  * ipr_get_ldump_data_section - Dump IOA memory
2776  * @ioa_cfg:			ioa config struct
2777  * @start_addr:			adapter address to dump
2778  * @dest:				destination kernel buffer
2779  * @length_in_words:	length to dump in 4 byte words
2780  *
2781  * Return value:
2782  * 	0 on success / -EIO on failure
2783  **/
2784 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2785 				      u32 start_addr,
2786 				      __be32 *dest, u32 length_in_words)
2787 {
2788 	volatile u32 temp_pcii_reg;
2789 	int i, delay = 0;
2790 
2791 	if (ioa_cfg->sis64)
2792 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2793 						       dest, length_in_words);
2794 
2795 	/* Write IOA interrupt reg starting LDUMP state  */
2796 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2797 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2798 
2799 	/* Wait for IO debug acknowledge */
2800 	if (ipr_wait_iodbg_ack(ioa_cfg,
2801 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2802 		dev_err(&ioa_cfg->pdev->dev,
2803 			"IOA dump long data transfer timeout\n");
2804 		return -EIO;
2805 	}
2806 
2807 	/* Signal LDUMP interlocked - clear IO debug ack */
2808 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2809 	       ioa_cfg->regs.clr_interrupt_reg);
2810 
2811 	/* Write Mailbox with starting address */
2812 	writel(start_addr, ioa_cfg->ioa_mailbox);
2813 
2814 	/* Signal address valid - clear IOA Reset alert */
2815 	writel(IPR_UPROCI_RESET_ALERT,
2816 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2817 
2818 	for (i = 0; i < length_in_words; i++) {
2819 		/* Wait for IO debug acknowledge */
2820 		if (ipr_wait_iodbg_ack(ioa_cfg,
2821 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2822 			dev_err(&ioa_cfg->pdev->dev,
2823 				"IOA dump short data transfer timeout\n");
2824 			return -EIO;
2825 		}
2826 
2827 		/* Read data from mailbox and increment destination pointer */
2828 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2829 		dest++;
2830 
2831 		/* For all but the last word of data, signal data received */
2832 		if (i < (length_in_words - 1)) {
2833 			/* Signal dump data received - Clear IO debug Ack */
2834 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2835 			       ioa_cfg->regs.clr_interrupt_reg);
2836 		}
2837 	}
2838 
2839 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2840 	writel(IPR_UPROCI_RESET_ALERT,
2841 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2842 
2843 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2844 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2845 
2846 	/* Signal dump data received - Clear IO debug Ack */
2847 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2848 	       ioa_cfg->regs.clr_interrupt_reg);
2849 
2850 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2851 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2852 		temp_pcii_reg =
2853 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2854 
2855 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2856 			return 0;
2857 
2858 		udelay(10);
2859 		delay += 10;
2860 	}
2861 
2862 	return 0;
2863 }
2864 
2865 #ifdef CONFIG_SCSI_IPR_DUMP
2866 /**
2867  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2868  * @ioa_cfg:		ioa config struct
2869  * @pci_address:	adapter address
2870  * @length:			length of data to copy
2871  *
2872  * Copy data from PCI adapter to kernel buffer.
2873  * Note: length MUST be a 4 byte multiple
2874  * Return value:
2875  * 	0 on success / other on failure
2876  **/
2877 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2878 			unsigned long pci_address, u32 length)
2879 {
2880 	int bytes_copied = 0;
2881 	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2882 	__be32 *page;
2883 	unsigned long lock_flags = 0;
2884 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2885 
2886 	if (ioa_cfg->sis64)
2887 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2888 	else
2889 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2890 
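	/*
	 * Copy the dump in page-sized chunks: allocate a fresh page whenever
	 * the current one fills, fetch data via ipr_get_ldump_data_section()
	 * under the host lock, and call schedule() between chunks so the
	 * copy does not monopolize the CPU.
	 */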
2891 	while (bytes_copied < length &&
2892 	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2893 		if (ioa_dump->page_offset >= PAGE_SIZE ||
2894 		    ioa_dump->page_offset == 0) {
2895 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2896 
2897 			if (!page) {
2898 				ipr_trace;
2899 				return bytes_copied;
2900 			}
2901 
2902 			ioa_dump->page_offset = 0;
2903 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2904 			ioa_dump->next_page_index++;
2905 		} else
2906 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2907 
2908 		rem_len = length - bytes_copied;
2909 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2910 		cur_len = min(rem_len, rem_page_len);
2911 
2912 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2913 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2914 			rc = -EIO;
2915 		} else {
2916 			rc = ipr_get_ldump_data_section(ioa_cfg,
2917 							pci_address + bytes_copied,
2918 							&page[ioa_dump->page_offset / 4],
2919 							(cur_len / sizeof(u32)));
2920 		}
2921 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2922 
2923 		if (!rc) {
2924 			ioa_dump->page_offset += cur_len;
2925 			bytes_copied += cur_len;
2926 		} else {
2927 			ipr_trace;
2928 			break;
2929 		}
2930 		schedule();
2931 	}
2932 
2933 	return bytes_copied;
2934 }
2935 
2936 /**
2937  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2938  * @hdr:	dump entry header struct
2939  *
2940  * Return value:
2941  * 	nothing
2942  **/
2943 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2944 {
2945 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2946 	hdr->num_elems = 1;
2947 	hdr->offset = sizeof(*hdr);
2948 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2949 }
2950 
2951 /**
2952  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2953  * @ioa_cfg:	ioa config struct
2954  * @driver_dump:	driver dump struct
2955  *
2956  * Return value:
2957  * 	nothing
2958  **/
2959 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2960 				   struct ipr_driver_dump *driver_dump)
2961 {
2962 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2963 
2964 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2965 	driver_dump->ioa_type_entry.hdr.len =
2966 		sizeof(struct ipr_dump_ioa_type_entry) -
2967 		sizeof(struct ipr_dump_entry_header);
2968 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2969 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2970 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2971 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2972 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2973 		ucode_vpd->minor_release[1];
2974 	driver_dump->hdr.num_entries++;
2975 }
2976 
2977 /**
2978  * ipr_dump_version_data - Fill in the driver version in the dump.
2979  * @ioa_cfg:	ioa config struct
2980  * @driver_dump:	driver dump struct
2981  *
2982  * Return value:
2983  * 	nothing
2984  **/
2985 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2986 				  struct ipr_driver_dump *driver_dump)
2987 {
2988 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2989 	driver_dump->version_entry.hdr.len =
2990 		sizeof(struct ipr_dump_version_entry) -
2991 		sizeof(struct ipr_dump_entry_header);
2992 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2993 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2994 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2995 	driver_dump->hdr.num_entries++;
2996 }
2997 
2998 /**
2999  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3000  * @ioa_cfg:	ioa config struct
3001  * @driver_dump:	driver dump struct
3002  *
3003  * Return value:
3004  * 	nothing
3005  **/
3006 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3007 				   struct ipr_driver_dump *driver_dump)
3008 {
3009 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3010 	driver_dump->trace_entry.hdr.len =
3011 		sizeof(struct ipr_dump_trace_entry) -
3012 		sizeof(struct ipr_dump_entry_header);
3013 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3014 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3015 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3016 	driver_dump->hdr.num_entries++;
3017 }
3018 
3019 /**
3020  * ipr_dump_location_data - Fill in the IOA location in the dump.
3021  * @ioa_cfg:	ioa config struct
3022  * @driver_dump:	driver dump struct
3023  *
3024  * Return value:
3025  * 	nothing
3026  **/
3027 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3028 				   struct ipr_driver_dump *driver_dump)
3029 {
3030 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3031 	driver_dump->location_entry.hdr.len =
3032 		sizeof(struct ipr_dump_location_entry) -
3033 		sizeof(struct ipr_dump_entry_header);
3034 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3035 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3036 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3037 	driver_dump->hdr.num_entries++;
3038 }
3039 
3040 /**
3041  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3042  * @ioa_cfg:	ioa config struct
3043  * @dump:		dump struct
3044  *
3045  * Return value:
3046  * 	nothing
3047  **/
3048 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3049 {
3050 	unsigned long start_addr, sdt_word;
3051 	unsigned long lock_flags = 0;
3052 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3053 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3054 	u32 num_entries, max_num_entries, start_off, end_off;
3055 	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3056 	struct ipr_sdt *sdt;
3057 	int valid = 1;
3058 	int i;
3059 
3060 	ENTER;
3061 
3062 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3063 
3064 	if (ioa_cfg->sdt_state != READ_DUMP) {
3065 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3066 		return;
3067 	}
3068 
3069 	if (ioa_cfg->sis64) {
3070 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3071 		ssleep(IPR_DUMP_DELAY_SECONDS);
3072 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3073 	}
3074 
3075 	start_addr = readl(ioa_cfg->ioa_mailbox);
3076 
3077 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3078 		dev_err(&ioa_cfg->pdev->dev,
3079 			"Invalid dump table format: %lx\n", start_addr);
3080 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3081 		return;
3082 	}
3083 
3084 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3085 
3086 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3087 
3088 	/* Initialize the overall dump header */
3089 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3090 	driver_dump->hdr.num_entries = 1;
3091 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3092 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3093 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3094 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3095 
3096 	ipr_dump_version_data(ioa_cfg, driver_dump);
3097 	ipr_dump_location_data(ioa_cfg, driver_dump);
3098 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3099 	ipr_dump_trace_data(ioa_cfg, driver_dump);
3100 
3101 	/* Update dump_header */
3102 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3103 
3104 	/* IOA Dump entry */
3105 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3106 	ioa_dump->hdr.len = 0;
3107 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3108 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3109 
3110 	/* First entries in sdt are actually a list of dump addresses and
3111 	 * lengths to gather the real dump data. sdt represents the pointer
3112 	 * to the ioa generated dump table. Dump data will be extracted based
3113 	 * on entries in this table. */
3114 	sdt = &ioa_dump->sdt;
3115 
3116 	if (ioa_cfg->sis64) {
3117 		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3118 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3119 	} else {
3120 		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3121 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3122 	}
3123 
3124 	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3125 			(max_num_entries * sizeof(struct ipr_sdt_entry));
3126 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3127 					bytes_to_copy / sizeof(__be32));
3128 
3129 	/* Smart Dump table is ready to use and the first entry is valid */
3130 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3131 	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3132 		dev_err(&ioa_cfg->pdev->dev,
3133 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
3134 			rc, be32_to_cpu(sdt->hdr.state));
3135 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3136 		ioa_cfg->sdt_state = DUMP_OBTAINED;
3137 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3138 		return;
3139 	}
3140 
3141 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3142 
3143 	if (num_entries > max_num_entries)
3144 		num_entries = max_num_entries;
3145 
3146 	/* Update dump length to the actual data to be copied */
3147 	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3148 	if (ioa_cfg->sis64)
3149 		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3150 	else
3151 		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3152 
3153 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3154 
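	/*
	 * Walk the SDT entries: for each valid entry work out the adapter
	 * address and length (format 2 packs the start address into the
	 * token), copy the data with ipr_sdt_copy(), and stop early if the
	 * dump grows beyond max_dump_size.
	 */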
3155 	for (i = 0; i < num_entries; i++) {
3156 		if (ioa_dump->hdr.len > max_dump_size) {
3157 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3158 			break;
3159 		}
3160 
3161 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3162 			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3163 			if (ioa_cfg->sis64)
3164 				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3165 			else {
3166 				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3167 				end_off = be32_to_cpu(sdt->entry[i].end_token);
3168 
3169 				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3170 					bytes_to_copy = end_off - start_off;
3171 				else
3172 					valid = 0;
3173 			}
3174 			if (valid) {
3175 				if (bytes_to_copy > max_dump_size) {
3176 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3177 					continue;
3178 				}
3179 
3180 				/* Copy data from adapter to driver buffers */
3181 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3182 							    bytes_to_copy);
3183 
3184 				ioa_dump->hdr.len += bytes_copied;
3185 
3186 				if (bytes_copied != bytes_to_copy) {
3187 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3188 					break;
3189 				}
3190 			}
3191 		}
3192 	}
3193 
3194 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3195 
3196 	/* Update dump_header */
3197 	driver_dump->hdr.len += ioa_dump->hdr.len;
3198 	wmb();
3199 	ioa_cfg->sdt_state = DUMP_OBTAINED;
3200 	LEAVE;
3201 }
3202 
3203 #else
3204 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3205 #endif
3206 
3207 /**
3208  * ipr_release_dump - Free adapter dump memory
3209  * @kref:	kref struct
3210  *
3211  * Return value:
3212  *	nothing
3213  **/
3214 static void ipr_release_dump(struct kref *kref)
3215 {
3216 	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3217 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3218 	unsigned long lock_flags = 0;
3219 	int i;
3220 
3221 	ENTER;
3222 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3223 	ioa_cfg->dump = NULL;
3224 	ioa_cfg->sdt_state = INACTIVE;
3225 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3226 
3227 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3228 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3229 
3230 	vfree(dump->ioa_dump.ioa_data);
3231 	kfree(dump);
3232 	LEAVE;
3233 }
3234 
3235 static void ipr_add_remove_thread(struct work_struct *work)
3236 {
3237 	unsigned long lock_flags;
3238 	struct ipr_resource_entry *res;
3239 	struct scsi_device *sdev;
3240 	struct ipr_ioa_cfg *ioa_cfg =
3241 		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3242 	u8 bus, target, lun;
3243 	int did_work;
3244 
3245 	ENTER;
3246 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3247 
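	/*
	 * Removals are processed first; the addition loop below jumps back
	 * to this point after every scsi_add_device() call, since the host
	 * lock must be dropped around mid-layer calls.
	 */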
3248 restart:
3249 	do {
3250 		did_work = 0;
3251 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3252 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3253 			return;
3254 		}
3255 
3256 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3257 			if (res->del_from_ml && res->sdev) {
3258 				did_work = 1;
3259 				sdev = res->sdev;
3260 				if (!scsi_device_get(sdev)) {
3261 					if (!res->add_to_ml)
3262 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3263 					else
3264 						res->del_from_ml = 0;
3265 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3266 					scsi_remove_device(sdev);
3267 					scsi_device_put(sdev);
3268 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3269 				}
3270 				break;
3271 			}
3272 		}
3273 	} while (did_work);
3274 
3275 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3276 		if (res->add_to_ml) {
3277 			bus = res->bus;
3278 			target = res->target;
3279 			lun = res->lun;
3280 			res->add_to_ml = 0;
3281 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 			scsi_add_device(ioa_cfg->host, bus, target, lun);
3283 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3284 			goto restart;
3285 		}
3286 	}
3287 
3288 	ioa_cfg->scan_done = 1;
3289 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3290 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3291 	LEAVE;
3292 }
3293 
3294 /**
3295  * ipr_worker_thread - Worker thread
3296  * @work:		ioa config struct
3297  *
3298  * Called at task level from a work thread. This function takes care
3299  * of adding and removing devices from the mid-layer as configuration
3300  * changes are detected by the adapter.
3301  *
3302  * Return value:
3303  * 	nothing
3304  **/
3305 static void ipr_worker_thread(struct work_struct *work)
3306 {
3307 	unsigned long lock_flags;
3308 	struct ipr_dump *dump;
3309 	struct ipr_ioa_cfg *ioa_cfg =
3310 		container_of(work, struct ipr_ioa_cfg, work_q);
3311 
3312 	ENTER;
3313 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3314 
3315 	if (ioa_cfg->sdt_state == READ_DUMP) {
3316 		dump = ioa_cfg->dump;
3317 		if (!dump) {
3318 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3319 			return;
3320 		}
3321 		kref_get(&dump->kref);
3322 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3323 		ipr_get_ioa_dump(ioa_cfg, dump);
3324 		kref_put(&dump->kref, ipr_release_dump);
3325 
3326 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3327 		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3328 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3329 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3330 		return;
3331 	}
3332 
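	/*
	 * Unblock the host outside the lock, then re-check scsi_blocked:
	 * a reset that started while the lock was dropped may have asked
	 * for requests to be blocked again.
	 */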
3333 	if (ioa_cfg->scsi_unblock) {
3334 		ioa_cfg->scsi_unblock = 0;
3335 		ioa_cfg->scsi_blocked = 0;
3336 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3337 		scsi_unblock_requests(ioa_cfg->host);
3338 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3339 		if (ioa_cfg->scsi_blocked)
3340 			scsi_block_requests(ioa_cfg->host);
3341 	}
3342 
3343 	if (!ioa_cfg->scan_enabled) {
3344 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3345 		return;
3346 	}
3347 
3348 	schedule_work(&ioa_cfg->scsi_add_work_q);
3349 
3350 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3351 	LEAVE;
3352 }
3353 
3354 #ifdef CONFIG_SCSI_IPR_TRACE
3355 /**
3356  * ipr_read_trace - Dump the adapter trace
3357  * @filp:		open sysfs file
3358  * @kobj:		kobject struct
3359  * @bin_attr:		bin_attribute struct
3360  * @buf:		buffer
3361  * @off:		offset
3362  * @count:		buffer size
3363  *
3364  * Return value:
3365  *	number of bytes printed to buffer
3366  **/
3367 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3368 			      struct bin_attribute *bin_attr,
3369 			      char *buf, loff_t off, size_t count)
3370 {
3371 	struct device *dev = kobj_to_dev(kobj);
3372 	struct Scsi_Host *shost = class_to_shost(dev);
3373 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3374 	unsigned long lock_flags = 0;
3375 	ssize_t ret;
3376 
3377 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3378 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3379 				IPR_TRACE_SIZE);
3380 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3381 
3382 	return ret;
3383 }
3384 
3385 static struct bin_attribute ipr_trace_attr = {
3386 	.attr =	{
3387 		.name = "trace",
3388 		.mode = S_IRUGO,
3389 	},
3390 	.size = 0,
3391 	.read = ipr_read_trace,
3392 };
3393 #endif
3394 
3395 /**
3396  * ipr_show_fw_version - Show the firmware version
3397  * @dev:	class device struct
3398  * @attr:	device attribute (unused)
3399  * @buf:	buffer
3400  *
3401  * Return value:
3402  *	number of bytes printed to buffer
3403  **/
3404 static ssize_t ipr_show_fw_version(struct device *dev,
3405 				   struct device_attribute *attr, char *buf)
3406 {
3407 	struct Scsi_Host *shost = class_to_shost(dev);
3408 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3409 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3410 	unsigned long lock_flags = 0;
3411 	int len;
3412 
3413 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3414 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3415 		       ucode_vpd->major_release, ucode_vpd->card_type,
3416 		       ucode_vpd->minor_release[0],
3417 		       ucode_vpd->minor_release[1]);
3418 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3419 	return len;
3420 }
3421 
3422 static struct device_attribute ipr_fw_version_attr = {
3423 	.attr = {
3424 		.name =		"fw_version",
3425 		.mode =		S_IRUGO,
3426 	},
3427 	.show = ipr_show_fw_version,
3428 };
3429 
3430 /**
3431  * ipr_show_log_level - Show the adapter's error logging level
3432  * @dev:	class device struct
3433  * @attr:	device attribute (unused)
3434  * @buf:	buffer
3435  *
3436  * Return value:
3437  * 	number of bytes printed to buffer
3438  **/
3439 static ssize_t ipr_show_log_level(struct device *dev,
3440 				   struct device_attribute *attr, char *buf)
3441 {
3442 	struct Scsi_Host *shost = class_to_shost(dev);
3443 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3444 	unsigned long lock_flags = 0;
3445 	int len;
3446 
3447 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3448 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3449 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3450 	return len;
3451 }
3452 
3453 /**
3454  * ipr_store_log_level - Change the adapter's error logging level
3455  * @dev:	class device struct
3456  * @attr:	device attribute (unused)
3457  * @buf:	buffer
3458  * @count:	buffer size
3459  *
3460  * Return value:
3461  * 	number of bytes consumed on success
3462  **/
3463 static ssize_t ipr_store_log_level(struct device *dev,
3464 				   struct device_attribute *attr,
3465 				   const char *buf, size_t count)
3466 {
3467 	struct Scsi_Host *shost = class_to_shost(dev);
3468 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3469 	unsigned long lock_flags = 0;
3470 
3471 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3472 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3473 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3474 	return strlen(buf);
3475 }
3476 
3477 static struct device_attribute ipr_log_level_attr = {
3478 	.attr = {
3479 		.name =		"log_level",
3480 		.mode =		S_IRUGO | S_IWUSR,
3481 	},
3482 	.show = ipr_show_log_level,
3483 	.store = ipr_store_log_level
3484 };
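
/*
 * Example only (host number is a placeholder; the attribute is typically
 * exposed under /sys/class/scsi_host/):
 *
 *	# echo 4 > /sys/class/scsi_host/hostN/log_level
 */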
3485 
3486 /**
3487  * ipr_store_diagnostics - IOA Diagnostics interface
3488  * @dev:	device struct
3489  * @attr:	device attribute (unused)
3490  * @buf:	buffer
3491  * @count:	buffer size
3492  *
3493  * This function will reset the adapter and wait a reasonable
3494  * amount of time for any errors that the adapter might log.
3495  *
3496  * Return value:
3497  * 	count on success / other on failure
3498  **/
3499 static ssize_t ipr_store_diagnostics(struct device *dev,
3500 				     struct device_attribute *attr,
3501 				     const char *buf, size_t count)
3502 {
3503 	struct Scsi_Host *shost = class_to_shost(dev);
3504 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3505 	unsigned long lock_flags = 0;
3506 	int rc = count;
3507 
3508 	if (!capable(CAP_SYS_ADMIN))
3509 		return -EACCES;
3510 
3511 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3512 	while (ioa_cfg->in_reset_reload) {
3513 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3514 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3515 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3516 	}
3517 
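	/*
	 * Clear the error count, issue a normal shutdown/reset, then wait
	 * for the reset to finish and give the adapter a second to log
	 * any errors it found. A non-zero count afterwards fails the run.
	 */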
3518 	ioa_cfg->errors_logged = 0;
3519 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3520 
3521 	if (ioa_cfg->in_reset_reload) {
3522 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3523 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3524 
3525 		/* Wait for a second for any errors to be logged */
3526 		msleep(1000);
3527 	} else {
3528 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3529 		return -EIO;
3530 	}
3531 
3532 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3533 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3534 		rc = -EIO;
3535 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3536 
3537 	return rc;
3538 }
3539 
3540 static struct device_attribute ipr_diagnostics_attr = {
3541 	.attr = {
3542 		.name =		"run_diagnostics",
3543 		.mode =		S_IWUSR,
3544 	},
3545 	.store = ipr_store_diagnostics
3546 };
3547 
3548 /**
3549  * ipr_show_adapter_state - Show the adapter's state
3550  * @dev:	device struct
3551  * @attr:	device attribute (unused)
3552  * @buf:	buffer
3553  *
3554  * Return value:
3555  * 	number of bytes printed to buffer
3556  **/
3557 static ssize_t ipr_show_adapter_state(struct device *dev,
3558 				      struct device_attribute *attr, char *buf)
3559 {
3560 	struct Scsi_Host *shost = class_to_shost(dev);
3561 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3562 	unsigned long lock_flags = 0;
3563 	int len;
3564 
3565 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3566 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3567 		len = snprintf(buf, PAGE_SIZE, "offline\n");
3568 	else
3569 		len = snprintf(buf, PAGE_SIZE, "online\n");
3570 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3571 	return len;
3572 }
3573 
3574 /**
3575  * ipr_store_adapter_state - Change adapter state
3576  * @dev:	device struct
3577  * @attr:	device attribute (unused)
3578  * @buf:	buffer
3579  * @count:	buffer size
3580  *
3581  * This function will change the adapter's state.
3582  *
3583  * Return value:
3584  * 	count on success / other on failure
3585  **/
3586 static ssize_t ipr_store_adapter_state(struct device *dev,
3587 				       struct device_attribute *attr,
3588 				       const char *buf, size_t count)
3589 {
3590 	struct Scsi_Host *shost = class_to_shost(dev);
3591 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3592 	unsigned long lock_flags;
3593 	int result = count, i;
3594 
3595 	if (!capable(CAP_SYS_ADMIN))
3596 		return -EACCES;
3597 
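	/*
	 * Writing "online" to a dead adapter clears the per-hrrq dead
	 * flags and kicks off a fresh adapter reset; the write then waits
	 * for the reset/reload to complete before returning.
	 */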
3598 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3599 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3600 	    !strncmp(buf, "online", 6)) {
3601 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3602 			spin_lock(&ioa_cfg->hrrq[i]._lock);
3603 			ioa_cfg->hrrq[i].ioa_is_dead = 0;
3604 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
3605 		}
3606 		wmb();
3607 		ioa_cfg->reset_retries = 0;
3608 		ioa_cfg->in_ioa_bringdown = 0;
3609 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3610 	}
3611 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3613 
3614 	return result;
3615 }
3616 
3617 static struct device_attribute ipr_ioa_state_attr = {
3618 	.attr = {
3619 		.name =		"online_state",
3620 		.mode =		S_IRUGO | S_IWUSR,
3621 	},
3622 	.show = ipr_show_adapter_state,
3623 	.store = ipr_store_adapter_state
3624 };
3625 
3626 /**
3627  * ipr_store_reset_adapter - Reset the adapter
3628  * @dev:	device struct
3629  * @attr:	device attribute (unused)
3630  * @buf:	buffer
3631  * @count:	buffer size
3632  *
3633  * This function will reset the adapter.
3634  *
3635  * Return value:
3636  * 	count on success / other on failure
3637  **/
3638 static ssize_t ipr_store_reset_adapter(struct device *dev,
3639 				       struct device_attribute *attr,
3640 				       const char *buf, size_t count)
3641 {
3642 	struct Scsi_Host *shost = class_to_shost(dev);
3643 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3644 	unsigned long lock_flags;
3645 	int result = count;
3646 
3647 	if (!capable(CAP_SYS_ADMIN))
3648 		return -EACCES;
3649 
3650 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3651 	if (!ioa_cfg->in_reset_reload)
3652 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3653 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3654 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3655 
3656 	return result;
3657 }
3658 
3659 static struct device_attribute ipr_ioa_reset_attr = {
3660 	.attr = {
3661 		.name =		"reset_host",
3662 		.mode =		S_IWUSR,
3663 	},
3664 	.store = ipr_store_reset_adapter
3665 };
3666 
3667 static int ipr_iopoll(struct irq_poll *iop, int budget);
3668 /**
3669  * ipr_show_iopoll_weight - Show ipr polling mode
3670  * @dev:	class device struct
3671  * @attr:	device attribute (unused)
3672  * @buf:	buffer
3673  *
3674  * Return value:
3675  *	number of bytes printed to buffer
3676  **/
3677 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3678 				   struct device_attribute *attr, char *buf)
3679 {
3680 	struct Scsi_Host *shost = class_to_shost(dev);
3681 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3682 	unsigned long lock_flags = 0;
3683 	int len;
3684 
3685 	spin_lock_irqsave(shost->host_lock, lock_flags);
3686 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3687 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3688 
3689 	return len;
3690 }
3691 
3692 /**
3693  * ipr_store_iopoll_weight - Change the adapter's polling mode
3694  * @dev:	class device struct
3695  * @attr:	device attribute (unused)
3696  * @buf:	buffer
3697  * @count:	buffer size
3698  *
3699  * Return value:
3700  *	number of bytes consumed on success / -EINVAL on failure
3701  **/
3702 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3703 					struct device_attribute *attr,
3704 					const char *buf, size_t count)
3705 {
3706 	struct Scsi_Host *shost = class_to_shost(dev);
3707 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3708 	unsigned long user_iopoll_weight;
3709 	unsigned long lock_flags = 0;
3710 	int i;
3711 
3712 	if (!ioa_cfg->sis64) {
3713 		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3714 		return -EINVAL;
3715 	}
3716 	if (kstrtoul(buf, 10, &user_iopoll_weight))
3717 		return -EINVAL;
3718 
3719 	if (user_iopoll_weight > 256) {
3720 		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3721 		return -EINVAL;
3722 	}
3723 
3724 	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3725 		dev_info(&ioa_cfg->pdev->dev, "Specified irq_poll weight is already the current weight\n");
3726 		return strlen(buf);
3727 	}
3728 
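	/*
	 * irq_poll is only used on SIS-64 adapters with more than one
	 * vector. Disable the existing poll instances for hrrqs 1..n,
	 * then re-initialize them under the lock with the new weight
	 * (hrrq 0 stays interrupt driven).
	 */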
3729 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3730 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
3731 			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3732 	}
3733 
3734 	spin_lock_irqsave(shost->host_lock, lock_flags);
3735 	ioa_cfg->iopoll_weight = user_iopoll_weight;
3736 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3737 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3738 			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3739 					ioa_cfg->iopoll_weight, ipr_iopoll);
3740 		}
3741 	}
3742 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743 
3744 	return strlen(buf);
3745 }
3746 
3747 static struct device_attribute ipr_iopoll_weight_attr = {
3748 	.attr = {
3749 		.name =		"iopoll_weight",
3750 		.mode =		S_IRUGO | S_IWUSR,
3751 	},
3752 	.show = ipr_show_iopoll_weight,
3753 	.store = ipr_store_iopoll_weight
3754 };
3755 
3756 /**
3757  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3758  * @buf_len:		buffer length
3759  *
3760  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3761  * list to use for microcode download
3762  *
3763  * Return value:
3764  * 	pointer to sglist / NULL on failure
3765  **/
3766 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3767 {
3768 	int sg_size, order;
3769 	struct ipr_sglist *sglist;
3770 
3771 	/* Get the minimum size per scatter/gather element */
3772 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3773 
3774 	/* Get the actual size per element */
3775 	order = get_order(sg_size);
3776 
3777 	/* Allocate a scatter/gather list for the DMA */
3778 	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3779 	if (sglist == NULL) {
3780 		ipr_trace;
3781 		return NULL;
3782 	}
3783 	sglist->order = order;
3784 	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3785 					      &sglist->num_sg);
3786 	if (!sglist->scatterlist) {
3787 		kfree(sglist);
3788 		return NULL;
3789 	}
3790 
3791 	return sglist;
3792 }
3793 
3794 /**
3795  * ipr_free_ucode_buffer - Frees a microcode download buffer
3796  * @sglist:		scatter/gather list pointer
3797  *
3798  * Free a DMA'able ucode download buffer previously allocated with
3799  * ipr_alloc_ucode_buffer
3800  *
3801  * Return value:
3802  * 	nothing
3803  **/
3804 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3805 {
3806 	sgl_free_order(sglist->scatterlist, sglist->order);
3807 	kfree(sglist);
3808 }
3809 
3810 /**
3811  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3812  * @sglist:		scatter/gather list pointer
3813  * @buffer:		buffer pointer
3814  * @len:		buffer length
3815  *
3816  * Copy a microcode image from a user buffer into a buffer allocated by
3817  * ipr_alloc_ucode_buffer
3818  *
3819  * Return value:
3820  * 	0 on success / other on failure
3821  **/
3822 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3823 				 u8 *buffer, u32 len)
3824 {
3825 	int bsize_elem, i, result = 0;
3826 	struct scatterlist *sg;
3827 
3828 	/* Determine the actual number of bytes per element */
3829 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3830 
3831 	sg = sglist->scatterlist;
3832 
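	/* Copy the image in full bsize_elem chunks; any tail shorter than
	 * one chunk is copied into the final element below.
	 */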
3833 	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3834 			buffer += bsize_elem) {
3835 		struct page *page = sg_page(sg);
3836 
3837 		memcpy_to_page(page, 0, buffer, bsize_elem);
3838 
3839 		sg->length = bsize_elem;
3840 
3841 		if (result != 0) {
3842 			ipr_trace;
3843 			return result;
3844 		}
3845 	}
3846 
3847 	if (len % bsize_elem) {
3848 		struct page *page = sg_page(sg);
3849 
3850 		memcpy_to_page(page, 0, buffer, len % bsize_elem);
3851 
3852 		sg->length = len % bsize_elem;
3853 	}
3854 
3855 	sglist->buffer_len = len;
3856 	return result;
3857 }
3858 
3859 /**
3860  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3861  * @ipr_cmd:		ipr command struct
3862  * @sglist:		scatter/gather list
3863  *
3864  * Builds a microcode download IOA data list (IOADL).
3865  *
3866  **/
3867 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3868 				    struct ipr_sglist *sglist)
3869 {
3870 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3871 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3872 	struct scatterlist *scatterlist = sglist->scatterlist;
3873 	struct scatterlist *sg;
3874 	int i;
3875 
3876 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3877 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3878 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3879 
3880 	ioarcb->ioadl_len =
3881 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3882 	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3883 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3884 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3885 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3886 	}
3887 
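	/* Flag the final descriptor so the adapter can tell where the list ends */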
3888 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3889 }
3890 
3891 /**
3892  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3893  * @ipr_cmd:	ipr command struct
3894  * @sglist:		scatter/gather list
3895  *
3896  * Builds a microcode download IOA data list (IOADL).
3897  *
3898  **/
3899 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3900 				  struct ipr_sglist *sglist)
3901 {
3902 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3903 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3904 	struct scatterlist *scatterlist = sglist->scatterlist;
3905 	struct scatterlist *sg;
3906 	int i;
3907 
3908 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3909 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3910 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3911 
3912 	ioarcb->ioadl_len =
3913 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3914 
3915 	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3916 		ioadl[i].flags_and_data_len =
3917 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
3918 		ioadl[i].address =
3919 			cpu_to_be32(sg_dma_address(sg));
3920 	}
3921 
3922 	ioadl[i-1].flags_and_data_len |=
3923 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3924 }
3925 
3926 /**
3927  * ipr_update_ioa_ucode - Update IOA's microcode
3928  * @ioa_cfg:	ioa config struct
3929  * @sglist:		scatter/gather list
3930  *
3931  * Initiate an adapter reset to update the IOA's microcode
3932  *
3933  * Return value:
3934  * 	0 on success / -EIO on failure
3935  **/
3936 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3937 				struct ipr_sglist *sglist)
3938 {
3939 	unsigned long lock_flags;
3940 
3941 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3942 	while (ioa_cfg->in_reset_reload) {
3943 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3944 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3945 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3946 	}
3947 
3948 	if (ioa_cfg->ucode_sglist) {
3949 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950 		dev_err(&ioa_cfg->pdev->dev,
3951 			"Microcode download already in progress\n");
3952 		return -EIO;
3953 	}
3954 
3955 	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3956 					sglist->scatterlist, sglist->num_sg,
3957 					DMA_TO_DEVICE);
3958 
3959 	if (!sglist->num_dma_sg) {
3960 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3961 		dev_err(&ioa_cfg->pdev->dev,
3962 			"Failed to map microcode download buffer!\n");
3963 		return -EIO;
3964 	}
3965 
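	/*
	 * The download itself happens as part of the adapter reset: the
	 * reset sequence is expected to pick up ucode_sglist and write the
	 * mapped buffer to the IOA before bringing it back up.
	 */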
3966 	ioa_cfg->ucode_sglist = sglist;
3967 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3968 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3969 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3970 
3971 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972 	ioa_cfg->ucode_sglist = NULL;
3973 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3974 	return 0;
3975 }
3976 
3977 /**
3978  * ipr_store_update_fw - Update the firmware on the adapter
3979  * @dev:	device struct
3980  * @attr:	device attribute (unused)
3981  * @buf:	buffer
3982  * @count:	buffer size
3983  *
3984  * This function will update the firmware on the adapter.
3985  *
3986  * Return value:
3987  * 	count on success / other on failure
3988  **/
3989 static ssize_t ipr_store_update_fw(struct device *dev,
3990 				   struct device_attribute *attr,
3991 				   const char *buf, size_t count)
3992 {
3993 	struct Scsi_Host *shost = class_to_shost(dev);
3994 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3995 	struct ipr_ucode_image_header *image_hdr;
3996 	const struct firmware *fw_entry;
3997 	struct ipr_sglist *sglist;
3998 	char fname[100];
3999 	char *src;
4000 	char *endline;
4001 	int result, dnld_size;
4002 
4003 	if (!capable(CAP_SYS_ADMIN))
4004 		return -EACCES;
4005 
4006 	snprintf(fname, sizeof(fname), "%s", buf);
4007 
4008 	endline = strchr(fname, '\n');
4009 	if (endline)
4010 		*endline = '\0';
4011 
4012 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4013 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4014 		return -EIO;
4015 	}
4016 
4017 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4018 
4019 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4020 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4021 	sglist = ipr_alloc_ucode_buffer(dnld_size);
4022 
4023 	if (!sglist) {
4024 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4025 		release_firmware(fw_entry);
4026 		return -ENOMEM;
4027 	}
4028 
4029 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4030 
4031 	if (result) {
4032 		dev_err(&ioa_cfg->pdev->dev,
4033 			"Microcode buffer copy to DMA buffer failed\n");
4034 		goto out;
4035 	}
4036 
4037 	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4038 
4039 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4040 
4041 	if (!result)
4042 		result = count;
4043 out:
4044 	ipr_free_ucode_buffer(sglist);
4045 	release_firmware(fw_entry);
4046 	return result;
4047 }
4048 
4049 static struct device_attribute ipr_update_fw_attr = {
4050 	.attr = {
4051 		.name =		"update_fw",
4052 		.mode =		S_IWUSR,
4053 	},
4054 	.store = ipr_store_update_fw
4055 };
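
/*
 * Illustrative usage only (host number and image name are placeholders;
 * the image must be reachable through the firmware search path used by
 * request_firmware(), e.g. /lib/firmware). The attribute is typically
 * exposed under /sys/class/scsi_host/:
 *
 *	# echo my-adapter-ucode.bin > /sys/class/scsi_host/hostN/update_fw
 */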
4056 
4057 /**
4058  * ipr_show_fw_type - Show the adapter's firmware type.
4059  * @dev:	class device struct
4060  * @attr:	device attribute (unused)
4061  * @buf:	buffer
4062  *
4063  * Return value:
4064  *	number of bytes printed to buffer
4065  **/
4066 static ssize_t ipr_show_fw_type(struct device *dev,
4067 				struct device_attribute *attr, char *buf)
4068 {
4069 	struct Scsi_Host *shost = class_to_shost(dev);
4070 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4071 	unsigned long lock_flags = 0;
4072 	int len;
4073 
4074 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4075 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4076 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4077 	return len;
4078 }
4079 
4080 static struct device_attribute ipr_ioa_fw_type_attr = {
4081 	.attr = {
4082 		.name =		"fw_type",
4083 		.mode =		S_IRUGO,
4084 	},
4085 	.show = ipr_show_fw_type
4086 };
4087 
4088 static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4089 				struct bin_attribute *bin_attr, char *buf,
4090 				loff_t off, size_t count)
4091 {
4092 	struct device *cdev = kobj_to_dev(kobj);
4093 	struct Scsi_Host *shost = class_to_shost(cdev);
4094 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4095 	struct ipr_hostrcb *hostrcb;
4096 	unsigned long lock_flags = 0;
4097 	int ret;
4098 
4099 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4100 	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4101 					struct ipr_hostrcb, queue);
4102 	if (!hostrcb) {
4103 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4104 		return 0;
4105 	}
4106 	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4107 				sizeof(hostrcb->hcam));
4108 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4109 	return ret;
4110 }
4111 
4112 static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4113 				struct bin_attribute *bin_attr, char *buf,
4114 				loff_t off, size_t count)
4115 {
4116 	struct device *cdev = kobj_to_dev(kobj);
4117 	struct Scsi_Host *shost = class_to_shost(cdev);
4118 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4119 	struct ipr_hostrcb *hostrcb;
4120 	unsigned long lock_flags = 0;
4121 
4122 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4123 	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4124 					struct ipr_hostrcb, queue);
4125 	if (!hostrcb) {
4126 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4127 		return count;
4128 	}
4129 
4130 	/* Reclaim hostrcb before exit */
4131 	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4132 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4133 	return count;
4134 }
4135 
4136 static struct bin_attribute ipr_ioa_async_err_log = {
4137 	.attr = {
4138 		.name =		"async_err_log",
4139 		.mode =		S_IRUGO | S_IWUSR,
4140 	},
4141 	.size = 0,
4142 	.read = ipr_read_async_err_log,
4143 	.write = ipr_next_async_err_log
4144 };
4145 
4146 static struct attribute *ipr_ioa_attrs[] = {
4147 	&ipr_fw_version_attr.attr,
4148 	&ipr_log_level_attr.attr,
4149 	&ipr_diagnostics_attr.attr,
4150 	&ipr_ioa_state_attr.attr,
4151 	&ipr_ioa_reset_attr.attr,
4152 	&ipr_update_fw_attr.attr,
4153 	&ipr_ioa_fw_type_attr.attr,
4154 	&ipr_iopoll_weight_attr.attr,
4155 	NULL,
4156 };
4157 
4158 ATTRIBUTE_GROUPS(ipr_ioa);
4159 
4160 #ifdef CONFIG_SCSI_IPR_DUMP
4161 /**
4162  * ipr_read_dump - Dump the adapter
4163  * @filp:		open sysfs file
4164  * @kobj:		kobject struct
4165  * @bin_attr:		bin_attribute struct
4166  * @buf:		buffer
4167  * @off:		offset
4168  * @count:		buffer size
4169  *
4170  * Return value:
4171  *	number of bytes printed to buffer
4172  **/
4173 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4174 			     struct bin_attribute *bin_attr,
4175 			     char *buf, loff_t off, size_t count)
4176 {
4177 	struct device *cdev = kobj_to_dev(kobj);
4178 	struct Scsi_Host *shost = class_to_shost(cdev);
4179 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4180 	struct ipr_dump *dump;
4181 	unsigned long lock_flags = 0;
4182 	char *src;
4183 	int len, sdt_end;
4184 	size_t rc = count;
4185 
4186 	if (!capable(CAP_SYS_ADMIN))
4187 		return -EACCES;
4188 
4189 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4190 	dump = ioa_cfg->dump;
4191 
4192 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4193 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4194 		return 0;
4195 	}
4196 	kref_get(&dump->kref);
4197 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4198 
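	/*
	 * The dump is presented as three consecutive regions: the driver
	 * dump header/structures, the SDT area, and finally the IOA data
	 * pages. The offset is rebased after each region is copied.
	 */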
4199 	if (off > dump->driver_dump.hdr.len) {
4200 		kref_put(&dump->kref, ipr_release_dump);
4201 		return 0;
4202 	}
4203 
4204 	if (off + count > dump->driver_dump.hdr.len) {
4205 		count = dump->driver_dump.hdr.len - off;
4206 		rc = count;
4207 	}
4208 
4209 	if (count && off < sizeof(dump->driver_dump)) {
4210 		if (off + count > sizeof(dump->driver_dump))
4211 			len = sizeof(dump->driver_dump) - off;
4212 		else
4213 			len = count;
4214 		src = (u8 *)&dump->driver_dump + off;
4215 		memcpy(buf, src, len);
4216 		buf += len;
4217 		off += len;
4218 		count -= len;
4219 	}
4220 
4221 	off -= sizeof(dump->driver_dump);
4222 
4223 	if (ioa_cfg->sis64)
4224 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4225 			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4226 			   sizeof(struct ipr_sdt_entry));
4227 	else
4228 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4229 			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4230 
4231 	if (count && off < sdt_end) {
4232 		if (off + count > sdt_end)
4233 			len = sdt_end - off;
4234 		else
4235 			len = count;
4236 		src = (u8 *)&dump->ioa_dump + off;
4237 		memcpy(buf, src, len);
4238 		buf += len;
4239 		off += len;
4240 		count -= len;
4241 	}
4242 
4243 	off -= sdt_end;
4244 
4245 	while (count) {
4246 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4247 			len = PAGE_ALIGN(off) - off;
4248 		else
4249 			len = count;
4250 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4251 		src += off & ~PAGE_MASK;
4252 		memcpy(buf, src, len);
4253 		buf += len;
4254 		off += len;
4255 		count -= len;
4256 	}
4257 
4258 	kref_put(&dump->kref, ipr_release_dump);
4259 	return rc;
4260 }
4261 
4262 /**
4263  * ipr_alloc_dump - Prepare for adapter dump
4264  * @ioa_cfg:	ioa config struct
4265  *
4266  * Return value:
4267  *	0 on success / other on failure
4268  **/
4269 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4270 {
4271 	struct ipr_dump *dump;
4272 	__be32 **ioa_data;
4273 	unsigned long lock_flags = 0;
4274 
4275 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4276 
4277 	if (!dump) {
4278 		ipr_err("Dump memory allocation failed\n");
4279 		return -ENOMEM;
4280 	}
4281 
4282 	if (ioa_cfg->sis64)
4283 		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4284 					      sizeof(__be32 *)));
4285 	else
4286 		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4287 					      sizeof(__be32 *)));
4288 
4289 	if (!ioa_data) {
4290 		ipr_err("Dump memory allocation failed\n");
4291 		kfree(dump);
4292 		return -ENOMEM;
4293 	}
4294 
4295 	dump->ioa_dump.ioa_data = ioa_data;
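	/*
	 * Only the page-pointer array is allocated here; the data pages
	 * themselves are allocated while the dump is being collected and
	 * are freed again in ipr_release_dump().
	 */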
4296 
4297 	kref_init(&dump->kref);
4298 	dump->ioa_cfg = ioa_cfg;
4299 
4300 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4301 
4302 	if (INACTIVE != ioa_cfg->sdt_state) {
4303 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4304 		vfree(dump->ioa_dump.ioa_data);
4305 		kfree(dump);
4306 		return 0;
4307 	}
4308 
4309 	ioa_cfg->dump = dump;
4310 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4311 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4312 		ioa_cfg->dump_taken = 1;
4313 		schedule_work(&ioa_cfg->work_q);
4314 	}
4315 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4316 
4317 	return 0;
4318 }
4319 
4320 /**
4321  * ipr_free_dump - Free adapter dump memory
4322  * @ioa_cfg:	ioa config struct
4323  *
4324  * Return value:
4325  *	0 on success / other on failure
4326  **/
4327 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4328 {
4329 	struct ipr_dump *dump;
4330 	unsigned long lock_flags = 0;
4331 
4332 	ENTER;
4333 
4334 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4335 	dump = ioa_cfg->dump;
4336 	if (!dump) {
4337 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4338 		return 0;
4339 	}
4340 
4341 	ioa_cfg->dump = NULL;
4342 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4343 
4344 	kref_put(&dump->kref, ipr_release_dump);
4345 
4346 	LEAVE;
4347 	return 0;
4348 }
4349 
4350 /**
4351  * ipr_write_dump - Setup dump state of adapter
4352  * @filp:		open sysfs file
4353  * @kobj:		kobject struct
4354  * @bin_attr:		bin_attribute struct
4355  * @buf:		buffer
4356  * @off:		offset
4357  * @count:		buffer size
4358  *
4359  * Return value:
4360  *	count on success / other on failure
4361  **/
4362 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4363 			      struct bin_attribute *bin_attr,
4364 			      char *buf, loff_t off, size_t count)
4365 {
4366 	struct device *cdev = kobj_to_dev(kobj);
4367 	struct Scsi_Host *shost = class_to_shost(cdev);
4368 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4369 	int rc;
4370 
4371 	if (!capable(CAP_SYS_ADMIN))
4372 		return -EACCES;
4373 
4374 	if (buf[0] == '1')
4375 		rc = ipr_alloc_dump(ioa_cfg);
4376 	else if (buf[0] == '0')
4377 		rc = ipr_free_dump(ioa_cfg);
4378 	else
4379 		return -EINVAL;
4380 
4381 	if (rc)
4382 		return rc;
4383 	else
4384 		return count;
4385 }
4386 
4387 static struct bin_attribute ipr_dump_attr = {
4388 	.attr =	{
4389 		.name = "dump",
4390 		.mode = S_IRUSR | S_IWUSR,
4391 	},
4392 	.size = 0,
4393 	.read = ipr_read_dump,
4394 	.write = ipr_write_dump
4395 };
4396 #else
4397 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4398 #endif
4399 
4400 /**
4401  * ipr_change_queue_depth - Change the device's queue depth
4402  * @sdev:	scsi device struct
4403  * @qdepth:	depth to set
4404  *
4405  * Return value:
4406  * 	actual depth set
4407  **/
4408 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4409 {
4410 	scsi_change_queue_depth(sdev, qdepth);
4411 	return sdev->queue_depth;
4412 }
4413 
4414 /**
4415  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4416  * @dev:	device struct
4417  * @attr:	device attribute structure
4418  * @buf:	buffer
4419  *
4420  * Return value:
4421  * 	number of bytes printed to buffer
4422  **/
4423 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4424 {
4425 	struct scsi_device *sdev = to_scsi_device(dev);
4426 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4427 	struct ipr_resource_entry *res;
4428 	unsigned long lock_flags = 0;
4429 	ssize_t len = -ENXIO;
4430 
4431 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4432 	res = (struct ipr_resource_entry *)sdev->hostdata;
4433 	if (res)
4434 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4435 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436 	return len;
4437 }
4438 
4439 static struct device_attribute ipr_adapter_handle_attr = {
4440 	.attr = {
4441 		.name = 	"adapter_handle",
4442 		.mode =		S_IRUSR,
4443 	},
4444 	.show = ipr_show_adapter_handle
4445 };
4446 
4447 /**
4448  * ipr_show_resource_path - Show the resource path or the resource address for
4449  *			    this device.
4450  * @dev:	device struct
4451  * @attr:	device attribute structure
4452  * @buf:	buffer
4453  *
4454  * Return value:
4455  * 	number of bytes printed to buffer
4456  **/
4457 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4458 {
4459 	struct scsi_device *sdev = to_scsi_device(dev);
4460 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4461 	struct ipr_resource_entry *res;
4462 	unsigned long lock_flags = 0;
4463 	ssize_t len = -ENXIO;
4464 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4465 
4466 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4467 	res = (struct ipr_resource_entry *)sdev->hostdata;
4468 	if (res && ioa_cfg->sis64)
4469 		len = snprintf(buf, PAGE_SIZE, "%s\n",
4470 			       __ipr_format_res_path(res->res_path, buffer,
4471 						     sizeof(buffer)));
4472 	else if (res)
4473 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4474 			       res->bus, res->target, res->lun);
4475 
4476 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4477 	return len;
4478 }
4479 
4480 static struct device_attribute ipr_resource_path_attr = {
4481 	.attr = {
4482 		.name = 	"resource_path",
4483 		.mode =		S_IRUGO,
4484 	},
4485 	.show = ipr_show_resource_path
4486 };
4487 
4488 /**
4489  * ipr_show_device_id - Show the device_id for this device.
4490  * @dev:	device struct
4491  * @attr:	device attribute structure
4492  * @buf:	buffer
4493  *
4494  * Return value:
4495  *	number of bytes printed to buffer
4496  **/
4497 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4498 {
4499 	struct scsi_device *sdev = to_scsi_device(dev);
4500 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4501 	struct ipr_resource_entry *res;
4502 	unsigned long lock_flags = 0;
4503 	ssize_t len = -ENXIO;
4504 
4505 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4506 	res = (struct ipr_resource_entry *)sdev->hostdata;
4507 	if (res && ioa_cfg->sis64)
4508 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4509 	else if (res)
4510 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4511 
4512 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4513 	return len;
4514 }
4515 
4516 static struct device_attribute ipr_device_id_attr = {
4517 	.attr = {
4518 		.name =		"device_id",
4519 		.mode =		S_IRUGO,
4520 	},
4521 	.show = ipr_show_device_id
4522 };
4523 
4524 /**
4525  * ipr_show_resource_type - Show the resource type for this device.
4526  * @dev:	device struct
4527  * @attr:	device attribute structure
4528  * @buf:	buffer
4529  *
4530  * Return value:
4531  *	number of bytes printed to buffer
4532  **/
4533 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4534 {
4535 	struct scsi_device *sdev = to_scsi_device(dev);
4536 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4537 	struct ipr_resource_entry *res;
4538 	unsigned long lock_flags = 0;
4539 	ssize_t len = -ENXIO;
4540 
4541 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4542 	res = (struct ipr_resource_entry *)sdev->hostdata;
4543 
4544 	if (res)
4545 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4546 
4547 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4548 	return len;
4549 }
4550 
4551 static struct device_attribute ipr_resource_type_attr = {
4552 	.attr = {
4553 		.name =		"resource_type",
4554 		.mode =		S_IRUGO,
4555 	},
4556 	.show = ipr_show_resource_type
4557 };
4558 
4559 /**
4560  * ipr_show_raw_mode - Show the adapter's raw mode
4561  * @dev:	class device struct
4562  * @attr:	device attribute (unused)
4563  * @buf:	buffer
4564  *
4565  * Return value:
4566  * 	number of bytes printed to buffer
4567  **/
4568 static ssize_t ipr_show_raw_mode(struct device *dev,
4569 				 struct device_attribute *attr, char *buf)
4570 {
4571 	struct scsi_device *sdev = to_scsi_device(dev);
4572 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4573 	struct ipr_resource_entry *res;
4574 	unsigned long lock_flags = 0;
4575 	ssize_t len;
4576 
4577 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4578 	res = (struct ipr_resource_entry *)sdev->hostdata;
4579 	if (res)
4580 		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4581 	else
4582 		len = -ENXIO;
4583 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4584 	return len;
4585 }
4586 
4587 /**
4588  * ipr_store_raw_mode - Change the adapter's raw mode
4589  * @dev:	class device struct
4590  * @attr:	device attribute (unused)
4591  * @buf:	buffer
4592  * @count:		buffer size
4593  *
4594  * Return value:
4595  * 	number of bytes consumed on success / other on failure
4596  **/
4597 static ssize_t ipr_store_raw_mode(struct device *dev,
4598 				  struct device_attribute *attr,
4599 				  const char *buf, size_t count)
4600 {
4601 	struct scsi_device *sdev = to_scsi_device(dev);
4602 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4603 	struct ipr_resource_entry *res;
4604 	unsigned long lock_flags = 0;
4605 	ssize_t len;
4606 
4607 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4608 	res = (struct ipr_resource_entry *)sdev->hostdata;
4609 	if (res) {
4610 		if (ipr_is_af_dasd_device(res)) {
4611 			res->raw_mode = simple_strtoul(buf, NULL, 10);
4612 			len = strlen(buf);
4613 			if (res->sdev)
4614 				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4615 					res->raw_mode ? "enabled" : "disabled");
4616 		} else
4617 			len = -EINVAL;
4618 	} else
4619 		len = -ENXIO;
4620 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4621 	return len;
4622 }
4623 
4624 static struct device_attribute ipr_raw_mode_attr = {
4625 	.attr = {
4626 		.name =		"raw_mode",
4627 		.mode =		S_IRUGO | S_IWUSR,
4628 	},
4629 	.show = ipr_show_raw_mode,
4630 	.store = ipr_store_raw_mode
4631 };
4632 
4633 static struct attribute *ipr_dev_attrs[] = {
4634 	&ipr_adapter_handle_attr.attr,
4635 	&ipr_resource_path_attr.attr,
4636 	&ipr_device_id_attr.attr,
4637 	&ipr_resource_type_attr.attr,
4638 	&ipr_raw_mode_attr.attr,
4639 	NULL,
4640 };
4641 
4642 ATTRIBUTE_GROUPS(ipr_dev);
4643 
4644 /**
4645  * ipr_biosparam - Return the HSC mapping
4646  * @sdev:			scsi device struct
4647  * @block_device:	block device pointer
4648  * @capacity:		capacity of the device
4649  * @parm:			Array containing returned HSC values.
4650  *
4651  * This function generates the HSC parms that fdisk uses.
4652  * We want to make sure we return something that places partitions
4653  * on 4k boundaries for best performance with the IOA.
4654  *
4655  * Return value:
4656  * 	0 on success
4657  **/
4658 static int ipr_biosparam(struct scsi_device *sdev,
4659 			 struct block_device *block_device,
4660 			 sector_t capacity, int *parm)
4661 {
4662 	int heads, sectors;
4663 	sector_t cylinders;
4664 
4665 	heads = 128;
4666 	sectors = 32;
4667 
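	/* 128 heads * 32 sectors = 4096 sectors (2 MB with 512-byte sectors)
	 * per cylinder, so cylinder-aligned partitions always start on a
	 * 4k boundary.
	 */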
4668 	cylinders = capacity;
4669 	sector_div(cylinders, (128 * 32));
4670 
4671 	/* return result */
4672 	parm[0] = heads;
4673 	parm[1] = sectors;
4674 	parm[2] = cylinders;
4675 
4676 	return 0;
4677 }
4678 
4679 /**
4680  * ipr_find_starget - Find target based on bus/target.
4681  * @starget:	scsi target struct
4682  *
4683  * Return value:
4684  * 	resource entry pointer if found / NULL if not found
4685  **/
4686 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4687 {
4688 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4689 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4690 	struct ipr_resource_entry *res;
4691 
4692 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4693 		if ((res->bus == starget->channel) &&
4694 		    (res->target == starget->id)) {
4695 			return res;
4696 		}
4697 	}
4698 
4699 	return NULL;
4700 }
4701 
4702 /**
4703  * ipr_target_destroy - Destroy a SCSI target
4704  * @starget:	scsi target struct
4705  *
4706  **/
4707 static void ipr_target_destroy(struct scsi_target *starget)
4708 {
4709 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4710 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4711 
4712 	if (ioa_cfg->sis64) {
4713 		if (!ipr_find_starget(starget)) {
4714 			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4715 				clear_bit(starget->id, ioa_cfg->array_ids);
4716 			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4717 				clear_bit(starget->id, ioa_cfg->vset_ids);
4718 			else if (starget->channel == 0)
4719 				clear_bit(starget->id, ioa_cfg->target_ids);
4720 		}
4721 	}
4722 }
4723 
4724 /**
4725  * ipr_find_sdev - Find device based on bus/target/lun.
4726  * @sdev:	scsi device struct
4727  *
4728  * Return value:
4729  * 	resource entry pointer if found / NULL if not found
4730  **/
4731 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4732 {
4733 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4734 	struct ipr_resource_entry *res;
4735 
4736 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4737 		if ((res->bus == sdev->channel) &&
4738 		    (res->target == sdev->id) &&
4739 		    (res->lun == sdev->lun))
4740 			return res;
4741 	}
4742 
4743 	return NULL;
4744 }
4745 
4746 /**
4747  * ipr_slave_destroy - Unconfigure a SCSI device
4748  * @sdev:	scsi device struct
4749  *
4750  * Return value:
4751  * 	nothing
4752  **/
4753 static void ipr_slave_destroy(struct scsi_device *sdev)
4754 {
4755 	struct ipr_resource_entry *res;
4756 	struct ipr_ioa_cfg *ioa_cfg;
4757 	unsigned long lock_flags = 0;
4758 
4759 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4760 
4761 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4762 	res = (struct ipr_resource_entry *) sdev->hostdata;
4763 	if (res) {
4764 		sdev->hostdata = NULL;
4765 		res->sdev = NULL;
4766 	}
4767 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4768 }
4769 
4770 /**
4771  * ipr_slave_configure - Configure a SCSI device
4772  * @sdev:	scsi device struct
4773  *
4774  * This function configures the specified scsi device.
4775  *
4776  * Return value:
4777  * 	0 on success
4778  **/
4779 static int ipr_slave_configure(struct scsi_device *sdev)
4780 {
4781 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4782 	struct ipr_resource_entry *res;
4783 	unsigned long lock_flags = 0;
4784 	char buffer[IPR_MAX_RES_PATH_LENGTH];
4785 
4786 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4787 	res = sdev->hostdata;
4788 	if (res) {
4789 		if (ipr_is_af_dasd_device(res))
4790 			sdev->type = TYPE_RAID;
4791 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4792 			sdev->scsi_level = 4;
4793 			sdev->no_uld_attach = 1;
4794 		}
4795 		if (ipr_is_vset_device(res)) {
4796 			sdev->scsi_level = SCSI_SPC_3;
4797 			sdev->no_report_opcodes = 1;
4798 			blk_queue_rq_timeout(sdev->request_queue,
4799 					     IPR_VSET_RW_TIMEOUT);
4800 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4801 		}
4802 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4803 
4804 		if (ioa_cfg->sis64)
4805 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4806 				    ipr_format_res_path(ioa_cfg,
4807 				res->res_path, buffer, sizeof(buffer)));
4808 		return 0;
4809 	}
4810 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4811 	return 0;
4812 }
4813 
4814 /**
4815  * ipr_slave_alloc - Prepare for commands to a device.
4816  * @sdev:	scsi device struct
4817  *
4818  * This function saves a pointer to the resource entry
4819  * in the scsi device struct if the device exists. We
4820  * can then use this pointer in ipr_queuecommand when
4821  * handling new commands.
4822  *
4823  * Return value:
4824  * 	0 on success / -ENXIO if device does not exist
4825  **/
4826 static int ipr_slave_alloc(struct scsi_device *sdev)
4827 {
4828 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4829 	struct ipr_resource_entry *res;
4830 	unsigned long lock_flags;
4831 	int rc = -ENXIO;
4832 
4833 	sdev->hostdata = NULL;
4834 
4835 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4836 
4837 	res = ipr_find_sdev(sdev);
4838 	if (res) {
4839 		res->sdev = sdev;
4840 		res->add_to_ml = 0;
4841 		res->in_erp = 0;
4842 		sdev->hostdata = res;
4843 		if (!ipr_is_naca_model(res))
4844 			res->needs_sync_complete = 1;
4845 		rc = 0;
4846 		if (ipr_is_gata(res)) {
4847 			sdev_printk(KERN_ERR, sdev, "SATA devices are no longer "
4848 				"supported by this driver. Skipping device.\n");
4849 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4850 			return -ENXIO;
4851 		}
4852 	}
4853 
4854 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4855 
4856 	return rc;
4857 }
4858 
4859 /**
4860  * ipr_match_lun - Match function for specified LUN
4861  * @ipr_cmd:	ipr command struct
4862  * @device:		device to match (sdev)
4863  *
4864  * Returns:
4865  *	1 if command matches sdev / 0 if command does not match sdev
4866  **/
4867 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4868 {
4869 	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4870 		return 1;
4871 	return 0;
4872 }
4873 
4874 /**
4875  * ipr_cmnd_is_free - Check if a command is free or not
4876  * @ipr_cmd:	ipr command struct
4877  *
4878  * Returns:
4879  *	true / false
4880  **/
4881 static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
4882 {
4883 	struct ipr_cmnd *loop_cmd;
4884 
4885 	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
4886 		if (loop_cmd == ipr_cmd)
4887 			return true;
4888 	}
4889 
4890 	return false;
4891 }
4892 
4893 /**
4894  * ipr_wait_for_ops - Wait for matching commands to complete
4895  * @ioa_cfg:	ioa config struct
4896  * @device:		device to match (sdev)
4897  * @match:		match function to use
4898  *
4899  * Returns:
4900  *	SUCCESS / FAILED
4901  **/
4902 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4903 			    int (*match)(struct ipr_cmnd *, void *))
4904 {
4905 	struct ipr_cmnd *ipr_cmd;
4906 	int wait, i;
4907 	unsigned long flags;
4908 	struct ipr_hrr_queue *hrrq;
4909 	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4910 	DECLARE_COMPLETION_ONSTACK(comp);
4911 
4912 	ENTER;
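	/*
	 * Attach an on-stack completion to every outstanding command that
	 * matches, then wait for them to finish. If the wait times out,
	 * detach the completion pointers and report failure.
	 */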
4913 	do {
4914 		wait = 0;
4915 
4916 		for_each_hrrq(hrrq, ioa_cfg) {
4917 			spin_lock_irqsave(hrrq->lock, flags);
4918 			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4919 				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4920 				if (!ipr_cmnd_is_free(ipr_cmd)) {
4921 					if (match(ipr_cmd, device)) {
4922 						ipr_cmd->eh_comp = &comp;
4923 						wait++;
4924 					}
4925 				}
4926 			}
4927 			spin_unlock_irqrestore(hrrq->lock, flags);
4928 		}
4929 
4930 		if (wait) {
4931 			timeout = wait_for_completion_timeout(&comp, timeout);
4932 
4933 			if (!timeout) {
4934 				wait = 0;
4935 
4936 				for_each_hrrq(hrrq, ioa_cfg) {
4937 					spin_lock_irqsave(hrrq->lock, flags);
4938 					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4939 						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4940 						if (!ipr_cmnd_is_free(ipr_cmd)) {
4941 							if (match(ipr_cmd, device)) {
4942 								ipr_cmd->eh_comp = NULL;
4943 								wait++;
4944 							}
4945 						}
4946 					}
4947 					spin_unlock_irqrestore(hrrq->lock, flags);
4948 				}
4949 
4950 				if (wait)
4951 					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4952 				LEAVE;
4953 				return wait ? FAILED : SUCCESS;
4954 			}
4955 		}
4956 	} while (wait);
4957 
4958 	LEAVE;
4959 	return SUCCESS;
4960 }
4961 
4962 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4963 {
4964 	struct ipr_ioa_cfg *ioa_cfg;
4965 	unsigned long lock_flags = 0;
4966 	int rc = SUCCESS;
4967 
4968 	ENTER;
4969 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4970 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4971 
4972 	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4973 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4974 		dev_err(&ioa_cfg->pdev->dev,
4975 			"Adapter being reset as a result of error recovery.\n");
4976 
4977 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4978 			ioa_cfg->sdt_state = GET_DUMP;
4979 	}
4980 
4981 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4982 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4983 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4984 
4985 	/* If we got hit with a host reset while we were already resetting
4986 	 * the adapter for some reason and that reset failed, return failure. */
4987 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4988 		ipr_trace;
4989 		rc = FAILED;
4990 	}
4991 
4992 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4993 	LEAVE;
4994 	return rc;
4995 }
4996 
4997 /**
4998  * ipr_device_reset - Reset the device
4999  * @ioa_cfg:	ioa config struct
5000  * @res:		resource entry struct
5001  *
5002  * This function issues a device reset to the affected device.
5003  * If the device is a SCSI device, a LUN reset will be sent
5004  * to the device first. If that does not work, a target reset
5005  * will be sent.
5006  *
5007  * Return value:
5008  *	0 on success / non-zero on failure
5009  **/
5010 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5011 			    struct ipr_resource_entry *res)
5012 {
5013 	struct ipr_cmnd *ipr_cmd;
5014 	struct ipr_ioarcb *ioarcb;
5015 	struct ipr_cmd_pkt *cmd_pkt;
5016 	u32 ioasc;
5017 
5018 	ENTER;
5019 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5020 	ioarcb = &ipr_cmd->ioarcb;
5021 	cmd_pkt = &ioarcb->cmd_pkt;
5022 
5023 	if (ipr_cmd->ioa_cfg->sis64)
5024 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5025 
5026 	ioarcb->res_handle = res->res_handle;
5027 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5028 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5029 
5030 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5031 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5032 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5033 
5034 	LEAVE;
5035 	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5036 }
5037 
5038 /**
5039  * __ipr_eh_dev_reset - Reset the device
5040  * @scsi_cmd:	scsi command struct
5041  *
5042  * This function issues a device reset to the affected device.
5043  * A LUN reset will be sent to the device first. If that does
5044  * not work, a target reset will be sent.
5045  *
5046  * Return value:
5047  *	SUCCESS / FAILED
5048  **/
5049 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5050 {
5051 	struct ipr_ioa_cfg *ioa_cfg;
5052 	struct ipr_resource_entry *res;
5053 	int rc = 0;
5054 
5055 	ENTER;
5056 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5057 	res = scsi_cmd->device->hostdata;
5058 
5059 	/*
5060 	 * If we are currently going through reset/reload, return failed. This will force the
5061 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5062 	 * reset to complete
5063 	 */
5064 	if (ioa_cfg->in_reset_reload)
5065 		return FAILED;
5066 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5067 		return FAILED;
5068 
5069 	res->resetting_device = 1;
5070 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5071 
5072 	rc = ipr_device_reset(ioa_cfg, res);
5073 	res->resetting_device = 0;
5074 	res->reset_occurred = 1;
5075 
5076 	LEAVE;
5077 	return rc ? FAILED : SUCCESS;
5078 }
5079 
5080 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5081 {
5082 	int rc;
5083 	struct ipr_ioa_cfg *ioa_cfg;
5084 	struct ipr_resource_entry *res;
5085 
5086 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5087 	res = cmd->device->hostdata;
5088 
5089 	if (!res)
5090 		return FAILED;
5091 
5092 	spin_lock_irq(cmd->device->host->host_lock);
5093 	rc = __ipr_eh_dev_reset(cmd);
5094 	spin_unlock_irq(cmd->device->host->host_lock);
5095 
5096 	if (rc == SUCCESS)
5097 		rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5098 
5099 	return rc;
5100 }
5101 
5102 /**
5103  * ipr_bus_reset_done - Op done function for bus reset.
5104  * @ipr_cmd:	ipr command struct
5105  *
5106  * This function is the op done function for a bus reset
5107  *
5108  * Return value:
5109  * 	none
5110  **/
5111 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5112 {
5113 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5114 	struct ipr_resource_entry *res;
5115 
5116 	ENTER;
5117 	if (!ioa_cfg->sis64)
5118 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5119 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5120 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
5121 				break;
5122 			}
5123 		}
5124 
5125 	/*
5126 	 * If abort has not completed, indicate the reset has, else call the
5127 	 * abort's done function to wake the sleeping eh thread
5128 	 */
5129 	if (ipr_cmd->sibling->sibling)
5130 		ipr_cmd->sibling->sibling = NULL;
5131 	else
5132 		ipr_cmd->sibling->done(ipr_cmd->sibling);
5133 
5134 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5135 	LEAVE;
5136 }
5137 
5138 /**
5139  * ipr_abort_timeout - An abort task has timed out
5140  * @t: Timer context used to fetch ipr command struct
5141  *
5142  * This function handles when an abort task times out. If this
5143  * happens we issue a bus reset since we have resources tied
5144  * up that must be freed before returning to the midlayer.
5145  *
5146  * Return value:
5147  *	none
5148  **/
5149 static void ipr_abort_timeout(struct timer_list *t)
5150 {
5151 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5152 	struct ipr_cmnd *reset_cmd;
5153 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5154 	struct ipr_cmd_pkt *cmd_pkt;
5155 	unsigned long lock_flags = 0;
5156 
5157 	ENTER;
5158 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5159 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5160 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5161 		return;
5162 	}
5163 
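	/*
	 * Link the timed-out abort and the bus reset through their sibling
	 * pointers so ipr_bus_reset_done() can tell whether the abort has
	 * already completed and wake the sleeping eh thread accordingly.
	 */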
5164 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5165 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5166 	ipr_cmd->sibling = reset_cmd;
5167 	reset_cmd->sibling = ipr_cmd;
5168 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5169 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5170 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5171 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5172 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5173 
5174 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5175 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5176 	LEAVE;
5177 }
5178 
5179 /**
5180  * ipr_cancel_op - Cancel specified op
5181  * @scsi_cmd:	scsi command struct
5182  *
5183  * This function cancels specified op.
5184  *
5185  * Return value:
5186  *	SUCCESS / FAILED
5187  **/
5188 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5189 {
5190 	struct ipr_cmnd *ipr_cmd;
5191 	struct ipr_ioa_cfg *ioa_cfg;
5192 	struct ipr_resource_entry *res;
5193 	struct ipr_cmd_pkt *cmd_pkt;
5194 	u32 ioasc;
5195 	int i, op_found = 0;
5196 	struct ipr_hrr_queue *hrrq;
5197 
5198 	ENTER;
5199 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5200 	res = scsi_cmd->device->hostdata;
5201 
5202 	/* If we are currently going through reset/reload, return failed.
5203 	 * This will force the mid-layer to call ipr_eh_host_reset,
5204 	 * which will then go to sleep and wait for the reset to complete
5205 	 */
5206 	if (ioa_cfg->in_reset_reload ||
5207 	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5208 		return FAILED;
5209 	if (!res)
5210 		return FAILED;
5211 
5212 	/*
5213 	 * If we are aborting a timed out op, chances are that the timeout was caused
5214 	 * by a still not detected EEH error. In such cases, reading a register will
5215 	 * trigger the EEH recovery infrastructure.
5216 	 */
5217 	readl(ioa_cfg->regs.sense_interrupt_reg);
5218 
5219 	if (!ipr_is_gscsi(res))
5220 		return FAILED;
5221 
5222 	for_each_hrrq(hrrq, ioa_cfg) {
5223 		spin_lock(&hrrq->_lock);
5224 		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5225 			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5226 				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5227 					op_found = 1;
5228 					break;
5229 				}
5230 			}
5231 		}
5232 		spin_unlock(&hrrq->_lock);
5233 	}
5234 
5235 	if (!op_found)
5236 		return SUCCESS;
5237 
5238 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5239 	ipr_cmd->ioarcb.res_handle = res->res_handle;
5240 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5241 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5242 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5243 	ipr_cmd->u.sdev = scsi_cmd->device;
5244 
5245 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5246 		    scsi_cmd->cmnd[0]);
5247 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5248 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5249 
5250 	/*
5251 	 * If the abort task timed out and we sent a bus reset, we will get
5252 	 * one of the following responses to the abort
5253 	 */
5254 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5255 		ioasc = 0;
5256 		ipr_trace;
5257 	}
5258 
5259 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5260 	if (!ipr_is_naca_model(res))
5261 		res->needs_sync_complete = 1;
5262 
5263 	LEAVE;
5264 	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5265 }
5266 
5267 /**
5268  * ipr_scan_finished - Report whether scan is done
5269  * @shost:           scsi host struct
5270  * @elapsed_time:    time in jiffies since the scan started
5271  *
5272  * Return value:
5273  *	0 if scan in progress / 1 if scan is complete
5274  **/
5275 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5276 {
5277 	unsigned long lock_flags;
5278 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5279 	int rc = 0;
5280 
5281 	spin_lock_irqsave(shost->host_lock, lock_flags);
5282 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5283 		rc = 1;
5284 	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5285 		rc = 1;
5286 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
5287 	return rc;
5288 }
5289 
5290 /**
5291  * ipr_eh_abort - Abort a single op
5292  * @scsi_cmd:	scsi command struct
5293  *
5294  * Return value:
5295  * 	SUCCESS / FAILED
5296  **/
5297 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5298 {
5299 	unsigned long flags;
5300 	int rc;
5301 	struct ipr_ioa_cfg *ioa_cfg;
5302 
5303 	ENTER;
5304 
5305 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5306 
5307 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5308 	rc = ipr_cancel_op(scsi_cmd);
5309 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5310 
5311 	if (rc == SUCCESS)
5312 		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5313 	LEAVE;
5314 	return rc;
5315 }
5316 
5317 /**
5318  * ipr_handle_other_interrupt - Handle "other" interrupts
5319  * @ioa_cfg:	ioa config struct
5320  * @int_reg:	interrupt register
5321  *
5322  * Return value:
5323  * 	IRQ_NONE / IRQ_HANDLED
5324  **/
5325 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5326 					      u32 int_reg)
5327 {
5328 	irqreturn_t rc = IRQ_HANDLED;
5329 	u32 int_mask_reg;
5330 
5331 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5332 	int_reg &= ~int_mask_reg;
5333 
5334 	/* If an interrupt on the adapter did not occur, ignore it.
5335 	 * Or in the case of SIS 64, check for a stage change interrupt.
5336 	 */
5337 	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5338 		if (ioa_cfg->sis64) {
5339 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5340 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5341 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5342 
5343 				/* clear stage change */
5344 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5345 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5346 				list_del(&ioa_cfg->reset_cmd->queue);
5347 				del_timer(&ioa_cfg->reset_cmd->timer);
5348 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5349 				return IRQ_HANDLED;
5350 			}
5351 		}
5352 
5353 		return IRQ_NONE;
5354 	}
5355 
5356 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5357 		/* Mask the interrupt */
5358 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5359 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5360 
5361 		list_del(&ioa_cfg->reset_cmd->queue);
5362 		del_timer(&ioa_cfg->reset_cmd->timer);
5363 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5364 	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5365 		if (ioa_cfg->clear_isr) {
5366 			if (ipr_debug && printk_ratelimit())
5367 				dev_err(&ioa_cfg->pdev->dev,
5368 					"Spurious interrupt detected. 0x%08X\n", int_reg);
5369 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5370 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5371 			return IRQ_NONE;
5372 		}
5373 	} else {
5374 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5375 			ioa_cfg->ioa_unit_checked = 1;
5376 		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5377 			dev_err(&ioa_cfg->pdev->dev,
5378 				"No Host RRQ. 0x%08X\n", int_reg);
5379 		else
5380 			dev_err(&ioa_cfg->pdev->dev,
5381 				"Permanent IOA failure. 0x%08X\n", int_reg);
5382 
5383 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5384 			ioa_cfg->sdt_state = GET_DUMP;
5385 
5386 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5387 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5388 	}
5389 
5390 	return rc;
5391 }
5392 
5393 /**
5394  * ipr_isr_eh - Interrupt service routine error handler
5395  * @ioa_cfg:	ioa config struct
5396  * @msg:	message to log
5397  * @number:	various meanings depending on the caller/message
5398  *
5399  * Return value:
5400  * 	none
5401  **/
5402 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5403 {
5404 	ioa_cfg->errors_logged++;
5405 	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5406 
5407 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5408 		ioa_cfg->sdt_state = GET_DUMP;
5409 
5410 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5411 }
5412 
5413 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5414 						struct list_head *doneq)
5415 {
5416 	u32 ioasc;
5417 	u16 cmd_index;
5418 	struct ipr_cmnd *ipr_cmd;
5419 	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5420 	int num_hrrq = 0;
5421 
5422 	/* If interrupts are disabled, ignore the interrupt */
5423 	if (!hrr_queue->allow_interrupts)
5424 		return 0;
5425 
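	/*
	 * Each host RRQ entry carries a toggle bit. An entry is valid only
	 * while its toggle bit matches hrr_queue->toggle_bit; when the
	 * consumer pointer wraps from hrrq_end back to hrrq_start the
	 * expected toggle value is flipped, so stale entries from the
	 * previous pass through the queue are not reprocessed.
	 */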
5426 	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5427 	       hrr_queue->toggle_bit) {
5428 
5429 		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5430 			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5431 			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5432 
5433 		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5434 			     cmd_index < hrr_queue->min_cmd_id)) {
5435 			ipr_isr_eh(ioa_cfg,
5436 				"Invalid response handle from IOA: ",
5437 				cmd_index);
5438 			break;
5439 		}
5440 
5441 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5442 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5443 
5444 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5445 
5446 		list_move_tail(&ipr_cmd->queue, doneq);
5447 
5448 		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5449 			hrr_queue->hrrq_curr++;
5450 		} else {
5451 			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5452 			hrr_queue->toggle_bit ^= 1u;
5453 		}
5454 		num_hrrq++;
5455 		if (budget > 0 && num_hrrq >= budget)
5456 			break;
5457 	}
5458 
5459 	return num_hrrq;
5460 }
5461 
5462 static int ipr_iopoll(struct irq_poll *iop, int budget)
5463 {
5464 	struct ipr_hrr_queue *hrrq;
5465 	struct ipr_cmnd *ipr_cmd, *temp;
5466 	unsigned long hrrq_flags;
5467 	int completed_ops;
5468 	LIST_HEAD(doneq);
5469 
5470 	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5471 
5472 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5473 	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5474 
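	/*
	 * If fewer completions than the budget were found, the queue has
	 * been drained and irq_poll_complete() ends this polling cycle;
	 * otherwise irq_poll will call back in again. Completed commands
	 * are finished after the queue lock is dropped.
	 */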
5475 	if (completed_ops < budget)
5476 		irq_poll_complete(iop);
5477 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5478 
5479 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5480 		list_del(&ipr_cmd->queue);
5481 		del_timer(&ipr_cmd->timer);
5482 		ipr_cmd->fast_done(ipr_cmd);
5483 	}
5484 
5485 	return completed_ops;
5486 }
5487 
5488 /**
5489  * ipr_isr - Interrupt service routine
5490  * @irq:	irq number
5491  * @devp:	pointer to the hrr queue struct
5492  *
5493  * Return value:
5494  * 	IRQ_NONE / IRQ_HANDLED
5495  **/
5496 static irqreturn_t ipr_isr(int irq, void *devp)
5497 {
5498 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5499 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5500 	unsigned long hrrq_flags = 0;
5501 	u32 int_reg = 0;
5502 	int num_hrrq = 0;
5503 	int irq_none = 0;
5504 	struct ipr_cmnd *ipr_cmd, *temp;
5505 	irqreturn_t rc = IRQ_NONE;
5506 	LIST_HEAD(doneq);
5507 
5508 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5509 	/* If interrupts are disabled, ignore the interrupt */
5510 	if (!hrrq->allow_interrupts) {
5511 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5512 		return IRQ_NONE;
5513 	}
5514 
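	/*
	 * Process completions until the queue is drained. When clear_isr is
	 * set, the HRRQ-updated condition must also be acknowledged in the
	 * clear register and the sense register re-read; if it still does
	 * not clear after IPR_MAX_HRRQ_RETRIES attempts, treat it as an
	 * adapter error and reset the IOA via ipr_isr_eh().
	 */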
5515 	while (1) {
5516 		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5517 			rc =  IRQ_HANDLED;
5518 
5519 			if (!ioa_cfg->clear_isr)
5520 				break;
5521 
5522 			/* Clear the PCI interrupt */
5523 			num_hrrq = 0;
5524 			do {
5525 				writel(IPR_PCII_HRRQ_UPDATED,
5526 				     ioa_cfg->regs.clr_interrupt_reg32);
5527 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5528 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5529 				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5530 
5531 		} else if (rc == IRQ_NONE && irq_none == 0) {
5532 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5533 			irq_none++;
5534 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5535 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5536 			ipr_isr_eh(ioa_cfg,
5537 				"Error clearing HRRQ: ", num_hrrq);
5538 			rc = IRQ_HANDLED;
5539 			break;
5540 		} else
5541 			break;
5542 	}
5543 
5544 	if (unlikely(rc == IRQ_NONE))
5545 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5546 
5547 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5548 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5549 		list_del(&ipr_cmd->queue);
5550 		del_timer(&ipr_cmd->timer);
5551 		ipr_cmd->fast_done(ipr_cmd);
5552 	}
5553 	return rc;
5554 }
5555 
5556 /**
5557  * ipr_isr_mhrrq - Interrupt service routine for additional HRR queues
5558  * @irq:	irq number
5559  * @devp:	pointer to the hrr queue struct
5560  *
5561  * Return value:
5562  *	IRQ_NONE / IRQ_HANDLED
5563  **/
5564 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5565 {
5566 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5567 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5568 	unsigned long hrrq_flags = 0;
5569 	struct ipr_cmnd *ipr_cmd, *temp;
5570 	irqreturn_t rc = IRQ_NONE;
5571 	LIST_HEAD(doneq);
5572 
5573 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5574 
5575 	/* If interrupts are disabled, ignore the interrupt */
5576 	if (!hrrq->allow_interrupts) {
5577 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5578 		return IRQ_NONE;
5579 	}
5580 
5581 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5582 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5583 		       hrrq->toggle_bit) {
5584 			irq_poll_sched(&hrrq->iopoll);
5585 			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5586 			return IRQ_HANDLED;
5587 		}
5588 	} else {
5589 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5590 			hrrq->toggle_bit)
5591 
5592 			if (ipr_process_hrrq(hrrq, -1, &doneq))
5593 				rc =  IRQ_HANDLED;
5594 	}
5595 
5596 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5597 
5598 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5599 		list_del(&ipr_cmd->queue);
5600 		del_timer(&ipr_cmd->timer);
5601 		ipr_cmd->fast_done(ipr_cmd);
5602 	}
5603 	return rc;
5604 }
5605 
5606 /**
5607  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5608  * @ioa_cfg:	ioa config struct
5609  * @ipr_cmd:	ipr command struct
5610  *
5611  * Return value:
5612  * 	0 on success / -1 on failure
5613  **/
5614 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5615 			     struct ipr_cmnd *ipr_cmd)
5616 {
5617 	int i, nseg;
5618 	struct scatterlist *sg;
5619 	u32 length;
5620 	u32 ioadl_flags = 0;
5621 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5622 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5623 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5624 
5625 	length = scsi_bufflen(scsi_cmd);
5626 	if (!length)
5627 		return 0;
5628 
5629 	nseg = scsi_dma_map(scsi_cmd);
5630 	if (nseg < 0) {
5631 		if (printk_ratelimit())
5632 			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5633 		return -1;
5634 	}
5635 
5636 	ipr_cmd->dma_use_sg = nseg;
5637 
5638 	ioarcb->data_transfer_length = cpu_to_be32(length);
5639 	ioarcb->ioadl_len =
5640 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5641 
5642 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5643 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5644 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5645 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5646 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5647 
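	/*
	 * Build one 64-bit IOADL descriptor (flags, length, 64-bit DMA
	 * address) per scatter/gather element. The final descriptor is
	 * flagged IPR_IOADL_FLAGS_LAST so the adapter knows where the
	 * list ends.
	 */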
5648 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5649 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5650 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5651 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5652 	}
5653 
5654 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5655 	return 0;
5656 }
5657 
5658 /**
5659  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5660  * @ioa_cfg:	ioa config struct
5661  * @ipr_cmd:	ipr command struct
5662  *
5663  * Return value:
5664  * 	0 on success / -1 on failure
5665  **/
5666 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5667 			   struct ipr_cmnd *ipr_cmd)
5668 {
5669 	int i, nseg;
5670 	struct scatterlist *sg;
5671 	u32 length;
5672 	u32 ioadl_flags = 0;
5673 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5674 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5675 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5676 
5677 	length = scsi_bufflen(scsi_cmd);
5678 	if (!length)
5679 		return 0;
5680 
5681 	nseg = scsi_dma_map(scsi_cmd);
5682 	if (nseg < 0) {
5683 		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5684 		return -1;
5685 	}
5686 
5687 	ipr_cmd->dma_use_sg = nseg;
5688 
5689 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5690 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5691 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5692 		ioarcb->data_transfer_length = cpu_to_be32(length);
5693 		ioarcb->ioadl_len =
5694 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5695 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5696 		ioadl_flags = IPR_IOADL_FLAGS_READ;
5697 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5698 		ioarcb->read_ioadl_len =
5699 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5700 	}
5701 
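	/*
	 * For short S/G lists the descriptors fit in the add_data area of
	 * the IOARCB itself, so point the read/write IOADL addresses at
	 * that inline space instead of the external IOADL buffer.
	 */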
5702 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5703 		ioadl = ioarcb->u.add_data.u.ioadl;
5704 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5705 				    offsetof(struct ipr_ioarcb, u.add_data));
5706 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5707 	}
5708 
5709 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5710 		ioadl[i].flags_and_data_len =
5711 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5712 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5713 	}
5714 
5715 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5716 	return 0;
5717 }
5718 
5719 /**
5720  * __ipr_erp_done - Process completion of ERP for a device
5721  * @ipr_cmd:		ipr command struct
5722  *
5723  * This function copies the sense buffer into the scsi_cmd
5724  * struct and pushes the scsi_done function.
5725  *
5726  * Return value:
5727  * 	nothing
5728  **/
5729 static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5730 {
5731 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5732 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5733 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5734 
5735 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5736 		scsi_cmd->result |= (DID_ERROR << 16);
5737 		scmd_printk(KERN_ERR, scsi_cmd,
5738 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5739 	} else {
5740 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5741 		       SCSI_SENSE_BUFFERSIZE);
5742 	}
5743 
5744 	if (res) {
5745 		if (!ipr_is_naca_model(res))
5746 			res->needs_sync_complete = 1;
5747 		res->in_erp = 0;
5748 	}
5749 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5750 	scsi_done(scsi_cmd);
5751 	if (ipr_cmd->eh_comp)
5752 		complete(ipr_cmd->eh_comp);
5753 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5754 }
5755 
5756 /**
5757  * ipr_erp_done - Process completion of ERP for a device
5758  * @ipr_cmd:		ipr command struct
5759  *
5760  * This function copies the sense buffer into the scsi_cmd
5761  * struct and pushes the scsi_done function.
5762  *
5763  * Return value:
5764  * 	nothing
5765  **/
5766 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5767 {
5768 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5769 	unsigned long hrrq_flags;
5770 
5771 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5772 	__ipr_erp_done(ipr_cmd);
5773 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5774 }
5775 
5776 /**
5777  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5778  * @ipr_cmd:	ipr command struct
5779  *
5780  * Return value:
5781  * 	none
5782  **/
5783 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5784 {
5785 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5786 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5787 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5788 
5789 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5790 	ioarcb->data_transfer_length = 0;
5791 	ioarcb->read_data_transfer_length = 0;
5792 	ioarcb->ioadl_len = 0;
5793 	ioarcb->read_ioadl_len = 0;
5794 	ioasa->hdr.ioasc = 0;
5795 	ioasa->hdr.residual_data_len = 0;
5796 
5797 	if (ipr_cmd->ioa_cfg->sis64)
5798 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5799 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5800 	else {
5801 		ioarcb->write_ioadl_addr =
5802 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5803 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5804 	}
5805 }
5806 
5807 /**
5808  * __ipr_erp_request_sense - Send request sense to a device
5809  * @ipr_cmd:	ipr command struct
5810  *
5811  * This function sends a request sense to a device as a result
5812  * of a check condition.
5813  *
5814  * Return value:
5815  * 	nothing
5816  **/
5817 static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5818 {
5819 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5820 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5821 
5822 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5823 		__ipr_erp_done(ipr_cmd);
5824 		return;
5825 	}
5826 
5827 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5828 
5829 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5830 	cmd_pkt->cdb[0] = REQUEST_SENSE;
5831 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5832 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5833 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5834 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5835 
5836 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5837 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5838 
5839 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5840 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5841 }
5842 
5843 /**
5844  * ipr_erp_request_sense - Send request sense to a device
5845  * @ipr_cmd:	ipr command struct
5846  *
5847  * This function sends a request sense to a device as a result
5848  * of a check condition.
5849  *
5850  * Return value:
5851  * 	nothing
5852  **/
5853 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5854 {
5855 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5856 	unsigned long hrrq_flags;
5857 
5858 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5859 	__ipr_erp_request_sense(ipr_cmd);
5860 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5861 }
5862 
5863 /**
5864  * ipr_erp_cancel_all - Send cancel all to a device
5865  * @ipr_cmd:	ipr command struct
5866  *
5867  * This function sends a cancel all to a device to clear the
5868  * queue. If we are running TCQ on the device, QERR is set to 1,
5869  * which means all outstanding ops have been dropped on the floor.
5870  * Cancel all will return them to us.
5871  *
5872  * Return value:
5873  * 	nothing
5874  **/
5875 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5876 {
5877 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5878 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5879 	struct ipr_cmd_pkt *cmd_pkt;
5880 
5881 	res->in_erp = 1;
5882 
5883 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5884 
5885 	if (!scsi_cmd->device->simple_tags) {
5886 		__ipr_erp_request_sense(ipr_cmd);
5887 		return;
5888 	}
5889 
5890 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5891 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5892 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5893 
5894 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5895 		   IPR_CANCEL_ALL_TIMEOUT);
5896 }
5897 
5898 /**
5899  * ipr_dump_ioasa - Dump contents of IOASA
5900  * @ioa_cfg:	ioa config struct
5901  * @ipr_cmd:	ipr command struct
5902  * @res:		resource entry struct
5903  *
5904  * This function is invoked by the interrupt handler when ops
5905  * fail. It will log the IOASA if appropriate. Only called
5906  * for GPDD ops.
5907  *
5908  * Return value:
5909  * 	none
5910  **/
5911 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5912 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5913 {
5914 	int i;
5915 	u16 data_len;
5916 	u32 ioasc, fd_ioasc;
5917 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5918 	__be32 *ioasa_data = (__be32 *)ioasa;
5919 	int error_index;
5920 
5921 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5922 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5923 
5924 	if (0 == ioasc)
5925 		return;
5926 
5927 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5928 		return;
5929 
5930 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5931 		error_index = ipr_get_error(fd_ioasc);
5932 	else
5933 		error_index = ipr_get_error(ioasc);
5934 
5935 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5936 		/* Don't log an error if the IOA already logged one */
5937 		if (ioasa->hdr.ilid != 0)
5938 			return;
5939 
5940 		if (!ipr_is_gscsi(res))
5941 			return;
5942 
5943 		if (ipr_error_table[error_index].log_ioasa == 0)
5944 			return;
5945 	}
5946 
5947 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5948 
5949 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5950 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5951 		data_len = sizeof(struct ipr_ioasa64);
5952 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5953 		data_len = sizeof(struct ipr_ioasa);
5954 
5955 	ipr_err("IOASA Dump:\n");
5956 
5957 	for (i = 0; i < data_len / 4; i += 4) {
5958 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5959 			be32_to_cpu(ioasa_data[i]),
5960 			be32_to_cpu(ioasa_data[i+1]),
5961 			be32_to_cpu(ioasa_data[i+2]),
5962 			be32_to_cpu(ioasa_data[i+3]));
5963 	}
5964 }
5965 
5966 /**
5967  * ipr_gen_sense - Generate SCSI sense data from an IOASA
5968  * @ipr_cmd:	ipr command struct
5969  *
5970  * Return value:
5971  * 	none
5972  **/
5973 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5974 {
5975 	u32 failing_lba;
5976 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5977 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5978 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5979 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5980 
5981 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5982 
5983 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5984 		return;
5985 
5986 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5987 
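	/*
	 * Volume sets with a failing LBA above 32 bits get descriptor
	 * format sense data (response code 0x72) with an information
	 * descriptor carrying the 8-byte LBA; all other cases use fixed
	 * format sense data (response code 0x70).
	 */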
5988 	if (ipr_is_vset_device(res) &&
5989 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5990 	    ioasa->u.vset.failing_lba_hi != 0) {
5991 		sense_buf[0] = 0x72;
5992 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5993 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5994 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5995 
5996 		sense_buf[7] = 12;
5997 		sense_buf[8] = 0;
5998 		sense_buf[9] = 0x0A;
5999 		sense_buf[10] = 0x80;
6000 
6001 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6002 
6003 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6004 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6005 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6006 		sense_buf[15] = failing_lba & 0x000000ff;
6007 
6008 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6009 
6010 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6011 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6012 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6013 		sense_buf[19] = failing_lba & 0x000000ff;
6014 	} else {
6015 		sense_buf[0] = 0x70;
6016 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6017 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6018 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6019 
6020 		/* Illegal request */
6021 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6022 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6023 			sense_buf[7] = 10;	/* additional length */
6024 
6025 			/* IOARCB was in error */
6026 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6027 				sense_buf[15] = 0xC0;
6028 			else	/* Parameter data was invalid */
6029 				sense_buf[15] = 0x80;
6030 
6031 			sense_buf[16] =
6032 			    ((IPR_FIELD_POINTER_MASK &
6033 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6034 			sense_buf[17] =
6035 			    (IPR_FIELD_POINTER_MASK &
6036 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6037 		} else {
6038 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6039 				if (ipr_is_vset_device(res))
6040 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6041 				else
6042 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6043 
6044 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
6045 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6046 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6047 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6048 				sense_buf[6] = failing_lba & 0x000000ff;
6049 			}
6050 
6051 			sense_buf[7] = 6;	/* additional length */
6052 		}
6053 	}
6054 }
6055 
6056 /**
6057  * ipr_get_autosense - Copy autosense data to sense buffer
6058  * @ipr_cmd:	ipr command struct
6059  *
6060  * This function copies the autosense buffer to the buffer
6061  * in the scsi_cmd, if there is autosense available.
6062  *
6063  * Return value:
6064  *	1 if autosense was available / 0 if not
6065  **/
6066 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6067 {
6068 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6069 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6070 
6071 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6072 		return 0;
6073 
6074 	if (ipr_cmd->ioa_cfg->sis64)
6075 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6076 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6077 			   SCSI_SENSE_BUFFERSIZE));
6078 	else
6079 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6080 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6081 			   SCSI_SENSE_BUFFERSIZE));
6082 	return 1;
6083 }
6084 
6085 /**
6086  * ipr_erp_start - Process an error response for a SCSI op
6087  * @ioa_cfg:	ioa config struct
6088  * @ipr_cmd:	ipr command struct
6089  *
6090  * This function determines whether or not to initiate ERP
6091  * on the affected device.
6092  *
6093  * Return value:
6094  * 	nothing
6095  **/
6096 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6097 			      struct ipr_cmnd *ipr_cmd)
6098 {
6099 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6100 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6101 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6102 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6103 
6104 	if (!res) {
6105 		__ipr_scsi_eh_done(ipr_cmd);
6106 		return;
6107 	}
6108 
6109 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6110 		ipr_gen_sense(ipr_cmd);
6111 
6112 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6113 
6114 	switch (masked_ioasc) {
6115 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6116 		if (ipr_is_naca_model(res))
6117 			scsi_cmd->result |= (DID_ABORT << 16);
6118 		else
6119 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
6120 		break;
6121 	case IPR_IOASC_IR_RESOURCE_HANDLE:
6122 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6123 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6124 		break;
6125 	case IPR_IOASC_HW_SEL_TIMEOUT:
6126 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6127 		if (!ipr_is_naca_model(res))
6128 			res->needs_sync_complete = 1;
6129 		break;
6130 	case IPR_IOASC_SYNC_REQUIRED:
6131 		if (!res->in_erp)
6132 			res->needs_sync_complete = 1;
6133 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
6134 		break;
6135 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6136 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6137 		/*
6138 		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6139 		 * so SCSI mid-layer and upper layers handle it accordingly.
6140 		 */
6141 		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6142 			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6143 		break;
6144 	case IPR_IOASC_BUS_WAS_RESET:
6145 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6146 		/*
6147 		 * Report the bus reset and ask for a retry. The device
6148 		 * will return CC/UA on the next command.
6149 		 */
6150 		if (!res->resetting_device)
6151 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6152 		scsi_cmd->result |= (DID_ERROR << 16);
6153 		if (!ipr_is_naca_model(res))
6154 			res->needs_sync_complete = 1;
6155 		break;
6156 	case IPR_IOASC_HW_DEV_BUS_STATUS:
6157 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6158 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6159 			if (!ipr_get_autosense(ipr_cmd)) {
6160 				if (!ipr_is_naca_model(res)) {
6161 					ipr_erp_cancel_all(ipr_cmd);
6162 					return;
6163 				}
6164 			}
6165 		}
6166 		if (!ipr_is_naca_model(res))
6167 			res->needs_sync_complete = 1;
6168 		break;
6169 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6170 		break;
6171 	case IPR_IOASC_IR_NON_OPTIMIZED:
6172 		if (res->raw_mode) {
6173 			res->raw_mode = 0;
6174 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
6175 		} else
6176 			scsi_cmd->result |= (DID_ERROR << 16);
6177 		break;
6178 	default:
6179 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6180 			scsi_cmd->result |= (DID_ERROR << 16);
6181 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6182 			res->needs_sync_complete = 1;
6183 		break;
6184 	}
6185 
6186 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
6187 	scsi_done(scsi_cmd);
6188 	if (ipr_cmd->eh_comp)
6189 		complete(ipr_cmd->eh_comp);
6190 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6191 }
6192 
6193 /**
6194  * ipr_scsi_done - mid-layer done function
6195  * @ipr_cmd:	ipr command struct
6196  *
6197  * This function is invoked by the interrupt handler for
6198  * ops generated by the SCSI mid-layer
6199  *
6200  * Return value:
6201  * 	none
6202  **/
6203 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6204 {
6205 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6206 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6207 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6208 	unsigned long lock_flags;
6209 
6210 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6211 
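	/*
	 * Fast path: a successful op only needs the HRR queue lock to
	 * complete the command and return it to the free queue. Failed ops
	 * take the host lock as well, since error recovery started from
	 * ipr_erp_start() may issue new adapter commands.
	 */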
6212 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6213 		scsi_dma_unmap(scsi_cmd);
6214 
6215 		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6216 		scsi_done(scsi_cmd);
6217 		if (ipr_cmd->eh_comp)
6218 			complete(ipr_cmd->eh_comp);
6219 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6220 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6221 	} else {
6222 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6223 		spin_lock(&ipr_cmd->hrrq->_lock);
6224 		ipr_erp_start(ioa_cfg, ipr_cmd);
6225 		spin_unlock(&ipr_cmd->hrrq->_lock);
6226 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6227 	}
6228 }
6229 
6230 /**
6231  * ipr_queuecommand - Queue a mid-layer request
6232  * @shost:		scsi host struct
6233  * @scsi_cmd:	scsi command struct
6234  *
6235  * This function queues a request generated by the mid-layer.
6236  *
6237  * Return value:
6238  *	0 on success
6239  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6240  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
6241  **/
6242 static int ipr_queuecommand(struct Scsi_Host *shost,
6243 			    struct scsi_cmnd *scsi_cmd)
6244 {
6245 	struct ipr_ioa_cfg *ioa_cfg;
6246 	struct ipr_resource_entry *res;
6247 	struct ipr_ioarcb *ioarcb;
6248 	struct ipr_cmnd *ipr_cmd;
6249 	unsigned long hrrq_flags;
6250 	int rc;
6251 	struct ipr_hrr_queue *hrrq;
6252 	int hrrq_id;
6253 
6254 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6255 
6256 	scsi_cmd->result = (DID_OK << 16);
6257 	res = scsi_cmd->device->hostdata;
6258 
6259 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6260 	hrrq = &ioa_cfg->hrrq[hrrq_id];
6261 
6262 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6263 	/*
6264 	 * We are currently blocking all devices due to a host reset.
6265 	 * We have told the host to stop giving us new requests, but
6266 	 * ERP ops don't count. FIXME
6267 	 */
6268 	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6269 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6270 		return SCSI_MLQUEUE_HOST_BUSY;
6271 	}
6272 
6273 	/*
6274 	 * FIXME - Create a scsi_set_host_offline interface
6275 	 *  so that the ioa_is_dead check can be removed
6276 	 */
6277 	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6278 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6279 		goto err_nodev;
6280 	}
6281 
6282 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6283 	if (ipr_cmd == NULL) {
6284 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6285 		return SCSI_MLQUEUE_HOST_BUSY;
6286 	}
6287 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6288 
6289 	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6290 	ioarcb = &ipr_cmd->ioarcb;
6291 
6292 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6293 	ipr_cmd->scsi_cmd = scsi_cmd;
6294 	ipr_cmd->done = ipr_scsi_eh_done;
6295 
6296 	if (ipr_is_gscsi(res)) {
6297 		if (scsi_cmd->underflow == 0)
6298 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6299 
6300 		if (res->reset_occurred) {
6301 			res->reset_occurred = 0;
6302 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6303 		}
6304 	}
6305 
6306 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6307 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6308 
6309 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6310 		if (scsi_cmd->flags & SCMD_TAGGED)
6311 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6312 		else
6313 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6314 	}
6315 
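	/*
	 * Vendor-specific CDBs (opcode 0xC0 and above) sent to non-GSCSI
	 * devices, and IPR_QUERY_RSRC_STATE CDBs, are issued to the adapter
	 * as IOA commands rather than as pass-through SCSI CDBs.
	 */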
6316 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
6317 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6318 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6319 	}
6320 	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6321 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6322 
6323 		if (scsi_cmd->underflow == 0)
6324 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6325 	}
6326 
6327 	if (ioa_cfg->sis64)
6328 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6329 	else
6330 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6331 
6332 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6333 	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6334 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6335 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6336 		if (!rc)
6337 			scsi_dma_unmap(scsi_cmd);
6338 		return SCSI_MLQUEUE_HOST_BUSY;
6339 	}
6340 
6341 	if (unlikely(hrrq->ioa_is_dead)) {
6342 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6343 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6344 		scsi_dma_unmap(scsi_cmd);
6345 		goto err_nodev;
6346 	}
6347 
6348 	ioarcb->res_handle = res->res_handle;
6349 	if (res->needs_sync_complete) {
6350 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6351 		res->needs_sync_complete = 0;
6352 	}
6353 	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6354 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6355 	ipr_send_command(ipr_cmd);
6356 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6357 	return 0;
6358 
6359 err_nodev:
6360 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6361 	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6362 	scsi_cmd->result = (DID_NO_CONNECT << 16);
6363 	scsi_done(scsi_cmd);
6364 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6365 	return 0;
6366 }
6367 
6368 /**
6369  * ipr_ioa_info - Get information about the card/driver
6370  * @host:	scsi host struct
6371  *
6372  * Return value:
6373  * 	pointer to buffer with description string
6374  **/
6375 static const char *ipr_ioa_info(struct Scsi_Host *host)
6376 {
6377 	static char buffer[512];
6378 	struct ipr_ioa_cfg *ioa_cfg;
6379 	unsigned long lock_flags = 0;
6380 
6381 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6382 
6383 	spin_lock_irqsave(host->host_lock, lock_flags);
6384 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6385 	spin_unlock_irqrestore(host->host_lock, lock_flags);
6386 
6387 	return buffer;
6388 }
6389 
6390 static const struct scsi_host_template driver_template = {
6391 	.module = THIS_MODULE,
6392 	.name = "IPR",
6393 	.info = ipr_ioa_info,
6394 	.queuecommand = ipr_queuecommand,
6395 	.eh_abort_handler = ipr_eh_abort,
6396 	.eh_device_reset_handler = ipr_eh_dev_reset,
6397 	.eh_host_reset_handler = ipr_eh_host_reset,
6398 	.slave_alloc = ipr_slave_alloc,
6399 	.slave_configure = ipr_slave_configure,
6400 	.slave_destroy = ipr_slave_destroy,
6401 	.scan_finished = ipr_scan_finished,
6402 	.target_destroy = ipr_target_destroy,
6403 	.change_queue_depth = ipr_change_queue_depth,
6404 	.bios_param = ipr_biosparam,
6405 	.can_queue = IPR_MAX_COMMANDS,
6406 	.this_id = -1,
6407 	.sg_tablesize = IPR_MAX_SGLIST,
6408 	.max_sectors = IPR_IOA_MAX_SECTORS,
6409 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6410 	.shost_groups = ipr_ioa_groups,
6411 	.sdev_groups = ipr_dev_groups,
6412 	.proc_name = IPR_NAME,
6413 };
6414 
6415 #ifdef CONFIG_PPC_PSERIES
6416 static const u16 ipr_blocked_processors[] = {
6417 	PVR_NORTHSTAR,
6418 	PVR_PULSAR,
6419 	PVR_POWER4,
6420 	PVR_ICESTAR,
6421 	PVR_SSTAR,
6422 	PVR_POWER4p,
6423 	PVR_630,
6424 	PVR_630p
6425 };
6426 
6427 /**
6428  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6429  * @ioa_cfg:	ioa cfg struct
6430  *
6431  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6432  * certain pSeries hardware. This function determines if the given
6433  * adapter is in one of these configurations or not.
6434  *
6435  * Return value:
6436  * 	1 if adapter is not supported / 0 if adapter is supported
6437  **/
6438 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6439 {
6440 	int i;
6441 
6442 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6443 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6444 			if (pvr_version_is(ipr_blocked_processors[i]))
6445 				return 1;
6446 		}
6447 	}
6448 	return 0;
6449 }
6450 #else
6451 #define ipr_invalid_adapter(ioa_cfg) 0
6452 #endif
6453 
6454 /**
6455  * ipr_ioa_bringdown_done - IOA bring down completion.
6456  * @ipr_cmd:	ipr command struct
6457  *
6458  * This function processes the completion of an adapter bring down.
6459  * It wakes any reset sleepers.
6460  *
6461  * Return value:
6462  * 	IPR_RC_JOB_RETURN
6463  **/
6464 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6465 {
6466 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6467 	int i;
6468 
6469 	ENTER;
6470 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6471 		ipr_trace;
6472 		ioa_cfg->scsi_unblock = 1;
6473 		schedule_work(&ioa_cfg->work_q);
6474 	}
6475 
6476 	ioa_cfg->in_reset_reload = 0;
6477 	ioa_cfg->reset_retries = 0;
6478 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6479 		spin_lock(&ioa_cfg->hrrq[i]._lock);
6480 		ioa_cfg->hrrq[i].ioa_is_dead = 1;
6481 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
6482 	}
6483 	wmb();
6484 
6485 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6486 	wake_up_all(&ioa_cfg->reset_wait_q);
6487 	LEAVE;
6488 
6489 	return IPR_RC_JOB_RETURN;
6490 }
6491 
6492 /**
6493  * ipr_ioa_reset_done - IOA reset completion.
6494  * @ipr_cmd:	ipr command struct
6495  *
6496  * This function processes the completion of an adapter reset.
6497  * It schedules any necessary mid-layer add/removes and
6498  * wakes any reset sleepers.
6499  *
6500  * Return value:
6501  * 	IPR_RC_JOB_RETURN
6502  **/
6503 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6504 {
6505 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6506 	struct ipr_resource_entry *res;
6507 	int j;
6508 
6509 	ENTER;
6510 	ioa_cfg->in_reset_reload = 0;
6511 	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6512 		spin_lock(&ioa_cfg->hrrq[j]._lock);
6513 		ioa_cfg->hrrq[j].allow_cmds = 1;
6514 		spin_unlock(&ioa_cfg->hrrq[j]._lock);
6515 	}
6516 	wmb();
6517 	ioa_cfg->reset_cmd = NULL;
6518 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6519 
6520 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6521 		if (res->add_to_ml || res->del_from_ml) {
6522 			ipr_trace;
6523 			break;
6524 		}
6525 	}
6526 	schedule_work(&ioa_cfg->work_q);
6527 
6528 	for (j = 0; j < IPR_NUM_HCAMS; j++) {
6529 		list_del_init(&ioa_cfg->hostrcb[j]->queue);
6530 		if (j < IPR_NUM_LOG_HCAMS)
6531 			ipr_send_hcam(ioa_cfg,
6532 				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
6533 				ioa_cfg->hostrcb[j]);
6534 		else
6535 			ipr_send_hcam(ioa_cfg,
6536 				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
6537 				ioa_cfg->hostrcb[j]);
6538 	}
6539 
6540 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6541 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6542 
6543 	ioa_cfg->reset_retries = 0;
6544 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6545 	wake_up_all(&ioa_cfg->reset_wait_q);
6546 
6547 	ioa_cfg->scsi_unblock = 1;
6548 	schedule_work(&ioa_cfg->work_q);
6549 	LEAVE;
6550 	return IPR_RC_JOB_RETURN;
6551 }
6552 
6553 /**
6554  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6555  * @supported_dev:	supported device struct
6556  * @vpids:			vendor product id struct
6557  *
6558  * Return value:
6559  * 	none
6560  **/
6561 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6562 				 struct ipr_std_inq_vpids *vpids)
6563 {
6564 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6565 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6566 	supported_dev->num_records = 1;
6567 	supported_dev->data_length =
6568 		cpu_to_be16(sizeof(struct ipr_supported_device));
6569 	supported_dev->reserved = 0;
6570 }
6571 
6572 /**
6573  * ipr_set_supported_devs - Send Set Supported Devices for a device
6574  * @ipr_cmd:	ipr command struct
6575  *
6576  * This function sends a Set Supported Devices to the adapter
6577  *
6578  * Return value:
6579  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6580  **/
6581 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6582 {
6583 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6584 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6585 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6586 	struct ipr_resource_entry *res = ipr_cmd->u.res;
6587 
6588 	ipr_cmd->job_step = ipr_ioa_reset_done;
6589 
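	/*
	 * Issue one Set Supported Devices per remaining SCSI disk. On
	 * non-SIS64 adapters the job step is pointed back at this function
	 * so the next disk is handled when the command completes; once the
	 * list is exhausted the job falls through to ipr_ioa_reset_done.
	 */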
6590 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6591 		if (!ipr_is_scsi_disk(res))
6592 			continue;
6593 
6594 		ipr_cmd->u.res = res;
6595 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6596 
6597 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6598 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6599 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6600 
6601 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6602 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6603 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6604 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6605 
6606 		ipr_init_ioadl(ipr_cmd,
6607 			       ioa_cfg->vpd_cbs_dma +
6608 				 offsetof(struct ipr_misc_cbs, supp_dev),
6609 			       sizeof(struct ipr_supported_device),
6610 			       IPR_IOADL_FLAGS_WRITE_LAST);
6611 
6612 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6613 			   IPR_SET_SUP_DEVICE_TIMEOUT);
6614 
6615 		if (!ioa_cfg->sis64)
6616 			ipr_cmd->job_step = ipr_set_supported_devs;
6617 		LEAVE;
6618 		return IPR_RC_JOB_RETURN;
6619 	}
6620 
6621 	LEAVE;
6622 	return IPR_RC_JOB_CONTINUE;
6623 }
6624 
6625 /**
6626  * ipr_get_mode_page - Locate specified mode page
6627  * @mode_pages:	mode page buffer
6628  * @page_code:	page code to find
6629  * @len:		minimum required length for mode page
6630  *
6631  * Return value:
6632  * 	pointer to mode page / NULL on failure
6633  **/
6634 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6635 			       u32 page_code, u32 len)
6636 {
6637 	struct ipr_mode_page_hdr *mode_hdr;
6638 	u32 page_length;
6639 	u32 length;
6640 
6641 	if (!mode_pages || (mode_pages->hdr.length == 0))
6642 		return NULL;
6643 
6644 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6645 	mode_hdr = (struct ipr_mode_page_hdr *)
6646 		(mode_pages->data + mode_pages->hdr.block_desc_len);
6647 
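	/*
	 * Walk the mode page headers that follow the block descriptors,
	 * using each page_length to step to the next page. Return the
	 * matching page only if it satisfies the caller's minimum length;
	 * otherwise return NULL.
	 */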
6648 	while (length) {
6649 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6650 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6651 				return mode_hdr;
6652 			break;
6653 		} else {
6654 			page_length = (sizeof(struct ipr_mode_page_hdr) +
6655 				       mode_hdr->page_length);
6656 			length -= page_length;
6657 			mode_hdr = (struct ipr_mode_page_hdr *)
6658 				((unsigned long)mode_hdr + page_length);
6659 		}
6660 	}
6661 	return NULL;
6662 }
6663 
6664 /**
6665  * ipr_check_term_power - Check for term power errors
6666  * @ioa_cfg:	ioa config struct
6667  * @mode_pages:	IOAFP mode pages buffer
6668  *
6669  * Check the IOAFP's mode page 28 for term power errors
6670  *
6671  * Return value:
6672  * 	nothing
6673  **/
6674 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6675 				 struct ipr_mode_pages *mode_pages)
6676 {
6677 	int i;
6678 	int entry_length;
6679 	struct ipr_dev_bus_entry *bus;
6680 	struct ipr_mode_page28 *mode_page;
6681 
6682 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6683 				      sizeof(struct ipr_mode_page28));
6684 
6685 	entry_length = mode_page->entry_length;
6686 
6687 	bus = mode_page->bus;
6688 
6689 	for (i = 0; i < mode_page->num_entries; i++) {
6690 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6691 			dev_err(&ioa_cfg->pdev->dev,
6692 				"Term power is absent on scsi bus %d\n",
6693 				bus->res_addr.bus);
6694 		}
6695 
6696 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6697 	}
6698 }
6699 
6700 /**
6701  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6702  * @ioa_cfg:	ioa config struct
6703  *
6704  * Looks through the config table for SES devices. If an SES
6705  * device has an entry in the SES table specifying a maximum SCSI
6706  * bus speed, the speed of that bus is limited accordingly.
6707  *
6708  * Return value:
6709  * 	none
6710  **/
6711 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6712 {
6713 	u32 max_xfer_rate;
6714 	int i;
6715 
6716 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6717 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6718 						       ioa_cfg->bus_attr[i].bus_width);
6719 
6720 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6721 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6722 	}
6723 }
6724 
6725 /**
6726  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6727  * @ioa_cfg:	ioa config struct
6728  * @mode_pages:	mode page 28 buffer
6729  *
6730  * Updates mode page 28 based on driver configuration
6731  *
6732  * Return value:
6733  * 	none
6734  **/
6735 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6736 					  struct ipr_mode_pages *mode_pages)
6737 {
6738 	int i, entry_length;
6739 	struct ipr_dev_bus_entry *bus;
6740 	struct ipr_bus_attributes *bus_attr;
6741 	struct ipr_mode_page28 *mode_page;
6742 
6743 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6744 				      sizeof(struct ipr_mode_page28));
6745 
6746 	entry_length = mode_page->entry_length;
6747 
6748 	/* Loop for each device bus entry */
6749 	for (i = 0, bus = mode_page->bus;
6750 	     i < mode_page->num_entries;
6751 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6752 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6753 			dev_err(&ioa_cfg->pdev->dev,
6754 				"Invalid resource address reported: 0x%08X\n",
6755 				IPR_GET_PHYS_LOC(bus->res_addr));
6756 			continue;
6757 		}
6758 
6759 		bus_attr = &ioa_cfg->bus_attr[i];
6760 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6761 		bus->bus_width = bus_attr->bus_width;
6762 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6763 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6764 		if (bus_attr->qas_enabled)
6765 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6766 		else
6767 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6768 	}
6769 }
6770 
6771 /**
6772  * ipr_build_mode_select - Build a mode select command
6773  * @ipr_cmd:	ipr command struct
6774  * @res_handle:	resource handle to send command to
6775  * @parm:		Byte 1 of the Mode Select CDB
6776  * @dma_addr:	DMA buffer address
6777  * @xfer_len:	data transfer length
6778  *
6779  * Return value:
6780  * 	none
6781  **/
6782 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6783 				  __be32 res_handle, u8 parm,
6784 				  dma_addr_t dma_addr, u8 xfer_len)
6785 {
6786 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6787 
6788 	ioarcb->res_handle = res_handle;
6789 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6790 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6791 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6792 	ioarcb->cmd_pkt.cdb[1] = parm;
6793 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6794 
6795 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6796 }
6797 
6798 /**
6799  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6800  * @ipr_cmd:	ipr command struct
6801  *
6802  * This function sets up the SCSI bus attributes and sends
6803  * a Mode Select for Page 28 to activate them.
6804  *
6805  * Return value:
6806  * 	IPR_RC_JOB_RETURN
6807  **/
6808 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6809 {
6810 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6811 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6812 	int length;
6813 
6814 	ENTER;
6815 	ipr_scsi_bus_speed_limit(ioa_cfg);
6816 	ipr_check_term_power(ioa_cfg, mode_pages);
6817 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6818 	length = mode_pages->hdr.length + 1;
6819 	mode_pages->hdr.length = 0;
6820 
6821 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6822 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6823 			      length);
6824 
6825 	ipr_cmd->job_step = ipr_set_supported_devs;
6826 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6827 				    struct ipr_resource_entry, queue);
6828 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6829 
6830 	LEAVE;
6831 	return IPR_RC_JOB_RETURN;
6832 }
6833 
6834 /**
6835  * ipr_build_mode_sense - Builds a mode sense command
6836  * @ipr_cmd:	ipr command struct
6837  * @res_handle:	resource handle to send command to
6838  * @parm:		Byte 2 of mode sense command
6839  * @dma_addr:	DMA address of mode sense buffer
6840  * @xfer_len:	Size of DMA buffer
6841  *
6842  * Return value:
6843  * 	none
6844  **/
6845 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6846 				 __be32 res_handle,
6847 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6848 {
6849 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6850 
6851 	ioarcb->res_handle = res_handle;
6852 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6853 	ioarcb->cmd_pkt.cdb[2] = parm;
6854 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6855 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6856 
6857 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6858 }
6859 
6860 /**
6861  * ipr_reset_cmd_failed - Handle failure of IOA reset command
6862  * @ipr_cmd:	ipr command struct
6863  *
6864  * This function handles the failure of an IOA bringup command.
6865  *
6866  * Return value:
6867  * 	IPR_RC_JOB_RETURN
6868  **/
6869 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6870 {
6871 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6872 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6873 
6874 	dev_err(&ioa_cfg->pdev->dev,
6875 		"0x%02X failed with IOASC: 0x%08X\n",
6876 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6877 
6878 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6879 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6880 	return IPR_RC_JOB_RETURN;
6881 }
6882 
6883 /**
6884  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6885  * @ipr_cmd:	ipr command struct
6886  *
6887  * This function handles the failure of a Mode Sense to the IOAFP.
6888  * Some adapters do not handle all mode pages.
6889  *
6890  * Return value:
6891  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6892  **/
6893 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6894 {
6895 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6896 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6897 
6898 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6899 		ipr_cmd->job_step = ipr_set_supported_devs;
6900 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6901 					    struct ipr_resource_entry, queue);
6902 		return IPR_RC_JOB_CONTINUE;
6903 	}
6904 
6905 	return ipr_reset_cmd_failed(ipr_cmd);
6906 }
6907 
6908 /**
6909  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6910  * @ipr_cmd:	ipr command struct
6911  *
6912  * This function sends a Page 28 mode sense to the IOA to
6913  * retrieve SCSI bus attributes.
6914  *
6915  * Return value:
6916  * 	IPR_RC_JOB_RETURN
6917  **/
6918 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6919 {
6920 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6921 
6922 	ENTER;
6923 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6924 			     0x28, ioa_cfg->vpd_cbs_dma +
6925 			     offsetof(struct ipr_misc_cbs, mode_pages),
6926 			     sizeof(struct ipr_mode_pages));
6927 
6928 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6929 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6930 
6931 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6932 
6933 	LEAVE;
6934 	return IPR_RC_JOB_RETURN;
6935 }
6936 
6937 /**
6938  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6939  * @ipr_cmd:	ipr command struct
6940  *
6941  * This function enables dual IOA RAID support if possible.
6942  *
6943  * Return value:
6944  * 	IPR_RC_JOB_RETURN
6945  **/
6946 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6947 {
6948 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6949 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6950 	struct ipr_mode_page24 *mode_page;
6951 	int length;
6952 
6953 	ENTER;
6954 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6955 				      sizeof(struct ipr_mode_page24));
6956 
6957 	if (mode_page)
6958 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6959 
6960 	length = mode_pages->hdr.length + 1;
6961 	mode_pages->hdr.length = 0;
6962 
6963 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6964 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6965 			      length);
6966 
6967 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6968 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6969 
6970 	LEAVE;
6971 	return IPR_RC_JOB_RETURN;
6972 }
6973 
6974 /**
6975  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6976  * @ipr_cmd:	ipr command struct
6977  *
6978  * This function handles the failure of a Mode Sense to the IOAFP.
6979  * Some adapters do not handle all mode pages.
6980  *
6981  * Return value:
6982  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6983  **/
6984 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6985 {
6986 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6987 
6988 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6989 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6990 		return IPR_RC_JOB_CONTINUE;
6991 	}
6992 
6993 	return ipr_reset_cmd_failed(ipr_cmd);
6994 }
6995 
6996 /**
6997  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6998  * @ipr_cmd:	ipr command struct
6999  *
7000  * This function sends a mode sense to the IOA to retrieve
7001  * the IOA Advanced Function Control mode page.
7002  *
7003  * Return value:
7004  * 	IPR_RC_JOB_RETURN
7005  **/
7006 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7007 {
7008 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7009 
7010 	ENTER;
7011 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7012 			     0x24, ioa_cfg->vpd_cbs_dma +
7013 			     offsetof(struct ipr_misc_cbs, mode_pages),
7014 			     sizeof(struct ipr_mode_pages));
7015 
7016 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7017 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7018 
7019 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7020 
7021 	LEAVE;
7022 	return IPR_RC_JOB_RETURN;
7023 }
7024 
7025 /**
7026  * ipr_init_res_table - Initialize the resource table
7027  * @ipr_cmd:	ipr command struct
7028  *
7029  * This function looks through the existing resource table, comparing
7030  * it with the config table. This function will take care of old/new
7031  * devices and schedule adding/removing them from the mid-layer
7032  * as appropriate.
7033  *
7034  * Return value:
7035  * 	IPR_RC_JOB_CONTINUE
7036  **/
7037 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7038 {
7039 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7040 	struct ipr_resource_entry *res, *temp;
7041 	struct ipr_config_table_entry_wrapper cfgtew;
7042 	int entries, found, flag, i;
7043 	LIST_HEAD(old_res);
7044 
7045 	ENTER;
7046 	if (ioa_cfg->sis64)
7047 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7048 	else
7049 		flag = ioa_cfg->u.cfg_table->hdr.flags;
7050 
7051 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
7052 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7053 
7054 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7055 		list_move_tail(&res->queue, &old_res);
7056 
7057 	if (ioa_cfg->sis64)
7058 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7059 	else
7060 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7061 
7062 	for (i = 0; i < entries; i++) {
7063 		if (ioa_cfg->sis64)
7064 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7065 		else
7066 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7067 		found = 0;
7068 
7069 		list_for_each_entry_safe(res, temp, &old_res, queue) {
7070 			if (ipr_is_same_device(res, &cfgtew)) {
7071 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7072 				found = 1;
7073 				break;
7074 			}
7075 		}
7076 
7077 		if (!found) {
7078 			if (list_empty(&ioa_cfg->free_res_q)) {
7079 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7080 				break;
7081 			}
7082 
7083 			found = 1;
7084 			res = list_entry(ioa_cfg->free_res_q.next,
7085 					 struct ipr_resource_entry, queue);
7086 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7087 			ipr_init_res_entry(res, &cfgtew);
7088 			res->add_to_ml = 1;
7089 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7090 			res->sdev->allow_restart = 1;
7091 
7092 		if (found)
7093 			ipr_update_res_entry(res, &cfgtew);
7094 	}
7095 
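	/* Anything left on old_res was not reported in the new config
	 * table: entries still known to the SCSI mid-layer are flagged
	 * for removal, the rest go straight back to the free list.
	 */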
7096 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7097 		if (res->sdev) {
7098 			res->del_from_ml = 1;
7099 			res->res_handle = IPR_INVALID_RES_HANDLE;
7100 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7101 		}
7102 	}
7103 
7104 	list_for_each_entry_safe(res, temp, &old_res, queue) {
7105 		ipr_clear_res_target(res);
7106 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7107 	}
7108 
7109 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7110 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7111 	else
7112 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7113 
7114 	LEAVE;
7115 	return IPR_RC_JOB_CONTINUE;
7116 }
7117 
7118 /**
7119  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7120  * @ipr_cmd:	ipr command struct
7121  *
7122  * This function sends a Query IOA Configuration command
7123  * to the adapter to retrieve the IOA configuration table.
7124  *
7125  * Return value:
7126  * 	IPR_RC_JOB_RETURN
7127  **/
7128 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7129 {
7130 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7131 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7132 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7133 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7134 
7135 	ENTER;
7136 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7137 		ioa_cfg->dual_raid = 1;
7138 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7139 		 ucode_vpd->major_release, ucode_vpd->card_type,
7140 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7141 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7142 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7143 
7144 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7145 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7146 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7147 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7148 
7149 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7150 		       IPR_IOADL_FLAGS_READ_LAST);
7151 
7152 	ipr_cmd->job_step = ipr_init_res_table;
7153 
7154 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7155 
7156 	LEAVE;
7157 	return IPR_RC_JOB_RETURN;
7158 }
7159 
7160 static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7161 {
7162 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7163 
7164 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7165 		return IPR_RC_JOB_CONTINUE;
7166 
7167 	return ipr_reset_cmd_failed(ipr_cmd);
7168 }
7169 
7170 static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7171 					 __be32 res_handle, u8 sa_code)
7172 {
7173 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7174 
7175 	ioarcb->res_handle = res_handle;
7176 	ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7177 	ioarcb->cmd_pkt.cdb[1] = sa_code;
7178 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7179 }
7180 
7181 /**
7182  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7183  * action
7184  * @ipr_cmd:	ipr command struct
7185  *
7186  * Return value:
7187  *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7188  **/
7189 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7190 {
7191 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7192 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7193 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7194 
7195 	ENTER;
7196 
7197 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7198 
7199 	if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7200 		ipr_build_ioa_service_action(ipr_cmd,
7201 					     cpu_to_be32(IPR_IOA_RES_HANDLE),
7202 					     IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7203 
7204 		ioarcb->cmd_pkt.cdb[2] = 0x40;
7205 
7206 		ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7207 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7208 			   IPR_SET_SUP_DEVICE_TIMEOUT);
7209 
7210 		LEAVE;
7211 		return IPR_RC_JOB_RETURN;
7212 	}
7213 
7214 	LEAVE;
7215 	return IPR_RC_JOB_CONTINUE;
7216 }
7217 
7218 /**
7219  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7220  * @ipr_cmd:	ipr command struct
7221  * @flags:	flags to send
7222  * @page:	page to inquire
7223  * @dma_addr:	DMA address
7224  * @xfer_len:	transfer data length
7225  *
7226  * This utility function sends an inquiry to the adapter.
7227  *
7228  * Return value:
7229  * 	none
7230  **/
7231 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7232 			      dma_addr_t dma_addr, u8 xfer_len)
7233 {
7234 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7235 
7236 	ENTER;
7237 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7238 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7239 
7240 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7241 	ioarcb->cmd_pkt.cdb[1] = flags;
7242 	ioarcb->cmd_pkt.cdb[2] = page;
7243 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7244 
7245 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7246 
7247 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7248 	LEAVE;
7249 }
7250 
7251 /**
7252  * ipr_inquiry_page_supported - Is the given inquiry page supported
7253  * @page0:		inquiry page 0 buffer
7254  * @page:		page code.
7255  *
7256  * This function determines if the specified inquiry page is supported.
7257  *
7258  * Return value:
7259  *	1 if page is supported / 0 if not
7260  **/
7261 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7262 {
7263 	int i;
7264 
7265 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7266 		if (page0->page[i] == page)
7267 			return 1;
7268 
7269 	return 0;
7270 }
7271 
7272 /**
7273  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7274  * @ipr_cmd:	ipr command struct
7275  *
7276  * This function sends a Page 0xC4 inquiry to the adapter
7277  * to retrieve the adapter's cache capability information.
7278  *
7279  * Return value:
7280  *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7281  **/
7282 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7283 {
7284 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7285 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7286 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7287 
7288 	ENTER;
7289 	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7290 	memset(pageC4, 0, sizeof(*pageC4));
7291 
7292 	if (ipr_inquiry_page_supported(page0, 0xC4)) {
7293 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7294 				  (ioa_cfg->vpd_cbs_dma
7295 				   + offsetof(struct ipr_misc_cbs,
7296 					      pageC4_data)),
7297 				  sizeof(struct ipr_inquiry_pageC4));
7298 		return IPR_RC_JOB_RETURN;
7299 	}
7300 
7301 	LEAVE;
7302 	return IPR_RC_JOB_CONTINUE;
7303 }
7304 
7305 /**
7306  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7307  * @ipr_cmd:	ipr command struct
7308  *
7309  * This function sends a Page 0xD0 inquiry to the adapter
7310  * to retrieve adapter capabilities.
7311  *
7312  * Return value:
7313  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7314  **/
7315 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7316 {
7317 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7318 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7319 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7320 
7321 	ENTER;
7322 	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7323 	memset(cap, 0, sizeof(*cap));
7324 
7325 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7326 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7327 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7328 				  sizeof(struct ipr_inquiry_cap));
7329 		return IPR_RC_JOB_RETURN;
7330 	}
7331 
7332 	LEAVE;
7333 	return IPR_RC_JOB_CONTINUE;
7334 }
7335 
7336 /**
7337  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7338  * @ipr_cmd:	ipr command struct
7339  *
7340  * This function sends a Page 3 inquiry to the adapter
7341  * to retrieve software VPD information.
7342  *
7343  * Return value:
7344  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7345  **/
7346 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7347 {
7348 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7349 
7350 	ENTER;
7351 
7352 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7353 
7354 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7355 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7356 			  sizeof(struct ipr_inquiry_page3));
7357 
7358 	LEAVE;
7359 	return IPR_RC_JOB_RETURN;
7360 }
7361 
7362 /**
7363  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7364  * @ipr_cmd:	ipr command struct
7365  *
7366  * This function sends a Page 0 inquiry to the adapter
7367  * to retrieve supported inquiry pages.
7368  *
7369  * Return value:
7370  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7371  **/
7372 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7373 {
7374 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7375 	char type[5];
7376 
7377 	ENTER;
7378 
7379 	/* Grab the type out of the VPD and store it away */
7380 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7381 	type[4] = '\0';
7382 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7383 
7384 	if (ipr_invalid_adapter(ioa_cfg)) {
7385 		dev_err(&ioa_cfg->pdev->dev,
7386 			"Adapter not supported in this hardware configuration.\n");
7387 
7388 		if (!ipr_testmode) {
7389 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7390 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7391 			list_add_tail(&ipr_cmd->queue,
7392 					&ioa_cfg->hrrq->hrrq_free_q);
7393 			return IPR_RC_JOB_RETURN;
7394 		}
7395 	}
7396 
7397 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7398 
7399 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7400 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7401 			  sizeof(struct ipr_inquiry_page0));
7402 
7403 	LEAVE;
7404 	return IPR_RC_JOB_RETURN;
7405 }
7406 
7407 /**
7408  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7409  * @ipr_cmd:	ipr command struct
7410  *
7411  * This function sends a standard inquiry to the adapter.
7412  *
7413  * Return value:
7414  * 	IPR_RC_JOB_RETURN
7415  **/
7416 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7417 {
7418 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7419 
7420 	ENTER;
7421 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7422 
7423 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7424 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7425 			  sizeof(struct ipr_ioa_vpd));
7426 
7427 	LEAVE;
7428 	return IPR_RC_JOB_RETURN;
7429 }
7430 
7431 /**
7432  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7433  * @ipr_cmd:	ipr command struct
7434  *
7435  * This function sends an Identify Host Request Response Queue
7436  * command to establish the HRRQ with the adapter.
7437  *
7438  * Return value:
7439  * 	IPR_RC_JOB_RETURN
7440  **/
7441 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7442 {
7443 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7444 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7445 	struct ipr_hrr_queue *hrrq;
7446 
7447 	ENTER;
7448 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7449 	if (ioa_cfg->identify_hrrq_index == 0)
7450 		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7451 
7452 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7453 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7454 
7455 		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7456 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7457 
7458 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7459 		if (ioa_cfg->sis64)
7460 			ioarcb->cmd_pkt.cdb[1] = 0x1;
7461 
7462 		if (ioa_cfg->nvectors == 1)
7463 			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7464 		else
7465 			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7466 
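		/* Bytes 2-5 of the CDB carry the low 32 bits of the host RRQ
		 * DMA address (bytes 10-13 carry the high 32 bits on SIS-64),
		 * and bytes 7-8 carry the queue size in bytes (one u32 per
		 * entry).
		 */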
7467 		ioarcb->cmd_pkt.cdb[2] =
7468 			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7469 		ioarcb->cmd_pkt.cdb[3] =
7470 			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7471 		ioarcb->cmd_pkt.cdb[4] =
7472 			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7473 		ioarcb->cmd_pkt.cdb[5] =
7474 			((u64) hrrq->host_rrq_dma) & 0xff;
7475 		ioarcb->cmd_pkt.cdb[7] =
7476 			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7477 		ioarcb->cmd_pkt.cdb[8] =
7478 			(sizeof(u32) * hrrq->size) & 0xff;
7479 
7480 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7481 			ioarcb->cmd_pkt.cdb[9] =
7482 					ioa_cfg->identify_hrrq_index;
7483 
7484 		if (ioa_cfg->sis64) {
7485 			ioarcb->cmd_pkt.cdb[10] =
7486 				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7487 			ioarcb->cmd_pkt.cdb[11] =
7488 				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7489 			ioarcb->cmd_pkt.cdb[12] =
7490 				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7491 			ioarcb->cmd_pkt.cdb[13] =
7492 				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7493 		}
7494 
7495 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7496 			ioarcb->cmd_pkt.cdb[14] =
7497 					ioa_cfg->identify_hrrq_index;
7498 
7499 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7500 			   IPR_INTERNAL_TIMEOUT);
7501 
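		/* More queues remain: run this job step again to identify
		 * the next HRRQ before moving on to the standard inquiry.
		 */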
7502 		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7503 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7504 
7505 		LEAVE;
7506 		return IPR_RC_JOB_RETURN;
7507 	}
7508 
7509 	LEAVE;
7510 	return IPR_RC_JOB_CONTINUE;
7511 }
7512 
7513 /**
7514  * ipr_reset_timer_done - Adapter reset timer function
7515  * @t: Timer context used to fetch ipr command struct
7516  *
7517  * Description: This function is used in adapter reset processing
7518  * for timing events. If the reset_cmd pointer in the IOA
7519  * config struct does not point to this command, we are doing nested
7520  * resets and fail_all_ops will take care of freeing the
7521  * command block.
7522  *
7523  * Return value:
7524  * 	none
7525  **/
7526 static void ipr_reset_timer_done(struct timer_list *t)
7527 {
7528 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
7529 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7530 	unsigned long lock_flags = 0;
7531 
7532 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7533 
7534 	if (ioa_cfg->reset_cmd == ipr_cmd) {
7535 		list_del(&ipr_cmd->queue);
7536 		ipr_cmd->done(ipr_cmd);
7537 	}
7538 
7539 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7540 }
7541 
7542 /**
7543  * ipr_reset_start_timer - Start a timer for adapter reset job
7544  * @ipr_cmd:	ipr command struct
7545  * @timeout:	timeout value
7546  *
7547  * Description: This function is used in adapter reset processing
7548  * for timing events. If the reset_cmd pointer in the IOA
7549  * config struct does not point to this command, we are doing nested
7550  * resets and fail_all_ops will take care of freeing the
7551  * command block.
7552  *
7553  * Return value:
7554  * 	none
7555  **/
7556 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7557 				  unsigned long timeout)
7558 {
7559 
7560 	ENTER;
7561 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7562 	ipr_cmd->done = ipr_reset_ioa_job;
7563 
7564 	ipr_cmd->timer.expires = jiffies + timeout;
7565 	ipr_cmd->timer.function = ipr_reset_timer_done;
7566 	add_timer(&ipr_cmd->timer);
7567 }
7568 
7569 /**
7570  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7571  * @ioa_cfg:	ioa cfg struct
7572  *
7573  * Return value:
7574  * 	nothing
7575  **/
7576 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7577 {
7578 	struct ipr_hrr_queue *hrrq;
7579 
7580 	for_each_hrrq(hrrq, ioa_cfg) {
7581 		spin_lock(&hrrq->_lock);
7582 		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7583 
7584 		/* Initialize Host RRQ pointers */
7585 		hrrq->hrrq_start = hrrq->host_rrq;
7586 		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7587 		hrrq->hrrq_curr = hrrq->hrrq_start;
7588 		hrrq->toggle_bit = 1;
7589 		spin_unlock(&hrrq->_lock);
7590 	}
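	/* Order the HRRQ pointer initialization above before any later
	 * use of the queues.
	 */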
7591 	wmb();
7592 
7593 	ioa_cfg->identify_hrrq_index = 0;
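	/* With multiple HRRQs, queue 0 is reserved for internal adapter
	 * commands, so I/O dispatch rotation starts at index 1.
	 */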
7594 	if (ioa_cfg->hrrq_num == 1)
7595 		atomic_set(&ioa_cfg->hrrq_index, 0);
7596 	else
7597 		atomic_set(&ioa_cfg->hrrq_index, 1);
7598 
7599 	/* Zero out config table */
7600 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7601 }
7602 
7603 /**
7604  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7605  * @ipr_cmd:	ipr command struct
7606  *
7607  * Return value:
7608  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7609  **/
7610 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7611 {
7612 	unsigned long stage, stage_time;
7613 	u32 feedback;
7614 	volatile u32 int_reg;
7615 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7616 	u64 maskval = 0;
7617 
7618 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7619 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7620 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7621 
7622 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7623 
7624 	/* sanity check the stage_time value */
7625 	if (stage_time == 0)
7626 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7627 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7628 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7629 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7630 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7631 
7632 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7633 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7634 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7635 		stage_time = ioa_cfg->transop_timeout;
7636 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7637 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7638 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7639 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7640 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
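			/* Mask both conditions with one 64-bit write: IPL
			 * stage change in the upper word and transition to
			 * operational in the lower word.
			 */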
7641 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7642 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7643 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7644 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7645 			return IPR_RC_JOB_CONTINUE;
7646 		}
7647 	}
7648 
7649 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7650 	ipr_cmd->timer.function = ipr_oper_timeout;
7651 	ipr_cmd->done = ipr_reset_ioa_job;
7652 	add_timer(&ipr_cmd->timer);
7653 
7654 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7655 
7656 	return IPR_RC_JOB_RETURN;
7657 }
7658 
7659 /**
7660  * ipr_reset_enable_ioa - Enable the IOA following a reset.
7661  * @ipr_cmd:	ipr command struct
7662  *
7663  * This function reinitializes some control blocks and
7664  * enables destructive diagnostics on the adapter.
7665  *
7666  * Return value:
7667  * 	IPR_RC_JOB_RETURN
7668  **/
7669 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7670 {
7671 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7672 	volatile u32 int_reg;
7673 	volatile u64 maskval;
7674 	int i;
7675 
7676 	ENTER;
7677 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7678 	ipr_init_ioa_mem(ioa_cfg);
7679 
7680 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7681 		spin_lock(&ioa_cfg->hrrq[i]._lock);
7682 		ioa_cfg->hrrq[i].allow_interrupts = 1;
7683 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
7684 	}
7685 	if (ioa_cfg->sis64) {
7686 		/* Set the adapter to the correct endian mode. */
7687 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7688 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7689 	}
7690 
7691 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7692 
7693 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7694 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7695 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7696 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7697 		return IPR_RC_JOB_CONTINUE;
7698 	}
7699 
7700 	/* Enable destructive diagnostics on IOA */
7701 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7702 
7703 	if (ioa_cfg->sis64) {
7704 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7705 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7706 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7707 	} else
7708 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7709 
7710 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7711 
7712 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7713 
7714 	if (ioa_cfg->sis64) {
7715 		ipr_cmd->job_step = ipr_reset_next_stage;
7716 		return IPR_RC_JOB_CONTINUE;
7717 	}
7718 
7719 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7720 	ipr_cmd->timer.function = ipr_oper_timeout;
7721 	ipr_cmd->done = ipr_reset_ioa_job;
7722 	add_timer(&ipr_cmd->timer);
7723 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7724 
7725 	LEAVE;
7726 	return IPR_RC_JOB_RETURN;
7727 }
7728 
7729 /**
7730  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7731  * @ipr_cmd:	ipr command struct
7732  *
7733  * This function is invoked when an adapter dump has run out
7734  * of processing time.
7735  *
7736  * Return value:
7737  * 	IPR_RC_JOB_CONTINUE
7738  **/
7739 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7740 {
7741 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7742 
7743 	if (ioa_cfg->sdt_state == GET_DUMP)
7744 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7745 	else if (ioa_cfg->sdt_state == READ_DUMP)
7746 		ioa_cfg->sdt_state = ABORT_DUMP;
7747 
7748 	ioa_cfg->dump_timeout = 1;
7749 	ipr_cmd->job_step = ipr_reset_alert;
7750 
7751 	return IPR_RC_JOB_CONTINUE;
7752 }
7753 
7754 /**
7755  * ipr_unit_check_no_data - Log a unit check/no data error log
7756  * @ioa_cfg:		ioa config struct
7757  *
7758  * Logs an error indicating the adapter unit checked, but for some
7759  * reason, we were unable to fetch the unit check buffer.
7760  *
7761  * Return value:
7762  * 	nothing
7763  **/
7764 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7765 {
7766 	ioa_cfg->errors_logged++;
7767 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7768 }
7769 
7770 /**
7771  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7772  * @ioa_cfg:		ioa config struct
7773  *
7774  * Fetches the unit check buffer from the adapter by clocking the data
7775  * through the mailbox register.
7776  *
7777  * Return value:
7778  * 	nothing
7779  **/
7780 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7781 {
7782 	unsigned long mailbox;
7783 	struct ipr_hostrcb *hostrcb;
7784 	struct ipr_uc_sdt sdt;
7785 	int rc, length;
7786 	u32 ioasc;
7787 
7788 	mailbox = readl(ioa_cfg->ioa_mailbox);
7789 
7790 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7791 		ipr_unit_check_no_data(ioa_cfg);
7792 		return;
7793 	}
7794 
7795 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7796 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7797 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7798 
7799 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7800 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7801 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7802 		ipr_unit_check_no_data(ioa_cfg);
7803 		return;
7804 	}
7805 
7806 	/* Find length of the first sdt entry (UC buffer) */
7807 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7808 		length = be32_to_cpu(sdt.entry[0].end_token);
7809 	else
7810 		length = (be32_to_cpu(sdt.entry[0].end_token) -
7811 			  be32_to_cpu(sdt.entry[0].start_token)) &
7812 			  IPR_FMT2_MBX_ADDR_MASK;
7813 
7814 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7815 			     struct ipr_hostrcb, queue);
7816 	list_del_init(&hostrcb->queue);
7817 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7818 
7819 	rc = ipr_get_ldump_data_section(ioa_cfg,
7820 					be32_to_cpu(sdt.entry[0].start_token),
7821 					(__be32 *)&hostrcb->hcam,
7822 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7823 
7824 	if (!rc) {
7825 		ipr_handle_log_data(ioa_cfg, hostrcb);
7826 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7827 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7828 		    ioa_cfg->sdt_state == GET_DUMP)
7829 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7830 	} else
7831 		ipr_unit_check_no_data(ioa_cfg);
7832 
7833 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7834 }
7835 
7836 /**
7837  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7838  * @ipr_cmd:	ipr command struct
7839  *
7840  * Description: This function will call to get the unit check buffer.
7841  *
7842  * Return value:
7843  *	IPR_RC_JOB_RETURN
7844  **/
7845 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7846 {
7847 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7848 
7849 	ENTER;
7850 	ioa_cfg->ioa_unit_checked = 0;
7851 	ipr_get_unit_check_buffer(ioa_cfg);
7852 	ipr_cmd->job_step = ipr_reset_alert;
7853 	ipr_reset_start_timer(ipr_cmd, 0);
7854 
7855 	LEAVE;
7856 	return IPR_RC_JOB_RETURN;
7857 }
7858 
7859 static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
7860 {
7861 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7862 
7863 	ENTER;
7864 
7865 	if (ioa_cfg->sdt_state != GET_DUMP)
7866 		return IPR_RC_JOB_RETURN;
7867 
7868 	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
7869 	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
7870 	     IPR_PCII_MAILBOX_STABLE)) {
7871 
7872 		if (!ipr_cmd->u.time_left)
7873 			dev_err(&ioa_cfg->pdev->dev,
7874 				"Timed out waiting for Mailbox register.\n");
7875 
7876 		ioa_cfg->sdt_state = READ_DUMP;
7877 		ioa_cfg->dump_timeout = 0;
7878 		if (ioa_cfg->sis64)
7879 			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7880 		else
7881 			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7882 		ipr_cmd->job_step = ipr_reset_wait_for_dump;
7883 		schedule_work(&ioa_cfg->work_q);
7884 
7885 	} else {
7886 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7887 		ipr_reset_start_timer(ipr_cmd,
7888 				      IPR_CHECK_FOR_RESET_TIMEOUT);
7889 	}
7890 
7891 	LEAVE;
7892 	return IPR_RC_JOB_RETURN;
7893 }
7894 
7895 /**
7896  * ipr_reset_restore_cfg_space - Restore PCI config space.
7897  * @ipr_cmd:	ipr command struct
7898  *
7899  * Description: This function restores the saved PCI config space of
7900  * the adapter, fails all outstanding ops back to the callers, and
7901  * fetches the dump/unit check if applicable to this reset.
7902  *
7903  * Return value:
7904  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7905  **/
7906 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7907 {
7908 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7909 
7910 	ENTER;
7911 	ioa_cfg->pdev->state_saved = true;
7912 	pci_restore_state(ioa_cfg->pdev);
7913 
7914 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7915 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7916 		return IPR_RC_JOB_CONTINUE;
7917 	}
7918 
7919 	ipr_fail_all_ops(ioa_cfg);
7920 
7921 	if (ioa_cfg->sis64) {
7922 		/* Set the adapter to the correct endian mode. */
7923 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7924 		readl(ioa_cfg->regs.endian_swap_reg);
7925 	}
7926 
7927 	if (ioa_cfg->ioa_unit_checked) {
7928 		if (ioa_cfg->sis64) {
7929 			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7930 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7931 			return IPR_RC_JOB_RETURN;
7932 		} else {
7933 			ioa_cfg->ioa_unit_checked = 0;
7934 			ipr_get_unit_check_buffer(ioa_cfg);
7935 			ipr_cmd->job_step = ipr_reset_alert;
7936 			ipr_reset_start_timer(ipr_cmd, 0);
7937 			return IPR_RC_JOB_RETURN;
7938 		}
7939 	}
7940 
7941 	if (ioa_cfg->in_ioa_bringdown) {
7942 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7943 	} else if (ioa_cfg->sdt_state == GET_DUMP) {
7944 		ipr_cmd->job_step = ipr_dump_mailbox_wait;
7945 		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
7946 	} else {
7947 		ipr_cmd->job_step = ipr_reset_enable_ioa;
7948 	}
7949 
7950 	LEAVE;
7951 	return IPR_RC_JOB_CONTINUE;
7952 }
7953 
7954 /**
7955  * ipr_reset_bist_done - BIST has completed on the adapter.
7956  * @ipr_cmd:	ipr command struct
7957  *
7958  * Description: Unblock config space and resume the reset process.
7959  *
7960  * Return value:
7961  * 	IPR_RC_JOB_CONTINUE
7962  **/
7963 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7964 {
7965 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7966 
7967 	ENTER;
7968 	if (ioa_cfg->cfg_locked)
7969 		pci_cfg_access_unlock(ioa_cfg->pdev);
7970 	ioa_cfg->cfg_locked = 0;
7971 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7972 	LEAVE;
7973 	return IPR_RC_JOB_CONTINUE;
7974 }
7975 
7976 /**
7977  * ipr_reset_start_bist - Run BIST on the adapter.
7978  * @ipr_cmd:	ipr command struct
7979  *
7980  * Description: This function runs BIST on the adapter, then delays 2 seconds.
7981  *
7982  * Return value:
7983  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7984  **/
7985 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7986 {
7987 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7988 	int rc = PCIBIOS_SUCCESSFUL;
7989 
7990 	ENTER;
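	/* SIS-64 adapters start BIST through an MMIO doorbell write;
	 * older adapters use the PCI config space BIST register.
	 */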
7991 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7992 		writel(IPR_UPROCI_SIS64_START_BIST,
7993 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7994 	else
7995 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7996 
7997 	if (rc == PCIBIOS_SUCCESSFUL) {
7998 		ipr_cmd->job_step = ipr_reset_bist_done;
7999 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8000 		rc = IPR_RC_JOB_RETURN;
8001 	} else {
8002 		if (ioa_cfg->cfg_locked)
8003 			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8004 		ioa_cfg->cfg_locked = 0;
8005 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8006 		rc = IPR_RC_JOB_CONTINUE;
8007 	}
8008 
8009 	LEAVE;
8010 	return rc;
8011 }
8012 
8013 /**
8014  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8015  * @ipr_cmd:	ipr command struct
8016  *
8017  * Description: PCI reset to the adapter has been cleared; this delays two seconds before resuming the reset job.
8018  *
8019  * Return value:
8020  * 	IPR_RC_JOB_RETURN
8021  **/
8022 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8023 {
8024 	ENTER;
8025 	ipr_cmd->job_step = ipr_reset_bist_done;
8026 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8027 	LEAVE;
8028 	return IPR_RC_JOB_RETURN;
8029 }
8030 
8031 /**
8032  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8033  * @work:	work struct
8034  *
8035  * Description: This pulses a warm reset to the slot.
8036  *
8037  **/
8038 static void ipr_reset_reset_work(struct work_struct *work)
8039 {
8040 	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8041 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8042 	struct pci_dev *pdev = ioa_cfg->pdev;
8043 	unsigned long lock_flags = 0;
8044 
8045 	ENTER;
8046 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8047 	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8048 	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8049 
8050 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8051 	if (ioa_cfg->reset_cmd == ipr_cmd)
8052 		ipr_reset_ioa_job(ipr_cmd);
8053 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8054 	LEAVE;
8055 }
8056 
8057 /**
8058  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8059  * @ipr_cmd:	ipr command struct
8060  *
8061  * Description: This schedules a work item that pulses PCI reset to the adapter.
8062  *
8063  * Return value:
8064  * 	IPR_RC_JOB_RETURN
8065  **/
8066 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8067 {
8068 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8069 
8070 	ENTER;
8071 	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8072 	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8073 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
8074 	LEAVE;
8075 	return IPR_RC_JOB_RETURN;
8076 }
8077 
8078 /**
8079  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8080  * @ipr_cmd:	ipr command struct
8081  *
8082  * Description: This attempts to block config access to the IOA.
8083  *
8084  * Return value:
8085  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8086  **/
8087 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8088 {
8089 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8090 	int rc = IPR_RC_JOB_CONTINUE;
8091 
8092 	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8093 		ioa_cfg->cfg_locked = 1;
8094 		ipr_cmd->job_step = ioa_cfg->reset;
8095 	} else {
8096 		if (ipr_cmd->u.time_left) {
8097 			rc = IPR_RC_JOB_RETURN;
8098 			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8099 			ipr_reset_start_timer(ipr_cmd,
8100 					      IPR_CHECK_FOR_RESET_TIMEOUT);
8101 		} else {
8102 			ipr_cmd->job_step = ioa_cfg->reset;
8103 			dev_err(&ioa_cfg->pdev->dev,
8104 				"Timed out waiting to lock config access. Resetting anyway.\n");
8105 		}
8106 	}
8107 
8108 	return rc;
8109 }
8110 
8111 /**
8112  * ipr_reset_block_config_access - Block config access to the IOA
8113  * @ipr_cmd:	ipr command struct
8114  *
8115  * Description: This attempts to block config access to the IOA
8116  *
8117  * Return value:
8118  * 	IPR_RC_JOB_CONTINUE
8119  **/
8120 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8121 {
8122 	ipr_cmd->ioa_cfg->cfg_locked = 0;
8123 	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8124 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8125 	return IPR_RC_JOB_CONTINUE;
8126 }
8127 
8128 /**
8129  * ipr_reset_allowed - Query whether or not IOA can be reset
8130  * @ioa_cfg:	ioa config struct
8131  *
8132  * Return value:
8133  * 	0 if reset not allowed / non-zero if reset is allowed
8134  **/
8135 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8136 {
8137 	volatile u32 temp_reg;
8138 
8139 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8140 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8141 }
8142 
8143 /**
8144  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8145  * @ipr_cmd:	ipr command struct
8146  *
8147  * Description: This function waits for adapter permission to run BIST,
8148  * then runs BIST. If the adapter does not give permission after a
8149  * reasonable time, we will reset the adapter anyway. The impact of
8150  * resetting the adapter without warning it is the risk of losing the
8151  * persistent error log on the adapter. If the adapter is reset while
8152  * it is writing to its flash, the flash segment will have bad ECC
8153  * and be zeroed.
8154  *
8155  * Return value:
8156  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8157  **/
8158 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8159 {
8160 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8161 	int rc = IPR_RC_JOB_RETURN;
8162 
8163 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8164 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8165 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8166 	} else {
8167 		ipr_cmd->job_step = ipr_reset_block_config_access;
8168 		rc = IPR_RC_JOB_CONTINUE;
8169 	}
8170 
8171 	return rc;
8172 }
8173 
8174 /**
8175  * ipr_reset_alert - Alert the adapter of a pending reset
8176  * @ipr_cmd:	ipr command struct
8177  *
8178  * Description: This function alerts the adapter that it will be reset.
8179  * If memory space is not currently enabled, proceed directly
8180  * to running BIST on the adapter. The timer must always be started
8181  * so we guarantee we do not run BIST from ipr_isr.
8182  *
8183  * Return value:
8184  * 	IPR_RC_JOB_RETURN
8185  **/
8186 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8187 {
8188 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8189 	u16 cmd_reg;
8190 	int rc;
8191 
8192 	ENTER;
8193 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8194 
8195 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8196 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8197 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8198 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8199 	} else {
8200 		ipr_cmd->job_step = ipr_reset_block_config_access;
8201 	}
8202 
8203 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8204 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8205 
8206 	LEAVE;
8207 	return IPR_RC_JOB_RETURN;
8208 }
8209 
8210 /**
8211  * ipr_reset_quiesce_done - Complete IOA disconnect
8212  * @ipr_cmd:	ipr command struct
8213  *
8214  * Description: Freeze the adapter to complete quiesce processing
8215  *
8216  * Return value:
8217  * 	IPR_RC_JOB_CONTINUE
8218  **/
8219 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8220 {
8221 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8222 
8223 	ENTER;
8224 	ipr_cmd->job_step = ipr_ioa_bringdown_done;
8225 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8226 	LEAVE;
8227 	return IPR_RC_JOB_CONTINUE;
8228 }
8229 
8230 /**
8231  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8232  * @ipr_cmd:	ipr command struct
8233  *
8234  * Description: Ensure nothing is outstanding to the IOA and
8235  * proceed with IOA disconnect. Otherwise reset the IOA.
8236  *
8237  * Return value:
8238  * 	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8239  **/
8240 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8241 {
8242 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8243 	struct ipr_cmnd *loop_cmd;
8244 	struct ipr_hrr_queue *hrrq;
8245 	int rc = IPR_RC_JOB_CONTINUE;
8246 	int count = 0;
8247 
8248 	ENTER;
8249 	ipr_cmd->job_step = ipr_reset_quiesce_done;
8250 
8251 	for_each_hrrq(hrrq, ioa_cfg) {
8252 		spin_lock(&hrrq->_lock);
8253 		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8254 			count++;
8255 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8256 			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8257 			rc = IPR_RC_JOB_RETURN;
8258 			break;
8259 		}
8260 		spin_unlock(&hrrq->_lock);
8261 
8262 		if (count)
8263 			break;
8264 	}
8265 
8266 	LEAVE;
8267 	return rc;
8268 }
8269 
8270 /**
8271  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8272  * @ipr_cmd:	ipr command struct
8273  *
8274  * Description: Cancel any outstanding HCAMs to the IOA.
8275  *
8276  * Return value:
8277  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8278  **/
8279 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8280 {
8281 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8282 	int rc = IPR_RC_JOB_CONTINUE;
8283 	struct ipr_cmd_pkt *cmd_pkt;
8284 	struct ipr_cmnd *hcam_cmd;
8285 	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8286 
8287 	ENTER;
8288 	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8289 
8290 	if (!hrrq->ioa_is_dead) {
8291 		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8292 			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8293 				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8294 					continue;
8295 
8296 				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8297 				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8298 				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8299 				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8300 				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8301 				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
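				/* The CDB carries the 64-bit address of the
				 * HCAM's IOARCB: bytes 10-13 hold the high 32
				 * bits, bytes 2-5 the low 32 bits.
				 */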
8302 				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8303 				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8304 				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8305 				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8306 				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8307 				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8308 				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8309 				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8310 
8311 				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8312 					   IPR_CANCEL_TIMEOUT);
8313 
8314 				rc = IPR_RC_JOB_RETURN;
8315 				ipr_cmd->job_step = ipr_reset_cancel_hcam;
8316 				break;
8317 			}
8318 		}
8319 	} else
8320 		ipr_cmd->job_step = ipr_reset_alert;
8321 
8322 	LEAVE;
8323 	return rc;
8324 }
8325 
8326 /**
8327  * ipr_reset_ucode_download_done - Microcode download completion
8328  * @ipr_cmd:	ipr command struct
8329  *
8330  * Description: This function unmaps the microcode download buffer.
8331  *
8332  * Return value:
8333  * 	IPR_RC_JOB_CONTINUE
8334  **/
8335 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8336 {
8337 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8338 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8339 
8340 	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8341 		     sglist->num_sg, DMA_TO_DEVICE);
8342 
8343 	ipr_cmd->job_step = ipr_reset_alert;
8344 	return IPR_RC_JOB_CONTINUE;
8345 }
8346 
8347 /**
8348  * ipr_reset_ucode_download - Download microcode to the adapter
8349  * @ipr_cmd:	ipr command struct
8350  *
8351  * Description: This function checks to see if there is microcode
8352  * to download to the adapter. If there is, a download is performed.
8353  *
8354  * Return value:
8355  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8356  **/
8357 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8358 {
8359 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8360 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8361 
8362 	ENTER;
8363 	ipr_cmd->job_step = ipr_reset_alert;
8364 
8365 	if (!sglist)
8366 		return IPR_RC_JOB_CONTINUE;
8367 
8368 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8369 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8370 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8371 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8372 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8373 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8374 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8375 
8376 	if (ioa_cfg->sis64)
8377 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8378 	else
8379 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
8380 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
8381 
8382 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8383 		   IPR_WRITE_BUFFER_TIMEOUT);
8384 
8385 	LEAVE;
8386 	return IPR_RC_JOB_RETURN;
8387 }
8388 
8389 /**
8390  * ipr_reset_shutdown_ioa - Shutdown the adapter
8391  * @ipr_cmd:	ipr command struct
8392  *
8393  * Description: This function issues an adapter shutdown of the
8394  * specified type to the specified adapter as part of the
8395  * adapter reset job.
8396  *
8397  * Return value:
8398  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8399  **/
8400 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8401 {
8402 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8403 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8404 	unsigned long timeout;
8405 	int rc = IPR_RC_JOB_CONTINUE;
8406 
8407 	ENTER;
8408 	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8409 		ipr_cmd->job_step = ipr_reset_cancel_hcam;
8410 	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8411 			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8412 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8413 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8414 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8415 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8416 
8417 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8418 			timeout = IPR_SHUTDOWN_TIMEOUT;
8419 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8420 			timeout = IPR_INTERNAL_TIMEOUT;
8421 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8422 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8423 		else
8424 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8425 
8426 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8427 
8428 		rc = IPR_RC_JOB_RETURN;
8429 		ipr_cmd->job_step = ipr_reset_ucode_download;
8430 	} else
8431 		ipr_cmd->job_step = ipr_reset_alert;
8432 
8433 	LEAVE;
8434 	return rc;
8435 }
8436 
8437 /**
8438  * ipr_reset_ioa_job - Adapter reset job
8439  * @ipr_cmd:	ipr command struct
8440  *
8441  * Description: This function is the job router for the adapter reset job.
8442  *
8443  * Return value:
8444  * 	none
8445  **/
8446 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8447 {
8448 	u32 rc, ioasc;
8449 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8450 
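	/* Each job step returns IPR_RC_JOB_CONTINUE to run the next step
	 * immediately, or IPR_RC_JOB_RETURN when it has queued asynchronous
	 * work and the job will be re-entered on completion.
	 */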
8451 	do {
8452 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8453 
8454 		if (ioa_cfg->reset_cmd != ipr_cmd) {
8455 			/*
8456 			 * We are doing nested adapter resets and this is
8457 			 * not the current reset job.
8458 			 */
8459 			list_add_tail(&ipr_cmd->queue,
8460 					&ipr_cmd->hrrq->hrrq_free_q);
8461 			return;
8462 		}
8463 
8464 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8465 			rc = ipr_cmd->job_step_failed(ipr_cmd);
8466 			if (rc == IPR_RC_JOB_RETURN)
8467 				return;
8468 		}
8469 
8470 		ipr_reinit_ipr_cmnd(ipr_cmd);
8471 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8472 		rc = ipr_cmd->job_step(ipr_cmd);
8473 	} while (rc == IPR_RC_JOB_CONTINUE);
8474 }
8475 
8476 /**
8477  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8478  * @ioa_cfg:		ioa config struct
8479  * @job_step:		first job step of reset job
8480  * @shutdown_type:	shutdown type
8481  *
8482  * Description: This function will initiate the reset of the given adapter
8483  * starting at the selected job step.
8484  * If the caller needs to wait on the completion of the reset,
8485  * the caller must sleep on the reset_wait_q.
8486  *
8487  * Return value:
8488  * 	none
8489  **/
8490 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8491 				    int (*job_step) (struct ipr_cmnd *),
8492 				    enum ipr_shutdown_type shutdown_type)
8493 {
8494 	struct ipr_cmnd *ipr_cmd;
8495 	int i;
8496 
8497 	ioa_cfg->in_reset_reload = 1;
8498 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8499 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8500 		ioa_cfg->hrrq[i].allow_cmds = 0;
8501 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8502 	}
8503 	wmb();
8504 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8505 		ioa_cfg->scsi_unblock = 0;
8506 		ioa_cfg->scsi_blocked = 1;
8507 		scsi_block_requests(ioa_cfg->host);
8508 	}
8509 
8510 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8511 	ioa_cfg->reset_cmd = ipr_cmd;
8512 	ipr_cmd->job_step = job_step;
8513 	ipr_cmd->u.shutdown_type = shutdown_type;
8514 
8515 	ipr_reset_ioa_job(ipr_cmd);
8516 }
8517 
8518 /**
8519  * ipr_initiate_ioa_reset - Initiate an adapter reset
8520  * @ioa_cfg:		ioa config struct
8521  * @shutdown_type:	shutdown type
8522  *
8523  * Description: This function will initiate the reset of the given adapter.
8524  * If the caller needs to wait on the completion of the reset,
8525  * the caller must sleep on the reset_wait_q.
8526  *
8527  * Return value:
8528  * 	none
8529  **/
8530 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8531 				   enum ipr_shutdown_type shutdown_type)
8532 {
8533 	int i;
8534 
8535 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8536 		return;
8537 
8538 	if (ioa_cfg->in_reset_reload) {
8539 		if (ioa_cfg->sdt_state == GET_DUMP)
8540 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8541 		else if (ioa_cfg->sdt_state == READ_DUMP)
8542 			ioa_cfg->sdt_state = ABORT_DUMP;
8543 	}
8544 
8545 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8546 		dev_err(&ioa_cfg->pdev->dev,
8547 			"IOA taken offline - error recovery failed\n");
8548 
8549 		ioa_cfg->reset_retries = 0;
8550 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8551 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8552 			ioa_cfg->hrrq[i].ioa_is_dead = 1;
8553 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8554 		}
8555 		wmb();
8556 
8557 		if (ioa_cfg->in_ioa_bringdown) {
8558 			ioa_cfg->reset_cmd = NULL;
8559 			ioa_cfg->in_reset_reload = 0;
8560 			ipr_fail_all_ops(ioa_cfg);
8561 			wake_up_all(&ioa_cfg->reset_wait_q);
8562 
8563 			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8564 				ioa_cfg->scsi_unblock = 1;
8565 				schedule_work(&ioa_cfg->work_q);
8566 			}
8567 			return;
8568 		} else {
8569 			ioa_cfg->in_ioa_bringdown = 1;
8570 			shutdown_type = IPR_SHUTDOWN_NONE;
8571 		}
8572 	}
8573 
8574 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8575 				shutdown_type);
8576 }
8577 
8578 /**
8579  * ipr_reset_freeze - Hold off all I/O activity
8580  * @ipr_cmd:	ipr command struct
8581  *
8582  * Description: If the PCI slot is frozen, hold off all I/O
8583  * activity; then, as soon as the slot is available again,
8584  * initiate an adapter reset.
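 *
 * Return value:
 * 	IPR_RC_JOB_RETURN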
8585  */
8586 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8587 {
8588 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8589 	int i;
8590 
8591 	/* Disallow new interrupts to avoid an interrupt loop */
8592 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8593 		spin_lock(&ioa_cfg->hrrq[i]._lock);
8594 		ioa_cfg->hrrq[i].allow_interrupts = 0;
8595 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8596 	}
8597 	wmb();
8598 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8599 	ipr_cmd->done = ipr_reset_ioa_job;
8600 	return IPR_RC_JOB_RETURN;
8601 }
8602 
8603 /**
8604  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8605  * @pdev:	PCI device struct
8606  *
8607  * Description: This routine is called to tell us that the MMIO
8608  * access to the IOA has been restored.
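 *
 * Return value:
 * 	PCI_ERS_RESULT_NEED_RESET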
8609  */
8610 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8611 {
8612 	unsigned long flags = 0;
8613 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8614 
8615 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8616 	if (!ioa_cfg->probe_done)
8617 		pci_save_state(pdev);
8618 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8619 	return PCI_ERS_RESULT_NEED_RESET;
8620 }
8621 
8622 /**
8623  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8624  * @pdev:	PCI device struct
8625  *
8626  * Description: This routine is called to tell us that the PCI bus
8627  * is down. Can't do anything here, except put the device driver
8628  * into a holding pattern, waiting for the PCI bus to come back.
8629  */
8630 static void ipr_pci_frozen(struct pci_dev *pdev)
8631 {
8632 	unsigned long flags = 0;
8633 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8634 
8635 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8636 	if (ioa_cfg->probe_done)
8637 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8638 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8639 }
8640 
8641 /**
8642  * ipr_pci_slot_reset - Called when PCI slot has been reset.
8643  * @pdev:	PCI device struct
8644  *
8645  * Description: This routine is called by the PCI error recovery
8646  * code after the PCI slot has been reset, just before we
8647  * should resume normal operations.
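 *
 * Return value:
 * 	PCI_ERS_RESULT_RECOVERED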
8648  */
8649 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8650 {
8651 	unsigned long flags = 0;
8652 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8653 
8654 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8655 	if (ioa_cfg->probe_done) {
8656 		if (ioa_cfg->needs_warm_reset)
8657 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8658 		else
8659 			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8660 						IPR_SHUTDOWN_NONE);
8661 	} else
8662 		wake_up_all(&ioa_cfg->eeh_wait_q);
8663 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8664 	return PCI_ERS_RESULT_RECOVERED;
8665 }
8666 
8667 /**
8668  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8669  * @pdev:	PCI device struct
8670  *
8671  * Description: This routine is called when the PCI bus has
8672  * permanently failed.
8673  */
8674 static void ipr_pci_perm_failure(struct pci_dev *pdev)
8675 {
8676 	unsigned long flags = 0;
8677 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8678 	int i;
8679 
8680 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8681 	if (ioa_cfg->probe_done) {
8682 		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8683 			ioa_cfg->sdt_state = ABORT_DUMP;
8684 		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8685 		ioa_cfg->in_ioa_bringdown = 1;
8686 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8687 			spin_lock(&ioa_cfg->hrrq[i]._lock);
8688 			ioa_cfg->hrrq[i].allow_cmds = 0;
8689 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8690 		}
8691 		wmb();
8692 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8693 	} else
8694 		wake_up_all(&ioa_cfg->eeh_wait_q);
8695 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8696 }
8697 
8698 /**
8699  * ipr_pci_error_detected - Called when a PCI error is detected.
8700  * @pdev:	PCI device struct
8701  * @state:	PCI channel state
8702  *
8703  * Description: Called when a PCI error is detected.
8704  *
8705  * Return value:
8706  * 	PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET, or PCI_ERS_RESULT_DISCONNECT
8707  */
8708 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8709 					       pci_channel_state_t state)
8710 {
8711 	switch (state) {
8712 	case pci_channel_io_frozen:
8713 		ipr_pci_frozen(pdev);
8714 		return PCI_ERS_RESULT_CAN_RECOVER;
8715 	case pci_channel_io_perm_failure:
8716 		ipr_pci_perm_failure(pdev);
8717 		return PCI_ERS_RESULT_DISCONNECT;
8718 	default:
8719 		break;
8720 	}
8721 	return PCI_ERS_RESULT_NEED_RESET;
8722 }
8723 
8724 /**
8725  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8726  * @ioa_cfg:	ioa cfg struct
8727  *
8728  * Description: This is the second phase of adapter initialization.
8729  * This function takes care of initializing the adapter to the point
8730  * where it can accept new commands.
8731  * Return value:
8732  *     none
8733  **/
8734 static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8735 {
8736 	unsigned long host_lock_flags = 0;
8737 
8738 	ENTER;
8739 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8740 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
8741 	ioa_cfg->probe_done = 1;
8742 	if (ioa_cfg->needs_hard_reset) {
8743 		ioa_cfg->needs_hard_reset = 0;
8744 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8745 	} else
8746 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8747 					IPR_SHUTDOWN_NONE);
8748 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8749 
8750 	LEAVE;
8751 }
8752 
8753 /**
8754  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8755  * @ioa_cfg:	ioa config struct
8756  *
8757  * Return value:
8758  * 	none
8759  **/
8760 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8761 {
8762 	int i;
8763 
8764 	if (ioa_cfg->ipr_cmnd_list) {
8765 		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8766 			if (ioa_cfg->ipr_cmnd_list[i])
8767 				dma_pool_free(ioa_cfg->ipr_cmd_pool,
8768 					      ioa_cfg->ipr_cmnd_list[i],
8769 					      ioa_cfg->ipr_cmnd_list_dma[i]);
8770 
8771 			ioa_cfg->ipr_cmnd_list[i] = NULL;
8772 		}
8773 	}
8774 
8775 	dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8776 
8777 	kfree(ioa_cfg->ipr_cmnd_list);
8778 	kfree(ioa_cfg->ipr_cmnd_list_dma);
8779 	ioa_cfg->ipr_cmnd_list = NULL;
8780 	ioa_cfg->ipr_cmnd_list_dma = NULL;
8781 	ioa_cfg->ipr_cmd_pool = NULL;
8782 }
8783 
8784 /**
8785  * ipr_free_mem - Frees memory allocated for an adapter
8786  * @ioa_cfg:	ioa cfg struct
8787  *
8788  * Return value:
8789  * 	nothing
8790  **/
8791 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8792 {
8793 	int i;
8794 
8795 	kfree(ioa_cfg->res_entries);
8796 	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8797 			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8798 	ipr_free_cmd_blks(ioa_cfg);
8799 
8800 	for (i = 0; i < ioa_cfg->hrrq_num; i++)
8801 		dma_free_coherent(&ioa_cfg->pdev->dev,
8802 				  sizeof(u32) * ioa_cfg->hrrq[i].size,
8803 				  ioa_cfg->hrrq[i].host_rrq,
8804 				  ioa_cfg->hrrq[i].host_rrq_dma);
8805 
8806 	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8807 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8808 
8809 	for (i = 0; i < IPR_MAX_HCAMS; i++) {
8810 		dma_free_coherent(&ioa_cfg->pdev->dev,
8811 				  sizeof(struct ipr_hostrcb),
8812 				  ioa_cfg->hostrcb[i],
8813 				  ioa_cfg->hostrcb_dma[i]);
8814 	}
8815 
8816 	ipr_free_dump(ioa_cfg);
8817 	kfree(ioa_cfg->trace);
8818 }
8819 
8820 /**
8821  * ipr_free_irqs - Free all allocated IRQs for the adapter.
8822  * @ioa_cfg:	ipr cfg struct
8823  *
8824  * This function frees all allocated IRQs for the
8825  * specified adapter.
8826  *
8827  * Return value:
8828  * 	none
8829  **/
8830 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
8831 {
8832 	struct pci_dev *pdev = ioa_cfg->pdev;
8833 	int i;
8834 
8835 	for (i = 0; i < ioa_cfg->nvectors; i++)
8836 		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
8837 	pci_free_irq_vectors(pdev);
8838 }
8839 
8840 /**
8841  * ipr_free_all_resources - Free all allocated resources for an adapter.
8842  * @ioa_cfg:	ioa config struct
8843  *
8844  * This function frees all allocated resources for the
8845  * specified adapter.
8846  *
8847  * Return value:
8848  * 	none
8849  **/
8850 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8851 {
8852 	struct pci_dev *pdev = ioa_cfg->pdev;
8853 
8854 	ENTER;
8855 	ipr_free_irqs(ioa_cfg);
8856 	if (ioa_cfg->reset_work_q)
8857 		destroy_workqueue(ioa_cfg->reset_work_q);
8858 	iounmap(ioa_cfg->hdw_dma_regs);
8859 	pci_release_regions(pdev);
8860 	ipr_free_mem(ioa_cfg);
8861 	scsi_host_put(ioa_cfg->host);
8862 	pci_disable_device(pdev);
8863 	LEAVE;
8864 }
8865 
8866 /**
8867  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8868  * @ioa_cfg:	ioa config struct
8869  *
8870  * Return value:
8871  * 	0 on success / -ENOMEM on allocation failure
8872  **/
8873 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8874 {
8875 	struct ipr_cmnd *ipr_cmd;
8876 	struct ipr_ioarcb *ioarcb;
8877 	dma_addr_t dma_addr;
8878 	int i, entries_each_hrrq, hrrq_id = 0;
8879 
8880 	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8881 						sizeof(struct ipr_cmnd), 512, 0);
8882 
8883 	if (!ioa_cfg->ipr_cmd_pool)
8884 		return -ENOMEM;
8885 
8886 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8887 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8888 
8889 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8890 		ipr_free_cmd_blks(ioa_cfg);
8891 		return -ENOMEM;
8892 	}
8893 
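	/*
	 * Partition the command blocks among the HRRQs. With multiple HRRQs,
	 * HRRQ 0 is reserved for internal/initialization commands and the
	 * remaining blocks are split evenly across the other queues; with a
	 * single HRRQ it owns every command block.
	 */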
8894 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8895 		if (ioa_cfg->hrrq_num > 1) {
8896 			if (i == 0) {
8897 				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8898 				ioa_cfg->hrrq[i].min_cmd_id = 0;
8899 				ioa_cfg->hrrq[i].max_cmd_id =
8900 					(entries_each_hrrq - 1);
8901 			} else {
8902 				entries_each_hrrq =
8903 					IPR_NUM_BASE_CMD_BLKS/
8904 					(ioa_cfg->hrrq_num - 1);
8905 				ioa_cfg->hrrq[i].min_cmd_id =
8906 					IPR_NUM_INTERNAL_CMD_BLKS +
8907 					(i - 1) * entries_each_hrrq;
8908 				ioa_cfg->hrrq[i].max_cmd_id =
8909 					(IPR_NUM_INTERNAL_CMD_BLKS +
8910 					i * entries_each_hrrq - 1);
8911 			}
8912 		} else {
8913 			entries_each_hrrq = IPR_NUM_CMD_BLKS;
8914 			ioa_cfg->hrrq[i].min_cmd_id = 0;
8915 			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8916 		}
8917 		ioa_cfg->hrrq[i].size = entries_each_hrrq;
8918 	}
8919 
8920 	BUG_ON(ioa_cfg->hrrq_num == 0);
8921 
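	/* Hand any remainder left over from the even split to the last HRRQ. */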
8922 	i = IPR_NUM_CMD_BLKS -
8923 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8924 	if (i > 0) {
8925 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8926 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8927 	}
8928 
8929 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8930 		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
8931 				GFP_KERNEL, &dma_addr);
8932 
8933 		if (!ipr_cmd) {
8934 			ipr_free_cmd_blks(ioa_cfg);
8935 			return -ENOMEM;
8936 		}
8937 
8938 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8939 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8940 
8941 		ioarcb = &ipr_cmd->ioarcb;
8942 		ipr_cmd->dma_addr = dma_addr;
8943 		if (ioa_cfg->sis64)
8944 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8945 		else
8946 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8947 
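		/*
		 * The command index is carried in the upper bits of the response
		 * handle; the low two bits are left clear for the HRRQ toggle and
		 * response-type flags returned in the host RRQ.
		 */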
8948 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8949 		if (ioa_cfg->sis64) {
8950 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8951 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8952 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8953 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8954 		} else {
8955 			ioarcb->write_ioadl_addr =
8956 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8957 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8958 			ioarcb->ioasa_host_pci_addr =
8959 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8960 		}
8961 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8962 		ipr_cmd->cmd_index = i;
8963 		ipr_cmd->ioa_cfg = ioa_cfg;
8964 		ipr_cmd->sense_buffer_dma = dma_addr +
8965 			offsetof(struct ipr_cmnd, sense_buffer);
8966 
8967 		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8968 		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8969 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8970 		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8971 			hrrq_id++;
8972 	}
8973 
8974 	return 0;
8975 }
8976 
8977 /**
8978  * ipr_alloc_mem - Allocate memory for an adapter
8979  * @ioa_cfg:	ioa config struct
8980  *
8981  * Return value:
8982  * 	0 on success / non-zero for error
8983  **/
8984 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8985 {
8986 	struct pci_dev *pdev = ioa_cfg->pdev;
8987 	int i, rc = -ENOMEM;
8988 
8989 	ENTER;
8990 	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
8991 				       sizeof(struct ipr_resource_entry),
8992 				       GFP_KERNEL);
8993 
8994 	if (!ioa_cfg->res_entries)
8995 		goto out;
8996 
8997 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8998 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8999 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9000 	}
9001 
9002 	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9003 					      sizeof(struct ipr_misc_cbs),
9004 					      &ioa_cfg->vpd_cbs_dma,
9005 					      GFP_KERNEL);
9006 
9007 	if (!ioa_cfg->vpd_cbs)
9008 		goto out_free_res_entries;
9009 
9010 	if (ipr_alloc_cmd_blks(ioa_cfg))
9011 		goto out_free_vpd_cbs;
9012 
9013 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9014 		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9015 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9016 					&ioa_cfg->hrrq[i].host_rrq_dma,
9017 					GFP_KERNEL);
9018 
9019 		if (!ioa_cfg->hrrq[i].host_rrq)  {
9020 			while (--i >= 0)
9021 				dma_free_coherent(&pdev->dev,
9022 					sizeof(u32) * ioa_cfg->hrrq[i].size,
9023 					ioa_cfg->hrrq[i].host_rrq,
9024 					ioa_cfg->hrrq[i].host_rrq_dma);
9025 			goto out_ipr_free_cmd_blocks;
9026 		}
9027 		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9028 	}
9029 
9030 	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9031 						  ioa_cfg->cfg_table_size,
9032 						  &ioa_cfg->cfg_table_dma,
9033 						  GFP_KERNEL);
9034 
9035 	if (!ioa_cfg->u.cfg_table)
9036 		goto out_free_host_rrq;
9037 
9038 	for (i = 0; i < IPR_MAX_HCAMS; i++) {
9039 		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9040 							 sizeof(struct ipr_hostrcb),
9041 							 &ioa_cfg->hostrcb_dma[i],
9042 							 GFP_KERNEL);
9043 
9044 		if (!ioa_cfg->hostrcb[i])
9045 			goto out_free_hostrcb_dma;
9046 
9047 		ioa_cfg->hostrcb[i]->hostrcb_dma =
9048 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9049 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9050 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9051 	}
9052 
9053 	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9054 				 sizeof(struct ipr_trace_entry),
9055 				 GFP_KERNEL);
9056 
9057 	if (!ioa_cfg->trace)
9058 		goto out_free_hostrcb_dma;
9059 
9060 	rc = 0;
9061 out:
9062 	LEAVE;
9063 	return rc;
9064 
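/* Error unwind: free everything allocated so far, in reverse order. */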
9065 out_free_hostrcb_dma:
9066 	while (i-- > 0) {
9067 		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9068 				  ioa_cfg->hostrcb[i],
9069 				  ioa_cfg->hostrcb_dma[i]);
9070 	}
9071 	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9072 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9073 out_free_host_rrq:
9074 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9075 		dma_free_coherent(&pdev->dev,
9076 				  sizeof(u32) * ioa_cfg->hrrq[i].size,
9077 				  ioa_cfg->hrrq[i].host_rrq,
9078 				  ioa_cfg->hrrq[i].host_rrq_dma);
9079 	}
9080 out_ipr_free_cmd_blocks:
9081 	ipr_free_cmd_blks(ioa_cfg);
9082 out_free_vpd_cbs:
9083 	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9084 			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9085 out_free_res_entries:
9086 	kfree(ioa_cfg->res_entries);
9087 	goto out;
9088 }
9089 
9090 /**
9091  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9092  * @ioa_cfg:	ioa config struct
9093  *
9094  * Return value:
9095  * 	none
9096  **/
9097 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9098 {
9099 	int i;
9100 
9101 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9102 		ioa_cfg->bus_attr[i].bus = i;
9103 		ioa_cfg->bus_attr[i].qas_enabled = 0;
9104 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9105 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9106 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9107 		else
9108 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9109 	}
9110 }
9111 
9112 /**
9113  * ipr_init_regs - Initialize IOA registers
9114  * @ioa_cfg:	ioa config struct
9115  *
9116  * Return value:
9117  *	none
9118  **/
9119 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9120 {
9121 	const struct ipr_interrupt_offsets *p;
9122 	struct ipr_interrupts *t;
9123 	void __iomem *base;
9124 
9125 	p = &ioa_cfg->chip_cfg->regs;
9126 	t = &ioa_cfg->regs;
9127 	base = ioa_cfg->hdw_dma_regs;
9128 
9129 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9130 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9131 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9132 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9133 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9134 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9135 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9136 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9137 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9138 	t->ioarrin_reg = base + p->ioarrin_reg;
9139 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9140 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9141 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9142 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9143 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9144 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9145 
9146 	if (ioa_cfg->sis64) {
9147 		t->init_feedback_reg = base + p->init_feedback_reg;
9148 		t->dump_addr_reg = base + p->dump_addr_reg;
9149 		t->dump_data_reg = base + p->dump_data_reg;
9150 		t->endian_swap_reg = base + p->endian_swap_reg;
9151 	}
9152 }
9153 
9154 /**
9155  * ipr_init_ioa_cfg - Initialize IOA config struct
9156  * @ioa_cfg:	ioa config struct
9157  * @host:		scsi host struct
9158  * @pdev:		PCI dev struct
9159  *
9160  * Return value:
9161  * 	none
9162  **/
9163 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9164 			     struct Scsi_Host *host, struct pci_dev *pdev)
9165 {
9166 	int i;
9167 
9168 	ioa_cfg->host = host;
9169 	ioa_cfg->pdev = pdev;
9170 	ioa_cfg->log_level = ipr_log_level;
9171 	ioa_cfg->doorbell = IPR_DOORBELL;
9172 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9173 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9174 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9175 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9176 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9177 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9178 
9179 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9180 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9181 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9182 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9183 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9184 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9185 	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9186 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
9187 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9188 	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9189 	ioa_cfg->sdt_state = INACTIVE;
9190 
9191 	ipr_initialize_bus_attr(ioa_cfg);
9192 	ioa_cfg->max_devs_supported = ipr_max_devs;
9193 
9194 	if (ioa_cfg->sis64) {
9195 		host->max_channel = IPR_MAX_SIS64_BUSES;
9196 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9197 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9198 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9199 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9200 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9201 					   + ((sizeof(struct ipr_config_table_entry64)
9202 					       * ioa_cfg->max_devs_supported)));
9203 	} else {
9204 		host->max_channel = IPR_VSET_BUS;
9205 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9206 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9207 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9208 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9209 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9210 					   + ((sizeof(struct ipr_config_table_entry)
9211 					       * ioa_cfg->max_devs_supported)));
9212 	}
9213 
9214 	host->unique_id = host->host_no;
9215 	host->max_cmd_len = IPR_MAX_CDB_LEN;
9216 	host->can_queue = ioa_cfg->max_cmds;
9217 	pci_set_drvdata(pdev, ioa_cfg);
9218 
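	/*
	 * HRRQ 0 shares the SCSI host lock; any additional HRRQs get their
	 * own per-queue lock.
	 */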
9219 	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9220 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9221 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9222 		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9223 		if (i == 0)
9224 			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9225 		else
9226 			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9227 	}
9228 }
9229 
9230 /**
9231  * ipr_get_chip_info - Find adapter chip information
9232  * @dev_id:		PCI device id struct
9233  *
9234  * Return value:
9235  * 	ptr to chip information on success / NULL on failure
9236  **/
9237 static const struct ipr_chip_t *
9238 ipr_get_chip_info(const struct pci_device_id *dev_id)
9239 {
9240 	int i;
9241 
9242 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9243 		if (ipr_chip[i].vendor == dev_id->vendor &&
9244 		    ipr_chip[i].device == dev_id->device)
9245 			return &ipr_chip[i];
9246 	return NULL;
9247 }
9248 
9249 /**
9250  * ipr_wait_for_pci_err_recovery - Wait for PCI error recovery to complete during probe
9251  *
9252  * @ioa_cfg:	ioa config struct
9253  *
9254  * Return value:
9255  * 	None
9256  **/
9257 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9258 {
9259 	struct pci_dev *pdev = ioa_cfg->pdev;
9260 
9261 	if (pci_channel_offline(pdev)) {
9262 		wait_event_timeout(ioa_cfg->eeh_wait_q,
9263 				   !pci_channel_offline(pdev),
9264 				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9265 		pci_restore_state(pdev);
9266 	}
9267 }
9268 
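/**
 * name_msi_vectors - Build the per-vector interrupt names
 * @ioa_cfg:	ioa config struct
 *
 * Fills in vectors_info[].desc with "host<no>-<vector>" strings which are
 * later passed to request_irq() to identify each MSI/MSI-X vector.
 *
 * Return value:
 * 	none
 **/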
9269 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9270 {
9271 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9272 
9273 	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9274 		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9275 			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9276 		ioa_cfg->vectors_info[vec_idx].
9277 			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9278 	}
9279 }
9280 
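/**
 * ipr_request_other_msi_irqs - Request IRQs for the additional HRRQ vectors
 * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
 *
 * Vector 0 is requested separately by the caller; this requests vectors
 * 1 through nvectors - 1 and releases any already-requested vectors on failure.
 *
 * Return value:
 * 	0 on success / non-zero on failure
 **/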
9281 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
9282 		struct pci_dev *pdev)
9283 {
9284 	int i, rc;
9285 
9286 	for (i = 1; i < ioa_cfg->nvectors; i++) {
9287 		rc = request_irq(pci_irq_vector(pdev, i),
9288 			ipr_isr_mhrrq,
9289 			0,
9290 			ioa_cfg->vectors_info[i].desc,
9291 			&ioa_cfg->hrrq[i]);
9292 		if (rc) {
9293 			while (--i > 0)
9294 				free_irq(pci_irq_vector(pdev, i),
9295 					&ioa_cfg->hrrq[i]);
9296 			return rc;
9297 		}
9298 	}
9299 	return 0;
9300 }
9301 
9302 /**
9303  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9304  * @devp:		PCI device struct
9305  * @irq:		IRQ number
9306  *
9307  * Description: Simply set the msi_received flag to 1 indicating that
9308  * Message Signaled Interrupts are supported.
9309  *
9310  * Return value:
9311  * 	IRQ_HANDLED
9312  **/
9313 static irqreturn_t ipr_test_intr(int irq, void *devp)
9314 {
9315 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9316 	unsigned long lock_flags = 0;
9317 
9318 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9319 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9320 
9321 	ioa_cfg->msi_received = 1;
9322 	wake_up(&ioa_cfg->msi_wait_q);
9323 
9324 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9325 	return IRQ_HANDLED;
9326 }
9327 
9328 /**
9329  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9330  * @ioa_cfg:		ioa config struct
9331  * @pdev:		PCI device struct
9332  *
9333  * Description: This routine sets up and initiates a test interrupt to determine
9334  * if the interrupt is received via the ipr_test_intr() service routine.
9335  * If the test fails, the driver will fall back to LSI.
9336  *
9337  * Return value:
9338  * 	0 on success / non-zero on failure
9339  **/
9340 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9341 {
9342 	int rc;
9343 	unsigned long lock_flags = 0;
9344 	int irq = pci_irq_vector(pdev, 0);
9345 
9346 	ENTER;
9347 
9348 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9349 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9350 	ioa_cfg->msi_received = 0;
9351 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9352 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9353 	readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9354 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9355 
9356 	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9357 	if (rc) {
9358 		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
9359 		return rc;
9360 	} else if (ipr_debug)
9361 		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
9362 
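	/* Generate a test interrupt and wait up to a second for it to arrive. */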
9363 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9364 	readl(ioa_cfg->regs.sense_interrupt_reg);
9365 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9366 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9367 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9368 
9369 	if (!ioa_cfg->msi_received) {
9370 		/* MSI test failed */
9371 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9372 		rc = -EOPNOTSUPP;
9373 	} else if (ipr_debug)
9374 		dev_info(&pdev->dev, "MSI test succeeded.\n");
9375 
9376 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9377 
9378 	free_irq(irq, ioa_cfg);
9379 
9380 	LEAVE;
9381 
9382 	return rc;
9383 }
9384 
9385 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9386  * @pdev:		PCI device struct
9387  * @dev_id:		PCI device id struct
9388  *
9389  * Return value:
9390  * 	0 on success / non-zero on failure
9391  **/
9392 static int ipr_probe_ioa(struct pci_dev *pdev,
9393 			 const struct pci_device_id *dev_id)
9394 {
9395 	struct ipr_ioa_cfg *ioa_cfg;
9396 	struct Scsi_Host *host;
9397 	unsigned long ipr_regs_pci;
9398 	void __iomem *ipr_regs;
9399 	int rc = PCIBIOS_SUCCESSFUL;
9400 	volatile u32 mask, uproc, interrupts;
9401 	unsigned long lock_flags, driver_lock_flags;
9402 	unsigned int irq_flag;
9403 
9404 	ENTER;
9405 
9406 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9407 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9408 
9409 	if (!host) {
9410 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9411 		rc = -ENOMEM;
9412 		goto out;
9413 	}
9414 
9415 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9416 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9417 
9418 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9419 
9420 	if (!ioa_cfg->ipr_chip) {
9421 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9422 			dev_id->vendor, dev_id->device);
9423 		rc = -ENODEV;
		goto out_scsi_host_put;
9424 	}
9425 
9426 	/* set SIS 32 or SIS 64 */
9427 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9428 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9429 	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9430 	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9431 
9432 	if (ipr_transop_timeout)
9433 		ioa_cfg->transop_timeout = ipr_transop_timeout;
9434 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9435 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9436 	else
9437 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9438 
9439 	ioa_cfg->revid = pdev->revision;
9440 
9441 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9442 
9443 	ipr_regs_pci = pci_resource_start(pdev, 0);
9444 
9445 	rc = pci_request_regions(pdev, IPR_NAME);
9446 	if (rc < 0) {
9447 		dev_err(&pdev->dev,
9448 			"Couldn't register memory range of registers\n");
9449 		goto out_scsi_host_put;
9450 	}
9451 
9452 	rc = pci_enable_device(pdev);
9453 
9454 	if (rc || pci_channel_offline(pdev)) {
9455 		if (pci_channel_offline(pdev)) {
9456 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9457 			rc = pci_enable_device(pdev);
9458 		}
9459 
9460 		if (rc) {
9461 			dev_err(&pdev->dev, "Cannot enable adapter\n");
9462 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9463 			goto out_release_regions;
9464 		}
9465 	}
9466 
9467 	ipr_regs = pci_ioremap_bar(pdev, 0);
9468 
9469 	if (!ipr_regs) {
9470 		dev_err(&pdev->dev,
9471 			"Couldn't map memory range of registers\n");
9472 		rc = -ENOMEM;
9473 		goto out_disable;
9474 	}
9475 
9476 	ioa_cfg->hdw_dma_regs = ipr_regs;
9477 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9478 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9479 
9480 	ipr_init_regs(ioa_cfg);
9481 
9482 	if (ioa_cfg->sis64) {
9483 		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9484 		if (rc < 0) {
9485 			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9486 			rc = dma_set_mask_and_coherent(&pdev->dev,
9487 						       DMA_BIT_MASK(32));
9488 		}
9489 	} else
9490 		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9491 
9492 	if (rc < 0) {
9493 		dev_err(&pdev->dev, "Failed to set DMA mask\n");
9494 		goto cleanup_nomem;
9495 	}
9496 
9497 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9498 				   ioa_cfg->chip_cfg->cache_line_size);
9499 
9500 	if (rc != PCIBIOS_SUCCESSFUL) {
9501 		dev_err(&pdev->dev, "Write of cache line size failed\n");
9502 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9503 		rc = -EIO;
9504 		goto cleanup_nomem;
9505 	}
9506 
9507 	/* Issue MMIO read to ensure card is not in EEH */
9508 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9509 	ipr_wait_for_pci_err_recovery(ioa_cfg);
9510 
9511 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9512 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9513 			IPR_MAX_MSIX_VECTORS);
9514 		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9515 	}
9516 
9517 	irq_flag = PCI_IRQ_LEGACY;
9518 	if (ioa_cfg->ipr_chip->has_msi)
9519 		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
9520 	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
9521 	if (rc < 0) {
9522 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9523 		goto cleanup_nomem;
9524 	}
9525 	ioa_cfg->nvectors = rc;
9526 
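	/* Without MSI/MSI-X we are on legacy INTx, so the ISR must clear interrupts itself. */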
9527 	if (!pdev->msi_enabled && !pdev->msix_enabled)
9528 		ioa_cfg->clear_isr = 1;
9529 
9530 	pci_set_master(pdev);
9531 
9532 	if (pci_channel_offline(pdev)) {
9533 		ipr_wait_for_pci_err_recovery(ioa_cfg);
9534 		pci_set_master(pdev);
9535 		if (pci_channel_offline(pdev)) {
9536 			rc = -EIO;
9537 			goto out_msi_disable;
9538 		}
9539 	}
9540 
9541 	if (pdev->msi_enabled || pdev->msix_enabled) {
9542 		rc = ipr_test_msi(ioa_cfg, pdev);
9543 		switch (rc) {
9544 		case 0:
9545 			dev_info(&pdev->dev,
9546 				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
9547 				pdev->msix_enabled ? "-X" : "");
9548 			break;
9549 		case -EOPNOTSUPP:
9550 			ipr_wait_for_pci_err_recovery(ioa_cfg);
9551 			pci_free_irq_vectors(pdev);
9552 
9553 			ioa_cfg->nvectors = 1;
9554 			ioa_cfg->clear_isr = 1;
9555 			break;
9556 		default:
9557 			goto out_msi_disable;
9558 		}
9559 	}
9560 
9561 	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9562 				(unsigned int)num_online_cpus(),
9563 				(unsigned int)IPR_MAX_HRRQ_NUM);
9564 
9565 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9566 		goto out_msi_disable;
9567 
9568 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9569 		goto out_msi_disable;
9570 
9571 	rc = ipr_alloc_mem(ioa_cfg);
9572 	if (rc < 0) {
9573 		dev_err(&pdev->dev,
9574 			"Couldn't allocate enough memory for device driver!\n");
9575 		goto out_msi_disable;
9576 	}
9577 
9578 	/* Save away PCI config space for use following IOA reset */
9579 	rc = pci_save_state(pdev);
9580 
9581 	if (rc != PCIBIOS_SUCCESSFUL) {
9582 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
9583 		rc = -EIO;
9584 		goto cleanup_nolog;
9585 	}
9586 
9587 	/*
9588 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
9589 	 * the card is in an unknown state and needs a hard reset
9590 	 */
9591 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9592 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9593 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9594 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9595 		ioa_cfg->needs_hard_reset = 1;
9596 	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9597 		ioa_cfg->needs_hard_reset = 1;
9598 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9599 		ioa_cfg->ioa_unit_checked = 1;
9600 
9601 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9602 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9603 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9604 
9605 	if (pdev->msi_enabled || pdev->msix_enabled) {
9606 		name_msi_vectors(ioa_cfg);
9607 		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
9608 			ioa_cfg->vectors_info[0].desc,
9609 			&ioa_cfg->hrrq[0]);
9610 		if (!rc)
9611 			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
9612 	} else {
9613 		rc = request_irq(pdev->irq, ipr_isr,
9614 			 IRQF_SHARED,
9615 			 IPR_NAME, &ioa_cfg->hrrq[0]);
9616 	}
9617 	if (rc) {
9618 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9619 			pdev->irq, rc);
9620 		goto cleanup_nolog;
9621 	}
9622 
9623 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9624 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9625 		ioa_cfg->needs_warm_reset = 1;
9626 		ioa_cfg->reset = ipr_reset_slot_reset;
9627 
9628 		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9629 								WQ_MEM_RECLAIM, host->host_no);
9630 
9631 		if (!ioa_cfg->reset_work_q) {
9632 			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
9633 			rc = -ENOMEM;
9634 			goto out_free_irq;
9635 		}
9636 	} else
9637 		ioa_cfg->reset = ipr_reset_start_bist;
9638 
9639 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9640 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9641 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9642 
9643 	LEAVE;
9644 out:
9645 	return rc;
9646 
9647 out_free_irq:
9648 	ipr_free_irqs(ioa_cfg);
9649 cleanup_nolog:
9650 	ipr_free_mem(ioa_cfg);
9651 out_msi_disable:
9652 	ipr_wait_for_pci_err_recovery(ioa_cfg);
9653 	pci_free_irq_vectors(pdev);
9654 cleanup_nomem:
9655 	iounmap(ipr_regs);
9656 out_disable:
9657 	pci_disable_device(pdev);
9658 out_release_regions:
9659 	pci_release_regions(pdev);
9660 out_scsi_host_put:
9661 	scsi_host_put(host);
9662 	goto out;
9663 }
9664 
9665 /**
9666  * ipr_initiate_ioa_bringdown - Bring down an adapter
9667  * @ioa_cfg:		ioa config struct
9668  * @shutdown_type:	shutdown type
9669  *
9670  * Description: This function will initiate bringing down the adapter.
9671  * This consists of issuing an IOA shutdown to the adapter
9672  * to flush the cache, and running BIST.
9673  * If the caller needs to wait on the completion of the reset,
9674  * the caller must sleep on the reset_wait_q.
9675  *
9676  * Return value:
9677  * 	none
9678  **/
9679 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9680 				       enum ipr_shutdown_type shutdown_type)
9681 {
9682 	ENTER;
9683 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9684 		ioa_cfg->sdt_state = ABORT_DUMP;
9685 	ioa_cfg->reset_retries = 0;
9686 	ioa_cfg->in_ioa_bringdown = 1;
9687 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9688 	LEAVE;
9689 }
9690 
9691 /**
9692  * __ipr_remove - Remove a single adapter
9693  * @pdev:	pci device struct
9694  *
9695  * Adapter hot plug remove entry point.
9696  *
9697  * Return value:
9698  * 	none
9699  **/
9700 static void __ipr_remove(struct pci_dev *pdev)
9701 {
9702 	unsigned long host_lock_flags = 0;
9703 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9704 	int i;
9705 	unsigned long driver_lock_flags;
9706 	ENTER;
9707 
9708 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9709 	while (ioa_cfg->in_reset_reload) {
9710 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9711 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9712 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9713 	}
9714 
9715 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9716 		spin_lock(&ioa_cfg->hrrq[i]._lock);
9717 		ioa_cfg->hrrq[i].removing_ioa = 1;
9718 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
9719 	}
9720 	wmb();
9721 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9722 
9723 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9724 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9725 	flush_work(&ioa_cfg->work_q);
9726 	if (ioa_cfg->reset_work_q)
9727 		flush_workqueue(ioa_cfg->reset_work_q);
9728 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9729 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9730 
9731 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9732 	list_del(&ioa_cfg->queue);
9733 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9734 
9735 	if (ioa_cfg->sdt_state == ABORT_DUMP)
9736 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9737 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9738 
9739 	ipr_free_all_resources(ioa_cfg);
9740 
9741 	LEAVE;
9742 }
9743 
9744 /**
9745  * ipr_remove - IOA hot plug remove entry point
9746  * @pdev:	pci device struct
9747  *
9748  * Adapter hot plug remove entry point.
9749  *
9750  * Return value:
9751  * 	none
9752  **/
9753 static void ipr_remove(struct pci_dev *pdev)
9754 {
9755 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9756 
9757 	ENTER;
9758 
9759 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9760 			      &ipr_trace_attr);
9761 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9762 			     &ipr_dump_attr);
9763 	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9764 			&ipr_ioa_async_err_log);
9765 	scsi_remove_host(ioa_cfg->host);
9766 
9767 	__ipr_remove(pdev);
9768 
9769 	LEAVE;
9770 }
9771 
9772 /**
9773  * ipr_probe - Adapter hot plug add entry point
9774  * @pdev:	pci device struct
9775  * @dev_id:	pci device ID
9776  *
9777  * Return value:
9778  * 	0 on success / non-zero on failure
9779  **/
9780 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9781 {
9782 	struct ipr_ioa_cfg *ioa_cfg;
9783 	unsigned long flags;
9784 	int rc, i;
9785 
9786 	rc = ipr_probe_ioa(pdev, dev_id);
9787 
9788 	if (rc)
9789 		return rc;
9790 
9791 	ioa_cfg = pci_get_drvdata(pdev);
9792 	ipr_probe_ioa_part2(ioa_cfg);
9793 
9794 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9795 
9796 	if (rc) {
9797 		__ipr_remove(pdev);
9798 		return rc;
9799 	}
9800 
9801 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9802 				   &ipr_trace_attr);
9803 
9804 	if (rc) {
9805 		scsi_remove_host(ioa_cfg->host);
9806 		__ipr_remove(pdev);
9807 		return rc;
9808 	}
9809 
9810 	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
9811 			&ipr_ioa_async_err_log);
9812 
9813 	if (rc) {
9816 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9817 				&ipr_trace_attr);
9818 		scsi_remove_host(ioa_cfg->host);
9819 		__ipr_remove(pdev);
9820 		return rc;
9821 	}
9822 
9823 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9824 				   &ipr_dump_attr);
9825 
9826 	if (rc) {
9827 		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9828 				      &ipr_ioa_async_err_log);
9829 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9830 				      &ipr_trace_attr);
9831 		scsi_remove_host(ioa_cfg->host);
9832 		__ipr_remove(pdev);
9833 		return rc;
9834 	}
9835 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9836 	ioa_cfg->scan_enabled = 1;
9837 	schedule_work(&ioa_cfg->work_q);
9838 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9839 
9840 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9841 
9842 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9843 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9844 			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
9845 					ioa_cfg->iopoll_weight, ipr_iopoll);
9846 		}
9847 	}
9848 
9849 	scsi_scan_host(ioa_cfg->host);
9850 
9851 	return 0;
9852 }
9853 
9854 /**
9855  * ipr_shutdown - Shutdown handler.
9856  * @pdev:	pci device struct
9857  *
9858  * This function is invoked upon system shutdown/reboot. It will issue
9859  * an adapter shutdown to the adapter to flush the write cache.
9860  *
9861  * Return value:
9862  * 	none
9863  **/
9864 static void ipr_shutdown(struct pci_dev *pdev)
9865 {
9866 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9867 	unsigned long lock_flags = 0;
9868 	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
9869 	int i;
9870 
9871 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9872 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9873 		ioa_cfg->iopoll_weight = 0;
9874 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
9875 			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
9876 	}
9877 
9878 	while (ioa_cfg->in_reset_reload) {
9879 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9880 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9881 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9882 	}
9883 
9884 	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
9885 		shutdown_type = IPR_SHUTDOWN_QUIESCE;
9886 
9887 	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
9888 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9889 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
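	/*
	 * On a fast reboot of a SIS64 adapter the IRQs are released and the
	 * device disabled as soon as the quiesce completes.
	 */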
9890 	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
9891 		ipr_free_irqs(ioa_cfg);
9892 		pci_disable_device(ioa_cfg->pdev);
9893 	}
9894 }
9895 
9896 static struct pci_device_id ipr_pci_table[] = {
9897 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9898 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9899 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9900 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9901 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9902 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9903 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9904 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9905 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9906 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9907 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9908 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9909 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9910 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9911 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9912 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9913 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9914 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9915 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9916 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9917 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9918 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9919 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9920 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9921 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9922 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9923 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9924 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9925 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9926 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
9927 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9928 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9929 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9930 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9931 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9932 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9933 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9934 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9935 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9936 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9937 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9938 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9939 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9940 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9941 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9942 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9943 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9944 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9945 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9946 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9947 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9948 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9949 		IPR_USE_LONG_TRANSOP_TIMEOUT },
9950 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9951 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9952 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9953 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9954 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9955 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9956 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9957 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
9958 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9959 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9960 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9961 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9962 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9963 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9964 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9965 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9966 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9967 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9968 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9969 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9970 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9971 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9972 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9973 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
9974 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9975 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
9976 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9977 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9978 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9979 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
9980 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9981 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
9982 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9983 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
9984 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9985 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
9986 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9987 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
9988 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9989 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
9990 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9991 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
9992 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9993 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
9994 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9995 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
9996 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9997 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
9998 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9999 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10000 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10001 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
10002 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10003 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10004 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10005 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
10006 	{ }
10007 };
10008 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10009 
10010 static const struct pci_error_handlers ipr_err_handler = {
10011 	.error_detected = ipr_pci_error_detected,
10012 	.mmio_enabled = ipr_pci_mmio_enabled,
10013 	.slot_reset = ipr_pci_slot_reset,
10014 };
10015 
10016 static struct pci_driver ipr_driver = {
10017 	.name = IPR_NAME,
10018 	.id_table = ipr_pci_table,
10019 	.probe = ipr_probe,
10020 	.remove = ipr_remove,
10021 	.shutdown = ipr_shutdown,
10022 	.err_handler = &ipr_err_handler,
10023 };
10024 
10025 /**
10026  * ipr_halt_done - Shutdown prepare completion
10027  * @ipr_cmd:   ipr command struct
10028  *
10029  * Return value:
10030  * 	none
10031  **/
10032 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10033 {
10034 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
10035 }
10036 
10037 /**
10038  * ipr_halt - Issue shutdown prepare to all adapters
10039  * @nb: Notifier block
10040  * @event: Notifier event
10041  * @buf: Notifier data (unused)
10042  *
10043  * Return value:
10044  * 	NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
10045  **/
10046 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10047 {
10048 	struct ipr_cmnd *ipr_cmd;
10049 	struct ipr_ioa_cfg *ioa_cfg;
10050 	unsigned long flags = 0, driver_lock_flags;
10051 
10052 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10053 		return NOTIFY_DONE;
10054 
10055 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10056 
10057 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10058 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10059 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10060 		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10061 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10062 			continue;
10063 		}
10064 
10065 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10066 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10067 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10068 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10069 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10070 
10071 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10072 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10073 	}
10074 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10075 
10076 	return NOTIFY_OK;
10077 }
10078 
10079 static struct notifier_block ipr_notifier = {
10080 	ipr_halt, NULL, 0
10081 };
10082 
10083 /**
10084  * ipr_init - Module entry point
10085  *
10086  * Return value:
10087  * 	0 on success / negative value on failure
10088  **/
10089 static int __init ipr_init(void)
10090 {
10091 	int rc;
10092 
10093 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10094 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10095 
10096 	register_reboot_notifier(&ipr_notifier);
10097 	rc = pci_register_driver(&ipr_driver);
10098 	if (rc) {
10099 		unregister_reboot_notifier(&ipr_notifier);
10100 		return rc;
10101 	}
10102 
10103 	return 0;
10104 }
10105 
10106 /**
10107  * ipr_exit - Module unload
10108  *
10109  * Module unload entry point.
10110  *
10111  * Return value:
10112  * 	none
10113  **/
10114 static void __exit ipr_exit(void)
10115 {
10116 	unregister_reboot_notifier(&ipr_notifier);
10117 	pci_unregister_driver(&ipr_driver);
10118 }
10119 
10120 module_init(ipr_init);
10121 module_exit(ipr_exit);
10122