xref: /openbmc/linux/drivers/scsi/hpsa.c (revision e5f586c763a079349398e2b0c7c271386193ac34)
1 /*
2  *    Disk Array driver for HP Smart Array SAS controllers
3  *    Copyright 2016 Microsemi Corporation
4  *    Copyright 2014-2015 PMC-Sierra, Inc.
5  *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
6  *
7  *    This program is free software; you can redistribute it and/or modify
8  *    it under the terms of the GNU General Public License as published by
9  *    the Free Software Foundation; version 2 of the License.
10  *
11  *    This program is distributed in the hope that it will be useful,
12  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
15  *
16  *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
17  *
18  */
19 
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/types.h>
23 #include <linux/pci.h>
24 #include <linux/pci-aspm.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/fs.h>
29 #include <linux/timer.h>
30 #include <linux/init.h>
31 #include <linux/spinlock.h>
32 #include <linux/compat.h>
33 #include <linux/blktrace_api.h>
34 #include <linux/uaccess.h>
35 #include <linux/io.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/completion.h>
38 #include <linux/moduleparam.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_cmnd.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi_tcq.h>
44 #include <scsi/scsi_eh.h>
45 #include <scsi/scsi_transport_sas.h>
46 #include <scsi/scsi_dbg.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
50 #include <linux/atomic.h>
51 #include <linux/jiffies.h>
52 #include <linux/percpu-defs.h>
53 #include <linux/percpu.h>
54 #include <asm/unaligned.h>
55 #include <asm/div64.h>
56 #include "hpsa_cmd.h"
57 #include "hpsa.h"
58 
59 /*
60  * HPSA_DRIVER_VERSION must be three '.'-separated byte values (0-255),
61  * with an optional trailing '-' followed by a byte value (0-255).
62  */
63 #define HPSA_DRIVER_VERSION "3.4.16-0"
64 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65 #define HPSA "hpsa"
66 
67 /* How long to wait for CISS doorbell communication */
68 #define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
69 #define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
70 #define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
71 #define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
72 #define MAX_IOCTL_CONFIG_WAIT 1000
73 
74 /* define how many times we will try a command because of bus resets */
75 #define MAX_CMD_RETRIES 3
76 
77 /* Embedded module documentation macros - see modules.h */
78 MODULE_AUTHOR("Hewlett-Packard Company");
79 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
80 	HPSA_DRIVER_VERSION);
81 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
82 MODULE_VERSION(HPSA_DRIVER_VERSION);
83 MODULE_LICENSE("GPL");
84 
85 static int hpsa_allow_any;
86 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
87 MODULE_PARM_DESC(hpsa_allow_any,
88 		"Allow hpsa driver to access unknown HP Smart Array hardware");
89 static int hpsa_simple_mode;
90 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
91 MODULE_PARM_DESC(hpsa_simple_mode,
92 	"Use 'simple mode' rather than 'performant mode'");
93 
94 /* define the PCI info for the cards we can control */
95 static const struct pci_device_id hpsa_pci_device_id[] = {
96 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
97 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
98 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
99 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
100 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
101 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
102 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
103 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
104 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
105 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
106 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
107 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
108 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
109 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
110 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
111 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
112 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
113 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
114 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
115 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
116 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
117 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
118 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
119 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
120 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
121 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
122 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
123 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
124 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
125 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
126 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
127 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
128 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
129 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
130 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
131 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
132 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
133 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
134 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
135 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
136 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
137 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
138 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
139 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
140 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
141 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
142 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
143 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
144 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
145 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
146 	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
147 	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
148 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
149 	{0,}
150 };
151 
152 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
153 
154 /*  board_id = Subsystem Device ID (upper 16 bits) concatenated with Vendor ID
155  *  product = Marketing Name for the board
156  *  access = Address of the struct of function pointers
157  */
158 static struct board_type products[] = {
159 	{0x3241103C, "Smart Array P212", &SA5_access},
160 	{0x3243103C, "Smart Array P410", &SA5_access},
161 	{0x3245103C, "Smart Array P410i", &SA5_access},
162 	{0x3247103C, "Smart Array P411", &SA5_access},
163 	{0x3249103C, "Smart Array P812", &SA5_access},
164 	{0x324A103C, "Smart Array P712m", &SA5_access},
165 	{0x324B103C, "Smart Array P711m", &SA5_access},
166 	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
167 	{0x3350103C, "Smart Array P222", &SA5_access},
168 	{0x3351103C, "Smart Array P420", &SA5_access},
169 	{0x3352103C, "Smart Array P421", &SA5_access},
170 	{0x3353103C, "Smart Array P822", &SA5_access},
171 	{0x3354103C, "Smart Array P420i", &SA5_access},
172 	{0x3355103C, "Smart Array P220i", &SA5_access},
173 	{0x3356103C, "Smart Array P721m", &SA5_access},
174 	{0x1921103C, "Smart Array P830i", &SA5_access},
175 	{0x1922103C, "Smart Array P430", &SA5_access},
176 	{0x1923103C, "Smart Array P431", &SA5_access},
177 	{0x1924103C, "Smart Array P830", &SA5_access},
178 	{0x1926103C, "Smart Array P731m", &SA5_access},
179 	{0x1928103C, "Smart Array P230i", &SA5_access},
180 	{0x1929103C, "Smart Array P530", &SA5_access},
181 	{0x21BD103C, "Smart Array P244br", &SA5_access},
182 	{0x21BE103C, "Smart Array P741m", &SA5_access},
183 	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
184 	{0x21C0103C, "Smart Array P440ar", &SA5_access},
185 	{0x21C1103C, "Smart Array P840ar", &SA5_access},
186 	{0x21C2103C, "Smart Array P440", &SA5_access},
187 	{0x21C3103C, "Smart Array P441", &SA5_access},
188 	{0x21C4103C, "Smart Array", &SA5_access},
189 	{0x21C5103C, "Smart Array P841", &SA5_access},
190 	{0x21C6103C, "Smart HBA H244br", &SA5_access},
191 	{0x21C7103C, "Smart HBA H240", &SA5_access},
192 	{0x21C8103C, "Smart HBA H241", &SA5_access},
193 	{0x21C9103C, "Smart Array", &SA5_access},
194 	{0x21CA103C, "Smart Array P246br", &SA5_access},
195 	{0x21CB103C, "Smart Array P840", &SA5_access},
196 	{0x21CC103C, "Smart Array", &SA5_access},
197 	{0x21CD103C, "Smart Array", &SA5_access},
198 	{0x21CE103C, "Smart HBA", &SA5_access},
199 	{0x05809005, "SmartHBA-SA", &SA5_access},
200 	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
201 	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
202 	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
203 	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
204 	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
205 	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
206 	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
207 	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
208 	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
209 	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
210 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
211 };
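
/*
 * Illustrative sketch (not part of the driver): a board_id packs the PCI
 * subsystem device ID into the upper 16 bits and the subsystem vendor ID
 * into the lower 16 bits, so 0x3241103C above is subsystem device 0x3241
 * from subsystem vendor 0x103C (HP).
 */
static inline u32 example_board_id(u16 subsys_device, u16 subsys_vendor)
{
	return ((u32)subsys_device << 16) | subsys_vendor;
}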
212 
213 static struct scsi_transport_template *hpsa_sas_transport_template;
214 static int hpsa_add_sas_host(struct ctlr_info *h);
215 static void hpsa_delete_sas_host(struct ctlr_info *h);
216 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
217 			struct hpsa_scsi_dev_t *device);
218 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
219 static struct hpsa_scsi_dev_t
220 	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
221 		struct sas_rphy *rphy);
222 
223 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
224 static const struct scsi_cmnd hpsa_cmd_busy;
225 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
226 static const struct scsi_cmnd hpsa_cmd_idle;
227 static int number_of_controllers;
228 
229 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
230 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
231 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
232 
233 #ifdef CONFIG_COMPAT
234 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
235 	void __user *arg);
236 #endif
237 
238 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
239 static struct CommandList *cmd_alloc(struct ctlr_info *h);
240 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
241 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
242 					    struct scsi_cmnd *scmd);
243 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
244 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
245 	int cmd_type);
246 static void hpsa_free_cmd_pool(struct ctlr_info *h);
247 #define VPD_PAGE (1 << 8)
248 #define HPSA_SIMPLE_ERROR_BITS 0x03
249 
250 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
251 static void hpsa_scan_start(struct Scsi_Host *);
252 static int hpsa_scan_finished(struct Scsi_Host *sh,
253 	unsigned long elapsed_time);
254 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
255 
256 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
257 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
258 static int hpsa_slave_alloc(struct scsi_device *sdev);
259 static int hpsa_slave_configure(struct scsi_device *sdev);
260 static void hpsa_slave_destroy(struct scsi_device *sdev);
261 
262 static void hpsa_update_scsi_devices(struct ctlr_info *h);
263 static int check_for_unit_attention(struct ctlr_info *h,
264 	struct CommandList *c);
265 static void check_ioctl_unit_attention(struct ctlr_info *h,
266 	struct CommandList *c);
267 /* performant mode helper functions */
268 static void calc_bucket_map(int *bucket, int num_buckets,
269 	int nsgs, int min_blocks, u32 *bucket_map);
270 static void hpsa_free_performant_mode(struct ctlr_info *h);
271 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
272 static inline u32 next_command(struct ctlr_info *h, u8 q);
273 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
274 			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
275 			       u64 *cfg_offset);
276 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
277 				    unsigned long *memory_bar);
278 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
279 static int wait_for_device_to_become_ready(struct ctlr_info *h,
280 					   unsigned char lunaddr[],
281 					   int reply_queue);
282 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
283 				     int wait_for_ready);
284 static inline void finish_cmd(struct CommandList *c);
285 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
286 #define BOARD_NOT_READY 0
287 #define BOARD_READY 1
288 static void hpsa_drain_accel_commands(struct ctlr_info *h);
289 static void hpsa_flush_cache(struct ctlr_info *h);
290 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
291 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
292 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
293 static void hpsa_command_resubmit_worker(struct work_struct *work);
294 static u32 lockup_detected(struct ctlr_info *h);
295 static int detect_controller_lockup(struct ctlr_info *h);
296 static void hpsa_disable_rld_caching(struct ctlr_info *h);
297 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
298 	struct ReportExtendedLUNdata *buf, int bufsize);
299 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
300 	unsigned char scsi3addr[], u8 page);
301 static int hpsa_luns_changed(struct ctlr_info *h);
302 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
303 			       struct hpsa_scsi_dev_t *dev,
304 			       unsigned char *scsi3addr);
305 
306 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
307 {
308 	unsigned long *priv = shost_priv(sdev->host);
309 	return (struct ctlr_info *) *priv;
310 }
311 
312 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
313 {
314 	unsigned long *priv = shost_priv(sh);
315 	return (struct ctlr_info *) *priv;
316 }
317 
318 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
319 {
320 	return c->scsi_cmd == SCSI_CMD_IDLE;
321 }
322 
323 static inline bool hpsa_is_pending_event(struct CommandList *c)
324 {
325 	return c->abort_pending || c->reset_pending;
326 }
327 
328 /* extract sense key, asc, and ascq from sense data.  -1 (0xff in the u8 outputs) means invalid. */
329 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
330 			u8 *sense_key, u8 *asc, u8 *ascq)
331 {
332 	struct scsi_sense_hdr sshdr;
333 	bool rc;
334 
335 	*sense_key = -1;
336 	*asc = -1;
337 	*ascq = -1;
338 
339 	if (sense_data_len < 1)
340 		return;
341 
342 	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
343 	if (rc) {
344 		*sense_key = sshdr.sense_key;
345 		*asc = sshdr.asc;
346 		*ascq = sshdr.ascq;
347 	}
348 }
349 
350 static int check_for_unit_attention(struct ctlr_info *h,
351 	struct CommandList *c)
352 {
353 	u8 sense_key, asc, ascq;
354 	int sense_len;
355 
356 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
357 		sense_len = sizeof(c->err_info->SenseInfo);
358 	else
359 		sense_len = c->err_info->SenseLen;
360 
361 	decode_sense_data(c->err_info->SenseInfo, sense_len,
362 				&sense_key, &asc, &ascq);
363 	if (sense_key != UNIT_ATTENTION || asc == 0xff)
364 		return 0;
365 
366 	switch (asc) {
367 	case STATE_CHANGED:
368 		dev_warn(&h->pdev->dev,
369 			"%s: a state change detected, command retried\n",
370 			h->devname);
371 		break;
372 	case LUN_FAILED:
373 		dev_warn(&h->pdev->dev,
374 			"%s: LUN failure detected\n", h->devname);
375 		break;
376 	case REPORT_LUNS_CHANGED:
377 		dev_warn(&h->pdev->dev,
378 			"%s: report LUN data changed\n", h->devname);
379 		/*
380 		 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
381 		 * external target (array) devices.
382 		 */
383 		break;
384 	case POWER_OR_RESET:
385 		dev_warn(&h->pdev->dev,
386 			"%s: a power on or device reset detected\n",
387 			h->devname);
388 		break;
389 	case UNIT_ATTENTION_CLEARED:
390 		dev_warn(&h->pdev->dev,
391 			"%s: unit attention cleared by another initiator\n",
392 			h->devname);
393 		break;
394 	default:
395 		dev_warn(&h->pdev->dev,
396 			"%s: unknown unit attention detected\n",
397 			h->devname);
398 		break;
399 	}
400 	return 1;
401 }
402 
403 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
404 {
405 	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
406 		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
407 		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
408 		return 0;
409 	dev_warn(&h->pdev->dev, HPSA ": device busy\n");
410 	return 1;
411 }
412 
413 static u32 lockup_detected(struct ctlr_info *h);
414 static ssize_t host_show_lockup_detected(struct device *dev,
415 		struct device_attribute *attr, char *buf)
416 {
417 	int ld;
418 	struct ctlr_info *h;
419 	struct Scsi_Host *shost = class_to_shost(dev);
420 
421 	h = shost_to_hba(shost);
422 	ld = lockup_detected(h);
423 
424 	return sprintf(buf, "ld=%d\n", ld);
425 }
426 
427 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
428 					 struct device_attribute *attr,
429 					 const char *buf, size_t count)
430 {
431 	int status, len;
432 	struct ctlr_info *h;
433 	struct Scsi_Host *shost = class_to_shost(dev);
434 	char tmpbuf[10];
435 
436 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
437 		return -EACCES;
438 	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
439 	strncpy(tmpbuf, buf, len);
440 	tmpbuf[len] = '\0';
441 	if (sscanf(tmpbuf, "%d", &status) != 1)
442 		return -EINVAL;
443 	h = shost_to_hba(shost);
444 	h->acciopath_status = !!status;
445 	dev_warn(&h->pdev->dev,
446 		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
447 		h->acciopath_status ? "enabled" : "disabled");
448 	return count;
449 }
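
/*
 * Example usage from userspace (illustrative; substitute the controller's
 * actual host number for hostN):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/hp_ssd_smart_path_status
 *
 * Any nonzero value enables HP SSD Smart Path; 0 disables it.
 */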
450 
451 static ssize_t host_store_raid_offload_debug(struct device *dev,
452 					 struct device_attribute *attr,
453 					 const char *buf, size_t count)
454 {
455 	int debug_level, len;
456 	struct ctlr_info *h;
457 	struct Scsi_Host *shost = class_to_shost(dev);
458 	char tmpbuf[10];
459 
460 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
461 		return -EACCES;
462 	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
463 	strncpy(tmpbuf, buf, len);
464 	tmpbuf[len] = '\0';
465 	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
466 		return -EINVAL;
467 	if (debug_level < 0)
468 		debug_level = 0;
469 	h = shost_to_hba(shost);
470 	h->raid_offload_debug = debug_level;
471 	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
472 		h->raid_offload_debug);
473 	return count;
474 }
475 
476 static ssize_t host_store_rescan(struct device *dev,
477 				 struct device_attribute *attr,
478 				 const char *buf, size_t count)
479 {
480 	struct ctlr_info *h;
481 	struct Scsi_Host *shost = class_to_shost(dev);
482 	h = shost_to_hba(shost);
483 	hpsa_scan_start(h->scsi_host);
484 	return count;
485 }
486 
487 static ssize_t host_show_firmware_revision(struct device *dev,
488 	     struct device_attribute *attr, char *buf)
489 {
490 	struct ctlr_info *h;
491 	struct Scsi_Host *shost = class_to_shost(dev);
492 	unsigned char *fwrev;
493 
494 	h = shost_to_hba(shost);
495 	if (!h->hba_inquiry_data)
496 		return 0;
497 	fwrev = &h->hba_inquiry_data[32];
498 	return snprintf(buf, 20, "%c%c%c%c\n",
499 		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
500 }
501 
502 static ssize_t host_show_commands_outstanding(struct device *dev,
503 	     struct device_attribute *attr, char *buf)
504 {
505 	struct Scsi_Host *shost = class_to_shost(dev);
506 	struct ctlr_info *h = shost_to_hba(shost);
507 
508 	return snprintf(buf, 20, "%d\n",
509 			atomic_read(&h->commands_outstanding));
510 }
511 
512 static ssize_t host_show_transport_mode(struct device *dev,
513 	struct device_attribute *attr, char *buf)
514 {
515 	struct ctlr_info *h;
516 	struct Scsi_Host *shost = class_to_shost(dev);
517 
518 	h = shost_to_hba(shost);
519 	return snprintf(buf, 20, "%s\n",
520 		h->transMethod & CFGTBL_Trans_Performant ?
521 			"performant" : "simple");
522 }
523 
524 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
525 	struct device_attribute *attr, char *buf)
526 {
527 	struct ctlr_info *h;
528 	struct Scsi_Host *shost = class_to_shost(dev);
529 
530 	h = shost_to_hba(shost);
531 	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
532 		(h->acciopath_status == 1) ?  "enabled" : "disabled");
533 }
534 
535 /* List of controllers which cannot be hard reset on kexec with reset_devices */
536 static u32 unresettable_controller[] = {
537 	0x324a103C, /* Smart Array P712m */
538 	0x324b103C, /* Smart Array P711m */
539 	0x3223103C, /* Smart Array P800 */
540 	0x3234103C, /* Smart Array P400 */
541 	0x3235103C, /* Smart Array P400i */
542 	0x3211103C, /* Smart Array E200i */
543 	0x3212103C, /* Smart Array E200 */
544 	0x3213103C, /* Smart Array E200i */
545 	0x3214103C, /* Smart Array E200i */
546 	0x3215103C, /* Smart Array E200i */
547 	0x3237103C, /* Smart Array E500 */
548 	0x323D103C, /* Smart Array P700m */
549 	0x40800E11, /* Smart Array 5i */
550 	0x409C0E11, /* Smart Array 6400 */
551 	0x409D0E11, /* Smart Array 6400 EM */
552 	0x40700E11, /* Smart Array 5300 */
553 	0x40820E11, /* Smart Array 532 */
554 	0x40830E11, /* Smart Array 5312 */
555 	0x409A0E11, /* Smart Array 641 */
556 	0x409B0E11, /* Smart Array 642 */
557 	0x40910E11, /* Smart Array 6i */
558 };
559 
560 /* List of controllers which cannot even be soft reset */
561 static u32 soft_unresettable_controller[] = {
562 	0x40800E11, /* Smart Array 5i */
563 	0x40700E11, /* Smart Array 5300 */
564 	0x40820E11, /* Smart Array 532 */
565 	0x40830E11, /* Smart Array 5312 */
566 	0x409A0E11, /* Smart Array 641 */
567 	0x409B0E11, /* Smart Array 642 */
568 	0x40910E11, /* Smart Array 6i */
569 	/* Exclude 640x boards.  These are two pci devices in one slot
570 	 * which share a battery backed cache module.  One controls the
571 	 * cache, the other accesses the cache through the one that controls
572 	 * it.  If we reset the one controlling the cache, the other will
573 	 * likely not be happy.  Just forbid resetting this conjoined mess.
574 	 * The 640x isn't really supported by hpsa anyway.
575 	 */
576 	0x409C0E11, /* Smart Array 6400 */
577 	0x409D0E11, /* Smart Array 6400 EM */
578 };
579 
580 static u32 needs_abort_tags_swizzled[] = {
581 	0x323D103C, /* Smart Array P700m */
582 	0x324a103C, /* Smart Array P712m */
583 	0x324b103C, /* Smart Array P711m */
584 };
585 
586 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
587 {
588 	int i;
589 
590 	for (i = 0; i < nelems; i++)
591 		if (a[i] == board_id)
592 			return 1;
593 	return 0;
594 }
595 
596 static int ctlr_is_hard_resettable(u32 board_id)
597 {
598 	return !board_id_in_array(unresettable_controller,
599 			ARRAY_SIZE(unresettable_controller), board_id);
600 }
601 
602 static int ctlr_is_soft_resettable(u32 board_id)
603 {
604 	return !board_id_in_array(soft_unresettable_controller,
605 			ARRAY_SIZE(soft_unresettable_controller), board_id);
606 }
607 
608 static int ctlr_is_resettable(u32 board_id)
609 {
610 	return ctlr_is_hard_resettable(board_id) ||
611 		ctlr_is_soft_resettable(board_id);
612 }
613 
614 static int ctlr_needs_abort_tags_swizzled(u32 board_id)
615 {
616 	return board_id_in_array(needs_abort_tags_swizzled,
617 			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
618 }
619 
620 static ssize_t host_show_resettable(struct device *dev,
621 	struct device_attribute *attr, char *buf)
622 {
623 	struct ctlr_info *h;
624 	struct Scsi_Host *shost = class_to_shost(dev);
625 
626 	h = shost_to_hba(shost);
627 	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
628 }
629 
630 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
631 {
632 	return (scsi3addr[3] & 0xC0) == 0x40;
633 }
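
/*
 * Note (illustrative): the high two bits of byte 3 of the 8-byte CISS LUN
 * address encode the addressing mode; the 0x40 pattern tested above marks
 * a logical (RAID volume) address rather than a physical device address.
 */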
634 
635 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
636 	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
637 };
638 #define HPSA_RAID_0	0
639 #define HPSA_RAID_4	1
640 #define HPSA_RAID_1	2	/* also used for RAID 10 */
641 #define HPSA_RAID_5	3	/* also used for RAID 50 */
642 #define HPSA_RAID_51	4
643 #define HPSA_RAID_6	5	/* also used for RAID 60 */
644 #define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
645 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
646 #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
647 
648 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
649 {
650 	return !device->physical_device;
651 }
652 
653 static ssize_t raid_level_show(struct device *dev,
654 	     struct device_attribute *attr, char *buf)
655 {
656 	ssize_t l = 0;
657 	unsigned char rlevel;
658 	struct ctlr_info *h;
659 	struct scsi_device *sdev;
660 	struct hpsa_scsi_dev_t *hdev;
661 	unsigned long flags;
662 
663 	sdev = to_scsi_device(dev);
664 	h = sdev_to_hba(sdev);
665 	spin_lock_irqsave(&h->lock, flags);
666 	hdev = sdev->hostdata;
667 	if (!hdev) {
668 		spin_unlock_irqrestore(&h->lock, flags);
669 		return -ENODEV;
670 	}
671 
672 	/* Is this even a logical drive? */
673 	if (!is_logical_device(hdev)) {
674 		spin_unlock_irqrestore(&h->lock, flags);
675 		l = snprintf(buf, PAGE_SIZE, "N/A\n");
676 		return l;
677 	}
678 
679 	rlevel = hdev->raid_level;
680 	spin_unlock_irqrestore(&h->lock, flags);
681 	if (rlevel > RAID_UNKNOWN)
682 		rlevel = RAID_UNKNOWN;
683 	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
684 	return l;
685 }
686 
687 static ssize_t lunid_show(struct device *dev,
688 	     struct device_attribute *attr, char *buf)
689 {
690 	struct ctlr_info *h;
691 	struct scsi_device *sdev;
692 	struct hpsa_scsi_dev_t *hdev;
693 	unsigned long flags;
694 	unsigned char lunid[8];
695 
696 	sdev = to_scsi_device(dev);
697 	h = sdev_to_hba(sdev);
698 	spin_lock_irqsave(&h->lock, flags);
699 	hdev = sdev->hostdata;
700 	if (!hdev) {
701 		spin_unlock_irqrestore(&h->lock, flags);
702 		return -ENODEV;
703 	}
704 	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
705 	spin_unlock_irqrestore(&h->lock, flags);
706 	return snprintf(buf, 20, "0x%8phN\n", lunid);
707 }
708 
709 static ssize_t unique_id_show(struct device *dev,
710 	     struct device_attribute *attr, char *buf)
711 {
712 	struct ctlr_info *h;
713 	struct scsi_device *sdev;
714 	struct hpsa_scsi_dev_t *hdev;
715 	unsigned long flags;
716 	unsigned char sn[16];
717 
718 	sdev = to_scsi_device(dev);
719 	h = sdev_to_hba(sdev);
720 	spin_lock_irqsave(&h->lock, flags);
721 	hdev = sdev->hostdata;
722 	if (!hdev) {
723 		spin_unlock_irqrestore(&h->lock, flags);
724 		return -ENODEV;
725 	}
726 	memcpy(sn, hdev->device_id, sizeof(sn));
727 	spin_unlock_irqrestore(&h->lock, flags);
728 	return snprintf(buf, 16 * 2 + 2,
729 			"%02X%02X%02X%02X%02X%02X%02X%02X"
730 			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
731 			sn[0], sn[1], sn[2], sn[3],
732 			sn[4], sn[5], sn[6], sn[7],
733 			sn[8], sn[9], sn[10], sn[11],
734 			sn[12], sn[13], sn[14], sn[15]);
735 }
736 
737 static ssize_t sas_address_show(struct device *dev,
738 	      struct device_attribute *attr, char *buf)
739 {
740 	struct ctlr_info *h;
741 	struct scsi_device *sdev;
742 	struct hpsa_scsi_dev_t *hdev;
743 	unsigned long flags;
744 	u64 sas_address;
745 
746 	sdev = to_scsi_device(dev);
747 	h = sdev_to_hba(sdev);
748 	spin_lock_irqsave(&h->lock, flags);
749 	hdev = sdev->hostdata;
750 	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
751 		spin_unlock_irqrestore(&h->lock, flags);
752 		return -ENODEV;
753 	}
754 	sas_address = hdev->sas_address;
755 	spin_unlock_irqrestore(&h->lock, flags);
756 
757 	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
758 }
759 
760 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
761 	     struct device_attribute *attr, char *buf)
762 {
763 	struct ctlr_info *h;
764 	struct scsi_device *sdev;
765 	struct hpsa_scsi_dev_t *hdev;
766 	unsigned long flags;
767 	int offload_enabled;
768 
769 	sdev = to_scsi_device(dev);
770 	h = sdev_to_hba(sdev);
771 	spin_lock_irqsave(&h->lock, flags);
772 	hdev = sdev->hostdata;
773 	if (!hdev) {
774 		spin_unlock_irqrestore(&h->lock, flags);
775 		return -ENODEV;
776 	}
777 	offload_enabled = hdev->offload_enabled;
778 	spin_unlock_irqrestore(&h->lock, flags);
779 	return snprintf(buf, 20, "%d\n", offload_enabled);
780 }
781 
782 #define MAX_PATHS 8
783 static ssize_t path_info_show(struct device *dev,
784 	     struct device_attribute *attr, char *buf)
785 {
786 	struct ctlr_info *h;
787 	struct scsi_device *sdev;
788 	struct hpsa_scsi_dev_t *hdev;
789 	unsigned long flags;
790 	int i;
791 	int output_len = 0;
792 	u8 box;
793 	u8 bay;
794 	u8 path_map_index = 0;
795 	char *active;
796 	unsigned char phys_connector[2];
797 
798 	sdev = to_scsi_device(dev);
799 	h = sdev_to_hba(sdev);
800 	spin_lock_irqsave(&h->devlock, flags);
801 	hdev = sdev->hostdata;
802 	if (!hdev) {
803 		spin_unlock_irqrestore(&h->devlock, flags);
804 		return -ENODEV;
805 	}
806 
807 	bay = hdev->bay;
808 	for (i = 0; i < MAX_PATHS; i++) {
809 		path_map_index = 1<<i;
810 		if (i == hdev->active_path_index)
811 			active = "Active";
812 		else if (hdev->path_map & path_map_index)
813 			active = "Inactive";
814 		else
815 			continue;
816 
817 		output_len += scnprintf(buf + output_len,
818 				PAGE_SIZE - output_len,
819 				"[%d:%d:%d:%d] %20.20s ",
820 				h->scsi_host->host_no,
821 				hdev->bus, hdev->target, hdev->lun,
822 				scsi_device_type(hdev->devtype));
823 
824 		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
825 			output_len += scnprintf(buf + output_len,
826 						PAGE_SIZE - output_len,
827 						"%s\n", active);
828 			continue;
829 		}
830 
831 		box = hdev->box[i];
832 		memcpy(&phys_connector, &hdev->phys_connector[i],
833 			sizeof(phys_connector));
834 		if (phys_connector[0] < '0')
835 			phys_connector[0] = '0';
836 		if (phys_connector[1] < '0')
837 			phys_connector[1] = '0';
838 		output_len += scnprintf(buf + output_len,
839 				PAGE_SIZE - output_len,
840 				"PORT: %.2s ",
841 				phys_connector);
842 		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
843 			hdev->expose_device) {
844 			if (box == 0 || box == 0xFF) {
845 				output_len += scnprintf(buf + output_len,
846 					PAGE_SIZE - output_len,
847 					"BAY: %hhu %s\n",
848 					bay, active);
849 			} else {
850 				output_len += scnprintf(buf + output_len,
851 					PAGE_SIZE - output_len,
852 					"BOX: %hhu BAY: %hhu %s\n",
853 					box, bay, active);
854 			}
855 		} else if (box != 0 && box != 0xFF) {
856 			output_len += scnprintf(buf + output_len,
857 				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
858 				box, active);
859 		} else
860 			output_len += scnprintf(buf + output_len,
861 				PAGE_SIZE - output_len, "%s\n", active);
862 	}
863 
864 	spin_unlock_irqrestore(&h->devlock, flags);
865 	return output_len;
866 }
867 
868 static ssize_t host_show_ctlr_num(struct device *dev,
869 	struct device_attribute *attr, char *buf)
870 {
871 	struct ctlr_info *h;
872 	struct Scsi_Host *shost = class_to_shost(dev);
873 
874 	h = shost_to_hba(shost);
875 	return snprintf(buf, 20, "%d\n", h->ctlr);
876 }
877 
878 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
879 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
880 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
881 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
882 static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
883 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
884 			host_show_hp_ssd_smart_path_enabled, NULL);
885 static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
886 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
887 		host_show_hp_ssd_smart_path_status,
888 		host_store_hp_ssd_smart_path_status);
889 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
890 			host_store_raid_offload_debug);
891 static DEVICE_ATTR(firmware_revision, S_IRUGO,
892 	host_show_firmware_revision, NULL);
893 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
894 	host_show_commands_outstanding, NULL);
895 static DEVICE_ATTR(transport_mode, S_IRUGO,
896 	host_show_transport_mode, NULL);
897 static DEVICE_ATTR(resettable, S_IRUGO,
898 	host_show_resettable, NULL);
899 static DEVICE_ATTR(lockup_detected, S_IRUGO,
900 	host_show_lockup_detected, NULL);
901 static DEVICE_ATTR(ctlr_num, S_IRUGO,
902 	host_show_ctlr_num, NULL);
903 
904 static struct device_attribute *hpsa_sdev_attrs[] = {
905 	&dev_attr_raid_level,
906 	&dev_attr_lunid,
907 	&dev_attr_unique_id,
908 	&dev_attr_hp_ssd_smart_path_enabled,
909 	&dev_attr_path_info,
910 	&dev_attr_sas_address,
911 	NULL,
912 };
913 
914 static struct device_attribute *hpsa_shost_attrs[] = {
915 	&dev_attr_rescan,
916 	&dev_attr_firmware_revision,
917 	&dev_attr_commands_outstanding,
918 	&dev_attr_transport_mode,
919 	&dev_attr_resettable,
920 	&dev_attr_hp_ssd_smart_path_status,
921 	&dev_attr_raid_offload_debug,
922 	&dev_attr_lockup_detected,
923 	&dev_attr_ctlr_num,
924 	NULL,
925 };
926 
927 #define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
928 		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
929 
930 static struct scsi_host_template hpsa_driver_template = {
931 	.module			= THIS_MODULE,
932 	.name			= HPSA,
933 	.proc_name		= HPSA,
934 	.queuecommand		= hpsa_scsi_queue_command,
935 	.scan_start		= hpsa_scan_start,
936 	.scan_finished		= hpsa_scan_finished,
937 	.change_queue_depth	= hpsa_change_queue_depth,
938 	.this_id		= -1,
939 	.use_clustering		= ENABLE_CLUSTERING,
940 	.eh_abort_handler	= hpsa_eh_abort_handler,
941 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
942 	.ioctl			= hpsa_ioctl,
943 	.slave_alloc		= hpsa_slave_alloc,
944 	.slave_configure	= hpsa_slave_configure,
945 	.slave_destroy		= hpsa_slave_destroy,
946 #ifdef CONFIG_COMPAT
947 	.compat_ioctl		= hpsa_compat_ioctl,
948 #endif
949 	.sdev_attrs = hpsa_sdev_attrs,
950 	.shost_attrs = hpsa_shost_attrs,
951 	.max_sectors = 8192,
952 	.no_write_same = 1,
953 };
954 
955 static inline u32 next_command(struct ctlr_info *h, u8 q)
956 {
957 	u32 a;
958 	struct reply_queue_buffer *rq = &h->reply_queue[q];
959 
960 	if (h->transMethod & CFGTBL_Trans_io_accel1)
961 		return h->access.command_completed(h, q);
962 
963 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
964 		return h->access.command_completed(h, q);
965 
966 	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
967 		a = rq->head[rq->current_entry];
968 		rq->current_entry++;
969 		atomic_dec(&h->commands_outstanding);
970 	} else {
971 		a = FIFO_EMPTY;
972 	}
973 	/* Check for wraparound */
974 	if (rq->current_entry == h->max_commands) {
975 		rq->current_entry = 0;
976 		rq->wraparound ^= 1;
977 	}
978 	return a;
979 }
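
/*
 * Note on the wraparound test above (illustrative): the controller tags
 * each reply it writes with a phase bit (bit 0).  The driver flips
 * rq->wraparound every time it wraps back to the start of the reply ring,
 * so a head entry whose phase bit matches rq->wraparound is a fresh
 * completion and a mismatch means there is nothing new in that slot.
 */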
980 
981 /*
982  * There are some special bits in the bus address of the
983  * command that we have to set for the controller to know
984  * how to process the command:
985  *
986  * Normal performant mode:
987  * bit 0: 1 means performant mode, 0 means simple mode.
988  * bits 1-3 = block fetch table entry
989  * bits 4-6 = command type (== 0)
990  *
991  * ioaccel1 mode:
992  * bit 0 = "performant mode" bit.
993  * bits 1-3 = block fetch table entry
994  * bits 4-6 = command type (== 110)
995  * (command type is needed because ioaccel1 mode
996  * commands are submitted through the same register as normal
997  * mode commands, so this is how the controller knows whether
998  * the command is normal mode or ioaccel1 mode.)
999  *
1000  * ioaccel2 mode:
1001  * bit 0 = "performant mode" bit.
1002  * bits 1-4 = block fetch table entry (note extra bit)
1003  * bits 4-6 = not needed, because ioaccel2 mode has
1004  * a separate special register for submitting commands.
1005  */
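
/*
 * Illustrative sketch (not used by the driver): composing a normal
 * performant-mode tag per the layout above, assuming a block fetch table
 * indexed by scatter-gather element count as in set_performant_mode()
 * below.
 */
static inline u32 example_performant_tag(u32 busaddr,
					 const u32 *block_fetch_table,
					 u8 sg_count)
{
	/* bit 0 selects performant mode; bits 1-3 hold the fetch table entry */
	return busaddr | 1 | (block_fetch_table[sg_count] << 1);
}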
1006 
1007 /*
1008  * set_performant_mode: Modify the tag for cciss performant mode:
1009  * set bit 0 for pull model, bits 1-3 for the block fetch
1010  * table entry
1011  */
1012 #define DEFAULT_REPLY_QUEUE (-1)
1013 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1014 					int reply_queue)
1015 {
1016 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1017 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1018 		if (unlikely(!h->msix_vectors))
1019 			return;
1020 		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1021 			c->Header.ReplyQueue =
1022 				raw_smp_processor_id() % h->nreply_queues;
1023 		else
1024 			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
1025 	}
1026 }
1027 
1028 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1029 						struct CommandList *c,
1030 						int reply_queue)
1031 {
1032 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1033 
1034 	/*
1035 	 * Tell the controller to post the reply to the queue for this
1036 	 * processor.  This seems to give the best I/O throughput.
1037 	 */
1038 	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1039 		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
1040 	else
1041 		cp->ReplyQueue = reply_queue % h->nreply_queues;
1042 	/*
1043 	 * Set the bits in the address sent down to include:
1044 	 *  - performant mode bit (bit 0)
1045 	 *  - pull count (bits 1-3)
1046 	 *  - command type (bits 4-6)
1047 	 */
1048 	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1049 					IOACCEL1_BUSADDR_CMDTYPE;
1050 }
1051 
1052 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1053 						struct CommandList *c,
1054 						int reply_queue)
1055 {
1056 	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1057 		&h->ioaccel2_cmd_pool[c->cmdindex];
1058 
1059 	/* Tell the controller to post the reply to the queue for this
1060 	 * processor.  This seems to give the best I/O throughput.
1061 	 */
1062 	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1063 		cp->reply_queue = smp_processor_id() % h->nreply_queues;
1064 	else
1065 		cp->reply_queue = reply_queue % h->nreply_queues;
1066 	/* Set the bits in the address sent down to include:
1067 	 *  - performant mode bit not used in ioaccel mode 2
1068 	 *  - pull count (bits 0-3)
1069 	 *  - command type isn't needed for ioaccel2
1070 	 */
1071 	c->busaddr |= h->ioaccel2_blockFetchTable[0];
1072 }
1073 
1074 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1075 						struct CommandList *c,
1076 						int reply_queue)
1077 {
1078 	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1079 
1080 	/*
1081 	 * Tell the controller to post the reply to the queue for this
1082 	 * processor.  This seems to give the best I/O throughput.
1083 	 */
1084 	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1085 		cp->reply_queue = smp_processor_id() % h->nreply_queues;
1086 	else
1087 		cp->reply_queue = reply_queue % h->nreply_queues;
1088 	/*
1089 	 * Set the bits in the address sent down to include:
1090 	 *  - performant mode bit not used in ioaccel mode 2
1091 	 *  - pull count (bits 0-3)
1092 	 *  - command type isn't needed for ioaccel2
1093 	 */
1094 	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1095 }
1096 
1097 static int is_firmware_flash_cmd(u8 *cdb)
1098 {
1099 	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1100 }
1101 
1102 /*
1103  * During firmware flash, the heartbeat register may not update as frequently
1104  * as it should.  So we dial down lockup detection during firmware flash, and
1105  * dial it back up when firmware flash completes.
1106  */
1107 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1108 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1109 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1110 		struct CommandList *c)
1111 {
1112 	if (!is_firmware_flash_cmd(c->Request.CDB))
1113 		return;
1114 	atomic_inc(&h->firmware_flash_in_progress);
1115 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1116 }
1117 
1118 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1119 		struct CommandList *c)
1120 {
1121 	if (is_firmware_flash_cmd(c->Request.CDB) &&
1122 		atomic_dec_and_test(&h->firmware_flash_in_progress))
1123 		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1124 }
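
/*
 * Net effect (illustrative): while any firmware flash is in progress, the
 * lockup detector samples the heartbeat every 240 seconds instead of every
 * 30 seconds (the intervals above are in jiffies, i.e. seconds * HZ); the
 * last flash command to complete restores the normal interval.
 */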
1125 
1126 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1127 	struct CommandList *c, int reply_queue)
1128 {
1129 	dial_down_lockup_detection_during_fw_flash(h, c);
1130 	atomic_inc(&h->commands_outstanding);
1131 	switch (c->cmd_type) {
1132 	case CMD_IOACCEL1:
1133 		set_ioaccel1_performant_mode(h, c, reply_queue);
1134 		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1135 		break;
1136 	case CMD_IOACCEL2:
1137 		set_ioaccel2_performant_mode(h, c, reply_queue);
1138 		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1139 		break;
1140 	case IOACCEL2_TMF:
1141 		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1142 		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1143 		break;
1144 	default:
1145 		set_performant_mode(h, c, reply_queue);
1146 		h->access.submit_command(h, c);
1147 	}
1148 }
1149 
1150 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1151 {
1152 	if (unlikely(hpsa_is_pending_event(c)))
1153 		return finish_cmd(c);
1154 
1155 	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1156 }
1157 
1158 static inline int is_hba_lunid(unsigned char scsi3addr[])
1159 {
1160 	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1161 }
1162 
1163 static inline int is_scsi_rev_5(struct ctlr_info *h)
1164 {
1165 	if (!h->hba_inquiry_data)
1166 		return 0;
1167 	if ((h->hba_inquiry_data[2] & 0x07) == 5)
1168 		return 1;
1169 	return 0;
1170 }
1171 
1172 static int hpsa_find_target_lun(struct ctlr_info *h,
1173 	unsigned char scsi3addr[], int bus, int *target, int *lun)
1174 {
1175 	/* finds an unused bus, target, lun for a new physical device
1176 	 * assumes h->devlock is held
1177 	 */
1178 	int i, found = 0;
1179 	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1180 
1181 	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1182 
1183 	for (i = 0; i < h->ndevices; i++) {
1184 		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1185 			__set_bit(h->dev[i]->target, lun_taken);
1186 	}
1187 
1188 	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1189 	if (i < HPSA_MAX_DEVICES) {
1190 		/* *bus = 1; */
1191 		*target = i;
1192 		*lun = 0;
1193 		found = 1;
1194 	}
1195 	return !found;
1196 }
1197 
1198 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1199 	struct hpsa_scsi_dev_t *dev, char *description)
1200 {
1201 #define LABEL_SIZE 25
1202 	char label[LABEL_SIZE];
1203 
1204 	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1205 		return;
1206 
1207 	switch (dev->devtype) {
1208 	case TYPE_RAID:
1209 		snprintf(label, LABEL_SIZE, "controller");
1210 		break;
1211 	case TYPE_ENCLOSURE:
1212 		snprintf(label, LABEL_SIZE, "enclosure");
1213 		break;
1214 	case TYPE_DISK:
1215 	case TYPE_ZBC:
1216 		if (dev->external)
1217 			snprintf(label, LABEL_SIZE, "external");
1218 		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1219 			snprintf(label, LABEL_SIZE, "%s",
1220 				raid_label[PHYSICAL_DRIVE]);
1221 		else
1222 			snprintf(label, LABEL_SIZE, "RAID-%s",
1223 				dev->raid_level > RAID_UNKNOWN ? "?" :
1224 				raid_label[dev->raid_level]);
1225 		break;
1226 	case TYPE_ROM:
1227 		snprintf(label, LABEL_SIZE, "rom");
1228 		break;
1229 	case TYPE_TAPE:
1230 		snprintf(label, LABEL_SIZE, "tape");
1231 		break;
1232 	case TYPE_MEDIUM_CHANGER:
1233 		snprintf(label, LABEL_SIZE, "changer");
1234 		break;
1235 	default:
1236 		snprintf(label, LABEL_SIZE, "UNKNOWN");
1237 		break;
1238 	}
1239 
1240 	dev_printk(level, &h->pdev->dev,
1241 			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1242 			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1243 			description,
1244 			scsi_device_type(dev->devtype),
1245 			dev->vendor,
1246 			dev->model,
1247 			label,
1248 			dev->offload_config ? '+' : '-',
1249 			dev->offload_enabled ? '+' : '-',
1250 			dev->expose_device);
1251 }
1252 
1253 /* Add an entry into h->dev[] array. */
1254 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1255 		struct hpsa_scsi_dev_t *device,
1256 		struct hpsa_scsi_dev_t *added[], int *nadded)
1257 {
1258 	/* assumes h->devlock is held */
1259 	int n = h->ndevices;
1260 	int i;
1261 	unsigned char addr1[8], addr2[8];
1262 	struct hpsa_scsi_dev_t *sd;
1263 
1264 	if (n >= HPSA_MAX_DEVICES) {
1265 		dev_err(&h->pdev->dev, "too many devices, some will be "
1266 			"inaccessible.\n");
1267 		return -1;
1268 	}
1269 
1270 	/* physical devices do not have lun or target assigned until now. */
1271 	if (device->lun != -1)
1272 		/* Logical device, lun is already assigned. */
1273 		goto lun_assigned;
1274 
1275 	/* If this device is a non-zero lun of a multi-lun device,
1276 	 * byte 4 of the 8-byte LUN addr will contain the logical
1277 	 * unit number, zero otherwise.
1278 	 */
1279 	if (device->scsi3addr[4] == 0) {
1280 		/* This is not a non-zero lun of a multi-lun device */
1281 		if (hpsa_find_target_lun(h, device->scsi3addr,
1282 			device->bus, &device->target, &device->lun) != 0)
1283 			return -1;
1284 		goto lun_assigned;
1285 	}
1286 
1287 	/* This is a non-zero lun of a multi-lun device.
1288 	 * Search through our list and find the device which
1289 	 * has the same 8 byte LUN address, except bytes 4 and 5.
1290 	 * Assign the same bus and target for this new LUN.
1291 	 * Use the logical unit number from the firmware.
1292 	 */
1293 	memcpy(addr1, device->scsi3addr, 8);
1294 	addr1[4] = 0;
1295 	addr1[5] = 0;
1296 	for (i = 0; i < n; i++) {
1297 		sd = h->dev[i];
1298 		memcpy(addr2, sd->scsi3addr, 8);
1299 		addr2[4] = 0;
1300 		addr2[5] = 0;
1301 		/* differ only in bytes 4 and 5? */
1302 		if (memcmp(addr1, addr2, 8) == 0) {
1303 			device->bus = sd->bus;
1304 			device->target = sd->target;
1305 			device->lun = device->scsi3addr[4];
1306 			break;
1307 		}
1308 	}
1309 	if (device->lun == -1) {
1310 		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1311 			" suspect firmware bug or unsupported hardware "
1312 			"configuration.\n");
1313 		return -1;
1314 	}
1315 
1316 lun_assigned:
1317 
1318 	h->dev[n] = device;
1319 	h->ndevices++;
1320 	added[*nadded] = device;
1321 	(*nadded)++;
1322 	hpsa_show_dev_msg(KERN_INFO, h, device,
1323 		device->expose_device ? "added" : "masked");
1324 	device->offload_to_be_enabled = device->offload_enabled;
1325 	device->offload_enabled = 0;
1326 	return 0;
1327 }
1328 
1329 /* Update an entry in h->dev[] array. */
1330 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1331 	int entry, struct hpsa_scsi_dev_t *new_entry)
1332 {
1333 	int offload_enabled;
1334 	/* assumes h->devlock is held */
1335 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1336 
1337 	/* Raid level changed. */
1338 	h->dev[entry]->raid_level = new_entry->raid_level;
1339 
1340 	/* Raid offload parameters changed.  Careful about the ordering. */
1341 	if (new_entry->offload_config && new_entry->offload_enabled) {
1342 		/*
1343 		 * if drive is newly offload_enabled, we want to copy the
1344 		 * raid map data first.  If previously offload_enabled and
1345 		 * offload_config were set, raid map data had better be
1346 		 * the same as it was before.  if raid map data is changed
1347 		 * then it had better be the case that
1348 		 * h->dev[entry]->offload_enabled is currently 0.
1349 		 */
1350 		h->dev[entry]->raid_map = new_entry->raid_map;
1351 		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1352 	}
1353 	if (new_entry->hba_ioaccel_enabled) {
1354 		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1355 		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1356 	}
1357 	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1358 	h->dev[entry]->offload_config = new_entry->offload_config;
1359 	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1360 	h->dev[entry]->queue_depth = new_entry->queue_depth;
1361 
1362 	/*
1363 	 * We can turn off ioaccel offload now, but need to delay turning
1364 	 * it on until we can update h->dev[entry]->phys_disk[], but we
1365 	 * can't do that until all the devices are updated.
1366 	 */
1367 	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1368 	if (!new_entry->offload_enabled)
1369 		h->dev[entry]->offload_enabled = 0;
1370 
1371 	offload_enabled = h->dev[entry]->offload_enabled;
1372 	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1373 	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1374 	h->dev[entry]->offload_enabled = offload_enabled;
1375 }
1376 
1377 /* Replace an entry from h->dev[] array. */
1378 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1379 	int entry, struct hpsa_scsi_dev_t *new_entry,
1380 	struct hpsa_scsi_dev_t *added[], int *nadded,
1381 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
1382 {
1383 	/* assumes h->devlock is held */
1384 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1385 	removed[*nremoved] = h->dev[entry];
1386 	(*nremoved)++;
1387 
1388 	/*
1389 	 * New physical devices won't have target/lun assigned yet
1390 	 * so we need to preserve the values in the slot we are replacing.
1391 	 */
1392 	if (new_entry->target == -1) {
1393 		new_entry->target = h->dev[entry]->target;
1394 		new_entry->lun = h->dev[entry]->lun;
1395 	}
1396 
1397 	h->dev[entry] = new_entry;
1398 	added[*nadded] = new_entry;
1399 	(*nadded)++;
1400 	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1401 	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1402 	new_entry->offload_enabled = 0;
1403 }
1404 
1405 /* Remove an entry from h->dev[] array. */
1406 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1407 	struct hpsa_scsi_dev_t *removed[], int *nremoved)
1408 {
1409 	/* assumes h->devlock is held */
1410 	int i;
1411 	struct hpsa_scsi_dev_t *sd;
1412 
1413 	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1414 
1415 	sd = h->dev[entry];
1416 	removed[*nremoved] = h->dev[entry];
1417 	(*nremoved)++;
1418 
1419 	for (i = entry; i < h->ndevices-1; i++)
1420 		h->dev[i] = h->dev[i+1];
1421 	h->ndevices--;
1422 	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1423 }
1424 
1425 #define SCSI3ADDR_EQ(a, b) ( \
1426 	(a)[7] == (b)[7] && \
1427 	(a)[6] == (b)[6] && \
1428 	(a)[5] == (b)[5] && \
1429 	(a)[4] == (b)[4] && \
1430 	(a)[3] == (b)[3] && \
1431 	(a)[2] == (b)[2] && \
1432 	(a)[1] == (b)[1] && \
1433 	(a)[0] == (b)[0])
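
/*
 * SCSI3ADDR_EQ above is just an unrolled 8-byte compare; an equivalent
 * (illustrative) form is memcmp(a, b, 8) == 0.
 */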
1434 
1435 static void fixup_botched_add(struct ctlr_info *h,
1436 	struct hpsa_scsi_dev_t *added)
1437 {
1438 	/* called when scsi_add_device fails in order to re-adjust
1439 	 * h->dev[] to match the mid layer's view.
1440 	 */
1441 	unsigned long flags;
1442 	int i, j;
1443 
1444 	spin_lock_irqsave(&h->lock, flags);
1445 	for (i = 0; i < h->ndevices; i++) {
1446 		if (h->dev[i] == added) {
1447 			for (j = i; j < h->ndevices-1; j++)
1448 				h->dev[j] = h->dev[j+1];
1449 			h->ndevices--;
1450 			break;
1451 		}
1452 	}
1453 	spin_unlock_irqrestore(&h->lock, flags);
1454 	kfree(added);
1455 }
1456 
1457 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1458 	struct hpsa_scsi_dev_t *dev2)
1459 {
1460 	/* we compare everything except lun and target as these
1461 	 * are not yet assigned.  Compare parts likely
1462 	 * to differ first.
1463 	 */
1464 	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1465 		sizeof(dev1->scsi3addr)) != 0)
1466 		return 0;
1467 	if (memcmp(dev1->device_id, dev2->device_id,
1468 		sizeof(dev1->device_id)) != 0)
1469 		return 0;
1470 	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1471 		return 0;
1472 	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1473 		return 0;
1474 	if (dev1->devtype != dev2->devtype)
1475 		return 0;
1476 	if (dev1->bus != dev2->bus)
1477 		return 0;
1478 	return 1;
1479 }
1480 
1481 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1482 	struct hpsa_scsi_dev_t *dev2)
1483 {
1484 	/* Device attributes that can change, but don't mean
1485 	 * that the device is a different device, nor that the OS
1486 	 * needs to be told anything about the change.
1487 	 */
1488 	if (dev1->raid_level != dev2->raid_level)
1489 		return 1;
1490 	if (dev1->offload_config != dev2->offload_config)
1491 		return 1;
1492 	if (dev1->offload_enabled != dev2->offload_enabled)
1493 		return 1;
1494 	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1495 		if (dev1->queue_depth != dev2->queue_depth)
1496 			return 1;
1497 	return 0;
1498 }
1499 
1500 /* Find needle in haystack.  If exact match found, return DEVICE_SAME,
1501  * and return needle location in *index.  If scsi3addr matches, but not
1502  * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1503  * location in *index.
1504  * In the case of a minor device attribute change, such as RAID level, just
1505  * return DEVICE_UPDATED, along with the updated device's location in index.
1506  * If needle not found, return DEVICE_NOT_FOUND.
1507  */
1508 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1509 	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1510 	int *index)
1511 {
1512 	int i;
1513 #define DEVICE_NOT_FOUND 0
1514 #define DEVICE_CHANGED 1
1515 #define DEVICE_SAME 2
1516 #define DEVICE_UPDATED 3
1517 	if (needle == NULL)
1518 		return DEVICE_NOT_FOUND;
1519 
1520 	for (i = 0; i < haystack_size; i++) {
1521 		if (haystack[i] == NULL) /* previously removed. */
1522 			continue;
1523 		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1524 			*index = i;
1525 			if (device_is_the_same(needle, haystack[i])) {
1526 				if (device_updated(needle, haystack[i]))
1527 					return DEVICE_UPDATED;
1528 				return DEVICE_SAME;
1529 			} else {
1530 				/* Keep offline devices offline */
1531 				if (needle->volume_offline)
1532 					return DEVICE_NOT_FOUND;
1533 				return DEVICE_CHANGED;
1534 			}
1535 		}
1536 	}
1537 	*index = -1;
1538 	return DEVICE_NOT_FOUND;
1539 }
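
/*
 * Illustrative caller sketch (not driver code): typical dispatch on the
 * return value when reconciling a freshly discovered device "sd" against
 * the current h->dev[] table, with "entry" receiving the matching index:
 *
 *	switch (hpsa_scsi_find_entry(sd, h->dev, h->ndevices, &entry)) {
 *	case DEVICE_SAME:	break;
 *	case DEVICE_UPDATED:	hpsa_scsi_update_entry(h, entry, sd); break;
 *	case DEVICE_CHANGED:	(replace the slot in place) break;
 *	case DEVICE_NOT_FOUND:	(add a new entry) break;
 *	}
 */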
1540 
1541 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1542 					unsigned char scsi3addr[])
1543 {
1544 	struct offline_device_entry *device;
1545 	unsigned long flags;
1546 
1547 	/* Check to see if device is already on the list */
1548 	spin_lock_irqsave(&h->offline_device_lock, flags);
1549 	list_for_each_entry(device, &h->offline_device_list, offline_list) {
1550 		if (memcmp(device->scsi3addr, scsi3addr,
1551 			sizeof(device->scsi3addr)) == 0) {
1552 			spin_unlock_irqrestore(&h->offline_device_lock, flags);
1553 			return;
1554 		}
1555 	}
1556 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
1557 
1558 	/* Device is not on the list, add it. */
1559 	device = kmalloc(sizeof(*device), GFP_KERNEL);
1560 	if (!device)
1561 		return;
1562 
1563 	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1564 	spin_lock_irqsave(&h->offline_device_lock, flags);
1565 	list_add_tail(&device->offline_list, &h->offline_device_list);
1566 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
1567 }
1568 
1569 /* Print a message explaining various offline volume states */
1570 static void hpsa_show_volume_status(struct ctlr_info *h,
1571 	struct hpsa_scsi_dev_t *sd)
1572 {
1573 	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1574 		dev_info(&h->pdev->dev,
1575 			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1576 			h->scsi_host->host_no,
1577 			sd->bus, sd->target, sd->lun);
1578 	switch (sd->volume_offline) {
1579 	case HPSA_LV_OK:
1580 		break;
1581 	case HPSA_LV_UNDERGOING_ERASE:
1582 		dev_info(&h->pdev->dev,
1583 			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1584 			h->scsi_host->host_no,
1585 			sd->bus, sd->target, sd->lun);
1586 		break;
1587 	case HPSA_LV_NOT_AVAILABLE:
1588 		dev_info(&h->pdev->dev,
1589 			"C%d:B%d:T%d:L%d Volume is not available; waiting for transformation.\n",
1590 			h->scsi_host->host_no,
1591 			sd->bus, sd->target, sd->lun);
1592 		break;
1593 	case HPSA_LV_UNDERGOING_RPI:
1594 		dev_info(&h->pdev->dev,
1595 			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1596 			h->scsi_host->host_no,
1597 			sd->bus, sd->target, sd->lun);
1598 		break;
1599 	case HPSA_LV_PENDING_RPI:
1600 		dev_info(&h->pdev->dev,
1601 			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1602 			h->scsi_host->host_no,
1603 			sd->bus, sd->target, sd->lun);
1604 		break;
1605 	case HPSA_LV_ENCRYPTED_NO_KEY:
1606 		dev_info(&h->pdev->dev,
1607 			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1608 			h->scsi_host->host_no,
1609 			sd->bus, sd->target, sd->lun);
1610 		break;
1611 	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1612 		dev_info(&h->pdev->dev,
1613 			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1614 			h->scsi_host->host_no,
1615 			sd->bus, sd->target, sd->lun);
1616 		break;
1617 	case HPSA_LV_UNDERGOING_ENCRYPTION:
1618 		dev_info(&h->pdev->dev,
1619 			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1620 			h->scsi_host->host_no,
1621 			sd->bus, sd->target, sd->lun);
1622 		break;
1623 	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1624 		dev_info(&h->pdev->dev,
1625 			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1626 			h->scsi_host->host_no,
1627 			sd->bus, sd->target, sd->lun);
1628 		break;
1629 	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1630 		dev_info(&h->pdev->dev,
1631 			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1632 			h->scsi_host->host_no,
1633 			sd->bus, sd->target, sd->lun);
1634 		break;
1635 	case HPSA_LV_PENDING_ENCRYPTION:
1636 		dev_info(&h->pdev->dev,
1637 			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1638 			h->scsi_host->host_no,
1639 			sd->bus, sd->target, sd->lun);
1640 		break;
1641 	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1642 		dev_info(&h->pdev->dev,
1643 			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1644 			h->scsi_host->host_no,
1645 			sd->bus, sd->target, sd->lun);
1646 		break;
1647 	}
1648 }
1649 
1650 /*
1651  * Figure the list of physical drive pointers for a logical drive with
1652  * raid offload configured.
1653  */
1654 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1655 				struct hpsa_scsi_dev_t *dev[], int ndevices,
1656 				struct hpsa_scsi_dev_t *logical_drive)
1657 {
1658 	struct raid_map_data *map = &logical_drive->raid_map;
1659 	struct raid_map_disk_data *dd = &map->data[0];
1660 	int i, j;
1661 	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1662 				le16_to_cpu(map->metadata_disks_per_row);
1663 	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1664 				le16_to_cpu(map->layout_map_count) *
1665 				total_disks_per_row;
1666 	int nphys_disk = le16_to_cpu(map->layout_map_count) *
1667 				total_disks_per_row;
1668 	int qdepth;
1669 
1670 	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1671 		nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1672 
1673 	logical_drive->nphysical_disks = nraid_map_entries;
1674 
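	/*
	 * Presumably a logical drive can sustain roughly as much I/O as
	 * its member physical disks combined, so accumulate the members'
	 * queue depths below, capped at the controller's command pool
	 * size (h->nr_cmds).
	 */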
1675 	qdepth = 0;
1676 	for (i = 0; i < nraid_map_entries; i++) {
1677 		logical_drive->phys_disk[i] = NULL;
1678 		if (!logical_drive->offload_config)
1679 			continue;
1680 		for (j = 0; j < ndevices; j++) {
1681 			if (dev[j] == NULL)
1682 				continue;
1683 			if (dev[j]->devtype != TYPE_DISK &&
1684 			    dev[j]->devtype != TYPE_ZBC)
1685 				continue;
1686 			if (is_logical_device(dev[j]))
1687 				continue;
1688 			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1689 				continue;
1690 
1691 			logical_drive->phys_disk[i] = dev[j];
1692 			if (i < nphys_disk)
1693 				qdepth = min(h->nr_cmds, qdepth +
1694 				    logical_drive->phys_disk[i]->queue_depth);
1695 			break;
1696 		}
1697 
1698 		/*
1699 		 * This can happen if a physical drive is removed and
1700 		 * the logical drive is degraded.  In that case, the RAID
1701 		 * map data will refer to a physical disk which isn't actually
1702 		 * present.  And in that case offload_enabled should already
1703 		 * present.  In that case offload_enabled should already
1704 		 * be 0, but we'll turn it off here just in case.
1705 		if (!logical_drive->phys_disk[i]) {
1706 			logical_drive->offload_enabled = 0;
1707 			logical_drive->offload_to_be_enabled = 0;
1708 			logical_drive->queue_depth = 8;
1709 		}
1710 	}
1711 	if (nraid_map_entries)
1712 		/*
1713 		 * This is correct for reads, too high for full stripe writes,
1714 		 * way too high for partial stripe writes
1715 		 */
1716 		logical_drive->queue_depth = qdepth;
1717 	else
1718 		logical_drive->queue_depth = h->nr_cmds;
1719 }
1720 
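/*
 * Rebuild phys_disk[] for every logical drive whose ioaccel offload is
 * not currently enabled; drives with offload enabled are assumed to
 * have a stable RAID map and are left untouched.
 */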
1721 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1722 				struct hpsa_scsi_dev_t *dev[], int ndevices)
1723 {
1724 	int i;
1725 
1726 	for (i = 0; i < ndevices; i++) {
1727 		if (dev[i] == NULL)
1728 			continue;
1729 		if (dev[i]->devtype != TYPE_DISK &&
1730 		    dev[i]->devtype != TYPE_ZBC)
1731 			continue;
1732 		if (!is_logical_device(dev[i]))
1733 			continue;
1734 
1735 		/*
1736 		 * If offload is currently enabled, the RAID map and
1737 		 * phys_disk[] assignment *better* not be changing,
1738 		 * and since it isn't changing, we do not need to
1739 		 * update it.
1740 		 */
1741 		if (dev[i]->offload_enabled)
1742 			continue;
1743 
1744 		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1745 	}
1746 }
1747 
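/*
 * Expose a device to the SCSI midlayer: logical (RAID) devices go in
 * through scsi_add_device(), physical (HBA) devices through the SAS
 * transport layer.
 */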
1748 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1749 {
1750 	int rc = 0;
1751 
1752 	if (!h->scsi_host)
1753 		return 1;
1754 
1755 	if (is_logical_device(device)) /* RAID */
1756 		rc = scsi_add_device(h->scsi_host, device->bus,
1757 					device->target, device->lun);
1758 	else /* HBA */
1759 		rc = hpsa_add_sas_device(h->sas_host, device);
1760 
1761 	return rc;
1762 }
1763 
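/*
 * Count commands still outstanding against @dev by walking the whole
 * command pool.  Bumping the refcount pins each entry; a refcount > 1
 * means the slot is in use, and the idle check under h->lock confirms
 * the command hasn't already completed.
 */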
1764 static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1765 						struct hpsa_scsi_dev_t *dev)
1766 {
1767 	int i;
1768 	int count = 0;
1769 
1770 	for (i = 0; i < h->nr_cmds; i++) {
1771 		struct CommandList *c = h->cmd_pool + i;
1772 		int refcount = atomic_inc_return(&c->refcount);
1773 
1774 		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1775 				dev->scsi3addr)) {
1776 			unsigned long flags;
1777 
1778 			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
1779 			if (!hpsa_is_cmd_idle(c))
1780 				++count;
1781 			spin_unlock_irqrestore(&h->lock, flags);
1782 		}
1783 
1784 		cmd_free(h, c);
1785 	}
1786 
1787 	return count;
1788 }
1789 
1790 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1791 						struct hpsa_scsi_dev_t *device)
1792 {
1793 	int cmds = 0;
1794 	int waits = 0;
1795 
1796 	while (1) {
1797 		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1798 		if (cmds == 0)
1799 			break;
1800 		if (++waits > 20)
1801 			break;
1802 		dev_warn(&h->pdev->dev,
1803 			"%s: removing device with %d outstanding commands!\n",
1804 			__func__, cmds);
1805 		msleep(1000);
1806 	}
1807 }
1808 
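/*
 * Undo hpsa_add_device().  HBA devices are marked removed and their
 * outstanding commands drained before the SAS device is torn down.
 */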
1809 static void hpsa_remove_device(struct ctlr_info *h,
1810 			struct hpsa_scsi_dev_t *device)
1811 {
1812 	struct scsi_device *sdev = NULL;
1813 
1814 	if (!h->scsi_host)
1815 		return;
1816 
1817 	if (is_logical_device(device)) { /* RAID */
1818 		sdev = scsi_device_lookup(h->scsi_host, device->bus,
1819 						device->target, device->lun);
1820 		if (sdev) {
1821 			scsi_remove_device(sdev);
1822 			scsi_device_put(sdev);
1823 		} else {
1824 			/*
1825 			 * We don't expect to get here.  Future commands
1826 			 * to this device will get a selection timeout as
1827 			 * if the device were gone.
1828 			 */
1829 			hpsa_show_dev_msg(KERN_WARNING, h, device,
1830 					"didn't find device for removal.");
1831 		}
1832 	} else { /* HBA */
1833 
1834 		device->removed = 1;
1835 		hpsa_wait_for_outstanding_commands_for_dev(h, device);
1836 
1837 		hpsa_remove_sas_device(device);
1838 	}
1839 }
1840 
1841 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1842 	struct hpsa_scsi_dev_t *sd[], int nsds)
1843 {
1844 	/* sd contains scsi3 addresses and devtypes, and inquiry
1845 	 * data.  This function takes what's in sd to be the current
1846 	 * reality and updates h->dev[] to reflect that reality.
1847 	 */
1848 	int i, entry, device_change, changes = 0;
1849 	struct hpsa_scsi_dev_t *csd;
1850 	unsigned long flags;
1851 	struct hpsa_scsi_dev_t **added, **removed;
1852 	int nadded, nremoved;
1853 
1854 	/*
1855 	 * A reset can cause a device's status to change;
1856 	 * re-schedule the scan to see what happened.
1857 	 */
1858 	if (h->reset_in_progress) {
1859 		h->drv_req_rescan = 1;
1860 		return;
1861 	}
1862 
1863 	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1864 	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1865 
1866 	if (!added || !removed) {
1867 		dev_warn(&h->pdev->dev,
1868 			"out of memory in adjust_hpsa_scsi_table\n");
1869 		goto free_and_out;
1870 	}
1871 
1872 	spin_lock_irqsave(&h->devlock, flags);
1873 
1874 	/* find any devices in h->dev[] that are not in
1875 	 * sd[] and remove them from h->dev[], and for any
1876 	 * devices which have changed, remove the old device
1877 	 * info and add the new device info.
1878 	 * If minor device attributes change, just update
1879 	 * the existing device structure.
1880 	 */
1881 	i = 0;
1882 	nremoved = 0;
1883 	nadded = 0;
1884 	while (i < h->ndevices) {
1885 		csd = h->dev[i];
1886 		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1887 		if (device_change == DEVICE_NOT_FOUND) {
1888 			changes++;
1889 			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1890 			continue; /* remove ^^^, hence i not incremented */
1891 		} else if (device_change == DEVICE_CHANGED) {
1892 			changes++;
1893 			hpsa_scsi_replace_entry(h, i, sd[entry],
1894 				added, &nadded, removed, &nremoved);
1895 			/* Set it to NULL to prevent it from being freed
1896 			 * at the bottom of hpsa_update_scsi_devices()
1897 			 */
1898 			sd[entry] = NULL;
1899 		} else if (device_change == DEVICE_UPDATED) {
1900 			hpsa_scsi_update_entry(h, i, sd[entry]);
1901 		}
1902 		i++;
1903 	}
1904 
1905 	/* Now, make sure every device listed in sd[] is also
1906 	 * listed in h->dev[], adding them if they aren't found
1907 	 */
1908 
1909 	for (i = 0; i < nsds; i++) {
1910 		if (!sd[i]) /* if already added above. */
1911 			continue;
1912 
1913 		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1914 		 * as the SCSI mid-layer does not handle such devices well.
1915 		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1916 		 * at 160Hz, and prevents the system from coming up.
1917 		 */
1918 		if (sd[i]->volume_offline) {
1919 			hpsa_show_volume_status(h, sd[i]);
1920 			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1921 			continue;
1922 		}
1923 
1924 		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1925 					h->ndevices, &entry);
1926 		if (device_change == DEVICE_NOT_FOUND) {
1927 			changes++;
1928 			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1929 				break;
1930 			sd[i] = NULL; /* prevent from being freed later. */
1931 		} else if (device_change == DEVICE_CHANGED) {
1932 			/* should never happen... */
1933 			changes++;
1934 			dev_warn(&h->pdev->dev,
1935 				"device unexpectedly changed.\n");
1936 			/* but if it does happen, we just ignore that device */
1937 		}
1938 	}
1939 	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1940 
1941 	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
1942 	 * any logical drives that need it enabled.
1943 	 */
1944 	for (i = 0; i < h->ndevices; i++) {
1945 		if (h->dev[i] == NULL)
1946 			continue;
1947 		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1948 	}
1949 
1950 	spin_unlock_irqrestore(&h->devlock, flags);
1951 
1952 	/* Monitor devices which are in one of several NOT READY states to be
1953 	 * brought online later. This must be done without holding h->devlock,
1954 	 * so don't touch h->dev[]
1955 	 */
1956 	for (i = 0; i < nsds; i++) {
1957 		if (!sd[i]) /* if already added above. */
1958 			continue;
1959 		if (sd[i]->volume_offline)
1960 			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1961 	}
1962 
1963 	/* Don't notify the scsi mid layer of any changes the first time
1964 	 * through (or if there are no changes); scsi_scan_host will do it
1965 	 * later, on the initial scan.
1966 	 */
1967 	if (!changes)
1968 		goto free_and_out;
1969 
1970 	/* Notify scsi mid layer of any removed devices */
1971 	for (i = 0; i < nremoved; i++) {
1972 		if (removed[i] == NULL)
1973 			continue;
1974 		if (removed[i]->expose_device)
1975 			hpsa_remove_device(h, removed[i]);
1976 		kfree(removed[i]);
1977 		removed[i] = NULL;
1978 	}
1979 
1980 	/* Notify scsi mid layer of any added devices */
1981 	for (i = 0; i < nadded; i++) {
1982 		int rc = 0;
1983 
1984 		if (added[i] == NULL)
1985 			continue;
1986 		if (!(added[i]->expose_device))
1987 			continue;
1988 		rc = hpsa_add_device(h, added[i]);
1989 		if (!rc)
1990 			continue;
1991 		dev_warn(&h->pdev->dev,
1992 			"addition failed %d, device not added.\n", rc);
1993 		/* now we have to remove it from h->dev,
1994 		 * since it didn't get added to scsi mid layer
1995 		 */
1996 		fixup_botched_add(h, added[i]);
1997 		h->drv_req_rescan = 1;
1998 	}
1999 
2000 free_and_out:
2001 	kfree(added);
2002 	kfree(removed);
2003 }
2004 
2005 /*
2006  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
2007  * Assumes h->devlock is held.
2008  */
2009 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2010 	int bus, int target, int lun)
2011 {
2012 	int i;
2013 	struct hpsa_scsi_dev_t *sd;
2014 
2015 	for (i = 0; i < h->ndevices; i++) {
2016 		sd = h->dev[i];
2017 		if (sd->bus == bus && sd->target == target && sd->lun == lun)
2018 			return sd;
2019 	}
2020 	return NULL;
2021 }
2022 
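/*
 * scsi_host_template slave_alloc hook: bind the midlayer's scsi_device
 * to our internal hpsa_scsi_dev_t through sdev->hostdata.  Physical
 * devices are matched via their SAS rphy, everything else by
 * bus/target/lun.
 */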
2023 static int hpsa_slave_alloc(struct scsi_device *sdev)
2024 {
2025 	struct hpsa_scsi_dev_t *sd = NULL;
2026 	unsigned long flags;
2027 	struct ctlr_info *h;
2028 
2029 	h = sdev_to_hba(sdev);
2030 	spin_lock_irqsave(&h->devlock, flags);
2031 	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2032 		struct scsi_target *starget;
2033 		struct sas_rphy *rphy;
2034 
2035 		starget = scsi_target(sdev);
2036 		rphy = target_to_rphy(starget);
2037 		sd = hpsa_find_device_by_sas_rphy(h, rphy);
2038 		if (sd) {
2039 			sd->target = sdev_id(sdev);
2040 			sd->lun = sdev->lun;
2041 		}
2042 	}
2043 	if (!sd)
2044 		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2045 					sdev_id(sdev), sdev->lun);
2046 
2047 	if (sd && sd->expose_device) {
2048 		atomic_set(&sd->ioaccel_cmds_out, 0);
2049 		sdev->hostdata = sd;
2050 	} else
2051 		sdev->hostdata = NULL;
2052 	spin_unlock_irqrestore(&h->devlock, flags);
2053 	return 0;
2054 }
2055 
2056 /* configure scsi device based on internal per-device structure */
2057 static int hpsa_slave_configure(struct scsi_device *sdev)
2058 {
2059 	struct hpsa_scsi_dev_t *sd;
2060 	int queue_depth;
2061 
2062 	sd = sdev->hostdata;
2063 	sdev->no_uld_attach = !sd || !sd->expose_device;
2064 
2065 	if (sd)
2066 		queue_depth = sd->queue_depth != 0 ?
2067 			sd->queue_depth : sdev->host->can_queue;
2068 	else
2069 		queue_depth = sdev->host->can_queue;
2070 
2071 	scsi_change_queue_depth(sdev, queue_depth);
2072 
2073 	return 0;
2074 }
2075 
2076 static void hpsa_slave_destroy(struct scsi_device *sdev)
2077 {
2078 	/* nothing to do. */
2079 }
2080 
2081 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
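/*
 * Per-command SG chain blocks for the ioaccel2 path: one array of up to
 * h->maxsgentries elements per command, allocated up front and freed in
 * bulk.
 */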
2082 {
2083 	int i;
2084 
2085 	if (!h->ioaccel2_cmd_sg_list)
2086 		return;
2087 	for (i = 0; i < h->nr_cmds; i++) {
2088 		kfree(h->ioaccel2_cmd_sg_list[i]);
2089 		h->ioaccel2_cmd_sg_list[i] = NULL;
2090 	}
2091 	kfree(h->ioaccel2_cmd_sg_list);
2092 	h->ioaccel2_cmd_sg_list = NULL;
2093 }
2094 
2095 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2096 {
2097 	int i;
2098 
2099 	if (h->chainsize <= 0)
2100 		return 0;
2101 
2102 	h->ioaccel2_cmd_sg_list =
2103 		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2104 					GFP_KERNEL);
2105 	if (!h->ioaccel2_cmd_sg_list)
2106 		return -ENOMEM;
2107 	for (i = 0; i < h->nr_cmds; i++) {
2108 		h->ioaccel2_cmd_sg_list[i] =
2109 			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2110 					h->maxsgentries, GFP_KERNEL);
2111 		if (!h->ioaccel2_cmd_sg_list[i])
2112 			goto clean;
2113 	}
2114 	return 0;
2115 
2116 clean:
2117 	hpsa_free_ioaccel2_sg_chain_blocks(h);
2118 	return -ENOMEM;
2119 }
2120 
2121 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2122 {
2123 	int i;
2124 
2125 	if (!h->cmd_sg_list)
2126 		return;
2127 	for (i = 0; i < h->nr_cmds; i++) {
2128 		kfree(h->cmd_sg_list[i]);
2129 		h->cmd_sg_list[i] = NULL;
2130 	}
2131 	kfree(h->cmd_sg_list);
2132 	h->cmd_sg_list = NULL;
2133 }
2134 
2135 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2136 {
2137 	int i;
2138 
2139 	if (h->chainsize <= 0)
2140 		return 0;
2141 
2142 	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2143 				GFP_KERNEL);
2144 	if (!h->cmd_sg_list)
2145 		return -ENOMEM;
2146 
2147 	for (i = 0; i < h->nr_cmds; i++) {
2148 		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2149 						h->chainsize, GFP_KERNEL);
2150 		if (!h->cmd_sg_list[i])
2151 			goto clean;
2152 
2153 	}
2154 	return 0;
2155 
2156 clean:
2157 	hpsa_free_sg_chain_blocks(h);
2158 	return -ENOMEM;
2159 }
2160 
2161 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2162 	struct io_accel2_cmd *cp, struct CommandList *c)
2163 {
2164 	struct ioaccel2_sg_element *chain_block;
2165 	u64 temp64;
2166 	u32 chain_size;
2167 
2168 	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2169 	chain_size = le32_to_cpu(cp->sg[0].length);
2170 	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2171 				PCI_DMA_TODEVICE);
2172 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
2173 		/* prevent subsequent unmapping */
2174 		cp->sg->address = 0;
2175 		return -1;
2176 	}
2177 	cp->sg->address = cpu_to_le64(temp64);
2178 	return 0;
2179 }
2180 
2181 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2182 	struct io_accel2_cmd *cp)
2183 {
2184 	struct ioaccel2_sg_element *chain_sg;
2185 	u64 temp64;
2186 	u32 chain_size;
2187 
2188 	chain_sg = cp->sg;
2189 	temp64 = le64_to_cpu(chain_sg->address);
2190 	chain_size = le32_to_cpu(cp->sg[0].length);
2191 	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2192 }
2193 
2194 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
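/*
 * When a request needs more SG entries than fit in the command itself,
 * the last embedded descriptor is converted into a chain pointer
 * (HPSA_SG_CHAIN) to a separately DMA-mapped block that holds the
 * remaining entries.
 */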
2195 	struct CommandList *c)
2196 {
2197 	struct SGDescriptor *chain_sg, *chain_block;
2198 	u64 temp64;
2199 	u32 chain_len;
2200 
2201 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2202 	chain_block = h->cmd_sg_list[c->cmdindex];
2203 	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2204 	chain_len = sizeof(*chain_sg) *
2205 		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2206 	chain_sg->Len = cpu_to_le32(chain_len);
2207 	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2208 				PCI_DMA_TODEVICE);
2209 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
2210 		/* prevent subsequent unmapping */
2211 		chain_sg->Addr = cpu_to_le64(0);
2212 		return -1;
2213 	}
2214 	chain_sg->Addr = cpu_to_le64(temp64);
2215 	return 0;
2216 }
2217 
2218 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2219 	struct CommandList *c)
2220 {
2221 	struct SGDescriptor *chain_sg;
2222 
2223 	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2224 		return;
2225 
2226 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2227 	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2228 			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2229 }
2230 
2232 /* Decode the various types of errors on ioaccel2 path.
2233  * Return 1 for any error that should generate a RAID path retry.
2234  * Return 0 for errors that don't require a RAID path retry.
2235  */
2236 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2237 					struct CommandList *c,
2238 					struct scsi_cmnd *cmd,
2239 					struct io_accel2_cmd *c2,
2240 					struct hpsa_scsi_dev_t *dev)
2241 {
2242 	int data_len;
2243 	int retry = 0;
2244 	u32 ioaccel2_resid = 0;
2245 
2246 	switch (c2->error_data.serv_response) {
2247 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
2248 		switch (c2->error_data.status) {
2249 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2250 			break;
2251 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2252 			cmd->result |= SAM_STAT_CHECK_CONDITION;
2253 			if (c2->error_data.data_present !=
2254 					IOACCEL2_SENSE_DATA_PRESENT) {
2255 				memset(cmd->sense_buffer, 0,
2256 					SCSI_SENSE_BUFFERSIZE);
2257 				break;
2258 			}
2259 			/* copy the sense data */
2260 			data_len = c2->error_data.sense_data_len;
2261 			if (data_len > SCSI_SENSE_BUFFERSIZE)
2262 				data_len = SCSI_SENSE_BUFFERSIZE;
2263 			if (data_len > sizeof(c2->error_data.sense_data_buff))
2264 				data_len =
2265 					sizeof(c2->error_data.sense_data_buff);
2266 			memcpy(cmd->sense_buffer,
2267 				c2->error_data.sense_data_buff, data_len);
2268 			retry = 1;
2269 			break;
2270 		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2271 			retry = 1;
2272 			break;
2273 		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2274 			retry = 1;
2275 			break;
2276 		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2277 			retry = 1;
2278 			break;
2279 		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2280 			retry = 1;
2281 			break;
2282 		default:
2283 			retry = 1;
2284 			break;
2285 		}
2286 		break;
2287 	case IOACCEL2_SERV_RESPONSE_FAILURE:
2288 		switch (c2->error_data.status) {
2289 		case IOACCEL2_STATUS_SR_IO_ERROR:
2290 		case IOACCEL2_STATUS_SR_IO_ABORTED:
2291 		case IOACCEL2_STATUS_SR_OVERRUN:
2292 			retry = 1;
2293 			break;
2294 		case IOACCEL2_STATUS_SR_UNDERRUN:
2295 			cmd->result = (DID_OK << 16);		/* host byte */
2296 			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
2297 			ioaccel2_resid = get_unaligned_le32(
2298 						&c2->error_data.resid_cnt[0]);
2299 			scsi_set_resid(cmd, ioaccel2_resid);
2300 			break;
2301 		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2302 		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2303 		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2304 			/*
2305 			 * Did an HBA disk disappear? We will eventually
2306 			 * get a state change event from the controller but
2307 			 * in the meantime, we need to tell the OS that the
2308 			 * HBA disk is no longer there and stop I/O
2309 			 * from going down. This allows the potential re-insert
2310 			 * of the disk to get the same device node.
2311 			 */
2312 			if (dev->physical_device && dev->expose_device) {
2313 				cmd->result = DID_NO_CONNECT << 16;
2314 				dev->removed = 1;
2315 				h->drv_req_rescan = 1;
2316 				dev_warn(&h->pdev->dev,
2317 					"%s: device is gone!\n", __func__);
2318 			} else
2319 				/*
2320 				 * Retry by sending down the RAID path.
2321 				 * We will get an event from ctlr to
2322 				 * trigger rescan regardless.
2323 				 */
2324 				retry = 1;
2325 			break;
2326 		default:
2327 			retry = 1;
2328 		}
2329 		break;
2330 	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2331 		break;
2332 	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2333 		break;
2334 	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2335 		retry = 1;
2336 		break;
2337 	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2338 		break;
2339 	default:
2340 		retry = 1;
2341 		break;
2342 	}
2343 
2344 	return retry;	/* retry on raid path? */
2345 }
2346 
2347 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2348 		struct CommandList *c)
2349 {
2350 	bool do_wake = false;
2351 
2352 	/*
2353 	 * Prevent the following race in the abort handler:
2354 	 *
2355 	 * 1. LLD is requested to abort a SCSI command
2356 	 * 2. The SCSI command completes
2357 	 * 3. The struct CommandList associated with step 2 is made available
2358 	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2359 	 * 5. Abort handler follows scsi_cmnd->host_scribble and
2360 	 *    finds struct CommandList and tries to abort it
2361 	 * Now we have aborted the wrong command.
2362 	 *
2363 	 * Reset c->scsi_cmd here so that the abort or reset handler will know
2364 	 * this command has completed.  Then, check to see if the handler is
2365 	 * waiting for this command, and, if so, wake it.
2366 	 */
2367 	c->scsi_cmd = SCSI_CMD_IDLE;
2368 	mb();	/* Declare command idle before checking for pending events. */
2369 	if (c->abort_pending) {
2370 		do_wake = true;
2371 		c->abort_pending = false;
2372 	}
2373 	if (c->reset_pending) {
2374 		unsigned long flags;
2375 		struct hpsa_scsi_dev_t *dev;
2376 
2377 		/*
2378 		 * There appears to be a reset pending; take the lock and
2379 		 * reconfirm.  If so, decrement the count of outstanding
2380 		 * commands and wake the reset command if this is the last one.
2381 		 */
2382 		spin_lock_irqsave(&h->lock, flags);
2383 		dev = c->reset_pending;		/* Re-fetch under the lock. */
2384 		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2385 			do_wake = true;
2386 		c->reset_pending = NULL;
2387 		spin_unlock_irqrestore(&h->lock, flags);
2388 	}
2389 
2390 	if (do_wake)
2391 		wake_up_all(&h->event_sync_wait_queue);
2392 }
2393 
2394 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2395 				      struct CommandList *c)
2396 {
2397 	hpsa_cmd_resolve_events(h, c);
2398 	cmd_tagged_free(h, c);
2399 }
2400 
2401 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2402 		struct CommandList *c, struct scsi_cmnd *cmd)
2403 {
2404 	hpsa_cmd_resolve_and_free(h, c);
2405 	if (cmd && cmd->scsi_done)
2406 		cmd->scsi_done(cmd);
2407 }
2408 
2409 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2410 {
2411 	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2412 	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2413 }
2414 
2415 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2416 {
2417 	cmd->result = DID_ABORT << 16;
2418 }
2419 
2420 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2421 				    struct scsi_cmnd *cmd)
2422 {
2423 	hpsa_set_scsi_cmd_aborted(cmd);
2424 	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2425 			 c->Request.CDB, c->err_info->ScsiStatus);
2426 	hpsa_cmd_resolve_and_free(h, c);
2427 }
2428 
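
/*
 * Completion handling for the ioaccel2 (HP SSD Smart Path) side: a
 * clean completion is finished immediately; a RAID offload failure
 * disables offload for the volume and resubmits down the normal RAID
 * path, and other errors are triaged by handle_ioaccel_mode2_error().
 */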
2429 static void process_ioaccel2_completion(struct ctlr_info *h,
2430 		struct CommandList *c, struct scsi_cmnd *cmd,
2431 		struct hpsa_scsi_dev_t *dev)
2432 {
2433 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2434 
2435 	/* check for good status */
2436 	if (likely(c2->error_data.serv_response == 0 &&
2437 			c2->error_data.status == 0))
2438 		return hpsa_cmd_free_and_done(h, c, cmd);
2439 
2440 	/*
2441 	 * Any RAID offload error results in retry which will use
2442 	 * the normal I/O path so the controller can handle whatever's
2443 	 * wrong.
2444 	 */
2445 	if (is_logical_device(dev) &&
2446 		c2->error_data.serv_response ==
2447 			IOACCEL2_SERV_RESPONSE_FAILURE) {
2448 		if (c2->error_data.status ==
2449 			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2450 			dev->offload_enabled = 0;
2451 			dev->offload_to_be_enabled = 0;
2452 		}
2453 
2454 		return hpsa_retry_cmd(h, c);
2455 	}
2456 
2457 	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2458 		return hpsa_retry_cmd(h, c);
2459 
2460 	return hpsa_cmd_free_and_done(h, c, cmd);
2461 }
2462 
2463 /* Returns 0 on success, < 0 otherwise. */
2464 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2465 					struct CommandList *cp)
2466 {
2467 	u8 tmf_status = cp->err_info->ScsiStatus;
2468 
2469 	switch (tmf_status) {
2470 	case CISS_TMF_COMPLETE:
2471 		/*
2472 		 * CISS_TMF_COMPLETE never happens; instead,
2473 		 * ei->CommandStatus == 0 for this case, so fall through.
2474 		 */
2475 	case CISS_TMF_SUCCESS:
2476 		return 0;
2477 	case CISS_TMF_INVALID_FRAME:
2478 	case CISS_TMF_NOT_SUPPORTED:
2479 	case CISS_TMF_FAILED:
2480 	case CISS_TMF_WRONG_LUN:
2481 	case CISS_TMF_OVERLAPPED_TAG:
2482 		break;
2483 	default:
2484 		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2485 				tmf_status);
2486 		break;
2487 	}
2488 	return -tmf_status;
2489 }
2490 
2491 static void complete_scsi_command(struct CommandList *cp)
2492 {
2493 	struct scsi_cmnd *cmd;
2494 	struct ctlr_info *h;
2495 	struct ErrorInfo *ei;
2496 	struct hpsa_scsi_dev_t *dev;
2497 	struct io_accel2_cmd *c2;
2498 
2499 	u8 sense_key;
2500 	u8 asc;      /* additional sense code */
2501 	u8 ascq;     /* additional sense code qualifier */
2502 	unsigned long sense_data_size;
2503 
2504 	ei = cp->err_info;
2505 	cmd = cp->scsi_cmd;
2506 	h = cp->h;
2507 
2508 	if (!cmd->device) {
2509 		cmd->result = DID_NO_CONNECT << 16;
2510 		return hpsa_cmd_free_and_done(h, cp, cmd);
2511 	}
2512 
2513 	dev = cmd->device->hostdata;
2514 	if (!dev) {
2515 		cmd->result = DID_NO_CONNECT << 16;
2516 		return hpsa_cmd_free_and_done(h, cp, cmd);
2517 	}
2518 	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2519 
2520 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
2521 	if ((cp->cmd_type == CMD_SCSI) &&
2522 		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2523 		hpsa_unmap_sg_chain_block(h, cp);
2524 
2525 	if ((cp->cmd_type == CMD_IOACCEL2) &&
2526 		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2527 		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2528 
2529 	cmd->result = (DID_OK << 16); 		/* host byte */
2530 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
2531 
2532 	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2533 		if (dev->physical_device && dev->expose_device &&
2534 			dev->removed) {
2535 			cmd->result = DID_NO_CONNECT << 16;
2536 			return hpsa_cmd_free_and_done(h, cp, cmd);
2537 		}
2538 		if (likely(cp->phys_disk != NULL))
2539 			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2540 	}
2541 
2542 	/*
2543 	 * We check for lockup status here as it may be set for
2544 	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2545 	 * fail_all_outstanding_cmds()
2546 	 */
2547 	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2548 		/* DID_NO_CONNECT will prevent a retry */
2549 		cmd->result = DID_NO_CONNECT << 16;
2550 		return hpsa_cmd_free_and_done(h, cp, cmd);
2551 	}
2552 
2553 	if ((unlikely(hpsa_is_pending_event(cp)))) {
2554 		if (cp->reset_pending)
2555 			return hpsa_cmd_free_and_done(h, cp, cmd);
2556 		if (cp->abort_pending)
2557 			return hpsa_cmd_abort_and_free(h, cp, cmd);
2558 	}
2559 
2560 	if (cp->cmd_type == CMD_IOACCEL2)
2561 		return process_ioaccel2_completion(h, cp, cmd, dev);
2562 
2563 	scsi_set_resid(cmd, ei->ResidualCnt);
2564 	if (ei->CommandStatus == 0)
2565 		return hpsa_cmd_free_and_done(h, cp, cmd);
2566 
2567 	/* For I/O accelerator commands, copy over some fields to the normal
2568 	 * CISS header used below for error handling.
2569 	 */
2570 	if (cp->cmd_type == CMD_IOACCEL1) {
2571 		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2572 		cp->Header.SGList = scsi_sg_count(cmd);
2573 		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2574 		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2575 			IOACCEL1_IOFLAGS_CDBLEN_MASK;
2576 		cp->Header.tag = c->tag;
2577 		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2578 		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2579 
2580 		/* Any RAID offload error results in retry which will use
2581 		 * the normal I/O path so the controller can handle whatever's
2582 		 * wrong.
2583 		 */
2584 		if (is_logical_device(dev)) {
2585 			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2586 				dev->offload_enabled = 0;
2587 			return hpsa_retry_cmd(h, cp);
2588 		}
2589 	}
2590 
2591 	/* an error has occurred */
2592 	switch (ei->CommandStatus) {
2593 
2594 	case CMD_TARGET_STATUS:
2595 		cmd->result |= ei->ScsiStatus;
2596 		/* copy the sense data */
2597 		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2598 			sense_data_size = SCSI_SENSE_BUFFERSIZE;
2599 		else
2600 			sense_data_size = sizeof(ei->SenseInfo);
2601 		if (ei->SenseLen < sense_data_size)
2602 			sense_data_size = ei->SenseLen;
2603 		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2604 		if (ei->ScsiStatus)
2605 			decode_sense_data(ei->SenseInfo, sense_data_size,
2606 				&sense_key, &asc, &ascq);
2607 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2608 			if (sense_key == ABORTED_COMMAND) {
2609 				cmd->result |= DID_SOFT_ERROR << 16;
2610 				break;
2611 			}
2612 			break;
2613 		}
2614 		/* Problem was not a check condition
2615 		 * Pass it up to the upper layers...
2616 		 */
2617 		if (ei->ScsiStatus) {
2618 			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2619 				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2620 				"Returning result: 0x%x\n",
2621 				cp, ei->ScsiStatus,
2622 				sense_key, asc, ascq,
2623 				cmd->result);
2624 		} else {  /* scsi status is zero??? How??? */
2625 			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2626 				"Returning no connection.\n", cp);
2627 
2628 			/* Ordinarily, this case should never happen,
2629 			 * but there is a bug in some released firmware
2630 			 * revisions that allows it to happen if, for
2631 			 * example, a 4100 backplane loses power and
2632 			 * the tape drive is in it.  We assume that
2633 			 * it's a fatal error of some kind because we
2634 			 * can't show that it wasn't. We will make it
2635 			 * look like selection timeout since that is
2636 			 * the most common reason for this to occur,
2637 			 * and it's severe enough.
2638 			 */
2639 
2640 			cmd->result = DID_NO_CONNECT << 16;
2641 		}
2642 		break;
2643 
2644 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2645 		break;
2646 	case CMD_DATA_OVERRUN:
2647 		dev_warn(&h->pdev->dev,
2648 			"CDB %16phN data overrun\n", cp->Request.CDB);
2649 		break;
2650 	case CMD_INVALID: {
2651 		/* print_bytes(cp, sizeof(*cp), 1, 0);
2652 		print_cmd(cp); */
2653 		/* We get CMD_INVALID if you address a non-existent device
2654 		 * instead of a selection timeout (no response).  You will
2655 		 * see this if you yank out a drive, then try to access it.
2656 		 * This is kind of a shame because it means that any other
2657 		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2658 		 * missing target. */
2659 		cmd->result = DID_NO_CONNECT << 16;
2660 	}
2661 		break;
2662 	case CMD_PROTOCOL_ERR:
2663 		cmd->result = DID_ERROR << 16;
2664 		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2665 				cp->Request.CDB);
2666 		break;
2667 	case CMD_HARDWARE_ERR:
2668 		cmd->result = DID_ERROR << 16;
2669 		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2670 			cp->Request.CDB);
2671 		break;
2672 	case CMD_CONNECTION_LOST:
2673 		cmd->result = DID_ERROR << 16;
2674 		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2675 			cp->Request.CDB);
2676 		break;
2677 	case CMD_ABORTED:
2678 		/* Return now to avoid calling scsi_done(). */
2679 		return hpsa_cmd_abort_and_free(h, cp, cmd);
2680 	case CMD_ABORT_FAILED:
2681 		cmd->result = DID_ERROR << 16;
2682 		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2683 			cp->Request.CDB);
2684 		break;
2685 	case CMD_UNSOLICITED_ABORT:
2686 		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2687 		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2688 			cp->Request.CDB);
2689 		break;
2690 	case CMD_TIMEOUT:
2691 		cmd->result = DID_TIME_OUT << 16;
2692 		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2693 			cp->Request.CDB);
2694 		break;
2695 	case CMD_UNABORTABLE:
2696 		cmd->result = DID_ERROR << 16;
2697 		dev_warn(&h->pdev->dev, "Command unabortable\n");
2698 		break;
2699 	case CMD_TMF_STATUS:
2700 		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2701 			cmd->result = DID_ERROR << 16;
2702 		break;
2703 	case CMD_IOACCEL_DISABLED:
2704 		/* This only handles the direct pass-through case since RAID
2705 		 * offload is handled above.  Just attempt a retry.
2706 		 */
2707 		cmd->result = DID_SOFT_ERROR << 16;
2708 		dev_warn(&h->pdev->dev,
2709 				"cp %p had HP SSD Smart Path error\n", cp);
2710 		break;
2711 	default:
2712 		cmd->result = DID_ERROR << 16;
2713 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2714 				cp, ei->CommandStatus);
2715 	}
2716 
2717 	return hpsa_cmd_free_and_done(h, cp, cmd);
2718 }
2719 
2720 static void hpsa_pci_unmap(struct pci_dev *pdev,
2721 	struct CommandList *c, int sg_used, int data_direction)
2722 {
2723 	int i;
2724 
2725 	for (i = 0; i < sg_used; i++)
2726 		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2727 				le32_to_cpu(c->SG[i].Len),
2728 				data_direction);
2729 }
2730 
2731 static int hpsa_map_one(struct pci_dev *pdev,
2732 		struct CommandList *cp,
2733 		unsigned char *buf,
2734 		size_t buflen,
2735 		int data_direction)
2736 {
2737 	u64 addr64;
2738 
2739 	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2740 		cp->Header.SGList = 0;
2741 		cp->Header.SGTotal = cpu_to_le16(0);
2742 		return 0;
2743 	}
2744 
2745 	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2746 	if (dma_mapping_error(&pdev->dev, addr64)) {
2747 		/* Prevent subsequent unmap of something never mapped */
2748 		cp->Header.SGList = 0;
2749 		cp->Header.SGTotal = cpu_to_le16(0);
2750 		return -1;
2751 	}
2752 	cp->SG[0].Addr = cpu_to_le64(addr64);
2753 	cp->SG[0].Len = cpu_to_le32(buflen);
2754 	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2755 	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
2756 	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2757 	return 0;
2758 }
2759 
2760 #define NO_TIMEOUT ((unsigned long) -1)
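/*
 * Synchronous command submission: enqueue with an on-stack completion
 * and block until the controller responds, optionally bounded by a
 * timeout.
 */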
2761 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2762 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2763 	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2764 {
2765 	DECLARE_COMPLETION_ONSTACK(wait);
2766 
2767 	c->waiting = &wait;
2768 	__enqueue_cmd_and_start_io(h, c, reply_queue);
2769 	if (timeout_msecs == NO_TIMEOUT) {
2770 		/* TODO: get rid of this no-timeout thing */
2771 		wait_for_completion_io(&wait);
2772 		return IO_OK;
2773 	}
2774 	if (!wait_for_completion_io_timeout(&wait,
2775 					msecs_to_jiffies(timeout_msecs))) {
2776 		dev_warn(&h->pdev->dev, "Command timed out.\n");
2777 		return -ETIMEDOUT;
2778 	}
2779 	return IO_OK;
2780 }
2781 
2782 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2783 				   int reply_queue, unsigned long timeout_msecs)
2784 {
2785 	if (unlikely(lockup_detected(h))) {
2786 		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2787 		return IO_OK;
2788 	}
2789 	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2790 }
2791 
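/*
 * The lockup flag is kept per-cpu so it can be read cheaply with no
 * locking; presumably every CPU's copy is set when a lockup is
 * detected, so reading the local copy is sufficient.
 */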
2792 static u32 lockup_detected(struct ctlr_info *h)
2793 {
2794 	int cpu;
2795 	u32 rc, *lockup_detected;
2796 
2797 	cpu = get_cpu();
2798 	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2799 	rc = *lockup_detected;
2800 	put_cpu();
2801 	return rc;
2802 }
2803 
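/*
 * Retry internal commands that complete with UNIT ATTENTION or BUSY,
 * backing off exponentially from 10 ms after the first few attempts.
 * A typical call sequence, sketched from the inquiry helper below:
 *
 *	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr,
 *			TYPE_CMD))
 *		goto out;
 *	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
 *			PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
 */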
2804 #define MAX_DRIVER_CMD_RETRIES 25
2805 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2806 	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2807 {
2808 	int backoff_time = 10, retry_count = 0;
2809 	int rc;
2810 
2811 	do {
2812 		memset(c->err_info, 0, sizeof(*c->err_info));
2813 		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2814 						  timeout_msecs);
2815 		if (rc)
2816 			break;
2817 		retry_count++;
2818 		if (retry_count > 3) {
2819 			msleep(backoff_time);
2820 			if (backoff_time < 1000)
2821 				backoff_time *= 2;
2822 		}
2823 	} while ((check_for_unit_attention(h, c) ||
2824 			check_for_busy(h, c)) &&
2825 			retry_count <= MAX_DRIVER_CMD_RETRIES);
2826 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2827 	if (retry_count > MAX_DRIVER_CMD_RETRIES)
2828 		rc = -EIO;
2829 	return rc;
2830 }
2831 
2832 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2833 				struct CommandList *c)
2834 {
2835 	const u8 *cdb = c->Request.CDB;
2836 	const u8 *lun = c->Header.LUN.LunAddrBytes;
2837 
2838 	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2839 		 txt, lun, cdb);
2840 }
2841 
2842 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2843 			struct CommandList *cp)
2844 {
2845 	const struct ErrorInfo *ei = cp->err_info;
2846 	struct device *d = &cp->h->pdev->dev;
2847 	u8 sense_key, asc, ascq;
2848 	int sense_len;
2849 
2850 	switch (ei->CommandStatus) {
2851 	case CMD_TARGET_STATUS:
2852 		if (ei->SenseLen > sizeof(ei->SenseInfo))
2853 			sense_len = sizeof(ei->SenseInfo);
2854 		else
2855 			sense_len = ei->SenseLen;
2856 		decode_sense_data(ei->SenseInfo, sense_len,
2857 					&sense_key, &asc, &ascq);
2858 		hpsa_print_cmd(h, "SCSI status", cp);
2859 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2860 			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2861 				sense_key, asc, ascq);
2862 		else
2863 			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2864 		if (ei->ScsiStatus == 0)
2865 			dev_warn(d,
2866 				"SCSI status is abnormally zero.  (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
2869 		break;
2870 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2871 		break;
2872 	case CMD_DATA_OVERRUN:
2873 		hpsa_print_cmd(h, "overrun condition", cp);
2874 		break;
2875 	case CMD_INVALID: {
2876 		/* controller unfortunately reports SCSI passthrus
2877 		 * to non-existent targets as invalid commands.
2878 		 */
2879 		hpsa_print_cmd(h, "invalid command", cp);
2880 		dev_warn(d, "probably means device no longer present\n");
2881 		}
2882 		break;
2883 	case CMD_PROTOCOL_ERR:
2884 		hpsa_print_cmd(h, "protocol error", cp);
2885 		break;
2886 	case CMD_HARDWARE_ERR:
2887 		hpsa_print_cmd(h, "hardware error", cp);
2888 		break;
2889 	case CMD_CONNECTION_LOST:
2890 		hpsa_print_cmd(h, "connection lost", cp);
2891 		break;
2892 	case CMD_ABORTED:
2893 		hpsa_print_cmd(h, "aborted", cp);
2894 		break;
2895 	case CMD_ABORT_FAILED:
2896 		hpsa_print_cmd(h, "abort failed", cp);
2897 		break;
2898 	case CMD_UNSOLICITED_ABORT:
2899 		hpsa_print_cmd(h, "unsolicited abort", cp);
2900 		break;
2901 	case CMD_TIMEOUT:
2902 		hpsa_print_cmd(h, "timed out", cp);
2903 		break;
2904 	case CMD_UNABORTABLE:
2905 		hpsa_print_cmd(h, "unabortable", cp);
2906 		break;
2907 	case CMD_CTLR_LOCKUP:
2908 		hpsa_print_cmd(h, "controller lockup detected", cp);
2909 		break;
2910 	default:
2911 		hpsa_print_cmd(h, "unknown status", cp);
2912 		dev_warn(d, "Unknown command status %x\n",
2913 				ei->CommandStatus);
2914 	}
2915 }
2916 
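/* Send a SCSI INQUIRY (optionally for a VPD page) to the given target. */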
2917 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2918 			u16 page, unsigned char *buf,
2919 			unsigned char bufsize)
2920 {
2921 	int rc = IO_OK;
2922 	struct CommandList *c;
2923 	struct ErrorInfo *ei;
2924 
2925 	c = cmd_alloc(h);
2926 
2927 	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2928 			page, scsi3addr, TYPE_CMD)) {
2929 		rc = -1;
2930 		goto out;
2931 	}
2932 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2933 					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
2934 	if (rc)
2935 		goto out;
2936 	ei = c->err_info;
2937 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2938 		hpsa_scsi_interpret_error(h, c);
2939 		rc = -1;
2940 	}
2941 out:
2942 	cmd_free(h, c);
2943 	return rc;
2944 }
2945 
2946 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2947 	u8 reset_type, int reply_queue)
2948 {
2949 	int rc = IO_OK;
2950 	struct CommandList *c;
2951 	struct ErrorInfo *ei;
2952 
2953 	c = cmd_alloc(h);
2954 
2956 	/* fill_cmd can't fail here, no data buffer to map. */
2957 	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2958 			scsi3addr, TYPE_MSG);
2959 	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2960 	if (rc) {
2961 		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2962 		goto out;
2963 	}
2964 	/* no unmap needed here because no data xfer. */
2965 
2966 	ei = c->err_info;
2967 	if (ei->CommandStatus != 0) {
2968 		hpsa_scsi_interpret_error(h, c);
2969 		rc = -1;
2970 	}
2971 out:
2972 	cmd_free(h, c);
2973 	return rc;
2974 }
2975 
2976 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2977 			       struct hpsa_scsi_dev_t *dev,
2978 			       unsigned char *scsi3addr)
2979 {
2980 	int i;
2981 	bool match = false;
2982 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2983 	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2984 
2985 	if (hpsa_is_cmd_idle(c))
2986 		return false;
2987 
2988 	switch (c->cmd_type) {
2989 	case CMD_SCSI:
2990 	case CMD_IOCTL_PEND:
2991 		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2992 				sizeof(c->Header.LUN.LunAddrBytes));
2993 		break;
2994 
2995 	case CMD_IOACCEL1:
2996 	case CMD_IOACCEL2:
2997 		if (c->phys_disk == dev) {
2998 			/* HBA mode match */
2999 			match = true;
3000 		} else {
3001 			/* Possible RAID mode -- check each phys dev. */
3002 			/* FIXME:  Do we need to take out a lock here?  If
3003 			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
3004 			 * instead. */
3005 			for (i = 0; i < dev->nphysical_disks && !match; i++) {
3006 				/* FIXME: an alternate test might be
3007 				 *
3008 				 * match = dev->phys_disk[i]->ioaccel_handle
3009 				 *              == c2->scsi_nexus;      */
3010 				match = dev->phys_disk[i] == c->phys_disk;
3011 			}
3012 		}
3013 		break;
3014 
3015 	case IOACCEL2_TMF:
3016 		for (i = 0; i < dev->nphysical_disks && !match; i++) {
3017 			match = dev->phys_disk[i]->ioaccel_handle ==
3018 					le32_to_cpu(ac->it_nexus);
3019 		}
3020 		break;
3021 
3022 	case 0:		/* The command is in the middle of being initialized. */
3023 		match = false;
3024 		break;
3025 
3026 	default:
3027 		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
3028 			c->cmd_type);
3029 		BUG();
3030 	}
3031 
3032 	return match;
3033 }
3034 
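/*
 * Send a reset to the device and wait for it to settle.  Resets are
 * serialized on h->reset_mutex; each command found outstanding against
 * the device is tagged with reset_pending and counted in
 * dev->reset_cmds_out, and we sleep until that count drains (or a
 * controller lockup is detected).
 */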
3035 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
3036 	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
3037 {
3038 	int i;
3039 	int rc = 0;
3040 
3041 	/* We can really only handle one reset at a time */
3042 	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
3043 		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
3044 		return -EINTR;
3045 	}
3046 
3047 	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
3048 
3049 	for (i = 0; i < h->nr_cmds; i++) {
3050 		struct CommandList *c = h->cmd_pool + i;
3051 		int refcount = atomic_inc_return(&c->refcount);
3052 
3053 		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
3054 			unsigned long flags;
3055 
3056 			/*
3057 			 * Mark the target command as having a reset pending,
3058 			 * then take the lock so that the command cannot complete
3059 			 * while we're considering it.  If the command is not
3060 			 * idle then count it; otherwise revoke the event.
3061 			 */
3062 			c->reset_pending = dev;
3063 			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
3064 			if (!hpsa_is_cmd_idle(c))
3065 				atomic_inc(&dev->reset_cmds_out);
3066 			else
3067 				c->reset_pending = NULL;
3068 			spin_unlock_irqrestore(&h->lock, flags);
3069 		}
3070 
3071 		cmd_free(h, c);
3072 	}
3073 
3074 	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
3075 	if (!rc)
3076 		wait_event(h->event_sync_wait_queue,
3077 			atomic_read(&dev->reset_cmds_out) == 0 ||
3078 			lockup_detected(h));
3079 
3080 	if (unlikely(lockup_detected(h))) {
3081 		dev_warn(&h->pdev->dev,
3082 			 "Controller lockup detected during reset wait\n");
3083 		rc = -ENODEV;
3084 	}
3085 
3086 	if (unlikely(rc))
3087 		atomic_set(&dev->reset_cmds_out, 0);
3088 	else
3089 		wait_for_device_to_become_ready(h, scsi3addr, 0);
3090 
3091 	mutex_unlock(&h->reset_mutex);
3092 	return rc;
3093 }
3094 
3095 static void hpsa_get_raid_level(struct ctlr_info *h,
3096 	unsigned char *scsi3addr, unsigned char *raid_level)
3097 {
3098 	int rc;
3099 	unsigned char *buf;
3100 
3101 	*raid_level = RAID_UNKNOWN;
3102 	buf = kzalloc(64, GFP_KERNEL);
3103 	if (!buf)
3104 		return;
3105 
3106 	if (!hpsa_vpd_page_supported(h, scsi3addr,
3107 		HPSA_VPD_LV_DEVICE_GEOMETRY))
3108 		goto exit;
3109 
3110 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3111 		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3112 
3113 	if (rc == 0)
3114 		*raid_level = buf[8];
3115 	if (*raid_level > RAID_UNKNOWN)
3116 		*raid_level = RAID_UNKNOWN;
3117 exit:
3118 	kfree(buf);
3119 	return;
3120 }
3121 
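/*
 * Dump the raid map for debugging; only prints when raid_offload_debug
 * is at least 2, and compiles to a stub unless HPSA_MAP_DEBUG is
 * defined.
 */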
3122 #define HPSA_MAP_DEBUG
3123 #ifdef HPSA_MAP_DEBUG
3124 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
3125 				struct raid_map_data *map_buff)
3126 {
3127 	struct raid_map_disk_data *dd = &map_buff->data[0];
3128 	int map, row, col;
3129 	u16 map_cnt, row_cnt, disks_per_row;
3130 
3131 	if (rc != 0)
3132 		return;
3133 
3134 	/* Show details only if debugging has been activated. */
3135 	if (h->raid_offload_debug < 2)
3136 		return;
3137 
3138 	dev_info(&h->pdev->dev, "structure_size = %u\n",
3139 				le32_to_cpu(map_buff->structure_size));
3140 	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
3141 			le32_to_cpu(map_buff->volume_blk_size));
3142 	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
3143 			le64_to_cpu(map_buff->volume_blk_cnt));
3144 	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
3145 			map_buff->phys_blk_shift);
3146 	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
3147 			map_buff->parity_rotation_shift);
3148 	dev_info(&h->pdev->dev, "strip_size = %u\n",
3149 			le16_to_cpu(map_buff->strip_size));
3150 	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
3151 			le64_to_cpu(map_buff->disk_starting_blk));
3152 	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
3153 			le64_to_cpu(map_buff->disk_blk_cnt));
3154 	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
3155 			le16_to_cpu(map_buff->data_disks_per_row));
3156 	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
3157 			le16_to_cpu(map_buff->metadata_disks_per_row));
3158 	dev_info(&h->pdev->dev, "row_cnt = %u\n",
3159 			le16_to_cpu(map_buff->row_cnt));
3160 	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
3161 			le16_to_cpu(map_buff->layout_map_count));
3162 	dev_info(&h->pdev->dev, "flags = 0x%x\n",
3163 			le16_to_cpu(map_buff->flags));
3164 	dev_info(&h->pdev->dev, "encryption = %s\n",
3165 			le16_to_cpu(map_buff->flags) &
3166 			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
3167 	dev_info(&h->pdev->dev, "dekindex = %u\n",
3168 			le16_to_cpu(map_buff->dekindex));
3169 	map_cnt = le16_to_cpu(map_buff->layout_map_count);
3170 	for (map = 0; map < map_cnt; map++) {
3171 		dev_info(&h->pdev->dev, "Map%u:\n", map);
3172 		row_cnt = le16_to_cpu(map_buff->row_cnt);
3173 		for (row = 0; row < row_cnt; row++) {
3174 			dev_info(&h->pdev->dev, "  Row%u:\n", row);
3175 			disks_per_row =
3176 				le16_to_cpu(map_buff->data_disks_per_row);
3177 			for (col = 0; col < disks_per_row; col++, dd++)
3178 				dev_info(&h->pdev->dev,
3179 					"    D%02u: h=0x%04x xor=%u,%u\n",
3180 					col, dd->ioaccel_handle,
3181 					dd->xor_mult[0], dd->xor_mult[1]);
3182 			disks_per_row =
3183 				le16_to_cpu(map_buff->metadata_disks_per_row);
3184 			for (col = 0; col < disks_per_row; col++, dd++)
3185 				dev_info(&h->pdev->dev,
3186 					"    M%02u: h=0x%04x xor=%u,%u\n",
3187 					col, dd->ioaccel_handle,
3188 					dd->xor_mult[0], dd->xor_mult[1]);
3189 		}
3190 	}
3191 }
3192 #else
3193 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
3194 			__attribute__((unused)) int rc,
3195 			__attribute__((unused)) struct raid_map_data *map_buff)
3196 {
3197 }
3198 #endif
3199 
3200 static int hpsa_get_raid_map(struct ctlr_info *h,
3201 	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3202 {
3203 	int rc = 0;
3204 	struct CommandList *c;
3205 	struct ErrorInfo *ei;
3206 
3207 	c = cmd_alloc(h);
3208 
3209 	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3210 			sizeof(this_device->raid_map), 0,
3211 			scsi3addr, TYPE_CMD)) {
3212 		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3213 		cmd_free(h, c);
3214 		return -1;
3215 	}
3216 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3217 					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3218 	if (rc)
3219 		goto out;
3220 	ei = c->err_info;
3221 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3222 		hpsa_scsi_interpret_error(h, c);
3223 		rc = -1;
3224 		goto out;
3225 	}
3226 	cmd_free(h, c);
3227 
3228 	/* @todo in the future, dynamically allocate RAID map memory */
3229 	if (le32_to_cpu(this_device->raid_map.structure_size) >
3230 				sizeof(this_device->raid_map)) {
3231 		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3232 		rc = -1;
3233 	}
3234 	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
3235 	return rc;
3236 out:
3237 	cmd_free(h, c);
3238 	return rc;
3239 }
3240 
3241 static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
3242 		unsigned char scsi3addr[], u16 bmic_device_index,
3243 		struct bmic_sense_subsystem_info *buf, size_t bufsize)
3244 {
3245 	int rc = IO_OK;
3246 	struct CommandList *c;
3247 	struct ErrorInfo *ei;
3248 
3249 	c = cmd_alloc(h);
3250 
3251 	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
3252 		0, RAID_CTLR_LUNID, TYPE_CMD);
3253 	if (rc)
3254 		goto out;
3255 
3256 	c->Request.CDB[2] = bmic_device_index & 0xff;
3257 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3258 
3259 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3260 				PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3261 	if (rc)
3262 		goto out;
3263 	ei = c->err_info;
3264 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3265 		hpsa_scsi_interpret_error(h, c);
3266 		rc = -1;
3267 	}
3268 out:
3269 	cmd_free(h, c);
3270 	return rc;
3271 }
3272 
3273 static int hpsa_bmic_id_controller(struct ctlr_info *h,
3274 	struct bmic_identify_controller *buf, size_t bufsize)
3275 {
3276 	int rc = IO_OK;
3277 	struct CommandList *c;
3278 	struct ErrorInfo *ei;
3279 
3280 	c = cmd_alloc(h);
3281 
3282 	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
3283 		0, RAID_CTLR_LUNID, TYPE_CMD);
3284 	if (rc)
3285 		goto out;
3286 
3287 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3288 		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3289 	if (rc)
3290 		goto out;
3291 	ei = c->err_info;
3292 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3293 		hpsa_scsi_interpret_error(h, c);
3294 		rc = -1;
3295 	}
3296 out:
3297 	cmd_free(h, c);
3298 	return rc;
3299 }
3300 
3301 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3302 		unsigned char scsi3addr[], u16 bmic_device_index,
3303 		struct bmic_identify_physical_device *buf, size_t bufsize)
3304 {
3305 	int rc = IO_OK;
3306 	struct CommandList *c;
3307 	struct ErrorInfo *ei;
3308 
3309 	c = cmd_alloc(h);
3310 	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3311 		0, RAID_CTLR_LUNID, TYPE_CMD);
3312 	if (rc)
3313 		goto out;
3314 
3315 	c->Request.CDB[2] = bmic_device_index & 0xff;
3316 	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3317 
3318 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3319 						DEFAULT_TIMEOUT);
 	if (rc)
 		goto out;
3320 	ei = c->err_info;
3321 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3322 		hpsa_scsi_interpret_error(h, c);
3323 		rc = -1;
3324 	}
3325 out:
3326 	cmd_free(h, c);
3327 
3328 	return rc;
3329 }
3330 
3331 /*
3332  * get enclosure information
3333  * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
3334  * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
3335  * Uses id_physical_device to determine the box_index.
3336  */
3337 static void hpsa_get_enclosure_info(struct ctlr_info *h,
3338 			unsigned char *scsi3addr,
3339 			struct ReportExtendedLUNdata *rlep, int rle_index,
3340 			struct hpsa_scsi_dev_t *encl_dev)
3341 {
3342 	int rc = -1;
3343 	struct CommandList *c = NULL;
3344 	struct ErrorInfo *ei = NULL;
3345 	struct bmic_sense_storage_box_params *bssbp = NULL;
3346 	struct bmic_identify_physical_device *id_phys = NULL;
3347 	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3348 	u16 bmic_device_index = 0;
3349 
3350 	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
3351 
3352 	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
3353 		rc = IO_OK;
3354 		goto out;
3355 	}
3356 
3357 	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
3358 	if (!bssbp)
3359 		goto out;
3360 
3361 	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3362 	if (!id_phys)
3363 		goto out;
3364 
3365 	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
3366 						id_phys, sizeof(*id_phys));
3367 	if (rc) {
3368 		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
3369 			__func__, rc, bmic_device_index);
3370 		goto out;
3371 	}
3372 
3373 	c = cmd_alloc(h);
3374 
3375 	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
3376 			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
3377 
3378 	if (rc)
3379 		goto out;
3380 
3381 	if (id_phys->phys_connector[1] == 'E')
3382 		c->Request.CDB[5] = id_phys->box_index;
3383 	else
3384 		c->Request.CDB[5] = 0;
3385 
3386 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3387 						DEFAULT_TIMEOUT);
3388 	if (rc)
3389 		goto out;
3390 
3391 	ei = c->err_info;
3392 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3393 		rc = -1;
3394 		goto out;
3395 	}
3396 
3397 	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
3398 	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
3399 		bssbp->phys_connector, sizeof(bssbp->phys_connector));
3400 
3401 	rc = IO_OK;
3402 out:
3403 	kfree(bssbp);
3404 	kfree(id_phys);
3405 
3406 	if (c)
3407 		cmd_free(h, c);
3408 
3409 	if (rc != IO_OK)
3410 		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
3411 			"Error, could not get enclosure information\n");
3412 }
3413 
3414 static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
3415 						unsigned char *scsi3addr)
3416 {
3417 	struct ReportExtendedLUNdata *physdev;
3418 	u32 nphysicals;
3419 	u64 sa = 0;
3420 	int i;
3421 
3422 	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
3423 	if (!physdev)
3424 		return 0;
3425 
3426 	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3427 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3428 		kfree(physdev);
3429 		return 0;
3430 	}
3431 	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
3432 
3433 	for (i = 0; i < nphysicals; i++)
3434 		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
3435 			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
3436 			break;
3437 		}
3438 
3439 	kfree(physdev);
3440 
3441 	return sa;
3442 }
3443 
3444 static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3445 					struct hpsa_scsi_dev_t *dev)
3446 {
3447 	int rc;
3448 	u64 sa = 0;
3449 
3450 	if (is_hba_lunid(scsi3addr)) {
3451 		struct bmic_sense_subsystem_info *ssi;
3452 
3453 		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
3454 		if (!ssi)
3455 			return;
3456 
3457 		rc = hpsa_bmic_sense_subsystem_information(h,
3458 					scsi3addr, 0, ssi, sizeof(*ssi));
3459 		if (rc == 0) {
3460 			sa = get_unaligned_be64(ssi->primary_world_wide_id);
3461 			h->sas_address = sa;
3462 		}
3463 
3464 		kfree(ssi);
3465 	} else
3466 		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
3467 
3468 	dev->sas_address = sa;
3469 }
3470 
3471 /* Determine whether the device supports a given VPD page */
3472 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3473 	unsigned char scsi3addr[], u8 page)
3474 {
3475 	int rc;
3476 	int i;
3477 	int pages;
3478 	unsigned char *buf, bufsize;
3479 
3480 	buf = kzalloc(256, GFP_KERNEL);
3481 	if (!buf)
3482 		return false;
3483 
3484 	/* Get the size of the page list first */
3485 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3486 				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3487 				buf, HPSA_VPD_HEADER_SZ);
3488 	if (rc != 0)
3489 		goto exit_unsupported;
3490 	pages = buf[3];
3491 	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3492 		bufsize = pages + HPSA_VPD_HEADER_SZ;
3493 	else
3494 		bufsize = 255;
3495 
3496 	/* Get the whole VPD page list */
3497 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3498 				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3499 				buf, bufsize);
3500 	if (rc != 0)
3501 		goto exit_unsupported;
3502 
3503 	pages = buf[3];
3504 	for (i = 1; i <= pages && (3 + i) < bufsize; i++)
3505 		if (buf[3 + i] == page)
3506 			goto exit_supported;
3507 exit_unsupported:
3508 	kfree(buf);
3509 	return false;
3510 exit_supported:
3511 	kfree(buf);
3512 	return true;
3513 }
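
/*
 * Illustrative sketch (not part of the driver): the Supported VPD
 * Pages response parsed above keeps its page-code count in byte 3,
 * with the codes themselves starting at byte 4, so probing for one
 * page is a bounded linear scan.  example_vpd_list_has_page() is a
 * hypothetical helper, not a kernel API.
 */
static bool __maybe_unused example_vpd_list_has_page(const unsigned char *resp,
	size_t resp_len, u8 page)
{
	size_t i, npages = resp[3];

	for (i = 0; i < npages && (4 + i) < resp_len; i++)
		if (resp[4 + i] == page)
			return true;
	return false;
}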
3514 
3515 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3516 	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3517 {
3518 	int rc;
3519 	unsigned char *buf;
3520 	u8 ioaccel_status;
3521 
3522 	this_device->offload_config = 0;
3523 	this_device->offload_enabled = 0;
3524 	this_device->offload_to_be_enabled = 0;
3525 
3526 	buf = kzalloc(64, GFP_KERNEL);
3527 	if (!buf)
3528 		return;
3529 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3530 		goto out;
3531 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3532 			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3533 	if (rc != 0)
3534 		goto out;
3535 
3536 #define IOACCEL_STATUS_BYTE 4
3537 #define OFFLOAD_CONFIGURED_BIT 0x01
3538 #define OFFLOAD_ENABLED_BIT 0x02
3539 	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3540 	this_device->offload_config =
3541 		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3542 	if (this_device->offload_config) {
3543 		this_device->offload_enabled =
3544 			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3545 		if (hpsa_get_raid_map(h, scsi3addr, this_device))
3546 			this_device->offload_enabled = 0;
3547 	}
3548 	this_device->offload_to_be_enabled = this_device->offload_enabled;
3549 out:
3550 	kfree(buf);
3551 	return;
3552 }
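
/*
 * Illustrative sketch: decoding byte 4 of the ioaccel-status VPD page
 * exactly as hpsa_get_ioaccel_status() does above -- the enabled bit
 * is only meaningful once the configured bit is set.
 * example_decode_ioaccel_status() is hypothetical.
 */
static void __maybe_unused example_decode_ioaccel_status(u8 status,
	bool *configured, bool *enabled)
{
	*configured = !!(status & 0x01);		/* OFFLOAD_CONFIGURED_BIT */
	*enabled = *configured && !!(status & 0x02);	/* OFFLOAD_ENABLED_BIT */
}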
3553 
3554 /* Get the device id from inquiry page 0x83 */
3555 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3556 	unsigned char *device_id, int index, int buflen)
3557 {
3558 	int rc;
3559 	unsigned char *buf;
3560 
3561 	/* Does controller have VPD for device id? */
3562 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3563 		return 1; /* not supported */
3564 
3565 	buf = kzalloc(64, GFP_KERNEL);
3566 	if (!buf)
3567 		return -ENOMEM;
3568 
3569 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3570 					HPSA_VPD_LV_DEVICE_ID, buf, 64);
3571 	if (rc == 0) {
3572 		if (buflen > 16)
3573 			buflen = 16;
3574 		memcpy(device_id, &buf[8], buflen);
3575 	}
3576 
3577 	kfree(buf);
3578 
3579 	return rc; /*0 - got id,  otherwise, didn't */
3580 }
3581 
3582 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3583 		void *buf, int bufsize,
3584 		int extended_response)
3585 {
3586 	int rc = IO_OK;
3587 	struct CommandList *c;
3588 	unsigned char scsi3addr[8];
3589 	struct ErrorInfo *ei;
3590 
3591 	c = cmd_alloc(h);
3592 
3593 	/* address the controller */
3594 	memset(scsi3addr, 0, sizeof(scsi3addr));
3595 	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3596 		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3597 		rc = -1;
3598 		goto out;
3599 	}
3600 	if (extended_response)
3601 		c->Request.CDB[1] = extended_response;
3602 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3603 					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
3604 	if (rc)
3605 		goto out;
3606 	ei = c->err_info;
3607 	if (ei->CommandStatus != 0 &&
3608 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
3609 		hpsa_scsi_interpret_error(h, c);
3610 		rc = -1;
3611 	} else {
3612 		struct ReportLUNdata *rld = buf;
3613 
3614 		if (rld->extended_response_flag != extended_response) {
3615 			dev_err(&h->pdev->dev,
3616 				"report luns requested format %u, got %u\n",
3617 				extended_response,
3618 				rld->extended_response_flag);
3619 			rc = -1;
3620 		}
3621 	}
3622 out:
3623 	cmd_free(h, c);
3624 	return rc;
3625 }
3626 
3627 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3628 		struct ReportExtendedLUNdata *buf, int bufsize)
3629 {
3630 	int rc;
3631 	struct ReportLUNdata *lbuf;
3632 
3633 	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3634 				      HPSA_REPORT_PHYS_EXTENDED);
3635 	if (!rc || !hpsa_allow_any)
3636 		return rc;
3637 
3638 	/* REPORT PHYS EXTENDED is not supported */
3639 	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
3640 	if (!lbuf)
3641 		return -ENOMEM;
3642 
3643 	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
3644 	if (!rc) {
3645 		int i;
3646 		u32 nphys;
3647 
3648 		/* Copy ReportLUNdata header */
3649 		memcpy(buf, lbuf, 8);
3650 		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
3651 		for (i = 0; i < nphys; i++)
3652 			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
3653 	}
3654 	kfree(lbuf);
3655 	return rc;
3656 }
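
/*
 * Illustrative sketch: every REPORT LUNS style response above begins
 * with a 4-byte big-endian byte count of the entries that follow.
 * Standard entries are 8 bytes and the extended physical entries are
 * 24 bytes, which is where the /8 and /24 divisions in this file come
 * from.  example_report_luns_count() is hypothetical.
 */
static u32 __maybe_unused example_report_luns_count(const u8 *resp,
	u32 entry_size)
{
	return get_unaligned_be32(resp) / entry_size;
}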
3657 
3658 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3659 		struct ReportLUNdata *buf, int bufsize)
3660 {
3661 	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3662 }
3663 
3664 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3665 	int bus, int target, int lun)
3666 {
3667 	device->bus = bus;
3668 	device->target = target;
3669 	device->lun = lun;
3670 }
3671 
3672 /* Use VPD inquiry to get details of volume status */
3673 static int hpsa_get_volume_status(struct ctlr_info *h,
3674 					unsigned char scsi3addr[])
3675 {
3676 	int rc;
3677 	int status;
3678 	int size;
3679 	unsigned char *buf;
3680 
3681 	buf = kzalloc(64, GFP_KERNEL);
3682 	if (!buf)
3683 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3684 
3685 	/* Does controller have VPD for logical volume status? */
3686 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3687 		goto exit_failed;
3688 
3689 	/* Get the size of the VPD return buffer */
3690 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3691 					buf, HPSA_VPD_HEADER_SZ);
3692 	if (rc != 0)
3693 		goto exit_failed;
3694 	size = buf[3];
3695 
3696 	/* Now get the whole VPD buffer */
3697 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3698 					buf, size + HPSA_VPD_HEADER_SZ);
3699 	if (rc != 0)
3700 		goto exit_failed;
3701 	status = buf[4]; /* status byte */
3702 
3703 	kfree(buf);
3704 	return status;
3705 exit_failed:
3706 	kfree(buf);
3707 	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3708 }
3709 
3710 /* Determine offline status of a volume.
3711  * Return either:
3712  *  0 (not offline)
3713  *  0xff (offline for unknown reasons)
3714  *  # (integer code indicating one of several NOT READY states
3715  *     describing why a volume is to be kept offline)
3716  */
3717 static unsigned char hpsa_volume_offline(struct ctlr_info *h,
3718 					unsigned char scsi3addr[])
3719 {
3720 	struct CommandList *c;
3721 	unsigned char *sense;
3722 	u8 sense_key, asc, ascq;
3723 	int sense_len;
3724 	int rc, ldstat = 0;
3725 	u16 cmd_status;
3726 	u8 scsi_status;
3727 #define ASC_LUN_NOT_READY 0x04
3728 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3729 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3730 
3731 	c = cmd_alloc(h);
3732 
3733 	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3734 	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3735 					DEFAULT_TIMEOUT);
3736 	if (rc) {
3737 		cmd_free(h, c);
3738 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3739 	}
3740 	sense = c->err_info->SenseInfo;
3741 	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3742 		sense_len = sizeof(c->err_info->SenseInfo);
3743 	else
3744 		sense_len = c->err_info->SenseLen;
3745 	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3746 	cmd_status = c->err_info->CommandStatus;
3747 	scsi_status = c->err_info->ScsiStatus;
3748 	cmd_free(h, c);
3749 
3750 	/* Determine the reason for not ready state */
3751 	ldstat = hpsa_get_volume_status(h, scsi3addr);
3752 
3753 	/* Keep volume offline in certain cases: */
3754 	switch (ldstat) {
3755 	case HPSA_LV_FAILED:
3756 	case HPSA_LV_UNDERGOING_ERASE:
3757 	case HPSA_LV_NOT_AVAILABLE:
3758 	case HPSA_LV_UNDERGOING_RPI:
3759 	case HPSA_LV_PENDING_RPI:
3760 	case HPSA_LV_ENCRYPTED_NO_KEY:
3761 	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3762 	case HPSA_LV_UNDERGOING_ENCRYPTION:
3763 	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3764 	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3765 		return ldstat;
3766 	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3767 		/* If VPD status page isn't available,
3768 		 * use ASC/ASCQ to determine state
3769 		 */
3770 		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3771 			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3772 			return ldstat;
3773 		break;
3774 	default:
3775 		break;
3776 	}
3777 	return HPSA_LV_OK;
3778 }
3779 
3780 /*
3781  * Find out if a logical device supports aborts by simply trying one.
3782  * Smart Array may claim not to support aborts on logical drives, but
3783  * if an MSA2000 is connected, the drives on that unit will be presented
3784  * by the Smart Array as logical drives, and aborts may be sent to
3785  * those devices successfully.  So the simplest way to find out is
3786  * to try an abort and see how the device responds.
3787  */
3788 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3789 					unsigned char *scsi3addr)
3790 {
3791 	struct CommandList *c;
3792 	struct ErrorInfo *ei;
3793 	int rc = 0;
3794 
3795 	u64 tag = (u64) -1; /* bogus tag */
3796 
3797 	/* Assume that physical devices support aborts */
3798 	if (!is_logical_dev_addr_mode(scsi3addr))
3799 		return 1;
3800 
3801 	c = cmd_alloc(h);
3802 
3803 	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3804 	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
3805 					DEFAULT_TIMEOUT);
3806 	/* no unmap needed here because no data xfer. */
3807 	ei = c->err_info;
3808 	switch (ei->CommandStatus) {
3809 	case CMD_INVALID:
3810 		rc = 0;
3811 		break;
3812 	case CMD_UNABORTABLE:
3813 	case CMD_ABORT_FAILED:
3814 		rc = 1;
3815 		break;
3816 	case CMD_TMF_STATUS:
3817 		rc = hpsa_evaluate_tmf_status(h, c);
3818 		break;
3819 	default:
3820 		rc = 0;
3821 		break;
3822 	}
3823 	cmd_free(h, c);
3824 	return rc;
3825 }
3826 
3827 static int hpsa_update_device_info(struct ctlr_info *h,
3828 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3829 	unsigned char *is_OBDR_device)
3830 {
3831 
3832 #define OBDR_SIG_OFFSET 43
3833 #define OBDR_TAPE_SIG "$DR-10"
3834 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3835 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3836 
3837 	unsigned char *inq_buff;
3838 	unsigned char *obdr_sig;
3839 	int rc = 0;
3840 
3841 	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3842 	if (!inq_buff) {
3843 		rc = -ENOMEM;
3844 		goto bail_out;
3845 	}
3846 
3847 	/* Do an inquiry to the device to see what it is. */
3848 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3849 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3850 		dev_err(&h->pdev->dev,
3851 			"%s: inquiry failed, device will be skipped.\n",
3852 			__func__);
3853 		rc = HPSA_INQUIRY_FAILED;
3854 		goto bail_out;
3855 	}
3856 
3857 	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
3858 	scsi_sanitize_inquiry_string(&inq_buff[16], 16);
3859 
3860 	this_device->devtype = (inq_buff[0] & 0x1f);
3861 	memcpy(this_device->scsi3addr, scsi3addr, 8);
3862 	memcpy(this_device->vendor, &inq_buff[8],
3863 		sizeof(this_device->vendor));
3864 	memcpy(this_device->model, &inq_buff[16],
3865 		sizeof(this_device->model));
3866 	this_device->rev = inq_buff[2];
3867 	memset(this_device->device_id, 0,
3868 		sizeof(this_device->device_id));
3869 	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3870 		sizeof(this_device->device_id)))
3871 		dev_err(&h->pdev->dev,
3872 			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
3873 			h->ctlr, __func__,
3874 			h->scsi_host->host_no,
3875 			this_device->target, this_device->lun,
3876 			scsi_device_type(this_device->devtype),
3877 			this_device->model);
3878 
3879 	if ((this_device->devtype == TYPE_DISK ||
3880 		this_device->devtype == TYPE_ZBC) &&
3881 		is_logical_dev_addr_mode(scsi3addr)) {
3882 		unsigned char volume_offline;
3883 
3884 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3885 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3886 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3887 		volume_offline = hpsa_volume_offline(h, scsi3addr);
3888 		if (volume_offline == HPSA_LV_FAILED) {
3889 			rc = HPSA_LV_FAILED;
3890 			dev_err(&h->pdev->dev,
3891 				"%s: LV failed, device will be skipped.\n",
3892 				__func__);
3893 			goto bail_out;
3894 		}
3895 	} else {
3896 		this_device->raid_level = RAID_UNKNOWN;
3897 		this_device->offload_config = 0;
3898 		this_device->offload_enabled = 0;
3899 		this_device->offload_to_be_enabled = 0;
3900 		this_device->hba_ioaccel_enabled = 0;
3901 		this_device->volume_offline = 0;
3902 		this_device->queue_depth = h->nr_cmds;
3903 	}
3904 
3905 	if (is_OBDR_device) {
3906 		/* See if this is a One-Button-Disaster-Recovery device
3907 		 * by looking for "$DR-10" at offset 43 in inquiry data.
3908 		 */
3909 		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3910 		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3911 					strncmp(obdr_sig, OBDR_TAPE_SIG,
3912 						OBDR_SIG_LEN) == 0);
3913 	}
3914 	kfree(inq_buff);
3915 	return 0;
3916 
3917 bail_out:
3918 	kfree(inq_buff);
3919 	return rc;
3920 }
3921 
3922 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3923 			struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3924 {
3925 	unsigned long flags;
3926 	int rc, entry;
3927 	/*
3928 	 * See if this device supports aborts.  If we already know
3929 	 * the device, we already know if it supports aborts, otherwise
3930 	 * we have to find out if it supports aborts by trying one.
3931 	 */
3932 	spin_lock_irqsave(&h->devlock, flags);
3933 	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3934 	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3935 		entry >= 0 && entry < h->ndevices) {
3936 		dev->supports_aborts = h->dev[entry]->supports_aborts;
3937 		spin_unlock_irqrestore(&h->devlock, flags);
3938 	} else {
3939 		spin_unlock_irqrestore(&h->devlock, flags);
3940 		dev->supports_aborts =
3941 				hpsa_device_supports_aborts(h, scsi3addr);
3942 		if (dev->supports_aborts < 0)
3943 			dev->supports_aborts = 0;
3944 	}
3945 }
3946 
3947 /*
3948  * Helper function to assign bus, target, lun mapping of devices.
3949  * Logical drive target and lun are assigned at this time, but
3950  * physical device lun and target assignment are deferred (assigned
3951  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3952  */
3953 static void figure_bus_target_lun(struct ctlr_info *h,
3954 	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3955 {
3956 	u32 lunid = get_unaligned_le32(lunaddrbytes);
3957 
3958 	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3959 		/* physical device, target and lun filled in later */
3960 		if (is_hba_lunid(lunaddrbytes)) {
3961 			int bus = HPSA_HBA_BUS;
3962 
3963 			if (!device->rev)
3964 				bus = HPSA_LEGACY_HBA_BUS;
3965 			hpsa_set_bus_target_lun(device,
3966 					bus, 0, lunid & 0x3fff);
3967 		} else
3968 			/* defer target, lun assignment for physical devices */
3969 			hpsa_set_bus_target_lun(device,
3970 					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3971 		return;
3972 	}
3973 	/* It's a logical device */
3974 	if (device->external) {
3975 		hpsa_set_bus_target_lun(device,
3976 			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3977 			lunid & 0x00ff);
3978 		return;
3979 	}
3980 	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3981 				0, lunid & 0x3fff);
3982 }
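
/*
 * Worked example for figure_bus_target_lun() above (illustrative):
 * an external logical lunid of 0x00030005 decodes to target
 * (0x00030005 >> 16) & 0x3fff == 3 and lun 0x00030005 & 0x00ff == 5,
 * while a local logical volume with the same lunid lands on
 * HPSA_RAID_VOLUME_BUS at target 0, lun 0x00030005 & 0x3fff == 5.
 */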
3983 
3984 
3985 /*
3986  * Get address of physical disk used for an ioaccel2 mode command:
3987  *	1. Extract ioaccel2 handle from the command.
3988  *	2. Find a matching ioaccel2 handle from list of physical disks.
3989  *	3. Return:
3990  *		1 and set scsi3addr to address of matching physical
3991  *		0 if no matching physical disk was found.
3992  */
3993 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3994 	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3995 {
3996 	struct io_accel2_cmd *c2 =
3997 			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3998 	unsigned long flags;
3999 	int i;
4000 
4001 	spin_lock_irqsave(&h->devlock, flags);
4002 	for (i = 0; i < h->ndevices; i++)
4003 		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
4004 			memcpy(scsi3addr, h->dev[i]->scsi3addr,
4005 				sizeof(h->dev[i]->scsi3addr));
4006 			spin_unlock_irqrestore(&h->devlock, flags);
4007 			return 1;
4008 		}
4009 	spin_unlock_irqrestore(&h->devlock, flags);
4010 	return 0;
4011 }
4012 
4013 static int  figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
4014 	int i, int nphysicals, int nlocal_logicals)
4015 {
4016 	/* In report logicals, local logicals are listed first,
4017 	 * then any externals.
4018 	 */
4019 	int logicals_start = nphysicals + (raid_ctlr_position == 0);
4020 
4021 	if (i == raid_ctlr_position)
4022 		return 0;
4023 
4024 	if (i < logicals_start)
4025 		return 0;
4026 
4027 	/* i is in logicals range, but still within local logicals */
4028 	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
4029 		return 0;
4030 
4031 	return 1; /* it's an external lun */
4032 }
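
/*
 * Worked example for figure_external_status() above (illustrative):
 * with raid_ctlr_position == 0, nphysicals == 3 and
 * nlocal_logicals == 2, index 0 is the controller, indices 1..3 are
 * physicals, 4..5 are local logicals, and anything from 6 up is
 * reported as an external lun.
 */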
4033 
4034 /*
4035  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
4036  * logdev.  The number of luns in physdev and logdev are returned in
4037  * *nphysicals and *nlogicals, respectively.
4038  * Returns 0 on success, -1 otherwise.
4039  */
4040 static int hpsa_gather_lun_info(struct ctlr_info *h,
4041 	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
4042 	struct ReportLUNdata *logdev, u32 *nlogicals)
4043 {
4044 	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
4045 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
4046 		return -1;
4047 	}
4048 	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
4049 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
4050 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
4051 			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
4052 		*nphysicals = HPSA_MAX_PHYS_LUN;
4053 	}
4054 	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
4055 		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
4056 		return -1;
4057 	}
4058 	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
4059 	/* Reject Logicals in excess of our max capability. */
4060 	if (*nlogicals > HPSA_MAX_LUN) {
4061 		dev_warn(&h->pdev->dev,
4062 			"maximum logical LUNs (%d) exceeded.  "
4063 			"%d LUNs ignored.\n", HPSA_MAX_LUN,
4064 			*nlogicals - HPSA_MAX_LUN);
4065 		*nlogicals = HPSA_MAX_LUN;
4066 	}
4067 	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
4068 		dev_warn(&h->pdev->dev,
4069 			"maximum logical + physical LUNs (%d) exceeded. "
4070 			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
4071 			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
4072 		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
4073 	}
4074 	return 0;
4075 }
4076 
4077 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
4078 	int i, int nphysicals, int nlogicals,
4079 	struct ReportExtendedLUNdata *physdev_list,
4080 	struct ReportLUNdata *logdev_list)
4081 {
4082 	/* Helper function, figure out where the LUN ID info is coming from
4083 	 * given index i, lists of physical and logical devices, where in
4084 	 * the list the raid controller is supposed to appear (first or last)
4085 	 */
4086 
4087 	int logicals_start = nphysicals + (raid_ctlr_position == 0);
4088 	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
4089 
4090 	if (i == raid_ctlr_position)
4091 		return RAID_CTLR_LUNID;
4092 
4093 	if (i < logicals_start)
4094 		return &physdev_list->LUN[i -
4095 				(raid_ctlr_position == 0)].lunid[0];
4096 
4097 	if (i < last_device)
4098 		return &logdev_list->LUN[i - nphysicals -
4099 			(raid_ctlr_position == 0)][0];
4100 	BUG();
4101 	return NULL;
4102 }
4103 
4104 /* get physical drive ioaccel handle and queue depth */
4105 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4106 		struct hpsa_scsi_dev_t *dev,
4107 		struct ReportExtendedLUNdata *rlep, int rle_index,
4108 		struct bmic_identify_physical_device *id_phys)
4109 {
4110 	int rc;
4111 	struct ext_report_lun_entry *rle;
4112 
4113 	/*
4114 	 * external targets don't support BMIC
4115 	 */
4116 	if (dev->external) {
4117 		dev->queue_depth = 7;
4118 		return;
4119 	}
4120 
4121 	rle = &rlep->LUN[rle_index];
4122 
4123 	dev->ioaccel_handle = rle->ioaccel_handle;
4124 	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
4125 		dev->hba_ioaccel_enabled = 1;
4126 	memset(id_phys, 0, sizeof(*id_phys));
4127 	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
4128 			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
4129 			sizeof(*id_phys));
4130 	/* Reserve space for FW operations */
4131 #define DRIVE_CMDS_RESERVED_FOR_FW 2
4132 #define DRIVE_QUEUE_DEPTH 7
4133 	if (!rc)
4134 		dev->queue_depth =
4135 			le16_to_cpu(id_phys->current_queue_depth_limit) -
4136 				DRIVE_CMDS_RESERVED_FOR_FW;
4137 	else
4138 		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
4139 }
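
/*
 * Illustrative note on the queue-depth math above: a drive reporting
 * a current_queue_depth_limit of 32 is given a driver-visible depth
 * of 32 - DRIVE_CMDS_RESERVED_FOR_FW == 30, while a drive whose BMIC
 * identify fails falls back to the conservative DRIVE_QUEUE_DEPTH of 7.
 */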
4140 
4141 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
4142 	struct ReportExtendedLUNdata *rlep, int rle_index,
4143 	struct bmic_identify_physical_device *id_phys)
4144 {
4145 	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
4146 
4147 	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
4148 		this_device->hba_ioaccel_enabled = 1;
4149 
4150 	memcpy(&this_device->active_path_index,
4151 		&id_phys->active_path_number,
4152 		sizeof(this_device->active_path_index));
4153 	memcpy(&this_device->path_map,
4154 		&id_phys->redundant_path_present_map,
4155 		sizeof(this_device->path_map));
4156 	memcpy(&this_device->box,
4157 		&id_phys->alternate_paths_phys_box_on_port,
4158 		sizeof(this_device->box));
4159 	memcpy(&this_device->phys_connector,
4160 		&id_phys->alternate_paths_phys_connector,
4161 		sizeof(this_device->phys_connector));
4162 	memcpy(&this_device->bay,
4163 		&id_phys->phys_bay_in_box,
4164 		sizeof(this_device->bay));
4165 }
4166 
4167 /* get number of local logical disks. */
4168 static int hpsa_set_local_logical_count(struct ctlr_info *h,
4169 	struct bmic_identify_controller *id_ctlr,
4170 	u32 *nlocals)
4171 {
4172 	int rc;
4173 
4174 	if (!id_ctlr) {
4175 		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
4176 			__func__);
4177 		return -ENOMEM;
4178 	}
4179 	memset(id_ctlr, 0, sizeof(*id_ctlr));
4180 	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4181 	if (!rc) {
4182 		if (id_ctlr->configured_logical_drive_count < 256)
4183 			*nlocals = id_ctlr->configured_logical_drive_count;
4184 		else
4185 			*nlocals = le16_to_cpu(
4186 					id_ctlr->extended_logical_unit_count);
4187 	} else {
4188 		*nlocals = -1;
	}
4189 	return rc;
4190 }
4191 
4192 static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
4193 {
4194 	struct bmic_identify_physical_device *id_phys;
4195 	bool is_spare = false;
4196 	int rc;
4197 
4198 	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4199 	if (!id_phys)
4200 		return false;
4201 
4202 	rc = hpsa_bmic_id_physical_device(h,
4203 					lunaddrbytes,
4204 					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
4205 					id_phys, sizeof(*id_phys));
4206 	if (rc == 0)
4207 		is_spare = (id_phys->more_flags >> 6) & 0x01;
4208 
4209 	kfree(id_phys);
4210 	return is_spare;
4211 }
4212 
4213 #define RPL_DEV_FLAG_NON_DISK                           0x1
4214 #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
4215 #define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4
4216 
4217 #define BMIC_DEVICE_TYPE_ENCLOSURE  6
4218 
4219 static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
4220 				struct ext_report_lun_entry *rle)
4221 {
4222 	u8 device_flags;
4223 	u8 device_type;
4224 
4225 	if (!MASKED_DEVICE(lunaddrbytes))
4226 		return false;
4227 
4228 	device_flags = rle->device_flags;
4229 	device_type = rle->device_type;
4230 
4231 	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
4232 		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
4233 			return false;
4234 		return true;
4235 	}
4236 
4237 	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
4238 		return false;
4239 
4240 	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
4241 		return false;
4242 
4243 	/*
4244 	 * Spares may be spun down, we do not want to
4245 	 * do an Inquiry to a RAID set spare drive as
4246 	 * that would have them spun up, that is a
4247 	 * performance hit because I/O to the RAID device
4248 	 * stops while the spin up occurs which can take
4249 	 * over 50 seconds.
4250 	 */
4251 	if (hpsa_is_disk_spare(h, lunaddrbytes))
4252 		return true;
4253 
4254 	return false;
4255 }
4256 
4257 static void hpsa_update_scsi_devices(struct ctlr_info *h)
4258 {
4259 	/* the idea here is we could get notified
4260 	 * that some devices have changed, so we do a report
4261 	 * physical luns and report logical luns cmd, and adjust
4262 	 * our list of devices accordingly.
4263 	 *
4264 	 * The scsi3addr's of devices won't change so long as the
4265 	 * adapter is not reset.  That means we can rescan and
4266 	 * tell which devices we already know about, vs. new
4267 	 * devices, vs.  disappearing devices.
4268 	 */
4269 	struct ReportExtendedLUNdata *physdev_list = NULL;
4270 	struct ReportLUNdata *logdev_list = NULL;
4271 	struct bmic_identify_physical_device *id_phys = NULL;
4272 	struct bmic_identify_controller *id_ctlr = NULL;
4273 	u32 nphysicals = 0;
4274 	u32 nlogicals = 0;
4275 	u32 nlocal_logicals = 0;
4276 	u32 ndev_allocated = 0;
4277 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
4278 	int ncurrent = 0;
4279 	int i, n_ext_target_devs, ndevs_to_allocate;
4280 	int raid_ctlr_position;
4281 	bool physical_device;
4282 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
4283 
4284 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
4285 	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
4286 	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
4287 	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
4288 	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
4289 	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
4290 
4291 	if (!currentsd || !physdev_list || !logdev_list ||
4292 		!tmpdevice || !id_phys || !id_ctlr) {
4293 		dev_err(&h->pdev->dev, "out of memory\n");
4294 		goto out;
4295 	}
4296 	memset(lunzerobits, 0, sizeof(lunzerobits));
4297 
4298 	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
4299 
4300 	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
4301 			logdev_list, &nlogicals)) {
4302 		h->drv_req_rescan = 1;
4303 		goto out;
4304 	}
4305 
4306 	/* Set number of local logicals (non PTRAID) */
4307 	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
4308 		dev_warn(&h->pdev->dev,
4309 			"%s: Can't determine number of local logical devices.\n",
4310 			__func__);
4311 	}
4312 
4313 	/* We might see up to the maximum number of logical and physical disks
4314 	 * plus external target devices, and a device for the local RAID
4315 	 * controller.
4316 	 */
4317 	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
4318 
4319 	/* Allocate the per device structures */
4320 	for (i = 0; i < ndevs_to_allocate; i++) {
4321 		if (i >= HPSA_MAX_DEVICES) {
4322 			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
4323 				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
4324 				ndevs_to_allocate - HPSA_MAX_DEVICES);
4325 			break;
4326 		}
4327 
4328 		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
4329 		if (!currentsd[i]) {
4330 			h->drv_req_rescan = 1;
4331 			goto out;
4332 		}
4333 		ndev_allocated++;
4334 	}
4335 
4336 	if (is_scsi_rev_5(h))
4337 		raid_ctlr_position = 0;
4338 	else
4339 		raid_ctlr_position = nphysicals + nlogicals;
4340 
4341 	/* adjust our table of devices */
4342 	n_ext_target_devs = 0;
4343 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
4344 		u8 *lunaddrbytes, is_OBDR = 0;
4345 		int rc = 0;
4346 		int phys_dev_index = i - (raid_ctlr_position == 0);
4347 		bool skip_device = false;
4348 
4349 		physical_device = i < nphysicals + (raid_ctlr_position == 0);
4350 
4351 		/* Figure out where the LUN ID info is coming from */
4352 		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4353 			i, nphysicals, nlogicals, physdev_list, logdev_list);
4354 
4355 		/* Determine if this is a lun from an external target array */
4356 		tmpdevice->external =
4357 			figure_external_status(h, raid_ctlr_position, i,
4358 						nphysicals, nlocal_logicals);
4359 
4360 		/*
4361 		 * Skip over some devices such as a spare.
4362 		 */
4363 		if (!tmpdevice->external && physical_device) {
4364 			skip_device = hpsa_skip_device(h, lunaddrbytes,
4365 					&physdev_list->LUN[phys_dev_index]);
4366 			if (skip_device)
4367 				continue;
4368 		}
4369 
4370 		/* Get device type, vendor, model, device id */
4371 		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
4372 							&is_OBDR);
4373 		if (rc == -ENOMEM) {
4374 			dev_warn(&h->pdev->dev,
4375 				"Out of memory, rescan deferred.\n");
4376 			h->drv_req_rescan = 1;
4377 			goto out;
4378 		}
4379 		if (rc) {
4380 			h->drv_req_rescan = 1;
4381 			continue;
4382 		}
4383 
4384 		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4385 		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4386 		this_device = currentsd[ncurrent];
4387 
4388 		/* Turn on discovery_polling if there are ext target devices.
4389 		 * Event-based change notification is unreliable for those.
4390 		 */
4391 		if (!h->discovery_polling) {
4392 			if (tmpdevice->external) {
4393 				h->discovery_polling = 1;
4394 				dev_info(&h->pdev->dev,
4395 					"External target, activate discovery polling.\n");
4396 			}
4397 		}
4398 
4399 
4400 		*this_device = *tmpdevice;
4401 		this_device->physical_device = physical_device;
4402 
4403 		/*
4404 		 * Expose all devices except for physical devices that
4405 		 * are masked.
4406 		 */
4407 		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
4408 			this_device->expose_device = 0;
4409 		else
4410 			this_device->expose_device = 1;
4411 
4412 
4413 		/*
4414 		 * Get the SAS address for physical devices that are exposed.
4415 		 */
4416 		if (this_device->physical_device && this_device->expose_device)
4417 			hpsa_get_sas_address(h, lunaddrbytes, this_device);
4418 
4419 		switch (this_device->devtype) {
4420 		case TYPE_ROM:
4421 			/* We don't *really* support actual CD-ROM devices,
4422 			 * just "One Button Disaster Recovery" tape drive
4423 			 * which temporarily pretends to be a CD-ROM drive.
4424 			 * So we check that the device is really an OBDR tape
4425 			 * device by checking for "$DR-10" in bytes 43-48 of
4426 			 * the inquiry data.
4427 			 */
4428 			if (is_OBDR)
4429 				ncurrent++;
4430 			break;
4431 		case TYPE_DISK:
4432 		case TYPE_ZBC:
4433 			if (this_device->physical_device) {
4434 				/* The disk is in HBA mode. */
4435 				/* Never use RAID mapper in HBA mode. */
4436 				this_device->offload_enabled = 0;
4437 				hpsa_get_ioaccel_drive_info(h, this_device,
4438 					physdev_list, phys_dev_index, id_phys);
4439 				hpsa_get_path_info(this_device,
4440 					physdev_list, phys_dev_index, id_phys);
4441 			}
4442 			ncurrent++;
4443 			break;
4444 		case TYPE_TAPE:
4445 		case TYPE_MEDIUM_CHANGER:
4446 			ncurrent++;
4447 			break;
4448 		case TYPE_ENCLOSURE:
4449 			if (!this_device->external)
4450 				hpsa_get_enclosure_info(h, lunaddrbytes,
4451 						physdev_list, phys_dev_index,
4452 						this_device);
4453 			ncurrent++;
4454 			break;
4455 		case TYPE_RAID:
4456 			/* Only present the Smartarray HBA as a RAID controller.
4457 			 * If it's a RAID controller other than the HBA itself
4458 			 * (an external RAID controller, MSA500 or similar)
4459 			 * don't present it.
4460 			 */
4461 			if (!is_hba_lunid(lunaddrbytes))
4462 				break;
4463 			ncurrent++;
4464 			break;
4465 		default:
4466 			break;
4467 		}
4468 		if (ncurrent >= HPSA_MAX_DEVICES)
4469 			break;
4470 	}
4471 
4472 	if (h->sas_host == NULL) {
4473 		int rc = 0;
4474 
4475 		rc = hpsa_add_sas_host(h);
4476 		if (rc) {
4477 			dev_warn(&h->pdev->dev,
4478 				"Could not add sas host %d\n", rc);
4479 			goto out;
4480 		}
4481 	}
4482 
4483 	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
4484 out:
4485 	kfree(tmpdevice);
4486 	for (i = 0; i < ndev_allocated; i++)
4487 		kfree(currentsd[i]);
4488 	kfree(currentsd);
4489 	kfree(physdev_list);
4490 	kfree(logdev_list);
4491 	kfree(id_ctlr);
4492 	kfree(id_phys);
4493 }
4494 
4495 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
4496 				   struct scatterlist *sg)
4497 {
4498 	u64 addr64 = (u64) sg_dma_address(sg);
4499 	unsigned int len = sg_dma_len(sg);
4500 
4501 	desc->Addr = cpu_to_le64(addr64);
4502 	desc->Len = cpu_to_le32(len);
4503 	desc->Ext = 0;
4504 }
4505 
4506 /*
4507  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
4508  * dma mapping  and fills in the scatter gather entries of the
4509  * hpsa command, cp.
4510  */
4511 static int hpsa_scatter_gather(struct ctlr_info *h,
4512 		struct CommandList *cp,
4513 		struct scsi_cmnd *cmd)
4514 {
4515 	struct scatterlist *sg;
4516 	int use_sg, i, sg_limit, chained, last_sg;
4517 	struct SGDescriptor *curr_sg;
4518 
4519 	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4520 
4521 	use_sg = scsi_dma_map(cmd);
4522 	if (use_sg < 0)
4523 		return use_sg;
4524 
4525 	if (!use_sg)
4526 		goto sglist_finished;
4527 
4528 	/*
4529 	 * If the number of entries is greater than the max for a single list,
4530 	 * then we have a chained list; we will set up all but one entry in the
4531 	 * first list (the last entry is saved for link information);
4532 	 * otherwise, we don't have a chained list and we'll set up at each of
4533 	 * the entries in the one list.
4534 	 */
4535 	curr_sg = cp->SG;
4536 	chained = use_sg > h->max_cmd_sg_entries;
4537 	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4538 	last_sg = scsi_sg_count(cmd) - 1;
4539 	scsi_for_each_sg(cmd, sg, sg_limit, i) {
4540 		hpsa_set_sg_descriptor(curr_sg, sg);
4541 		curr_sg++;
4542 	}
4543 
4544 	if (chained) {
4545 		/*
4546 		 * Continue with the chained list.  Set curr_sg to the chained
4547 		 * list.  Modify the limit to the total count less the entries
4548 		 * we've already set up.  Resume the scan at the list entry
4549 		 * where the previous loop left off.
4550 		 */
4551 		curr_sg = h->cmd_sg_list[cp->cmdindex];
4552 		sg_limit = use_sg - sg_limit;
4553 		for_each_sg(sg, sg, sg_limit, i) {
4554 			hpsa_set_sg_descriptor(curr_sg, sg);
4555 			curr_sg++;
4556 		}
4557 	}
4558 
4559 	/* Back the pointer up to the last entry and mark it as "last". */
4560 	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4561 
4562 	if (use_sg + chained > h->maxSG)
4563 		h->maxSG = use_sg + chained;
4564 
4565 	if (chained) {
4566 		cp->Header.SGList = h->max_cmd_sg_entries;
4567 		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4568 		if (hpsa_map_sg_chain_block(h, cp)) {
4569 			scsi_dma_unmap(cmd);
4570 			return -1;
4571 		}
4572 		return 0;
4573 	}
4574 
4575 sglist_finished:
4576 
4577 	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
4578 	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4579 	return 0;
4580 }
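
/*
 * Illustrative sketch of the chaining split performed by
 * hpsa_scatter_gather() above: when the list chains, the last slot of
 * the primary list is reserved for the chain pointer, so only
 * max_cmd_sg_entries - 1 data descriptors fit there and the rest
 * spill into the chain block.  example_sg_split() is hypothetical.
 */
static void __maybe_unused example_sg_split(int use_sg,
	int max_cmd_sg_entries, int *in_primary, int *in_chain)
{
	if (use_sg > max_cmd_sg_entries) {
		*in_primary = max_cmd_sg_entries - 1;
		*in_chain = use_sg - *in_primary;
	} else {
		*in_primary = use_sg;
		*in_chain = 0;
	}
}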
4581 
4582 #define IO_ACCEL_INELIGIBLE (1)
4583 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4584 {
4585 	int is_write = 0;
4586 	u32 block;
4587 	u32 block_cnt;
4588 
4589 	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
4590 	switch (cdb[0]) {
4591 	case WRITE_6:
4592 	case WRITE_12:
4593 		is_write = 1;
		/* fall through */
4594 	case READ_6:
4595 	case READ_12:
4596 		if (*cdb_len == 6) {
4597 			block = (((cdb[1] & 0x1F) << 16) |
4598 				(cdb[2] << 8) |
4599 				cdb[3]);
4600 			block_cnt = cdb[4];
4601 			if (block_cnt == 0)
4602 				block_cnt = 256;
4603 		} else {
4604 			BUG_ON(*cdb_len != 12);
4605 			block = get_unaligned_be32(&cdb[2]);
4606 			block_cnt = get_unaligned_be32(&cdb[6]);
4607 		}
4608 		if (block_cnt > 0xffff)
4609 			return IO_ACCEL_INELIGIBLE;
4610 
4611 		cdb[0] = is_write ? WRITE_10 : READ_10;
4612 		cdb[1] = 0;
4613 		cdb[2] = (u8) (block >> 24);
4614 		cdb[3] = (u8) (block >> 16);
4615 		cdb[4] = (u8) (block >> 8);
4616 		cdb[5] = (u8) (block);
4617 		cdb[6] = 0;
4618 		cdb[7] = (u8) (block_cnt >> 8);
4619 		cdb[8] = (u8) (block_cnt);
4620 		cdb[9] = 0;
4621 		*cdb_len = 10;
4622 		break;
4623 	}
4624 	return 0;
4625 }
4626 
4627 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4628 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4629 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4630 {
4631 	struct scsi_cmnd *cmd = c->scsi_cmd;
4632 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4633 	unsigned int len;
4634 	unsigned int total_len = 0;
4635 	struct scatterlist *sg;
4636 	u64 addr64;
4637 	int use_sg, i;
4638 	struct SGDescriptor *curr_sg;
4639 	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4640 
4641 	/* TODO: implement chaining support */
4642 	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4643 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4644 		return IO_ACCEL_INELIGIBLE;
4645 	}
4646 
4647 	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4648 
4649 	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4650 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4651 		return IO_ACCEL_INELIGIBLE;
4652 	}
4653 
4654 	c->cmd_type = CMD_IOACCEL1;
4655 
4656 	/* Adjust the DMA address to point to the accelerated command buffer */
4657 	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4658 				(c->cmdindex * sizeof(*cp));
4659 	BUG_ON(c->busaddr & 0x0000007F);
4660 
4661 	use_sg = scsi_dma_map(cmd);
4662 	if (use_sg < 0) {
4663 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4664 		return use_sg;
4665 	}
4666 
4667 	if (use_sg) {
4668 		curr_sg = cp->SG;
4669 		scsi_for_each_sg(cmd, sg, use_sg, i) {
4670 			addr64 = (u64) sg_dma_address(sg);
4671 			len  = sg_dma_len(sg);
4672 			total_len += len;
4673 			curr_sg->Addr = cpu_to_le64(addr64);
4674 			curr_sg->Len = cpu_to_le32(len);
4675 			curr_sg->Ext = cpu_to_le32(0);
4676 			curr_sg++;
4677 		}
4678 		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4679 
4680 		switch (cmd->sc_data_direction) {
4681 		case DMA_TO_DEVICE:
4682 			control |= IOACCEL1_CONTROL_DATA_OUT;
4683 			break;
4684 		case DMA_FROM_DEVICE:
4685 			control |= IOACCEL1_CONTROL_DATA_IN;
4686 			break;
4687 		case DMA_NONE:
4688 			control |= IOACCEL1_CONTROL_NODATAXFER;
4689 			break;
4690 		default:
4691 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4692 			cmd->sc_data_direction);
4693 			BUG();
4694 			break;
4695 		}
4696 	} else {
4697 		control |= IOACCEL1_CONTROL_NODATAXFER;
4698 	}
4699 
4700 	c->Header.SGList = use_sg;
4701 	/* Fill out the command structure to submit */
4702 	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4703 	cp->transfer_len = cpu_to_le32(total_len);
4704 	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4705 			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4706 	cp->control = cpu_to_le32(control);
4707 	memcpy(cp->CDB, cdb, cdb_len);
4708 	memcpy(cp->CISS_LUN, scsi3addr, 8);
4709 	/* Tag was already set at init time. */
4710 	enqueue_cmd_and_start_io(h, c);
4711 	return 0;
4712 }
4713 
4714 /*
4715  * Queue a command directly to a device behind the controller using the
4716  * I/O accelerator path.
4717  */
4718 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4719 	struct CommandList *c)
4720 {
4721 	struct scsi_cmnd *cmd = c->scsi_cmd;
4722 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4723 
4724 	if (!dev)
4725 		return -1;
4726 
4727 	c->phys_disk = dev;
4728 
4729 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4730 		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4731 }
4732 
4733 /*
4734  * Set encryption parameters for the ioaccel2 request
4735  */
4736 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4737 	struct CommandList *c, struct io_accel2_cmd *cp)
4738 {
4739 	struct scsi_cmnd *cmd = c->scsi_cmd;
4740 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4741 	struct raid_map_data *map = &dev->raid_map;
4742 	u64 first_block;
4743 
4744 	/* Are we doing encryption on this device? */
4745 	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4746 		return;
4747 	/* Set the data encryption key index. */
4748 	cp->dekindex = map->dekindex;
4749 
4750 	/* Set the encryption enable flag, encoded into direction field. */
4751 	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4752 
4753 	/* Set encryption tweak values based on logical block address
4754 	 * If block size is 512, tweak value is LBA.
4755 	 * For other block sizes, tweak is (LBA * block size) / 512.
4756 	 */
4757 	switch (cmd->cmnd[0]) {
4758 	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4759 	case READ_6:
4760 	case WRITE_6:
4761 		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4762 				(cmd->cmnd[2] << 8) |
4763 				cmd->cmnd[3]);
4764 		break;
4765 	case WRITE_10:
4766 	case READ_10:
4767 	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4768 	case WRITE_12:
4769 	case READ_12:
4770 		first_block = get_unaligned_be32(&cmd->cmnd[2]);
4771 		break;
4772 	case WRITE_16:
4773 	case READ_16:
4774 		first_block = get_unaligned_be64(&cmd->cmnd[2]);
4775 		break;
4776 	default:
4777 		dev_err(&h->pdev->dev,
4778 			"ERROR: %s: size (0x%x) not supported for encryption\n",
4779 			__func__, cmd->cmnd[0]);
4780 		BUG();
4781 		break;
4782 	}
4783 
4784 	if (le32_to_cpu(map->volume_blk_size) != 512)
4785 		first_block = first_block *
4786 				le32_to_cpu(map->volume_blk_size)/512;
4787 
4788 	cp->tweak_lower = cpu_to_le32(first_block);
4789 	cp->tweak_upper = cpu_to_le32(first_block >> 32);
4790 }
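
/*
 * Worked example for the tweak rescaling above (illustrative): with a
 * volume_blk_size of 4096, LBA 10 becomes a tweak of
 * 10 * 4096 / 512 == 80; with 512-byte blocks the tweak is simply the
 * LBA itself.
 */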
4791 
4792 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4793 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4794 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4795 {
4796 	struct scsi_cmnd *cmd = c->scsi_cmd;
4797 	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4798 	struct ioaccel2_sg_element *curr_sg;
4799 	int use_sg, i;
4800 	struct scatterlist *sg;
4801 	u64 addr64;
4802 	u32 len;
4803 	u32 total_len = 0;
4804 
4805 	if (!cmd->device)
4806 		return -1;
4807 
4808 	if (!cmd->device->hostdata)
4809 		return -1;
4810 
4811 	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4812 
4813 	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4814 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4815 		return IO_ACCEL_INELIGIBLE;
4816 	}
4817 
4818 	c->cmd_type = CMD_IOACCEL2;
4819 	/* Adjust the DMA address to point to the accelerated command buffer */
4820 	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4821 				(c->cmdindex * sizeof(*cp));
4822 	BUG_ON(c->busaddr & 0x0000007F);
4823 
4824 	memset(cp, 0, sizeof(*cp));
4825 	cp->IU_type = IOACCEL2_IU_TYPE;
4826 
4827 	use_sg = scsi_dma_map(cmd);
4828 	if (use_sg < 0) {
4829 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4830 		return use_sg;
4831 	}
4832 
4833 	if (use_sg) {
4834 		curr_sg = cp->sg;
4835 		if (use_sg > h->ioaccel_maxsg) {
4836 			addr64 = le64_to_cpu(
4837 				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4838 			curr_sg->address = cpu_to_le64(addr64);
4839 			curr_sg->length = 0;
4840 			curr_sg->reserved[0] = 0;
4841 			curr_sg->reserved[1] = 0;
4842 			curr_sg->reserved[2] = 0;
4843 			curr_sg->chain_indicator = 0x80;
4844 
4845 			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4846 		}
4847 		scsi_for_each_sg(cmd, sg, use_sg, i) {
4848 			addr64 = (u64) sg_dma_address(sg);
4849 			len  = sg_dma_len(sg);
4850 			total_len += len;
4851 			curr_sg->address = cpu_to_le64(addr64);
4852 			curr_sg->length = cpu_to_le32(len);
4853 			curr_sg->reserved[0] = 0;
4854 			curr_sg->reserved[1] = 0;
4855 			curr_sg->reserved[2] = 0;
4856 			curr_sg->chain_indicator = 0;
4857 			curr_sg++;
4858 		}
4859 
4860 		switch (cmd->sc_data_direction) {
4861 		case DMA_TO_DEVICE:
4862 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4863 			cp->direction |= IOACCEL2_DIR_DATA_OUT;
4864 			break;
4865 		case DMA_FROM_DEVICE:
4866 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4867 			cp->direction |= IOACCEL2_DIR_DATA_IN;
4868 			break;
4869 		case DMA_NONE:
4870 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4871 			cp->direction |= IOACCEL2_DIR_NO_DATA;
4872 			break;
4873 		default:
4874 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4875 				cmd->sc_data_direction);
4876 			BUG();
4877 			break;
4878 		}
4879 	} else {
4880 		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4881 		cp->direction |= IOACCEL2_DIR_NO_DATA;
4882 	}
4883 
4884 	/* Set encryption parameters, if necessary */
4885 	set_encrypt_ioaccel2(h, c, cp);
4886 
4887 	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4888 	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
4889 	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4890 
4891 	cp->data_len = cpu_to_le32(total_len);
4892 	cp->err_ptr = cpu_to_le64(c->busaddr +
4893 			offsetof(struct io_accel2_cmd, error_data));
4894 	cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4895 
4896 	/* fill in sg elements */
4897 	if (use_sg > h->ioaccel_maxsg) {
4898 		cp->sg_count = 1;
4899 		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4900 		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4901 			atomic_dec(&phys_disk->ioaccel_cmds_out);
4902 			scsi_dma_unmap(cmd);
4903 			return -1;
4904 		}
4905 	} else
4906 		cp->sg_count = (u8) use_sg;
4907 
4908 	enqueue_cmd_and_start_io(h, c);
4909 	return 0;
4910 }
4911 
4912 /*
4913  * Queue a command to the correct I/O accelerator path.
4914  */
4915 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4916 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4917 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4918 {
4919 	if (!c->scsi_cmd->device)
4920 		return -1;
4921 
4922 	if (!c->scsi_cmd->device->hostdata)
4923 		return -1;
4924 
4925 	/* Try to honor the device's queue depth */
4926 	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4927 					phys_disk->queue_depth) {
4928 		atomic_dec(&phys_disk->ioaccel_cmds_out);
4929 		return IO_ACCEL_INELIGIBLE;
4930 	}
4931 	if (h->transMethod & CFGTBL_Trans_io_accel1)
4932 		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4933 						cdb, cdb_len, scsi3addr,
4934 						phys_disk);
4935 	else
4936 		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4937 						cdb, cdb_len, scsi3addr,
4938 						phys_disk);
4939 }
4940 
4941 static void raid_map_helper(struct raid_map_data *map,
4942 		int offload_to_mirror, u32 *map_index, u32 *current_group)
4943 {
4944 	if (offload_to_mirror == 0) {
4945 		/* use physical disk in the first mirrored group. */
4946 		*map_index %= le16_to_cpu(map->data_disks_per_row);
4947 		return;
4948 	}
4949 	do {
4950 		/* determine mirror group that *map_index indicates */
4951 		*current_group = *map_index /
4952 			le16_to_cpu(map->data_disks_per_row);
4953 		if (offload_to_mirror == *current_group)
4954 			continue;
4955 		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4956 			/* select map index from next group */
4957 			*map_index += le16_to_cpu(map->data_disks_per_row);
4958 			(*current_group)++;
4959 		} else {
4960 			/* select map index from first group */
4961 			*map_index %= le16_to_cpu(map->data_disks_per_row);
4962 			*current_group = 0;
4963 		}
4964 	} while (offload_to_mirror != *current_group);
4965 }
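
/*
 * Worked example for raid_map_helper() above (illustrative): with
 * data_disks_per_row == 4 and layout_map_count == 3, a map_index of 5
 * sits in mirror group 1; asking for offload_to_mirror == 2 advances
 * the index by one more row to 9, the same column in the third group.
 */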
4966 
4967 /*
4968  * Attempt to perform offload RAID mapping for a logical volume I/O.
4969  */
4970 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4971 	struct CommandList *c)
4972 {
4973 	struct scsi_cmnd *cmd = c->scsi_cmd;
4974 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4975 	struct raid_map_data *map = &dev->raid_map;
4976 	struct raid_map_disk_data *dd = &map->data[0];
4977 	int is_write = 0;
4978 	u32 map_index;
4979 	u64 first_block, last_block;
4980 	u32 block_cnt;
4981 	u32 blocks_per_row;
4982 	u64 first_row, last_row;
4983 	u32 first_row_offset, last_row_offset;
4984 	u32 first_column, last_column;
4985 	u64 r0_first_row, r0_last_row;
4986 	u32 r5or6_blocks_per_row;
4987 	u64 r5or6_first_row, r5or6_last_row;
4988 	u32 r5or6_first_row_offset, r5or6_last_row_offset;
4989 	u32 r5or6_first_column, r5or6_last_column;
4990 	u32 total_disks_per_row;
4991 	u32 stripesize;
4992 	u32 first_group, last_group, current_group;
4993 	u32 map_row;
4994 	u32 disk_handle;
4995 	u64 disk_block;
4996 	u32 disk_block_cnt;
4997 	u8 cdb[16];
4998 	u8 cdb_len;
4999 	u16 strip_size;
5000 #if BITS_PER_LONG == 32
5001 	u64 tmpdiv;
5002 #endif
5003 	int offload_to_mirror;
5004 
5005 	if (!dev)
5006 		return -1;
5007 
5008 	/* check for valid opcode, get LBA and block count */
5009 	switch (cmd->cmnd[0]) {
5010 	case WRITE_6:
5011 		is_write = 1;
		/* fall through */
5012 	case READ_6:
5013 		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
5014 				(cmd->cmnd[2] << 8) |
5015 				cmd->cmnd[3]);
5016 		block_cnt = cmd->cmnd[4];
5017 		if (block_cnt == 0)
5018 			block_cnt = 256;
5019 		break;
5020 	case WRITE_10:
5021 		is_write = 1;
		/* fall through */
5022 	case READ_10:
5023 		first_block =
5024 			(((u64) cmd->cmnd[2]) << 24) |
5025 			(((u64) cmd->cmnd[3]) << 16) |
5026 			(((u64) cmd->cmnd[4]) << 8) |
5027 			cmd->cmnd[5];
5028 		block_cnt =
5029 			(((u32) cmd->cmnd[7]) << 8) |
5030 			cmd->cmnd[8];
5031 		break;
5032 	case WRITE_12:
5033 		is_write = 1;
		/* fall through */
5034 	case READ_12:
5035 		first_block =
5036 			(((u64) cmd->cmnd[2]) << 24) |
5037 			(((u64) cmd->cmnd[3]) << 16) |
5038 			(((u64) cmd->cmnd[4]) << 8) |
5039 			cmd->cmnd[5];
5040 		block_cnt =
5041 			(((u32) cmd->cmnd[6]) << 24) |
5042 			(((u32) cmd->cmnd[7]) << 16) |
5043 			(((u32) cmd->cmnd[8]) << 8) |
5044 		cmd->cmnd[9];
5045 		break;
5046 	case WRITE_16:
5047 		is_write = 1;
		/* fall through */
5048 	case READ_16:
5049 		first_block =
5050 			(((u64) cmd->cmnd[2]) << 56) |
5051 			(((u64) cmd->cmnd[3]) << 48) |
5052 			(((u64) cmd->cmnd[4]) << 40) |
5053 			(((u64) cmd->cmnd[5]) << 32) |
5054 			(((u64) cmd->cmnd[6]) << 24) |
5055 			(((u64) cmd->cmnd[7]) << 16) |
5056 			(((u64) cmd->cmnd[8]) << 8) |
5057 			cmd->cmnd[9];
5058 		block_cnt =
5059 			(((u32) cmd->cmnd[10]) << 24) |
5060 			(((u32) cmd->cmnd[11]) << 16) |
5061 			(((u32) cmd->cmnd[12]) << 8) |
5062 			cmd->cmnd[13];
5063 		break;
5064 	default:
5065 		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
5066 	}
5067 	last_block = first_block + block_cnt - 1;
5068 
5069 	/* check for write to non-RAID-0 */
5070 	if (is_write && dev->raid_level != 0)
5071 		return IO_ACCEL_INELIGIBLE;
5072 
5073 	/* check for invalid block or wraparound */
5074 	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
5075 		last_block < first_block)
5076 		return IO_ACCEL_INELIGIBLE;
5077 
5078 	/* calculate stripe information for the request */
5079 	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
5080 				le16_to_cpu(map->strip_size);
5081 	strip_size = le16_to_cpu(map->strip_size);
5082 #if BITS_PER_LONG == 32
5083 	tmpdiv = first_block;
5084 	(void) do_div(tmpdiv, blocks_per_row);
5085 	first_row = tmpdiv;
5086 	tmpdiv = last_block;
5087 	(void) do_div(tmpdiv, blocks_per_row);
5088 	last_row = tmpdiv;
5089 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5090 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5091 	tmpdiv = first_row_offset;
5092 	(void) do_div(tmpdiv, strip_size);
5093 	first_column = tmpdiv;
5094 	tmpdiv = last_row_offset;
5095 	(void) do_div(tmpdiv, strip_size);
5096 	last_column = tmpdiv;
5097 #else
5098 	first_row = first_block / blocks_per_row;
5099 	last_row = last_block / blocks_per_row;
5100 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
5101 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
5102 	first_column = first_row_offset / strip_size;
5103 	last_column = last_row_offset / strip_size;
5104 #endif
5105 
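	/*
	 * Worked example (illustrative): with strip_size == 128 and
	 * data_disks_per_row == 4, blocks_per_row == 512; first_block
	 * == 1000 then lands in row 1000 / 512 == 1, at row offset
	 * 1000 - 512 == 488 and column 488 / 128 == 3.
	 */
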
5106 	/* if this isn't a single row/column then give to the controller */
5107 	if ((first_row != last_row) || (first_column != last_column))
5108 		return IO_ACCEL_INELIGIBLE;
5109 
5110 	/* proceeding with driver mapping */
5111 	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
5112 				le16_to_cpu(map->metadata_disks_per_row);
5113 	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5114 				le16_to_cpu(map->row_cnt);
5115 	map_index = (map_row * total_disks_per_row) + first_column;
5116 
5117 	switch (dev->raid_level) {
5118 	case HPSA_RAID_0:
5119 		break; /* nothing special to do */
5120 	case HPSA_RAID_1:
5121 		/* Handles load balancing across RAID 1 members
5122 		 * (2-drive R1 and R10 with an even # of drives).
5123 		 * Appropriate for SSDs, not optimal for HDDs.
5124 		 */
5125 		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
5126 		if (dev->offload_to_mirror)
5127 			map_index += le16_to_cpu(map->data_disks_per_row);
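		/* toggle the mirror used for the next request */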
5128 		dev->offload_to_mirror = !dev->offload_to_mirror;
5129 		break;
5130 	case HPSA_RAID_ADM:
5131 		/* Handles N-way mirrors (R1-ADM)
5132 		 * and R10 with # of drives divisible by 3.
5133 		 */
5134 		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
5135 
5136 		offload_to_mirror = dev->offload_to_mirror;
5137 		raid_map_helper(map, offload_to_mirror,
5138 				&map_index, &current_group);
5139 		/* set mirror group to use next time */
5140 		offload_to_mirror =
5141 			(offload_to_mirror >=
5142 			le16_to_cpu(map->layout_map_count) - 1)
5143 			? 0 : offload_to_mirror + 1;
5144 		dev->offload_to_mirror = offload_to_mirror;
5145 		/* Avoid direct use of dev->offload_to_mirror within this
5146 		 * function since multiple threads might simultaneously
5147 		 * increment it beyond the range of map->layout_map_count - 1.
5148 		 */
5149 		break;
5150 	case HPSA_RAID_5:
5151 	case HPSA_RAID_6:
5152 		if (le16_to_cpu(map->layout_map_count) <= 1)
5153 			break;
5154 
5155 		/* Verify first and last block are in same RAID group */
5156 		r5or6_blocks_per_row =
5157 			le16_to_cpu(map->strip_size) *
5158 			le16_to_cpu(map->data_disks_per_row);
5159 		BUG_ON(r5or6_blocks_per_row == 0);
5160 		stripesize = r5or6_blocks_per_row *
5161 			le16_to_cpu(map->layout_map_count);
5162 #if BITS_PER_LONG == 32
5163 		tmpdiv = first_block;
5164 		first_group = do_div(tmpdiv, stripesize);
5165 		tmpdiv = first_group;
5166 		(void) do_div(tmpdiv, r5or6_blocks_per_row);
5167 		first_group = tmpdiv;
5168 		tmpdiv = last_block;
5169 		last_group = do_div(tmpdiv, stripesize);
5170 		tmpdiv = last_group;
5171 		(void) do_div(tmpdiv, r5or6_blocks_per_row);
5172 		last_group = tmpdiv;
5173 #else
5174 		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
5175 		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
5176 #endif
5177 		if (first_group != last_group)
5178 			return IO_ACCEL_INELIGIBLE;
5179 
5180 		/* Verify request is in a single row of RAID 5/6 */
5181 #if BITS_PER_LONG == 32
5182 		tmpdiv = first_block;
5183 		(void) do_div(tmpdiv, stripesize);
5184 		first_row = r5or6_first_row = r0_first_row = tmpdiv;
5185 		tmpdiv = last_block;
5186 		(void) do_div(tmpdiv, stripesize);
5187 		r5or6_last_row = r0_last_row = tmpdiv;
5188 #else
5189 		first_row = r5or6_first_row = r0_first_row =
5190 						first_block / stripesize;
5191 		r5or6_last_row = r0_last_row = last_block / stripesize;
5192 #endif
5193 		if (r5or6_first_row != r5or6_last_row)
5194 			return IO_ACCEL_INELIGIBLE;
5195 
5196 
5197 		/* Verify request is in a single column */
5198 #if BITS_PER_LONG == 32
5199 		tmpdiv = first_block;
5200 		first_row_offset = do_div(tmpdiv, stripesize);
5201 		tmpdiv = first_row_offset;
5202 		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
5203 		r5or6_first_row_offset = first_row_offset;
5204 		tmpdiv = last_block;
5205 		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
5206 		tmpdiv = r5or6_last_row_offset;
5207 		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
5208 		tmpdiv = r5or6_first_row_offset;
5209 		(void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
5210 		first_column = r5or6_first_column = tmpdiv;
5211 		tmpdiv = r5or6_last_row_offset;
5212 		(void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
5213 		r5or6_last_column = tmpdiv;
5214 #else
5215 		first_row_offset = r5or6_first_row_offset =
5216 			(u32)((first_block % stripesize) %
5217 						r5or6_blocks_per_row);
5218 
5219 		r5or6_last_row_offset =
5220 			(u32)((last_block % stripesize) %
5221 						r5or6_blocks_per_row);
5222 
5223 		first_column = r5or6_first_column =
5224 			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
5225 		r5or6_last_column =
5226 			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
5227 #endif
5228 		if (r5or6_first_column != r5or6_last_column)
5229 			return IO_ACCEL_INELIGIBLE;
5230 
5231 		/* Request is eligible */
5232 		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
5233 			le16_to_cpu(map->row_cnt);
5234 
5235 		map_index = (first_group *
5236 			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
5237 			(map_row * total_disks_per_row) + first_column;
5238 		break;
5239 	default:
5240 		return IO_ACCEL_INELIGIBLE;
5241 	}
5242 
5243 	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
5244 		return IO_ACCEL_INELIGIBLE;
5245 
5246 	c->phys_disk = dev->phys_disk[map_index];
5247 	if (!c->phys_disk)
5248 		return IO_ACCEL_INELIGIBLE;
5249 
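	/* Map the first logical block to its offset on the chosen physical disk. */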
5250 	disk_handle = dd[map_index].ioaccel_handle;
5251 	disk_block = le64_to_cpu(map->disk_starting_blk) +
5252 			first_row * le16_to_cpu(map->strip_size) +
5253 			(first_row_offset - first_column *
5254 			le16_to_cpu(map->strip_size));
5255 	disk_block_cnt = block_cnt;
5256 
5257 	/* handle differing logical/physical block sizes */
5258 	if (map->phys_blk_shift) {
5259 		disk_block <<= map->phys_blk_shift;
5260 		disk_block_cnt <<= map->phys_blk_shift;
5261 	}
5262 	BUG_ON(disk_block_cnt > 0xffff);
5263 
5264 	/* build the new CDB for the physical disk I/O */
5265 	if (disk_block > 0xffffffff) {
5266 		cdb[0] = is_write ? WRITE_16 : READ_16;
5267 		cdb[1] = 0;
5268 		cdb[2] = (u8) (disk_block >> 56);
5269 		cdb[3] = (u8) (disk_block >> 48);
5270 		cdb[4] = (u8) (disk_block >> 40);
5271 		cdb[5] = (u8) (disk_block >> 32);
5272 		cdb[6] = (u8) (disk_block >> 24);
5273 		cdb[7] = (u8) (disk_block >> 16);
5274 		cdb[8] = (u8) (disk_block >> 8);
5275 		cdb[9] = (u8) (disk_block);
5276 		cdb[10] = (u8) (disk_block_cnt >> 24);
5277 		cdb[11] = (u8) (disk_block_cnt >> 16);
5278 		cdb[12] = (u8) (disk_block_cnt >> 8);
5279 		cdb[13] = (u8) (disk_block_cnt);
5280 		cdb[14] = 0;
5281 		cdb[15] = 0;
5282 		cdb_len = 16;
5283 	} else {
5284 		cdb[0] = is_write ? WRITE_10 : READ_10;
5285 		cdb[1] = 0;
5286 		cdb[2] = (u8) (disk_block >> 24);
5287 		cdb[3] = (u8) (disk_block >> 16);
5288 		cdb[4] = (u8) (disk_block >> 8);
5289 		cdb[5] = (u8) (disk_block);
5290 		cdb[6] = 0;
5291 		cdb[7] = (u8) (disk_block_cnt >> 8);
5292 		cdb[8] = (u8) (disk_block_cnt);
5293 		cdb[9] = 0;
5294 		cdb_len = 10;
5295 	}
5296 	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
5297 						dev->scsi3addr,
5298 						dev->phys_disk[map_index]);
5299 }
5300 
5301 /*
5302  * Submit commands down the "normal" RAID stack path.
5303  * All callers of hpsa_ciss_submit must check lockup_detected
5304  * beforehand: before, and optionally again after, calling cmd_alloc.
5305  */
5306 static int hpsa_ciss_submit(struct ctlr_info *h,
5307 	struct CommandList *c, struct scsi_cmnd *cmd,
5308 	unsigned char scsi3addr[])
5309 {
5310 	cmd->host_scribble = (unsigned char *) c;
5311 	c->cmd_type = CMD_SCSI;
5312 	c->scsi_cmd = cmd;
5313 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
5314 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
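	/* the tag encodes the command index for direct lookup at completion */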
5315 	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
5316 
5317 	/* Fill in the request block... */
5318 
5319 	c->Request.Timeout = 0;
5320 	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
5321 	c->Request.CDBLen = cmd->cmd_len;
5322 	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
5323 	switch (cmd->sc_data_direction) {
5324 	case DMA_TO_DEVICE:
5325 		c->Request.type_attr_dir =
5326 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
5327 		break;
5328 	case DMA_FROM_DEVICE:
5329 		c->Request.type_attr_dir =
5330 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
5331 		break;
5332 	case DMA_NONE:
5333 		c->Request.type_attr_dir =
5334 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
5335 		break;
5336 	case DMA_BIDIRECTIONAL:
5337 		/* This can happen if a buggy application does a scsi passthru
5338 		 * and sets both inlen and outlen to non-zero. ( see
5339 		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
5340 		 */
5341 
5342 		c->Request.type_attr_dir =
5343 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
5344 		/* This is technically wrong, and hpsa controllers should
5345 		 * reject it with CMD_INVALID, which is the most correct
5346 		 * response, but non-fibre backends appear to let it
5347 		 * slide by, and give the same results as if this field
5348 		 * were set correctly.  Either way is acceptable for
5349 		 * our purposes here.
5350 		 */
5351 
5352 		break;
5353 
5354 	default:
5355 		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
5356 			cmd->sc_data_direction);
5357 		BUG();
5358 		break;
5359 	}
5360 
5361 	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
5362 		hpsa_cmd_resolve_and_free(h, c);
5363 		return SCSI_MLQUEUE_HOST_BUSY;
5364 	}
5365 	enqueue_cmd_and_start_io(h, c);
5366 	/* the cmd will come back via the intr handler in complete_scsi_command() */
5367 	return 0;
5368 }
5369 
5370 static void hpsa_cmd_init(struct ctlr_info *h, int index,
5371 				struct CommandList *c)
5372 {
5373 	dma_addr_t cmd_dma_handle, err_dma_handle;
5374 
5375 	/* Zero out all of commandlist except the last field, refcount */
5376 	memset(c, 0, offsetof(struct CommandList, refcount));
5377 	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
5378 	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5379 	c->err_info = h->errinfo_pool + index;
5380 	memset(c->err_info, 0, sizeof(*c->err_info));
5381 	err_dma_handle = h->errinfo_pool_dhandle
5382 	    + index * sizeof(*c->err_info);
5383 	c->cmdindex = index;
5384 	c->busaddr = (u32) cmd_dma_handle;
5385 	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
5386 	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
5387 	c->h = h;
5388 	c->scsi_cmd = SCSI_CMD_IDLE;
5389 }
5390 
5391 static void hpsa_preinitialize_commands(struct ctlr_info *h)
5392 {
5393 	int i;
5394 
5395 	for (i = 0; i < h->nr_cmds; i++) {
5396 		struct CommandList *c = h->cmd_pool + i;
5397 
5398 		hpsa_cmd_init(h, i, c);
5399 		atomic_set(&c->refcount, 0);
5400 	}
5401 }
5402 
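/*
 * Reinitialize only the fields that a previous submission may have
 * modified; full initialization was done by hpsa_cmd_init() at init time.
 */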
5403 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
5404 				struct CommandList *c)
5405 {
5406 	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
5407 
5408 	BUG_ON(c->cmdindex != index);
5409 
5410 	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
5411 	memset(c->err_info, 0, sizeof(*c->err_info));
5412 	c->busaddr = (u32) cmd_dma_handle;
5413 }
5414 
5415 static int hpsa_ioaccel_submit(struct ctlr_info *h,
5416 		struct CommandList *c, struct scsi_cmnd *cmd,
5417 		unsigned char *scsi3addr)
5418 {
5419 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5420 	int rc = IO_ACCEL_INELIGIBLE;
5421 
5422 	if (!dev)
5423 		return SCSI_MLQUEUE_HOST_BUSY;
5424 
5425 	cmd->host_scribble = (unsigned char *) c;
5426 
5427 	if (dev->offload_enabled) {
5428 		hpsa_cmd_init(h, c->cmdindex, c);
5429 		c->cmd_type = CMD_SCSI;
5430 		c->scsi_cmd = cmd;
5431 		rc = hpsa_scsi_ioaccel_raid_map(h, c);
5432 		if (rc < 0)     /* scsi_dma_map failed. */
5433 			rc = SCSI_MLQUEUE_HOST_BUSY;
5434 	} else if (dev->hba_ioaccel_enabled) {
5435 		hpsa_cmd_init(h, c->cmdindex, c);
5436 		c->cmd_type = CMD_SCSI;
5437 		c->scsi_cmd = cmd;
5438 		rc = hpsa_scsi_ioaccel_direct_map(h, c);
5439 		if (rc < 0)     /* scsi_dma_map failed. */
5440 			rc = SCSI_MLQUEUE_HOST_BUSY;
5441 	}
5442 	return rc;
5443 }
5444 
5445 static void hpsa_command_resubmit_worker(struct work_struct *work)
5446 {
5447 	struct scsi_cmnd *cmd;
5448 	struct hpsa_scsi_dev_t *dev;
5449 	struct CommandList *c = container_of(work, struct CommandList, work);
5450 
5451 	cmd = c->scsi_cmd;
5452 	dev = cmd->device->hostdata;
5453 	if (!dev) {
5454 		cmd->result = DID_NO_CONNECT << 16;
5455 		return hpsa_cmd_free_and_done(c->h, c, cmd);
5456 	}
5457 	if (c->reset_pending)
5458 		return hpsa_cmd_resolve_and_free(c->h, c);
5459 	if (c->abort_pending)
5460 		return hpsa_cmd_abort_and_free(c->h, c, cmd);
5461 	if (c->cmd_type == CMD_IOACCEL2) {
5462 		struct ctlr_info *h = c->h;
5463 		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5464 		int rc;
5465 
5466 		if (c2->error_data.serv_response ==
5467 				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
5468 			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
5469 			if (rc == 0)
5470 				return;
5471 			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5472 				/*
5473 				 * If we get here, it means dma mapping failed.
5474 				 * Try again via scsi mid layer, which will
5475 				 * then get SCSI_MLQUEUE_HOST_BUSY.
5476 				 */
5477 				cmd->result = DID_IMM_RETRY << 16;
5478 				return hpsa_cmd_free_and_done(h, c, cmd);
5479 			}
5480 			/* else, fall thru and resubmit down CISS path */
5481 		}
5482 	}
5483 	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
5484 	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
5485 		/*
5486 		 * If we get here, it means dma mapping failed. Try
5487 		 * again via scsi mid layer, which will then get
5488 		 * SCSI_MLQUEUE_HOST_BUSY.
5489 		 *
5490 		 * hpsa_ciss_submit will have already freed c
5491 		 * if it encountered a dma mapping failure.
5492 		 */
5493 		cmd->result = DID_IMM_RETRY << 16;
5494 		cmd->scsi_done(cmd);
5495 	}
5496 }
5497 
5498 /* Runs without holding struct Scsi_Host->host_lock (lockless mode) */
5499 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
5500 {
5501 	struct ctlr_info *h;
5502 	struct hpsa_scsi_dev_t *dev;
5503 	unsigned char scsi3addr[8];
5504 	struct CommandList *c;
5505 	int rc = 0;
5506 
5507 	/* Get the ptr to our adapter structure out of cmd->host. */
5508 	h = sdev_to_hba(cmd->device);
5509 
5510 	BUG_ON(cmd->request->tag < 0);
5511 
5512 	dev = cmd->device->hostdata;
5513 	if (!dev) {
5514 		cmd->result = DID_NO_CONNECT << 16;
5515 		cmd->scsi_done(cmd);
5516 		return 0;
5517 	}
5518 
5519 	if (dev->removed) {
5520 		cmd->result = DID_NO_CONNECT << 16;
5521 		cmd->scsi_done(cmd);
5522 		return 0;
5523 	}
5524 
5525 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
5526 
5527 	if (unlikely(lockup_detected(h))) {
5528 		cmd->result = DID_NO_CONNECT << 16;
5529 		cmd->scsi_done(cmd);
5530 		return 0;
5531 	}
5532 	c = cmd_tagged_alloc(h, cmd);
5533 
5534 	/*
5535 	 * Call alternate submit routine for I/O accelerated commands.
5536 	 * Retries always go down the normal I/O path.
5537 	 */
5538 	if (likely(cmd->retries == 0 &&
5539 			!blk_rq_is_passthrough(cmd->request) &&
5540 			h->acciopath_status)) {
5541 		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
5542 		if (rc == 0)
5543 			return 0;
5544 		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
5545 			hpsa_cmd_resolve_and_free(h, c);
5546 			return SCSI_MLQUEUE_HOST_BUSY;
5547 		}
5548 	}
5549 	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
5550 }
5551 
5552 static void hpsa_scan_complete(struct ctlr_info *h)
5553 {
5554 	unsigned long flags;
5555 
5556 	spin_lock_irqsave(&h->scan_lock, flags);
5557 	h->scan_finished = 1;
5558 	wake_up(&h->scan_wait_queue);
5559 	spin_unlock_irqrestore(&h->scan_lock, flags);
5560 }
5561 
5562 static void hpsa_scan_start(struct Scsi_Host *sh)
5563 {
5564 	struct ctlr_info *h = shost_to_hba(sh);
5565 	unsigned long flags;
5566 
5567 	/*
5568 	 * Don't let rescans be initiated on a controller known to be locked
5569 	 * up.  If the controller locks up *during* a rescan, that thread is
5570 	 * probably hosed, but at least we can prevent new rescan threads from
5571 	 * piling up on a locked up controller.
5572 	 */
5573 	if (unlikely(lockup_detected(h)))
5574 		return hpsa_scan_complete(h);
5575 
5576 	/*
5577 	 * If a scan is already waiting to run, no need to add another
5578 	 */
5579 	spin_lock_irqsave(&h->scan_lock, flags);
5580 	if (h->scan_waiting) {
5581 		spin_unlock_irqrestore(&h->scan_lock, flags);
5582 		return;
5583 	}
5584 
5585 	spin_unlock_irqrestore(&h->scan_lock, flags);
5586 
5587 	/* wait until any scan already in progress is finished. */
5588 	while (1) {
5589 		spin_lock_irqsave(&h->scan_lock, flags);
5590 		if (h->scan_finished)
5591 			break;
5592 		h->scan_waiting = 1;
5593 		spin_unlock_irqrestore(&h->scan_lock, flags);
5594 		wait_event(h->scan_wait_queue, h->scan_finished);
5595 		/* Note: We don't need to worry about a race between this
5596 		 * thread and driver unload because the midlayer will
5597 		 * have incremented the reference count, so unload won't
5598 		 * happen if we're in here.
5599 		 */
5600 	}
5601 	h->scan_finished = 0; /* mark scan as in progress */
5602 	h->scan_waiting = 0;
5603 	spin_unlock_irqrestore(&h->scan_lock, flags);
5604 
5605 	if (unlikely(lockup_detected(h)))
5606 		return hpsa_scan_complete(h);
5607 
5608 	/*
5609 	 * Do the scan after a reset completion
5610 	 */
5611 	if (h->reset_in_progress) {
5612 		h->drv_req_rescan = 1;
5613 		return;
5614 	}
5615 
5616 	hpsa_update_scsi_devices(h);
5617 
5618 	hpsa_scan_complete(h);
5619 }
5620 
5621 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5622 {
5623 	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5624 
5625 	if (!logical_drive)
5626 		return -ENODEV;
5627 
5628 	if (qdepth < 1)
5629 		qdepth = 1;
5630 	else if (qdepth > logical_drive->queue_depth)
5631 		qdepth = logical_drive->queue_depth;
5632 
5633 	return scsi_change_queue_depth(sdev, qdepth);
5634 }
5635 
5636 static int hpsa_scan_finished(struct Scsi_Host *sh,
5637 	unsigned long elapsed_time)
5638 {
5639 	struct ctlr_info *h = shost_to_hba(sh);
5640 	unsigned long flags;
5641 	int finished;
5642 
5643 	spin_lock_irqsave(&h->scan_lock, flags);
5644 	finished = h->scan_finished;
5645 	spin_unlock_irqrestore(&h->scan_lock, flags);
5646 	return finished;
5647 }
5648 
5649 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5650 {
5651 	struct Scsi_Host *sh;
5652 
5653 	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5654 	if (sh == NULL) {
5655 		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5656 		return -ENOMEM;
5657 	}
5658 
5659 	sh->io_port = 0;
5660 	sh->n_io_port = 0;
5661 	sh->this_id = -1;
5662 	sh->max_channel = 3;
5663 	sh->max_cmd_len = MAX_COMMAND_SIZE;
5664 	sh->max_lun = HPSA_MAX_LUN;
5665 	sh->max_id = HPSA_MAX_LUN;
5666 	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5667 	sh->cmd_per_lun = sh->can_queue;
5668 	sh->sg_tablesize = h->maxsgentries;
5669 	sh->transportt = hpsa_sas_transport_template;
5670 	sh->hostdata[0] = (unsigned long) h;
5671 	sh->irq = pci_irq_vector(h->pdev, 0);
5672 	sh->unique_id = sh->irq;
5673 
5674 	h->scsi_host = sh;
5675 	return 0;
5676 }
5677 
5678 static int hpsa_scsi_add_host(struct ctlr_info *h)
5679 {
5680 	int rv;
5681 
5682 	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5683 	if (rv) {
5684 		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5685 		return rv;
5686 	}
5687 	scsi_scan_host(h->scsi_host);
5688 	return 0;
5689 }
5690 
5691 /*
5692  * The block layer has already gone to the trouble of picking out a unique,
5693  * small-integer tag for this request.  We use an offset from that value as
5694  * an index to select our command block.  (The offset allows us to reserve the
5695  * low-numbered entries for our own uses.)
5696  */
5697 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5698 {
5699 	int idx = scmd->request->tag;
5700 
5701 	if (idx < 0)
5702 		return idx;
5703 
5704 	/* Offset to leave space for internal cmds. */
5705 	return idx + HPSA_NRESERVED_CMDS;
5706 }
5707 
5708 /*
5709  * Send a TEST_UNIT_READY command to the specified LUN using the specified
5710  * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5711  */
5712 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5713 				struct CommandList *c, unsigned char lunaddr[],
5714 				int reply_queue)
5715 {
5716 	int rc;
5717 
5718 	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5719 	(void) fill_cmd(c, TEST_UNIT_READY, h,
5720 			NULL, 0, 0, lunaddr, TYPE_CMD);
5721 	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5722 	if (rc)
5723 		return rc;
5724 	/* no unmap needed here because no data xfer. */
5725 
5726 	/* Check if the unit is already ready. */
5727 	if (c->err_info->CommandStatus == CMD_SUCCESS)
5728 		return 0;
5729 
5730 	/*
5731 	 * The first command sent after reset will receive "unit attention" to
5732 	 * indicate that the LUN has been reset...this is actually what we're
5733 	 * looking for (but, success is good too).
5734 	 */
5735 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5736 		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5737 			(c->err_info->SenseInfo[2] == NO_SENSE ||
5738 			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5739 		return 0;
5740 
5741 	return 1;
5742 }
5743 
5744 /*
5745  * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5746  * returns zero when the unit is ready, and non-zero when giving up.
5747  */
5748 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5749 				struct CommandList *c,
5750 				unsigned char lunaddr[], int reply_queue)
5751 {
5752 	int rc;
5753 	int count = 0;
5754 	int waittime = 1; /* seconds */
5755 
5756 	/* Send test unit ready until device ready, or give up. */
5757 	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5758 
5759 		/*
5760 		 * Wait for a bit.  do this first, because if we send
5761 		 * the TUR right away, the reset will just abort it.
5762 		 */
5763 		msleep(1000 * waittime);
5764 
5765 		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5766 		if (!rc)
5767 			break;
5768 
5769 		/* Increase wait time with each try, up to a point. */
5770 		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5771 			waittime *= 2;
5772 
5773 		dev_warn(&h->pdev->dev,
5774 			 "waiting %d secs for device to become ready.\n",
5775 			 waittime);
5776 	}
5777 
5778 	return rc;
5779 }
5780 
5781 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5782 					   unsigned char lunaddr[],
5783 					   int reply_queue)
5784 {
5785 	int first_queue;
5786 	int last_queue;
5787 	int rq;
5788 	int rc = 0;
5789 	struct CommandList *c;
5790 
5791 	c = cmd_alloc(h);
5792 
5793 	/*
5794 	 * If no specific reply queue was requested, then send the TUR
5795 	 * repeatedly, requesting a reply on each reply queue; otherwise execute
5796 	 * the loop exactly once using only the specified queue.
5797 	 */
5798 	if (reply_queue == DEFAULT_REPLY_QUEUE) {
5799 		first_queue = 0;
5800 		last_queue = h->nreply_queues - 1;
5801 	} else {
5802 		first_queue = reply_queue;
5803 		last_queue = reply_queue;
5804 	}
5805 
5806 	for (rq = first_queue; rq <= last_queue; rq++) {
5807 		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5808 		if (rc)
5809 			break;
5810 	}
5811 
5812 	if (rc)
5813 		dev_warn(&h->pdev->dev, "giving up on device.\n");
5814 	else
5815 		dev_warn(&h->pdev->dev, "device is ready.\n");
5816 
5817 	cmd_free(h, c);
5818 	return rc;
5819 }
5820 
5821 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5822  * complaining.  Doing a host- or bus-reset can't do anything good here.
5823  */
5824 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5825 {
5826 	int rc;
5827 	struct ctlr_info *h;
5828 	struct hpsa_scsi_dev_t *dev;
5829 	u8 reset_type;
5830 	char msg[48];
5831 
5832 	/* find the controller to which the failed command was sent */
5833 	h = sdev_to_hba(scsicmd->device);
5834 	if (h == NULL) /* paranoia */
5835 		return FAILED;
5836 
5837 	if (lockup_detected(h))
5838 		return FAILED;
5839 
5840 	dev = scsicmd->device->hostdata;
5841 	if (!dev) {
5842 		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5843 		return FAILED;
5844 	}
5845 
5846 	/* if controller locked up, we can guarantee command won't complete */
5847 	if (lockup_detected(h)) {
5848 		snprintf(msg, sizeof(msg),
5849 			 "cmd %d RESET FAILED, lockup detected",
5850 			 hpsa_get_cmd_index(scsicmd));
5851 		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5852 		return FAILED;
5853 	}
5854 
5855 	/* this reset request might be the result of a lockup; check */
5856 	if (detect_controller_lockup(h)) {
5857 		snprintf(msg, sizeof(msg),
5858 			 "cmd %d RESET FAILED, new lockup detected",
5859 			 hpsa_get_cmd_index(scsicmd));
5860 		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5861 		return FAILED;
5862 	}
5863 
5864 	/* Do not attempt on controller */
5865 	if (is_hba_lunid(dev->scsi3addr))
5866 		return SUCCESS;
5867 
5868 	if (is_logical_dev_addr_mode(dev->scsi3addr))
5869 		reset_type = HPSA_DEVICE_RESET_MSG;
5870 	else
5871 		reset_type = HPSA_PHYS_TARGET_RESET;
5872 
5873 	sprintf(msg, "resetting %s",
5874 		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5875 	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5876 
5877 	h->reset_in_progress = 1;
5878 
5879 	/* send a reset to the SCSI LUN which the command was sent to */
5880 	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5881 			   DEFAULT_REPLY_QUEUE);
5882 	sprintf(msg, "reset %s %s",
5883 		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5884 		rc == 0 ? "completed successfully" : "failed");
5885 	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5886 	h->reset_in_progress = 0;
5887 	return rc == 0 ? SUCCESS : FAILED;
5888 }
5889 
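/* Reverse the byte order within each 32-bit half of the 8-byte tag. */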
5890 static void swizzle_abort_tag(u8 *tag)
5891 {
5892 	u8 original_tag[8];
5893 
5894 	memcpy(original_tag, tag, 8);
5895 	tag[0] = original_tag[3];
5896 	tag[1] = original_tag[2];
5897 	tag[2] = original_tag[1];
5898 	tag[3] = original_tag[0];
5899 	tag[4] = original_tag[7];
5900 	tag[5] = original_tag[6];
5901 	tag[6] = original_tag[5];
5902 	tag[7] = original_tag[4];
5903 }
5904 
5905 static void hpsa_get_tag(struct ctlr_info *h,
5906 	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5907 {
5908 	u64 tag;
5909 	if (c->cmd_type == CMD_IOACCEL1) {
5910 		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5911 			&h->ioaccel_cmd_pool[c->cmdindex];
5912 		tag = le64_to_cpu(cm1->tag);
5913 		*tagupper = cpu_to_le32(tag >> 32);
5914 		*taglower = cpu_to_le32(tag);
5915 		return;
5916 	}
5917 	if (c->cmd_type == CMD_IOACCEL2) {
5918 		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5919 			&h->ioaccel2_cmd_pool[c->cmdindex];
5920 		/* upper tag not used in ioaccel2 mode */
5921 		memset(tagupper, 0, sizeof(*tagupper));
5922 		*taglower = cm2->Tag;
5923 		return;
5924 	}
5925 	tag = le64_to_cpu(c->Header.tag);
5926 	*tagupper = cpu_to_le32(tag >> 32);
5927 	*taglower = cpu_to_le32(tag);
5928 }
5929 
5930 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5931 	struct CommandList *abort, int reply_queue)
5932 {
5933 	int rc = IO_OK;
5934 	struct CommandList *c;
5935 	struct ErrorInfo *ei;
5936 	__le32 tagupper, taglower;
5937 
5938 	c = cmd_alloc(h);
5939 
5940 	/* fill_cmd can't fail here, no buffer to map */
5941 	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5942 		0, 0, scsi3addr, TYPE_MSG);
5943 	if (h->needs_abort_tags_swizzled)
5944 		swizzle_abort_tag(&c->Request.CDB[4]);
5945 	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
5946 	hpsa_get_tag(h, abort, &taglower, &tagupper);
5947 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5948 		__func__, tagupper, taglower);
5949 	/* no unmap needed here because no data xfer. */
5950 
5951 	ei = c->err_info;
5952 	switch (ei->CommandStatus) {
5953 	case CMD_SUCCESS:
5954 		break;
5955 	case CMD_TMF_STATUS:
5956 		rc = hpsa_evaluate_tmf_status(h, c);
5957 		break;
5958 	case CMD_UNABORTABLE: /* Very common, don't make noise. */
5959 		rc = -1;
5960 		break;
5961 	default:
5962 		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5963 			__func__, tagupper, taglower);
5964 		hpsa_scsi_interpret_error(h, c);
5965 		rc = -1;
5966 		break;
5967 	}
5968 	cmd_free(h, c);
5969 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5970 		__func__, tagupper, taglower);
5971 	return rc;
5972 }
5973 
5974 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5975 	struct CommandList *command_to_abort, int reply_queue)
5976 {
5977 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5978 	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5979 	struct io_accel2_cmd *c2a =
5980 		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5981 	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5982 	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5983 
5984 	if (!dev)
5985 		return;
5986 
5987 	/*
5988 	 * We're overlaying struct hpsa_tmf_struct on top of something which
5989 	 * was allocated as a struct io_accel2_cmd, so we better be sure it
5990 	 * actually fits, and doesn't overrun the error info space.
5991 	 */
5992 	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5993 			sizeof(struct io_accel2_cmd));
5994 	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5995 			offsetof(struct hpsa_tmf_struct, error_len) +
5996 				sizeof(ac->error_len));
5997 
5998 	c->cmd_type = IOACCEL2_TMF;
5999 	c->scsi_cmd = SCSI_CMD_BUSY;
6000 
6001 	/* Adjust the DMA address to point to the accelerated command buffer */
6002 	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
6003 				(c->cmdindex * sizeof(struct io_accel2_cmd));
6004 	BUG_ON(c->busaddr & 0x0000007F);
6005 
6006 	memset(ac, 0, sizeof(*c2)); /* sizeof(*c2): clear the entire overlaid io_accel2_cmd */
6007 	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
6008 	ac->reply_queue = reply_queue;
6009 	ac->tmf = IOACCEL2_TMF_ABORT;
6010 	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
6011 	memset(ac->lun_id, 0, sizeof(ac->lun_id));
6012 	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
6013 	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
6014 	ac->error_ptr = cpu_to_le64(c->busaddr +
6015 			offsetof(struct io_accel2_cmd, error_data));
6016 	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
6017 }
6018 
6019 /* ioaccel2 path firmware cannot handle abort task requests.
6020  * Change abort requests to physical target reset, and send to the
6021  * address of the physical disk used for the ioaccel 2 command.
6022  * Return 0 on success (IO_OK)
6023  *	 -1 on failure
6024  */
6026 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
6027 	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
6028 {
6029 	int rc = IO_OK;
6030 	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
6031 	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
6032 	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
6033 	unsigned char *psa = &phys_scsi3addr[0];
6034 
6035 	/* Get a pointer to the hpsa logical device. */
6036 	scmd = abort->scsi_cmd;
6037 	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
6038 	if (dev == NULL) {
6039 		dev_warn(&h->pdev->dev,
6040 			"Cannot abort: no device pointer for command.\n");
6041 		return -1; /* not abortable */
6042 	}
6043 
6044 	if (h->raid_offload_debug > 0)
6045 		dev_info(&h->pdev->dev,
6046 			"scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
6047 			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
6048 			"Reset as abort", scsi3addr);
6049 
6050 	if (!dev->offload_enabled) {
6051 		dev_warn(&h->pdev->dev,
6052 			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
6053 		return -1; /* not abortable */
6054 	}
6055 
6056 	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
6057 	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
6058 		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
6059 		return -1; /* not abortable */
6060 	}
6061 
6062 	/* send the reset */
6063 	if (h->raid_offload_debug > 0)
6064 		dev_info(&h->pdev->dev,
6065 			"Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
6066 			psa);
6067 	rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
6068 	if (rc != 0) {
6069 		dev_warn(&h->pdev->dev,
6070 			"Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
6071 			psa);
6072 		return rc; /* failed to reset */
6073 	}
6074 
6075 	/* wait for device to recover */
6076 	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
6077 		dev_warn(&h->pdev->dev,
6078 			"Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
6079 			psa);
6080 		return -1;  /* failed to recover */
6081 	}
6082 
6083 	/* device recovered */
6084 	dev_info(&h->pdev->dev,
6085 		"Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
6086 		psa);
6087 
6088 	return rc; /* success */
6089 }
6090 
6091 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
6092 	struct CommandList *abort, int reply_queue)
6093 {
6094 	int rc = IO_OK;
6095 	struct CommandList *c;
6096 	__le32 taglower, tagupper;
6097 	struct hpsa_scsi_dev_t *dev;
6098 	struct io_accel2_cmd *c2;
6099 
6100 	dev = abort->scsi_cmd->device->hostdata;
6101 	if (!dev)
6102 		return -1;
6103 
6104 	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
6105 		return -1;
6106 
6107 	c = cmd_alloc(h);
6108 	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
6109 	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
6110 	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
6111 	hpsa_get_tag(h, abort, &taglower, &tagupper);
6112 	dev_dbg(&h->pdev->dev,
6113 		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
6114 		__func__, tagupper, taglower);
6115 	/* no unmap needed here because no data xfer. */
6116 
6117 	dev_dbg(&h->pdev->dev,
6118 		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
6119 		__func__, tagupper, taglower, c2->error_data.serv_response);
6120 	switch (c2->error_data.serv_response) {
6121 	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
6122 	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
6123 		rc = 0;
6124 		break;
6125 	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
6126 	case IOACCEL2_SERV_RESPONSE_FAILURE:
6127 	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
6128 		rc = -1;
6129 		break;
6130 	default:
6131 		dev_warn(&h->pdev->dev,
6132 			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
6133 			__func__, tagupper, taglower,
6134 			c2->error_data.serv_response);
6135 		rc = -1;
6136 	}
6137 	cmd_free(h, c);
6138 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
6139 		tagupper, taglower);
6140 	return rc;
6141 }
6142 
6143 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
6144 	struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
6145 {
6146 	/*
6147 	 * I/O accelerator mode 2 (ioaccel2) commands should be aborted via
6148 	 * the accelerated path, since the RAID path is unaware of these
6149 	 * commands, but not all underlying firmware can handle abort TMF.
6150 	 * Change abort to physical device reset when abort TMF is unsupported.
6151 	 */
6152 	if (abort->cmd_type == CMD_IOACCEL2) {
6153 		if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
6154 			dev->physical_device)
6155 			return hpsa_send_abort_ioaccel2(h, abort,
6156 						reply_queue);
6157 		else
6158 			return hpsa_send_reset_as_abort_ioaccel2(h,
6159 							dev->scsi3addr,
6160 							abort, reply_queue);
6161 	}
6162 	return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
6163 }
6164 
6165 /* Find out which reply queue a command was meant to return on */
6166 static int hpsa_extract_reply_queue(struct ctlr_info *h,
6167 					struct CommandList *c)
6168 {
6169 	if (c->cmd_type == CMD_IOACCEL2)
6170 		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
6171 	return c->Header.ReplyQueue;
6172 }
6173 
6174 /*
6175  * Limit concurrency of abort commands to prevent
6176  * over-subscription of commands
6177  */
6178 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
6179 {
6180 #define ABORT_CMD_WAIT_MSECS 5000
6181 	return !wait_event_timeout(h->abort_cmd_wait_queue,
6182 			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
6183 			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
6184 }
6185 
6186 /* Send an abort for the specified command.
6187  *	If the device and controller support it,
6188  *		send a task abort request.
6189  */
6190 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
6191 {
6193 	int rc;
6194 	struct ctlr_info *h;
6195 	struct hpsa_scsi_dev_t *dev;
6196 	struct CommandList *abort; /* pointer to command to be aborted */
6197 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
6198 	char msg[256];		/* For debug messaging. */
6199 	int ml = 0;
6200 	__le32 tagupper, taglower;
6201 	int refcount, reply_queue;
6202 
6203 	if (sc == NULL)
6204 		return FAILED;
6205 
6206 	if (sc->device == NULL)
6207 		return FAILED;
6208 
6209 	/* Find the controller of the command to be aborted */
6210 	h = sdev_to_hba(sc->device);
6211 	if (h == NULL)
6212 		return FAILED;
6213 
6214 	/* Find the device of the command to be aborted */
6215 	dev = sc->device->hostdata;
6216 	if (!dev) {
6217 		dev_err(&h->pdev->dev, "%s FAILED, device lookup failed.\n",
6218 				__func__);
6219 		return FAILED;
6220 	}
6221 
6222 	/* If controller locked up, we can guarantee command won't complete */
6223 	if (lockup_detected(h)) {
6224 		hpsa_show_dev_msg(KERN_WARNING, h, dev,
6225 					"ABORT FAILED, lockup detected");
6226 		return FAILED;
6227 	}
6228 
6229 	/* This is a good time to check if controller lockup has occurred */
6230 	if (detect_controller_lockup(h)) {
6231 		hpsa_show_dev_msg(KERN_WARNING, h, dev,
6232 					"ABORT FAILED, new lockup detected");
6233 		return FAILED;
6234 	}
6235 
6236 	/* Check that controller supports some kind of task abort */
6237 	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
6238 		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6239 		return FAILED;
6240 
6241 	memset(msg, 0, sizeof(msg));
6242 	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
6243 		h->scsi_host->host_no, sc->device->channel,
6244 		sc->device->id, sc->device->lun,
6245 		"Aborting command", sc);
6246 
6247 	/* Get SCSI command to be aborted */
6248 	abort = (struct CommandList *) sc->host_scribble;
6249 	if (abort == NULL) {
6250 		/* This can happen if the command already completed. */
6251 		return SUCCESS;
6252 	}
6253 	refcount = atomic_inc_return(&abort->refcount);
6254 	if (refcount == 1) { /* Command is done already. */
6255 		cmd_free(h, abort);
6256 		return SUCCESS;
6257 	}
6258 
6259 	/* Don't bother trying the abort if we know it won't work. */
6260 	if (abort->cmd_type != CMD_IOACCEL2 &&
6261 		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
6262 		cmd_free(h, abort);
6263 		return FAILED;
6264 	}
6265 
6266 	/*
6267 	 * Check that we're aborting the right command.
6268 	 * It's possible the CommandList already completed and got re-used.
6269 	 */
6270 	if (abort->scsi_cmd != sc) {
6271 		cmd_free(h, abort);
6272 		return SUCCESS;
6273 	}
6274 
6275 	abort->abort_pending = true;
6276 	hpsa_get_tag(h, abort, &taglower, &tagupper);
6277 	reply_queue = hpsa_extract_reply_queue(h, abort);
6278 	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
6279 	as  = abort->scsi_cmd;
6280 	if (as != NULL)
6281 		ml += sprintf(msg+ml,
6282 			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
6283 			as->cmd_len, as->cmnd[0], as->cmnd[1],
6284 			as->serial_number);
6285 	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
6286 	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
6287 
6288 	/*
6289 	 * Command is in flight, or possibly already completed
6290 	 * by the firmware (but not to the scsi mid layer) but we can't
6291 	 * distinguish which.  Send the abort down.
6292 	 */
6293 	if (wait_for_available_abort_cmd(h)) {
6294 		dev_warn(&h->pdev->dev,
6295 			"%s FAILED, timeout waiting for an abort command to become available.\n",
6296 			msg);
6297 		cmd_free(h, abort);
6298 		return FAILED;
6299 	}
6300 	rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
6301 	atomic_inc(&h->abort_cmds_available);
6302 	wake_up_all(&h->abort_cmd_wait_queue);
6303 	if (rc != 0) {
6304 		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
6305 		hpsa_show_dev_msg(KERN_WARNING, h, dev,
6306 				"FAILED to abort command");
6307 		cmd_free(h, abort);
6308 		return FAILED;
6309 	}
6310 	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
6311 	wait_event(h->event_sync_wait_queue,
6312 		   abort->scsi_cmd != sc || lockup_detected(h));
6313 	cmd_free(h, abort);
6314 	return !lockup_detected(h) ? SUCCESS : FAILED;
6315 }
6316 
6317 /*
6318  * For operations with an associated SCSI command, a command block is allocated
6319  * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
6320  * block request tag as an index into a table of entries.  cmd_tagged_free() is
6321  * the complement, although cmd_free() may be called instead.
6322  */
6323 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
6324 					    struct scsi_cmnd *scmd)
6325 {
6326 	int idx = hpsa_get_cmd_index(scmd);
6327 	struct CommandList *c = h->cmd_pool + idx;
6328 
6329 	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
6330 		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
6331 			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
6332 		/* The index value comes from the block layer, so if it's out of
6333 		 * bounds, it's probably not our bug.
6334 		 */
6335 		BUG();
6336 	}
6337 
6338 	atomic_inc(&c->refcount);
6339 	if (unlikely(!hpsa_is_cmd_idle(c))) {
6340 		/*
6341 		 * We expect that the SCSI layer will hand us a unique tag
6342 		 * value.  Thus, there should never be a collision here between
6343 		 * two requests...because if the selected command isn't idle
6344 		 * then someone is going to be very disappointed.
6345 		 */
6346 		dev_err(&h->pdev->dev,
6347 			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
6348 			idx);
6349 		if (c->scsi_cmd != NULL)
6350 			scsi_print_command(c->scsi_cmd);
6351 		scsi_print_command(scmd);
6352 	}
6353 
6354 	hpsa_cmd_partial_init(h, idx, c);
6355 	return c;
6356 }
6357 
6358 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
6359 {
6360 	/*
6361 	 * Release our reference to the block.  We don't need to do anything
6362 	 * else to free it, because it is accessed by index.  (There's no point
6363 	 * in checking the result of the decrement, since we cannot guarantee
6364 	 * that there isn't a concurrent abort which is also accessing it.)
6365 	 */
6366 	(void)atomic_dec(&c->refcount);
6367 }
6368 
6369 /*
6370  * For operations that cannot sleep, a command block is allocated at init,
6371  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
6372  * which ones are free or in use.  Lock must be held when calling this.
6373  * cmd_free() is the complement.
6374  * This function never gives up and returns NULL.  If it hangs,
6375  * another thread must call cmd_free() to free some tags.
6376  */
6378 static struct CommandList *cmd_alloc(struct ctlr_info *h)
6379 {
6380 	struct CommandList *c;
6381 	int refcount, i;
6382 	int offset = 0;
6383 
6384 	/*
6385 	 * There is some *extremely* small but non-zero chance that
6386 	 * multiple threads could get in here, and one thread could
6387 	 * be scanning through the list of bits looking for a free
6388 	 * one, but the free ones are always behind him, and other
6389 	 * threads sneak in behind him and eat them before he can
6390 	 * get to them, so that while there is always a free one, a
6391 	 * very unlucky thread might be starved anyway, never able to
6392 	 * beat the other threads.  In reality, this happens so
6393 	 * infrequently as to be indistinguishable from never.
6394 	 *
6395 	 * Note that we start allocating commands before the SCSI host structure
6396 	 * is initialized.  Since the search starts at bit zero, this
6397 	 * all works, since we have at least one command structure available;
6398 	 * however, it means that the structures with the low indexes have to be
6399 	 * reserved for driver-initiated requests, while requests from the block
6400 	 * layer will use the higher indexes.
6401 	 */
6402 
6403 	for (;;) {
6404 		i = find_next_zero_bit(h->cmd_pool_bits,
6405 					HPSA_NRESERVED_CMDS,
6406 					offset);
6407 		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
6408 			offset = 0;
6409 			continue;
6410 		}
6411 		c = h->cmd_pool + i;
6412 		refcount = atomic_inc_return(&c->refcount);
6413 		if (unlikely(refcount > 1)) {
6414 			cmd_free(h, c); /* already in use */
6415 			offset = (i + 1) % HPSA_NRESERVED_CMDS;
6416 			continue;
6417 		}
6418 		set_bit(i & (BITS_PER_LONG - 1),
6419 			h->cmd_pool_bits + (i / BITS_PER_LONG));
6420 		break; /* it's ours now. */
6421 	}
6422 	hpsa_cmd_partial_init(h, i, c);
6423 	return c;
6424 }
6425 
6426 /*
6427  * This is the complementary operation to cmd_alloc().  Note, however, in some
6428  * corner cases it may also be used to free blocks allocated by
6429  * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
6430  * the clear-bit is harmless.
6431  */
6432 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
6433 {
6434 	if (atomic_dec_and_test(&c->refcount)) {
6435 		int i;
6436 
6437 		i = c - h->cmd_pool;
6438 		clear_bit(i & (BITS_PER_LONG - 1),
6439 			  h->cmd_pool_bits + (i / BITS_PER_LONG));
6440 	}
6441 }
6442 
6443 #ifdef CONFIG_COMPAT
6444 
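/* Repackage a 32-bit passthru ioctl into the native layout and forward it. */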
6445 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
6446 	void __user *arg)
6447 {
6448 	IOCTL32_Command_struct __user *arg32 =
6449 	    (IOCTL32_Command_struct __user *) arg;
6450 	IOCTL_Command_struct arg64;
6451 	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
6452 	int err;
6453 	u32 cp;
6454 
6455 	memset(&arg64, 0, sizeof(arg64));
6456 	err = 0;
6457 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6458 			   sizeof(arg64.LUN_info));
6459 	err |= copy_from_user(&arg64.Request, &arg32->Request,
6460 			   sizeof(arg64.Request));
6461 	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6462 			   sizeof(arg64.error_info));
6463 	err |= get_user(arg64.buf_size, &arg32->buf_size);
6464 	err |= get_user(cp, &arg32->buf);
6465 	arg64.buf = compat_ptr(cp);
6466 	err |= copy_to_user(p, &arg64, sizeof(arg64));
6467 
6468 	if (err)
6469 		return -EFAULT;
6470 
6471 	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
6472 	if (err)
6473 		return err;
6474 	err |= copy_in_user(&arg32->error_info, &p->error_info,
6475 			 sizeof(arg32->error_info));
6476 	if (err)
6477 		return -EFAULT;
6478 	return err;
6479 }
6480 
6481 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
6482 	int cmd, void __user *arg)
6483 {
6484 	BIG_IOCTL32_Command_struct __user *arg32 =
6485 	    (BIG_IOCTL32_Command_struct __user *) arg;
6486 	BIG_IOCTL_Command_struct arg64;
6487 	BIG_IOCTL_Command_struct __user *p =
6488 	    compat_alloc_user_space(sizeof(arg64));
6489 	int err;
6490 	u32 cp;
6491 
6492 	memset(&arg64, 0, sizeof(arg64));
6493 	err = 0;
6494 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
6495 			   sizeof(arg64.LUN_info));
6496 	err |= copy_from_user(&arg64.Request, &arg32->Request,
6497 			   sizeof(arg64.Request));
6498 	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
6499 			   sizeof(arg64.error_info));
6500 	err |= get_user(arg64.buf_size, &arg32->buf_size);
6501 	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
6502 	err |= get_user(cp, &arg32->buf);
6503 	arg64.buf = compat_ptr(cp);
6504 	err |= copy_to_user(p, &arg64, sizeof(arg64));
6505 
6506 	if (err)
6507 		return -EFAULT;
6508 
6509 	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
6510 	if (err)
6511 		return err;
6512 	err |= copy_in_user(&arg32->error_info, &p->error_info,
6513 			 sizeof(arg32->error_info));
6514 	if (err)
6515 		return -EFAULT;
6516 	return err;
6517 }
6518 
6519 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6520 {
6521 	switch (cmd) {
6522 	case CCISS_GETPCIINFO:
6523 	case CCISS_GETINTINFO:
6524 	case CCISS_SETINTINFO:
6525 	case CCISS_GETNODENAME:
6526 	case CCISS_SETNODENAME:
6527 	case CCISS_GETHEARTBEAT:
6528 	case CCISS_GETBUSTYPES:
6529 	case CCISS_GETFIRMVER:
6530 	case CCISS_GETDRIVVER:
6531 	case CCISS_REVALIDVOLS:
6532 	case CCISS_DEREGDISK:
6533 	case CCISS_REGNEWDISK:
6534 	case CCISS_REGNEWD:
6535 	case CCISS_RESCANDISK:
6536 	case CCISS_GETLUNINFO:
6537 		return hpsa_ioctl(dev, cmd, arg);
6538 
6539 	case CCISS_PASSTHRU32:
6540 		return hpsa_ioctl32_passthru(dev, cmd, arg);
6541 	case CCISS_BIG_PASSTHRU32:
6542 		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
6543 
6544 	default:
6545 		return -ENOIOCTLCMD;
6546 	}
6547 }
6548 #endif
6549 
6550 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
6551 {
6552 	struct hpsa_pci_info pciinfo;
6553 
6554 	if (!argp)
6555 		return -EINVAL;
6556 	pciinfo.domain = pci_domain_nr(h->pdev->bus);
6557 	pciinfo.bus = h->pdev->bus->number;
6558 	pciinfo.dev_fn = h->pdev->devfn;
6559 	pciinfo.board_id = h->board_id;
6560 	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
6561 		return -EFAULT;
6562 	return 0;
6563 }
6564 
6565 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
6566 {
6567 	DriverVer_type DriverVer;
6568 	unsigned char vmaj, vmin, vsubmin;
6569 	int rc;
6570 
6571 	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
6572 		&vmaj, &vmin, &vsubmin);
6573 	if (rc != 3) {
6574 		dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.\n",
6575 			HPSA_DRIVER_VERSION);
6576 		vmaj = 0;
6577 		vmin = 0;
6578 		vsubmin = 0;
6579 	}
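	/* pack the version as three bytes: major, minor, subminor */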
6580 	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6581 	if (!argp)
6582 		return -EINVAL;
6583 	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6584 		return -EFAULT;
6585 	return 0;
6586 }
6587 
6588 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6589 {
6590 	IOCTL_Command_struct iocommand;
6591 	struct CommandList *c;
6592 	char *buff = NULL;
6593 	u64 temp64;
6594 	int rc = 0;
6595 
6596 	if (!argp)
6597 		return -EINVAL;
6598 	if (!capable(CAP_SYS_RAWIO))
6599 		return -EPERM;
6600 	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6601 		return -EFAULT;
6602 	if ((iocommand.buf_size < 1) &&
6603 	    (iocommand.Request.Type.Direction != XFER_NONE)) {
6604 		return -EINVAL;
6605 	}
6606 	if (iocommand.buf_size > 0) {
6607 		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6608 		if (buff == NULL)
6609 			return -ENOMEM;
6610 		if (iocommand.Request.Type.Direction & XFER_WRITE) {
6611 			/* Copy the data into the buffer we created */
6612 			if (copy_from_user(buff, iocommand.buf,
6613 				iocommand.buf_size)) {
6614 				rc = -EFAULT;
6615 				goto out_kfree;
6616 			}
6617 		} else {
6618 			memset(buff, 0, iocommand.buf_size);
6619 		}
6620 	}
6621 	c = cmd_alloc(h);
6622 
6623 	/* Fill in the command type */
6624 	c->cmd_type = CMD_IOCTL_PEND;
6625 	c->scsi_cmd = SCSI_CMD_BUSY;
6626 	/* Fill in Command Header */
6627 	c->Header.ReplyQueue = 0; /* unused in simple mode */
6628 	if (iocommand.buf_size > 0) {	/* buffer to fill */
6629 		c->Header.SGList = 1;
6630 		c->Header.SGTotal = cpu_to_le16(1);
6631 	} else	{ /* no buffers to fill */
6632 		c->Header.SGList = 0;
6633 		c->Header.SGTotal = cpu_to_le16(0);
6634 	}
6635 	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6636 
6637 	/* Fill in Request block */
6638 	memcpy(&c->Request, &iocommand.Request,
6639 		sizeof(c->Request));
6640 
6641 	/* Fill in the scatter gather information */
6642 	if (iocommand.buf_size > 0) {
6643 		temp64 = pci_map_single(h->pdev, buff,
6644 			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6645 		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6646 			c->SG[0].Addr = cpu_to_le64(0);
6647 			c->SG[0].Len = cpu_to_le32(0);
6648 			rc = -ENOMEM;
6649 			goto out;
6650 		}
6651 		c->SG[0].Addr = cpu_to_le64(temp64);
6652 		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6653 		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6654 	}
6655 	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6656 					NO_TIMEOUT);
6657 	if (iocommand.buf_size > 0)
6658 		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6659 	check_ioctl_unit_attention(h, c);
6660 	if (rc) {
6661 		rc = -EIO;
6662 		goto out;
6663 	}
6664 
6665 	/* Copy the error information out */
6666 	memcpy(&iocommand.error_info, c->err_info,
6667 		sizeof(iocommand.error_info));
6668 	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6669 		rc = -EFAULT;
6670 		goto out;
6671 	}
6672 	if ((iocommand.Request.Type.Direction & XFER_READ) &&
6673 		iocommand.buf_size > 0) {
6674 		/* Copy the data out of the buffer we created */
6675 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6676 			rc = -EFAULT;
6677 			goto out;
6678 		}
6679 	}
6680 out:
6681 	cmd_free(h, c);
6682 out_kfree:
6683 	kfree(buff);
6684 	return rc;
6685 }
6686 
6687 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6688 {
6689 	BIG_IOCTL_Command_struct *ioc;
6690 	struct CommandList *c;
6691 	unsigned char **buff = NULL;
6692 	int *buff_size = NULL;
6693 	u64 temp64;
6694 	BYTE sg_used = 0;
6695 	int status = 0;
6696 	u32 left;
6697 	u32 sz;
6698 	BYTE __user *data_ptr;
6699 
6700 	if (!argp)
6701 		return -EINVAL;
6702 	if (!capable(CAP_SYS_RAWIO))
6703 		return -EPERM;
6704 	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
6705 	if (!ioc) {
6706 		status = -ENOMEM;
6707 		goto cleanup1;
6708 	}
6709 	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6710 		status = -EFAULT;
6711 		goto cleanup1;
6712 	}
6713 	if ((ioc->buf_size < 1) &&
6714 	    (ioc->Request.Type.Direction != XFER_NONE)) {
6715 		status = -EINVAL;
6716 		goto cleanup1;
6717 	}
6718 	/* Check kmalloc limits  using all SGs */
6719 	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6720 		status = -EINVAL;
6721 		goto cleanup1;
6722 	}
6723 	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6724 		status = -EINVAL;
6725 		goto cleanup1;
6726 	}
6727 	buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
6728 	if (!buff) {
6729 		status = -ENOMEM;
6730 		goto cleanup1;
6731 	}
6732 	buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
6733 	if (!buff_size) {
6734 		status = -ENOMEM;
6735 		goto cleanup1;
6736 	}
6737 	left = ioc->buf_size;
6738 	data_ptr = ioc->buf;
6739 	while (left) {
6740 		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6741 		buff_size[sg_used] = sz;
6742 		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6743 		if (buff[sg_used] == NULL) {
6744 			status = -ENOMEM;
6745 			goto cleanup1;
6746 		}
6747 		if (ioc->Request.Type.Direction & XFER_WRITE) {
6748 			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6749 				status = -EFAULT;
6750 				goto cleanup1;
6751 			}
6752 		} else
6753 			memset(buff[sg_used], 0, sz);
6754 		left -= sz;
6755 		data_ptr += sz;
6756 		sg_used++;
6757 	}
6758 	c = cmd_alloc(h);
6759 
6760 	c->cmd_type = CMD_IOCTL_PEND;
6761 	c->scsi_cmd = SCSI_CMD_BUSY;
6762 	c->Header.ReplyQueue = 0;
6763 	c->Header.SGList = (u8) sg_used;
6764 	c->Header.SGTotal = cpu_to_le16(sg_used);
6765 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6766 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6767 	if (ioc->buf_size > 0) {
6768 		int i;
6769 		for (i = 0; i < sg_used; i++) {
6770 			temp64 = pci_map_single(h->pdev, buff[i],
6771 				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
6772 			if (dma_mapping_error(&h->pdev->dev,
6773 							(dma_addr_t) temp64)) {
6774 				c->SG[i].Addr = cpu_to_le64(0);
6775 				c->SG[i].Len = cpu_to_le32(0);
6776 				hpsa_pci_unmap(h->pdev, c, i,
6777 					PCI_DMA_BIDIRECTIONAL);
6778 				status = -ENOMEM;
6779 				goto cleanup0;
6780 			}
6781 			c->SG[i].Addr = cpu_to_le64(temp64);
6782 			c->SG[i].Len = cpu_to_le32(buff_size[i]);
6783 			c->SG[i].Ext = cpu_to_le32(0);
6784 		}
6785 		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6786 	}
6787 	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
6788 						NO_TIMEOUT);
6789 	if (sg_used)
6790 		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6791 	check_ioctl_unit_attention(h, c);
6792 	if (status) {
6793 		status = -EIO;
6794 		goto cleanup0;
6795 	}
6796 
6797 	/* Copy the error information out */
6798 	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6799 	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6800 		status = -EFAULT;
6801 		goto cleanup0;
6802 	}
6803 	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6804 		int i;
6805 
6806 		/* Copy the data out of the buffer we created */
6807 		BYTE __user *ptr = ioc->buf;
6808 		for (i = 0; i < sg_used; i++) {
6809 			if (copy_to_user(ptr, buff[i], buff_size[i])) {
6810 				status = -EFAULT;
6811 				goto cleanup0;
6812 			}
6813 			ptr += buff_size[i];
6814 		}
6815 	}
6816 	status = 0;
6817 cleanup0:
6818 	cmd_free(h, c);
6819 cleanup1:
6820 	if (buff) {
6821 		int i;
6822 
6823 		for (i = 0; i < sg_used; i++)
6824 			kfree(buff[i]);
6825 		kfree(buff);
6826 	}
6827 	kfree(buff_size);
6828 	kfree(ioc);
6829 	return status;
6830 }
6831 
6832 static void check_ioctl_unit_attention(struct ctlr_info *h,
6833 	struct CommandList *c)
6834 {
6835 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6836 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6837 		(void) check_for_unit_attention(h, c);
6838 }
6839 
6840 /*
6841  * ioctl entry point: dispatch CCISS-compatible ioctls
6842  */
6843 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6844 {
6845 	struct ctlr_info *h;
6846 	void __user *argp = (void __user *)arg;
6847 	int rc;
6848 
6849 	h = sdev_to_hba(dev);
6850 
6851 	switch (cmd) {
6852 	case CCISS_DEREGDISK:
6853 	case CCISS_REGNEWDISK:
6854 	case CCISS_REGNEWD:
6855 		hpsa_scan_start(h->scsi_host);
6856 		return 0;
6857 	case CCISS_GETPCIINFO:
6858 		return hpsa_getpciinfo_ioctl(h, argp);
6859 	case CCISS_GETDRIVVER:
6860 		return hpsa_getdrivver_ioctl(h, argp);
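	/*
	 * Passthru commands are throttled: passthru_cmds_avail counts the
	 * reserved slots, and requests beyond that fail fast with -EAGAIN
	 * rather than exhausting the command pool.
	 */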
6861 	case CCISS_PASSTHRU:
6862 		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6863 			return -EAGAIN;
6864 		rc = hpsa_passthru_ioctl(h, argp);
6865 		atomic_inc(&h->passthru_cmds_avail);
6866 		return rc;
6867 	case CCISS_BIG_PASSTHRU:
6868 		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6869 			return -EAGAIN;
6870 		rc = hpsa_big_passthru_ioctl(h, argp);
6871 		atomic_inc(&h->passthru_cmds_avail);
6872 		return rc;
6873 	default:
6874 		return -ENOTTY;
6875 	}
6876 }
6877 
6878 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6879 				u8 reset_type)
6880 {
6881 	struct CommandList *c;
6882 
6883 	c = cmd_alloc(h);
6884 
6885 	/* fill_cmd can't fail here, no data buffer to map */
6886 	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6887 		RAID_CTLR_LUNID, TYPE_MSG);
6888 	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6889 	c->waiting = NULL;
6890 	enqueue_cmd_and_start_io(h, c);
6891 	/* Don't wait for completion, the reset won't complete.  Don't free
6892 	 * the command either.  This is the last command we will send before
6893 	 * re-initializing everything, so it doesn't matter and won't leak.
6894 	 */
6895 	return;
6896 }
6897 
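/*
 * Build the header, CDB, and transfer direction for an internally
 * generated command (TYPE_CMD) or task-management message (TYPE_MSG),
 * then DMA-map the single data buffer, if any.
 */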
6898 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6899 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6900 	int cmd_type)
6901 {
6902 	int pci_dir = XFER_NONE;
6903 	u64 tag; /* for commands to be aborted */
6904 
6905 	c->cmd_type = CMD_IOCTL_PEND;
6906 	c->scsi_cmd = SCSI_CMD_BUSY;
6907 	c->Header.ReplyQueue = 0;
6908 	if (buff != NULL && size > 0) {
6909 		c->Header.SGList = 1;
6910 		c->Header.SGTotal = cpu_to_le16(1);
6911 	} else {
6912 		c->Header.SGList = 0;
6913 		c->Header.SGTotal = cpu_to_le16(0);
6914 	}
6915 	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6916 
6917 	if (cmd_type == TYPE_CMD) {
6918 		switch (cmd) {
6919 		case HPSA_INQUIRY:
6920 			/* are we trying to read a vital product page */
6921 			if (page_code & VPD_PAGE) {
6922 				c->Request.CDB[1] = 0x01;
6923 				c->Request.CDB[2] = (page_code & 0xff);
6924 			}
6925 			c->Request.CDBLen = 6;
6926 			c->Request.type_attr_dir =
6927 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6928 			c->Request.Timeout = 0;
6929 			c->Request.CDB[0] = HPSA_INQUIRY;
6930 			c->Request.CDB[4] = size & 0xFF;
6931 			break;
6932 		case HPSA_REPORT_LOG:
6933 		case HPSA_REPORT_PHYS:
6934 			/* Talking to the controller, so it's a physical
6935 			   command: mode = 00, target = 0.  Nothing to write.
6936 			 */
6937 			c->Request.CDBLen = 12;
6938 			c->Request.type_attr_dir =
6939 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6940 			c->Request.Timeout = 0;
6941 			c->Request.CDB[0] = cmd;
6942 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6943 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6944 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6945 			c->Request.CDB[9] = size & 0xFF;
6946 			break;
6947 		case BMIC_SENSE_DIAG_OPTIONS:
6948 			c->Request.CDBLen = 16;
6949 			c->Request.type_attr_dir =
6950 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6951 			c->Request.Timeout = 0;
6952 			/* Spec says this should be BMIC_WRITE */
6953 			c->Request.CDB[0] = BMIC_READ;
6954 			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
6955 			break;
6956 		case BMIC_SET_DIAG_OPTIONS:
6957 			c->Request.CDBLen = 16;
6958 			c->Request.type_attr_dir =
6959 					TYPE_ATTR_DIR(cmd_type,
6960 						ATTR_SIMPLE, XFER_WRITE);
6961 			c->Request.Timeout = 0;
6962 			c->Request.CDB[0] = BMIC_WRITE;
6963 			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
6964 			break;
6965 		case HPSA_CACHE_FLUSH:
6966 			c->Request.CDBLen = 12;
6967 			c->Request.type_attr_dir =
6968 					TYPE_ATTR_DIR(cmd_type,
6969 						ATTR_SIMPLE, XFER_WRITE);
6970 			c->Request.Timeout = 0;
6971 			c->Request.CDB[0] = BMIC_WRITE;
6972 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6973 			c->Request.CDB[7] = (size >> 8) & 0xFF;
6974 			c->Request.CDB[8] = size & 0xFF;
6975 			break;
6976 		case TEST_UNIT_READY:
6977 			c->Request.CDBLen = 6;
6978 			c->Request.type_attr_dir =
6979 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6980 			c->Request.Timeout = 0;
6981 			break;
6982 		case HPSA_GET_RAID_MAP:
6983 			c->Request.CDBLen = 12;
6984 			c->Request.type_attr_dir =
6985 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6986 			c->Request.Timeout = 0;
6987 			c->Request.CDB[0] = HPSA_CISS_READ;
6988 			c->Request.CDB[1] = cmd;
6989 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6990 			c->Request.CDB[7] = (size >> 16) & 0xFF;
6991 			c->Request.CDB[8] = (size >> 8) & 0xFF;
6992 			c->Request.CDB[9] = size & 0xFF;
6993 			break;
6994 		case BMIC_SENSE_CONTROLLER_PARAMETERS:
6995 			c->Request.CDBLen = 10;
6996 			c->Request.type_attr_dir =
6997 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6998 			c->Request.Timeout = 0;
6999 			c->Request.CDB[0] = BMIC_READ;
7000 			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
7001 			c->Request.CDB[7] = (size >> 16) & 0xFF;
7002 			c->Request.CDB[8] = (size >> 8) & 0xFF;
7003 			break;
7004 		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
7005 			c->Request.CDBLen = 10;
7006 			c->Request.type_attr_dir =
7007 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
7008 			c->Request.Timeout = 0;
7009 			c->Request.CDB[0] = BMIC_READ;
7010 			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
7011 			c->Request.CDB[7] = (size >> 16) & 0xFF;
7012 			c->Request.CDB[8] = (size >> 8) & 0xFF;
7013 			break;
7014 		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
7015 			c->Request.CDBLen = 10;
7016 			c->Request.type_attr_dir =
7017 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
7018 			c->Request.Timeout = 0;
7019 			c->Request.CDB[0] = BMIC_READ;
7020 			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
7021 			c->Request.CDB[7] = (size >> 16) & 0xFF;
7022 			c->Request.CDB[8] = (size >> 8) & 0xFF;
7023 			break;
7024 		case BMIC_SENSE_STORAGE_BOX_PARAMS:
7025 			c->Request.CDBLen = 10;
7026 			c->Request.type_attr_dir =
7027 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
7028 			c->Request.Timeout = 0;
7029 			c->Request.CDB[0] = BMIC_READ;
7030 			c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
7031 			c->Request.CDB[7] = (size >> 16) & 0xFF;
7032 			c->Request.CDB[8] = (size >> 8) & 0xFF;
7033 			break;
7034 		case BMIC_IDENTIFY_CONTROLLER:
7035 			c->Request.CDBLen = 10;
7036 			c->Request.type_attr_dir =
7037 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
7038 			c->Request.Timeout = 0;
7039 			c->Request.CDB[0] = BMIC_READ;
7040 			c->Request.CDB[1] = 0;
7041 			c->Request.CDB[2] = 0;
7042 			c->Request.CDB[3] = 0;
7043 			c->Request.CDB[4] = 0;
7044 			c->Request.CDB[5] = 0;
7045 			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
7046 			c->Request.CDB[7] = (size >> 16) & 0xFF;
7047 			c->Request.CDB[8] = (size >> 8) & 0xFF;
7048 			c->Request.CDB[9] = 0;
7049 			break;
7050 		default:
7051 			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
7052 			BUG();
7053 			return -1;
7054 		}
7055 	} else if (cmd_type == TYPE_MSG) {
7056 		switch (cmd) {
7057 
7058 		case  HPSA_PHYS_TARGET_RESET:
7059 			c->Request.CDBLen = 16;
7060 			c->Request.type_attr_dir =
7061 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
7062 			c->Request.Timeout = 0; /* Don't time out */
7063 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
7064 			c->Request.CDB[0] = HPSA_RESET;
7065 			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
7066 			/* Physical target reset needs no control bytes 4-7*/
7067 			c->Request.CDB[4] = 0x00;
7068 			c->Request.CDB[5] = 0x00;
7069 			c->Request.CDB[6] = 0x00;
7070 			c->Request.CDB[7] = 0x00;
7071 			break;
7072 		case  HPSA_DEVICE_RESET_MSG:
7073 			c->Request.CDBLen = 16;
7074 			c->Request.type_attr_dir =
7075 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
7076 			c->Request.Timeout = 0; /* Don't time out */
7077 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
7078 			c->Request.CDB[0] =  cmd;
7079 			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
7080 			/* If bytes 4-7 are zero, it means reset the */
7081 			/* LunID device */
7082 			c->Request.CDB[4] = 0x00;
7083 			c->Request.CDB[5] = 0x00;
7084 			c->Request.CDB[6] = 0x00;
7085 			c->Request.CDB[7] = 0x00;
7086 			break;
7087 		case  HPSA_ABORT_MSG:
7088 			memcpy(&tag, buff, sizeof(tag));
7089 			dev_dbg(&h->pdev->dev,
7090 				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
7091 				tag, c->Header.tag);
7092 			c->Request.CDBLen = 16;
7093 			c->Request.type_attr_dir =
7094 					TYPE_ATTR_DIR(cmd_type,
7095 						ATTR_SIMPLE, XFER_WRITE);
7096 			c->Request.Timeout = 0; /* Don't time out */
7097 			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
7098 			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
7099 			c->Request.CDB[2] = 0x00; /* reserved */
7100 			c->Request.CDB[3] = 0x00; /* reserved */
7101 			/* Tag to abort goes in CDB[4]-CDB[11] */
7102 			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
7103 			c->Request.CDB[12] = 0x00; /* reserved */
7104 			c->Request.CDB[13] = 0x00; /* reserved */
7105 			c->Request.CDB[14] = 0x00; /* reserved */
7106 			c->Request.CDB[15] = 0x00; /* reserved */
7107 			break;
7108 		default:
7109 			dev_warn(&h->pdev->dev, "unknown message type %d\n",
7110 				cmd);
7111 			BUG();
7112 		}
7113 	} else {
7114 		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
7115 		BUG();
7116 	}
7117 
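	/*
	 * Translate the CISS transfer direction into the PCI DMA direction
	 * used to map the data buffer.
	 */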
7118 	switch (GET_DIR(c->Request.type_attr_dir)) {
7119 	case XFER_READ:
7120 		pci_dir = PCI_DMA_FROMDEVICE;
7121 		break;
7122 	case XFER_WRITE:
7123 		pci_dir = PCI_DMA_TODEVICE;
7124 		break;
7125 	case XFER_NONE:
7126 		pci_dir = PCI_DMA_NONE;
7127 		break;
7128 	default:
7129 		pci_dir = PCI_DMA_BIDIRECTIONAL;
7130 	}
7131 	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
7132 		return -1;
7133 	return 0;
7134 }
7135 
7136 /*
7137  * Map (physical) PCI mem into (virtual) kernel space
7138  */
7139 static void __iomem *remap_pci_mem(ulong base, ulong size)
7140 {
7141 	ulong page_base = ((ulong) base) & PAGE_MASK;
7142 	ulong page_offs = ((ulong) base) - page_base;
7143 	void __iomem *page_remapped = ioremap_nocache(page_base,
7144 		page_offs + size);
7145 
7146 	return page_remapped ? (page_remapped + page_offs) : NULL;
7147 }
7148 
7149 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
7150 {
7151 	return h->access.command_completed(h, q);
7152 }
7153 
7154 static inline bool interrupt_pending(struct ctlr_info *h)
7155 {
7156 	return h->access.intr_pending(h);
7157 }
7158 
7159 static inline long interrupt_not_for_us(struct ctlr_info *h)
7160 {
7161 	return (h->access.intr_pending(h) == 0) ||
7162 		(h->interrupts_enabled == 0);
7163 }
7164 
7165 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
7166 	u32 raw_tag)
7167 {
7168 	if (unlikely(tag_index >= h->nr_cmds)) {
7169 		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
7170 		return 1;
7171 	}
7172 	return 0;
7173 }
7174 
7175 static inline void finish_cmd(struct CommandList *c)
7176 {
7177 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
7178 	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
7179 			|| c->cmd_type == CMD_IOACCEL2))
7180 		complete_scsi_command(c);
7181 	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
7182 		complete(c->waiting);
7183 }
7184 
7185 /* process completion of an indexed ("direct lookup") command */
7186 static inline void process_indexed_cmd(struct ctlr_info *h,
7187 	u32 raw_tag)
7188 {
7189 	u32 tag_index;
7190 	struct CommandList *c;
7191 
7192 	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
7193 	if (!bad_tag(h, tag_index, raw_tag)) {
7194 		c = h->cmd_pool + tag_index;
7195 		finish_cmd(c);
7196 	}
7197 }
7198 
7199 /* Some controllers, like p400, will give us one interrupt
7200  * after a soft reset, even if we turned interrupts off.
7201  * Only need to check for this in the hpsa_xxx_discard_completions
7202  * functions.
7203  */
7204 static int ignore_bogus_interrupt(struct ctlr_info *h)
7205 {
7206 	if (likely(!reset_devices))
7207 		return 0;
7208 
7209 	if (likely(h->interrupts_enabled))
7210 		return 0;
7211 
7212 	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
7213 		"(known firmware bug.)  Ignoring.\n");
7214 
7215 	return 1;
7216 }
7217 
7218 /*
7219  * Convert &h->q[x] (passed to interrupt handlers) back to h.
7220  * Relies on (h->q[x] == x) being true for x such that
7221  * 0 <= x < MAX_REPLY_QUEUES.
7222  */
7223 static struct ctlr_info *queue_to_hba(u8 *queue)
7224 {
7225 	return container_of((queue - *queue), struct ctlr_info, q[0]);
7226 }
7227 
7228 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
7229 {
7230 	struct ctlr_info *h = queue_to_hba(queue);
7231 	u8 q = *(u8 *) queue;
7232 	u32 raw_tag;
7233 
7234 	if (ignore_bogus_interrupt(h))
7235 		return IRQ_NONE;
7236 
7237 	if (interrupt_not_for_us(h))
7238 		return IRQ_NONE;
7239 	h->last_intr_timestamp = get_jiffies_64();
7240 	while (interrupt_pending(h)) {
7241 		raw_tag = get_next_completion(h, q);
7242 		while (raw_tag != FIFO_EMPTY)
7243 			raw_tag = next_command(h, q);
7244 	}
7245 	return IRQ_HANDLED;
7246 }
7247 
7248 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
7249 {
7250 	struct ctlr_info *h = queue_to_hba(queue);
7251 	u32 raw_tag;
7252 	u8 q = *(u8 *) queue;
7253 
7254 	if (ignore_bogus_interrupt(h))
7255 		return IRQ_NONE;
7256 
7257 	h->last_intr_timestamp = get_jiffies_64();
7258 	raw_tag = get_next_completion(h, q);
7259 	while (raw_tag != FIFO_EMPTY)
7260 		raw_tag = next_command(h, q);
7261 	return IRQ_HANDLED;
7262 }
7263 
7264 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
7265 {
7266 	struct ctlr_info *h = queue_to_hba((u8 *) queue);
7267 	u32 raw_tag;
7268 	u8 q = *(u8 *) queue;
7269 
7270 	if (interrupt_not_for_us(h))
7271 		return IRQ_NONE;
7272 	h->last_intr_timestamp = get_jiffies_64();
7273 	while (interrupt_pending(h)) {
7274 		raw_tag = get_next_completion(h, q);
7275 		while (raw_tag != FIFO_EMPTY) {
7276 			process_indexed_cmd(h, raw_tag);
7277 			raw_tag = next_command(h, q);
7278 		}
7279 	}
7280 	return IRQ_HANDLED;
7281 }
7282 
7283 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
7284 {
7285 	struct ctlr_info *h = queue_to_hba(queue);
7286 	u32 raw_tag;
7287 	u8 q = *(u8 *) queue;
7288 
7289 	h->last_intr_timestamp = get_jiffies_64();
7290 	raw_tag = get_next_completion(h, q);
7291 	while (raw_tag != FIFO_EMPTY) {
7292 		process_indexed_cmd(h, raw_tag);
7293 		raw_tag = next_command(h, q);
7294 	}
7295 	return IRQ_HANDLED;
7296 }
7297 
7298 /* Send a message CDB to the firmware. Careful, this only works
7299  * in simple mode, not performant mode due to the tag lookup.
7300  * We only ever use this immediately after a controller reset.
7301  */
7302 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
7303 			unsigned char type)
7304 {
7305 	struct Command {
7306 		struct CommandListHeader CommandHeader;
7307 		struct RequestBlock Request;
7308 		struct ErrDescriptor ErrorDescriptor;
7309 	};
7310 	struct Command *cmd;
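	/* The buffer must hold the command plus the ErrorInfo region that
	 * cmd->ErrorDescriptor (set up below) points at.
	 */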
7311 	static const size_t cmd_sz = sizeof(*cmd) +
7312 					sizeof(struct ErrorInfo);
7313 	dma_addr_t paddr64;
7314 	__le32 paddr32;
7315 	u32 tag;
7316 	void __iomem *vaddr;
7317 	int i, err;
7318 
7319 	vaddr = pci_ioremap_bar(pdev, 0);
7320 	if (vaddr == NULL)
7321 		return -ENOMEM;
7322 
7323 	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
7324 	 * CCISS commands, so they must be allocated from the lower 4GiB of
7325 	 * memory.
7326 	 */
7327 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
7328 	if (err) {
7329 		iounmap(vaddr);
7330 		return err;
7331 	}
7332 
7333 	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
7334 	if (cmd == NULL) {
7335 		iounmap(vaddr);
7336 		return -ENOMEM;
7337 	}
7338 
7339 	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
7340 	 * although there's no guarantee, we assume that the address is at
7341 	 * least 4-byte aligned (most likely, it's page-aligned).
7342 	 */
7343 	paddr32 = cpu_to_le32(paddr64);
7344 
7345 	cmd->CommandHeader.ReplyQueue = 0;
7346 	cmd->CommandHeader.SGList = 0;
7347 	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
7348 	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
7349 	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
7350 
7351 	cmd->Request.CDBLen = 16;
7352 	cmd->Request.type_attr_dir =
7353 			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
7354 	cmd->Request.Timeout = 0; /* Don't time out */
7355 	cmd->Request.CDB[0] = opcode;
7356 	cmd->Request.CDB[1] = type;
7357 	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
7358 	cmd->ErrorDescriptor.Addr =
7359 			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
7360 	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
7361 
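	/* Post the command's bus address to the inbound post queue. */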
7362 	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
7363 
7364 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
7365 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
7366 		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
7367 			break;
7368 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
7369 	}
7370 
7371 	iounmap(vaddr);
7372 
7373 	/* we leak the DMA buffer here ... no choice since the controller could
7374 	 *  still complete the command.
7375 	 */
7376 	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
7377 		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
7378 			opcode, type);
7379 		return -ETIMEDOUT;
7380 	}
7381 
7382 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
7383 
7384 	if (tag & HPSA_ERROR_BIT) {
7385 		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
7386 			opcode, type);
7387 		return -EIO;
7388 	}
7389 
7390 	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
7391 		opcode, type);
7392 	return 0;
7393 }
7394 
7395 #define hpsa_noop(p) hpsa_message(p, 3, 0)
7396 
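/*
 * Reset the controller, either by writing the doorbell register or by
 * bouncing the device through the PCI power states D3hot and D0.
 */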
7397 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
7398 	void __iomem *vaddr, u32 use_doorbell)
7399 {
7400 
7401 	if (use_doorbell) {
7402 		/* For everything after the P600, the PCI power state method
7403 		 * of resetting the controller doesn't work, so we have this
7404 		 * other way using the doorbell register.
7405 		 */
7406 		dev_info(&pdev->dev, "using doorbell to reset controller\n");
7407 		writel(use_doorbell, vaddr + SA5_DOORBELL);
7408 
7409 		/* PMC hardware guys tell us we need a 10 second delay after
7410 		 * doorbell reset and before any attempt to talk to the board
7411 		 * at all to ensure that this actually works and doesn't fall
7412 		 * over in some weird corner cases.
7413 		 */
7414 		msleep(10000);
7415 	} else { /* Try to do it the PCI power state way */
7416 
7417 		/* Quoting from the Open CISS Specification: "The Power
7418 		 * Management Control/Status Register (CSR) controls the power
7419 		 * state of the device.  The normal operating state is D0,
7420 		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
7421 		 * the controller, place the interface device in D3 then to D0,
7422 		 * this causes a secondary PCI reset which will reset the
7423 		 * controller." */
7424 
7425 		int rc = 0;
7426 
7427 		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
7428 
7429 		/* enter the D3hot power management state */
7430 		rc = pci_set_power_state(pdev, PCI_D3hot);
7431 		if (rc)
7432 			return rc;
7433 
7434 		msleep(500);
7435 
7436 		/* enter the D0 power management state */
7437 		rc = pci_set_power_state(pdev, PCI_D0);
7438 		if (rc)
7439 			return rc;
7440 
7441 		/*
7442 		 * The P600 requires a small delay when changing states.
7443 		 * Otherwise we may think the board did not reset and we bail.
7444 		 * This is for kdump only and is particular to the P600.
7445 		 */
7446 		msleep(500);
7447 	}
7448 	return 0;
7449 }
7450 
7451 static void init_driver_version(char *driver_version, int len)
7452 {
7453 	memset(driver_version, 0, len);
7454 	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
7455 }
7456 
7457 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
7458 {
7459 	char *driver_version;
7460 	int i, size = sizeof(cfgtable->driver_version);
7461 
7462 	driver_version = kmalloc(size, GFP_KERNEL);
7463 	if (!driver_version)
7464 		return -ENOMEM;
7465 
7466 	init_driver_version(driver_version, size);
7467 	for (i = 0; i < size; i++)
7468 		writeb(driver_version[i], &cfgtable->driver_version[i]);
7469 	kfree(driver_version);
7470 	return 0;
7471 }
7472 
7473 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
7474 					  unsigned char *driver_ver)
7475 {
7476 	int i;
7477 
7478 	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
7479 		driver_ver[i] = readb(&cfgtable->driver_version[i]);
7480 }
7481 
7482 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
7483 {
7484 
7485 	char *driver_ver, *old_driver_ver;
7486 	int rc, size = sizeof(cfgtable->driver_version);
7487 
7488 	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
7489 	if (!old_driver_ver)
7490 		return -ENOMEM;
7491 	driver_ver = old_driver_ver + size;
7492 
7493 	/* After a reset, the 32 bytes of "driver version" in the cfgtable
7494 	 * should have been changed; otherwise we know the reset failed.
7495 	 */
7496 	init_driver_version(old_driver_ver, size);
7497 	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
7498 	rc = !memcmp(driver_ver, old_driver_ver, size);
7499 	kfree(old_driver_ver);
7500 	return rc;
7501 }
7502 /* This does a hard reset of the controller using PCI power management
7503  * states or the doorbell register.
7504  */
7505 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
7506 {
7507 	u64 cfg_offset;
7508 	u32 cfg_base_addr;
7509 	u64 cfg_base_addr_index;
7510 	void __iomem *vaddr;
7511 	unsigned long paddr;
7512 	u32 misc_fw_support;
7513 	int rc;
7514 	struct CfgTable __iomem *cfgtable;
7515 	u32 use_doorbell;
7516 	u16 command_register;
7517 
7518 	/* For controllers as old as the P600, this is very nearly
7519 	 * the same thing as
7520 	 *
7521 	 * pci_save_state(pci_dev);
7522 	 * pci_set_power_state(pci_dev, PCI_D3hot);
7523 	 * pci_set_power_state(pci_dev, PCI_D0);
7524 	 * pci_restore_state(pci_dev);
7525 	 *
7526 	 * For controllers newer than the P600, the pci power state
7527 	 * method of resetting doesn't work so we have another way
7528 	 * using the doorbell register.
7529 	 */
7530 
7531 	if (!ctlr_is_resettable(board_id)) {
7532 		dev_warn(&pdev->dev, "Controller not resettable\n");
7533 		return -ENODEV;
7534 	}
7535 
7536 	/* if controller is soft- but not hard resettable... */
7537 	if (!ctlr_is_hard_resettable(board_id))
7538 		return -ENOTSUPP; /* try soft reset later. */
7539 
7540 	/* Save the PCI command register */
7541 	pci_read_config_word(pdev, PCI_COMMAND, &command_register);
7542 	pci_save_state(pdev);
7543 
7544 	/* find the first memory BAR, so we can find the cfg table */
7545 	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
7546 	if (rc)
7547 		return rc;
7548 	vaddr = remap_pci_mem(paddr, 0x250);
7549 	if (!vaddr)
7550 		return -ENOMEM;
7551 
7552 	/* find cfgtable in order to check if reset via doorbell is supported */
7553 	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
7554 					&cfg_base_addr_index, &cfg_offset);
7555 	if (rc)
7556 		goto unmap_vaddr;
7557 	cfgtable = remap_pci_mem(pci_resource_start(pdev,
7558 		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
7559 	if (!cfgtable) {
7560 		rc = -ENOMEM;
7561 		goto unmap_vaddr;
7562 	}
7563 	rc = write_driver_ver_to_cfgtable(cfgtable);
7564 	if (rc)
7565 		goto unmap_cfgtable;
7566 
7567 	/* If reset via doorbell register is supported, use that.
7568 	 * There are two such methods.  Favor the newest method.
7569 	 */
7570 	misc_fw_support = readl(&cfgtable->misc_fw_support);
7571 	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
7572 	if (use_doorbell) {
7573 		use_doorbell = DOORBELL_CTLR_RESET2;
7574 	} else {
7575 		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
7576 		if (use_doorbell) {
7577 			dev_warn(&pdev->dev,
7578 				"Soft reset not supported. Firmware update is required.\n");
7579 			rc = -ENOTSUPP; /* try soft reset */
7580 			goto unmap_cfgtable;
7581 		}
7582 	}
7583 
7584 	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
7585 	if (rc)
7586 		goto unmap_cfgtable;
7587 
7588 	pci_restore_state(pdev);
7589 	pci_write_config_word(pdev, PCI_COMMAND, command_register);
7590 
7591 	/* Some devices (notably the HP Smart Array 5i Controller)
7592 	   need a little pause here */
7593 	msleep(HPSA_POST_RESET_PAUSE_MSECS);
7594 
7595 	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
7596 	if (rc) {
7597 		dev_warn(&pdev->dev,
7598 			"Failed waiting for board to become ready after hard reset\n");
7599 		goto unmap_cfgtable;
7600 	}
7601 
7602 	rc = controller_reset_failed(cfgtable);
7603 	if (rc < 0)
7604 		goto unmap_cfgtable;
7605 	if (rc) {
7606 		dev_warn(&pdev->dev, "Unable to successfully reset "
7607 			"controller. Will try soft reset.\n");
7608 		rc = -ENOTSUPP;
7609 	} else {
7610 		dev_info(&pdev->dev, "board ready after hard reset.\n");
7611 	}
7612 
7613 unmap_cfgtable:
7614 	iounmap(cfgtable);
7615 
7616 unmap_vaddr:
7617 	iounmap(vaddr);
7618 	return rc;
7619 }
7620 
7621 /*
7622  * We cannot read the structure directly; for portability we must use
7623  * the io functions.
7624  * This is for debug only.
7625  */
7626 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
7627 {
7628 #ifdef HPSA_DEBUG
7629 	int i;
7630 	char temp_name[17];
7631 
7632 	dev_info(dev, "Controller Configuration information\n");
7633 	dev_info(dev, "------------------------------------\n");
7634 	for (i = 0; i < 4; i++)
7635 		temp_name[i] = readb(&(tb->Signature[i]));
7636 	temp_name[4] = '\0';
7637 	dev_info(dev, "   Signature = %s\n", temp_name);
7638 	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
7639 	dev_info(dev, "   Transport methods supported = 0x%x\n",
7640 	       readl(&(tb->TransportSupport)));
7641 	dev_info(dev, "   Transport methods active = 0x%x\n",
7642 	       readl(&(tb->TransportActive)));
7643 	dev_info(dev, "   Requested transport Method = 0x%x\n",
7644 	       readl(&(tb->HostWrite.TransportRequest)));
7645 	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
7646 	       readl(&(tb->HostWrite.CoalIntDelay)));
7647 	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
7648 	       readl(&(tb->HostWrite.CoalIntCount)));
7649 	dev_info(dev, "   Max outstanding commands = %d\n",
7650 	       readl(&(tb->CmdsOutMax)));
7651 	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7652 	for (i = 0; i < 16; i++)
7653 		temp_name[i] = readb(&(tb->ServerName[i]));
7654 	temp_name[16] = '\0';
7655 	dev_info(dev, "   Server Name = %s\n", temp_name);
7656 	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
7657 		readl(&(tb->HeartBeat)));
7658 #endif				/* HPSA_DEBUG */
7659 }
7660 
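/*
 * Map a config-space BAR register offset to a resource index by walking
 * the BARs: I/O and 32-bit memory BARs take 4 bytes of config space,
 * 64-bit memory BARs take 8.
 */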
7661 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7662 {
7663 	int i, offset, mem_type, bar_type;
7664 
7665 	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
7666 		return 0;
7667 	offset = 0;
7668 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7669 		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7670 		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7671 			offset += 4;
7672 		else {
7673 			mem_type = pci_resource_flags(pdev, i) &
7674 			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7675 			switch (mem_type) {
7676 			case PCI_BASE_ADDRESS_MEM_TYPE_32:
7677 			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7678 				offset += 4;	/* 32 bit */
7679 				break;
7680 			case PCI_BASE_ADDRESS_MEM_TYPE_64:
7681 				offset += 8;
7682 				break;
7683 			default:	/* reserved in PCI 2.2 */
7684 				dev_warn(&pdev->dev,
7685 				       "base address is invalid\n");
7686 				return -1;
7688 			}
7689 		}
7690 		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7691 			return i + 1;
7692 	}
7693 	return -1;
7694 }
7695 
7696 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7697 {
7698 	pci_free_irq_vectors(h->pdev);
7699 	h->msix_vectors = 0;
7700 }
7701 
7702 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7703  * controllers that are capable. If not, we use legacy INTx mode.
7704  */
7705 static int hpsa_interrupt_mode(struct ctlr_info *h)
7706 {
7707 	unsigned int flags = PCI_IRQ_LEGACY;
7708 	int ret;
7709 
7710 	/* Some boards advertise MSI but don't really support it */
7711 	switch (h->board_id) {
7712 	case 0x40700E11:
7713 	case 0x40800E11:
7714 	case 0x40820E11:
7715 	case 0x40830E11:
7716 		break;
7717 	default:
7718 		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
7719 				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
7720 		if (ret > 0) {
7721 			h->msix_vectors = ret;
7722 			return 0;
7723 		}
7724 
7725 		flags |= PCI_IRQ_MSI;
7726 		break;
7727 	}
7728 
7729 	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
7730 	if (ret < 0)
7731 		return ret;
7732 	return 0;
7733 }
7734 
7735 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7736 {
7737 	int i;
7738 	u32 subsystem_vendor_id, subsystem_device_id;
7739 
7740 	subsystem_vendor_id = pdev->subsystem_vendor;
7741 	subsystem_device_id = pdev->subsystem_device;
7742 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7743 		    subsystem_vendor_id;
7744 
7745 	for (i = 0; i < ARRAY_SIZE(products); i++)
7746 		if (*board_id == products[i].board_id)
7747 			return i;
7748 
7749 	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7750 		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7751 		!hpsa_allow_any) {
7752 		dev_warn(&pdev->dev,
7753 			"unrecognized board ID: 0x%08x, ignoring.\n", *board_id);
7754 		return -ENODEV;
7755 	}
7756 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7757 }
7758 
7759 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7760 				    unsigned long *memory_bar)
7761 {
7762 	int i;
7763 
7764 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7765 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7766 			/* addressing mode bits already removed */
7767 			*memory_bar = pci_resource_start(pdev, i);
7768 			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7769 				*memory_bar);
7770 			return 0;
7771 		}
7772 	dev_warn(&pdev->dev, "no memory BAR found\n");
7773 	return -ENODEV;
7774 }
7775 
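/*
 * Poll the scratchpad register until the firmware reports the requested
 * state (ready or not ready), sleeping between reads and giving up
 * after a bounded number of iterations.
 */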
7776 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7777 				     int wait_for_ready)
7778 {
7779 	int i, iterations;
7780 	u32 scratchpad;
7781 	if (wait_for_ready)
7782 		iterations = HPSA_BOARD_READY_ITERATIONS;
7783 	else
7784 		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
7785 
7786 	for (i = 0; i < iterations; i++) {
7787 		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7788 		if (wait_for_ready) {
7789 			if (scratchpad == HPSA_FIRMWARE_READY)
7790 				return 0;
7791 		} else {
7792 			if (scratchpad != HPSA_FIRMWARE_READY)
7793 				return 0;
7794 		}
7795 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7796 	}
7797 	dev_warn(&pdev->dev, "board not ready, timed out.\n");
7798 	return -ENODEV;
7799 }
7800 
7801 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7802 			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7803 			       u64 *cfg_offset)
7804 {
7805 	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7806 	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7807 	*cfg_base_addr &= (u32) 0x0000ffff;
7808 	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7809 	if (*cfg_base_addr_index == -1) {
7810 		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7811 		return -ENODEV;
7812 	}
7813 	return 0;
7814 }
7815 
7816 static void hpsa_free_cfgtables(struct ctlr_info *h)
7817 {
7818 	if (h->transtable) {
7819 		iounmap(h->transtable);
7820 		h->transtable = NULL;
7821 	}
7822 	if (h->cfgtable) {
7823 		iounmap(h->cfgtable);
7824 		h->cfgtable = NULL;
7825 	}
7826 }
7827 
7828 /* Find and map CISS config table and transfer table;
7829  * several items must be unmapped (freed) later.
7830  */
7831 static int hpsa_find_cfgtables(struct ctlr_info *h)
7832 {
7833 	u64 cfg_offset;
7834 	u32 cfg_base_addr;
7835 	u64 cfg_base_addr_index;
7836 	u32 trans_offset;
7837 	int rc;
7838 
7839 	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7840 		&cfg_base_addr_index, &cfg_offset);
7841 	if (rc)
7842 		return rc;
7843 	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7844 		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7845 	if (!h->cfgtable) {
7846 		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7847 		return -ENOMEM;
7848 	}
7849 	rc = write_driver_ver_to_cfgtable(h->cfgtable);
7850 	if (rc)
7851 		return rc;
7852 	/* Find performant mode table. */
7853 	trans_offset = readl(&h->cfgtable->TransMethodOffset);
7854 	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7855 				cfg_base_addr_index)+cfg_offset+trans_offset,
7856 				sizeof(*h->transtable));
7857 	if (!h->transtable) {
7858 		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7859 		hpsa_free_cfgtables(h);
7860 		return -ENOMEM;
7861 	}
7862 	return 0;
7863 }
7864 
7865 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7866 {
7867 #define MIN_MAX_COMMANDS 16
7868 	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7869 
7870 	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7871 
7872 	/* Limit commands in memory limited kdump scenario. */
7873 	if (reset_devices && h->max_commands > 32)
7874 		h->max_commands = 32;
7875 
7876 	if (h->max_commands < MIN_MAX_COMMANDS) {
7877 		dev_warn(&h->pdev->dev,
7878 			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7879 			h->max_commands,
7880 			MIN_MAX_COMMANDS);
7881 		h->max_commands = MIN_MAX_COMMANDS;
7882 	}
7883 }
7884 
7885 /* If the controller reports that the total max sg entries is greater than 512,
7886  * then we know that chained SG blocks work.  (Original smart arrays did not
7887  * support chained SG blocks and would return zero for max sg entries.)
7888  */
7889 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7890 {
7891 	return h->maxsgentries > 512;
7892 }
7893 
7894 /* Interrogate the hardware for some limits:
7895  * max commands, max SG elements without chaining, and with chaining,
7896  * SG chain block size, etc.
7897  */
7898 static void hpsa_find_board_params(struct ctlr_info *h)
7899 {
7900 	hpsa_get_max_perf_mode_cmds(h);
7901 	h->nr_cmds = h->max_commands;
7902 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7903 	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7904 	if (hpsa_supports_chained_sg_blocks(h)) {
7905 		/* Limit in-command s/g elements to 32 to save DMA'able memory. */
7906 		h->max_cmd_sg_entries = 32;
7907 		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7908 		h->maxsgentries--; /* save one for chain pointer */
7909 	} else {
7910 		/*
7911 		 * Original smart arrays supported at most 31 s/g entries
7912 		 * embedded inline in the command (trying to use more
7913 		 * would lock up the controller)
7914 		 */
7915 		h->max_cmd_sg_entries = 31;
7916 		h->maxsgentries = 31; /* default to traditional values */
7917 		h->chainsize = 0;
7918 	}
7919 
7920 	/* Find out what task management functions are supported and cache them */
7921 	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7922 	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7923 		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7924 	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7925 		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7926 	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7927 		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7928 }
7929 
7930 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7931 {
7932 	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7933 		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7934 		return false;
7935 	}
7936 	return true;
7937 }
7938 
7939 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7940 {
7941 	u32 driver_support;
7942 
7943 	driver_support = readl(&(h->cfgtable->driver_support));
7944 	/* Need to enable prefetch in the SCSI core for the 6400 on x86 */
7945 #ifdef CONFIG_X86
7946 	driver_support |= ENABLE_SCSI_PREFETCH;
7947 #endif
7948 	driver_support |= ENABLE_UNIT_ATTN;
7949 	writel(driver_support, &(h->cfgtable->driver_support));
7950 }
7951 
7952 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
7953  * in a prefetch beyond physical memory.
7954  */
7955 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7956 {
7957 	u32 dma_prefetch;
7958 
7959 	if (h->board_id != 0x3225103C)
7960 		return;
7961 	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7962 	dma_prefetch |= 0x8000;
7963 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7964 }
7965 
7966 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7967 {
7968 	int i;
7969 	u32 doorbell_value;
7970 	unsigned long flags;
7971 	/* wait until the clear_event_notify bit 6 is cleared by controller. */
7972 	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7973 		spin_lock_irqsave(&h->lock, flags);
7974 		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7975 		spin_unlock_irqrestore(&h->lock, flags);
7976 		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7977 			goto done;
7978 		/* delay and try again */
7979 		msleep(CLEAR_EVENT_WAIT_INTERVAL);
7980 	}
7981 	return -ENODEV;
7982 done:
7983 	return 0;
7984 }
7985 
7986 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7987 {
7988 	int i;
7989 	u32 doorbell_value;
7990 	unsigned long flags;
7991 
7992 	/* under certain very rare conditions, this can take a while.
7993 	 * (e.g., hot-replacing a failed 144GB drive in a RAID 5 set right
7994 	 * as we enter this code.)
7995 	 */
7996 	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7997 		if (h->remove_in_progress)
7998 			goto done;
7999 		spin_lock_irqsave(&h->lock, flags);
8000 		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
8001 		spin_unlock_irqrestore(&h->lock, flags);
8002 		if (!(doorbell_value & CFGTBL_ChangeReq))
8003 			goto done;
8004 		/* delay and try again */
8005 		msleep(MODE_CHANGE_WAIT_INTERVAL);
8006 	}
8007 	return -ENODEV;
8008 done:
8009 	return 0;
8010 }
8011 
8012 /* return -ENODEV or other reason on error, 0 on success */
8013 static int hpsa_enter_simple_mode(struct ctlr_info *h)
8014 {
8015 	u32 trans_support;
8016 
8017 	trans_support = readl(&(h->cfgtable->TransportSupport));
8018 	if (!(trans_support & SIMPLE_MODE))
8019 		return -ENOTSUPP;
8020 
8021 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
8022 
8023 	/* Update the field, and then ring the doorbell */
8024 	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
8025 	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
8026 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8027 	if (hpsa_wait_for_mode_change_ack(h))
8028 		goto error;
8029 	print_cfg_table(&h->pdev->dev, h->cfgtable);
8030 	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
8031 		goto error;
8032 	h->transMethod = CFGTBL_Trans_Simple;
8033 	return 0;
8034 error:
8035 	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
8036 	return -ENODEV;
8037 }
8038 
8039 /* free items allocated or mapped by hpsa_pci_init */
8040 static void hpsa_free_pci_init(struct ctlr_info *h)
8041 {
8042 	hpsa_free_cfgtables(h);			/* pci_init 4 */
8043 	iounmap(h->vaddr);			/* pci_init 3 */
8044 	h->vaddr = NULL;
8045 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
8046 	/*
8047 	 * call pci_disable_device before pci_release_regions per
8048 	 * Documentation/PCI/pci.txt
8049 	 */
8050 	pci_disable_device(h->pdev);		/* pci_init 1 */
8051 	pci_release_regions(h->pdev);		/* pci_init 2 */
8052 }
8053 
8054 /* several items must be freed later */
8055 static int hpsa_pci_init(struct ctlr_info *h)
8056 {
8057 	int prod_index, err;
8058 
8059 	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
8060 	if (prod_index < 0)
8061 		return prod_index;
8062 	h->product_name = products[prod_index].product_name;
8063 	h->access = *(products[prod_index].access);
8064 
8065 	h->needs_abort_tags_swizzled =
8066 		ctlr_needs_abort_tags_swizzled(h->board_id);
8067 
8068 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
8069 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
8070 
8071 	err = pci_enable_device(h->pdev);
8072 	if (err) {
8073 		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
8074 		pci_disable_device(h->pdev);
8075 		return err;
8076 	}
8077 
8078 	err = pci_request_regions(h->pdev, HPSA);
8079 	if (err) {
8080 		dev_err(&h->pdev->dev,
8081 			"failed to obtain PCI resources\n");
8082 		pci_disable_device(h->pdev);
8083 		return err;
8084 	}
8085 
8086 	pci_set_master(h->pdev);
8087 
8088 	err = hpsa_interrupt_mode(h);
8089 	if (err)
8090 		goto clean1;
8091 	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
8092 	if (err)
8093 		goto clean2;	/* intmode+region, pci */
8094 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
8095 	if (!h->vaddr) {
8096 		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
8097 		err = -ENOMEM;
8098 		goto clean2;	/* intmode+region, pci */
8099 	}
8100 	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8101 	if (err)
8102 		goto clean3;	/* vaddr, intmode+region, pci */
8103 	err = hpsa_find_cfgtables(h);
8104 	if (err)
8105 		goto clean3;	/* vaddr, intmode+region, pci */
8106 	hpsa_find_board_params(h);
8107 
8108 	if (!hpsa_CISS_signature_present(h)) {
8109 		err = -ENODEV;
8110 		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
8111 	}
8112 	hpsa_set_driver_support_bits(h);
8113 	hpsa_p600_dma_prefetch_quirk(h);
8114 	err = hpsa_enter_simple_mode(h);
8115 	if (err)
8116 		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
8117 	return 0;
8118 
8119 clean4:	/* cfgtables, vaddr, intmode+region, pci */
8120 	hpsa_free_cfgtables(h);
8121 clean3:	/* vaddr, intmode+region, pci */
8122 	iounmap(h->vaddr);
8123 	h->vaddr = NULL;
8124 clean2:	/* intmode+region, pci */
8125 	hpsa_disable_interrupt_mode(h);
8126 clean1:
8127 	/*
8128 	 * call pci_disable_device before pci_release_regions per
8129 	 * Documentation/PCI/pci.txt
8130 	 */
8131 	pci_disable_device(h->pdev);
8132 	pci_release_regions(h->pdev);
8133 	return err;
8134 }
8135 
8136 static void hpsa_hba_inquiry(struct ctlr_info *h)
8137 {
8138 	int rc;
8139 
8140 #define HBA_INQUIRY_BYTE_COUNT 64
8141 	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
8142 	if (!h->hba_inquiry_data)
8143 		return;
8144 	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
8145 		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
8146 	if (rc != 0) {
8147 		kfree(h->hba_inquiry_data);
8148 		h->hba_inquiry_data = NULL;
8149 	}
8150 }
8151 
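/*
 * kdump support: if reset_devices was given on the kernel command line,
 * reset the controller left running by the crashed kernel and verify it
 * responds to no-ops before normal initialization proceeds.
 */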
8152 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
8153 {
8154 	int rc, i;
8155 	void __iomem *vaddr;
8156 
8157 	if (!reset_devices)
8158 		return 0;
8159 
8160 	/* The kdump kernel is loading, so we don't know what state the
8161 	 * PCI interface is in. dev->enable_cnt is zero, so we call
8162 	 * enable+disable, wait a while, and switch it back on.
8163 	 */
8164 	rc = pci_enable_device(pdev);
8165 	if (rc) {
8166 		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
8167 		return -ENODEV;
8168 	}
8169 	pci_disable_device(pdev);
8170 	msleep(260);			/* a randomly chosen number */
8171 	rc = pci_enable_device(pdev);
8172 	if (rc) {
8173 		dev_warn(&pdev->dev, "failed to enable device.\n");
8174 		return -ENODEV;
8175 	}
8176 
8177 	pci_set_master(pdev);
8178 
8179 	vaddr = pci_ioremap_bar(pdev, 0);
8180 	if (vaddr == NULL) {
8181 		rc = -ENOMEM;
8182 		goto out_disable;
8183 	}
8184 	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
8185 	iounmap(vaddr);
8186 
8187 	/* Reset the controller with a PCI power-cycle or via doorbell */
8188 	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
8189 
8190 	/* -ENOTSUPP here means we cannot reset the controller
8191 	 * but it's already (and still) up and running in
8192 	 * "performant mode".  Or, it might be a 640x, which can't be reset
8193 	 * due to concerns about shared BBWC between the 6402/6404 pair.
8194 	 */
8195 	if (rc)
8196 		goto out_disable;
8197 
8198 	/* Now try to get the controller to respond to a no-op */
8199 	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
8200 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
8201 		if (hpsa_noop(pdev) == 0)
8202 			break;
8204 		dev_warn(&pdev->dev, "no-op failed%s\n",
8205 				(i < 11 ? "; re-trying" : ""));
8206 	}
8207 
8208 out_disable:
8209 
8210 	pci_disable_device(pdev);
8211 	return rc;
8212 }
8213 
8214 static void hpsa_free_cmd_pool(struct ctlr_info *h)
8215 {
8216 	kfree(h->cmd_pool_bits);
8217 	h->cmd_pool_bits = NULL;
8218 	if (h->cmd_pool) {
8219 		pci_free_consistent(h->pdev,
8220 				h->nr_cmds * sizeof(struct CommandList),
8221 				h->cmd_pool,
8222 				h->cmd_pool_dhandle);
8223 		h->cmd_pool = NULL;
8224 		h->cmd_pool_dhandle = 0;
8225 	}
8226 	if (h->errinfo_pool) {
8227 		pci_free_consistent(h->pdev,
8228 				h->nr_cmds * sizeof(struct ErrorInfo),
8229 				h->errinfo_pool,
8230 				h->errinfo_pool_dhandle);
8231 		h->errinfo_pool = NULL;
8232 		h->errinfo_pool_dhandle = 0;
8233 	}
8234 }
8235 
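/*
 * Allocate the command pool: a bitmap of free slots plus DMA-coherent
 * arrays with one CommandList and one ErrorInfo per supported command.
 */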
8236 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
8237 {
8238 	h->cmd_pool_bits = kzalloc(
8239 		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
8240 		sizeof(unsigned long), GFP_KERNEL);
8241 	h->cmd_pool = pci_alloc_consistent(h->pdev,
8242 		    h->nr_cmds * sizeof(*h->cmd_pool),
8243 		    &(h->cmd_pool_dhandle));
8244 	h->errinfo_pool = pci_alloc_consistent(h->pdev,
8245 		    h->nr_cmds * sizeof(*h->errinfo_pool),
8246 		    &(h->errinfo_pool_dhandle));
8247 	if ((h->cmd_pool_bits == NULL)
8248 	    || (h->cmd_pool == NULL)
8249 	    || (h->errinfo_pool == NULL)) {
8250 		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
8251 		goto clean_up;
8252 	}
8253 	hpsa_preinitialize_commands(h);
8254 	return 0;
8255 clean_up:
8256 	hpsa_free_cmd_pool(h);
8257 	return -ENOMEM;
8258 }
8259 
8260 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
8261 static void hpsa_free_irqs(struct ctlr_info *h)
8262 {
8263 	int i;
8264 
8265 	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
8266 		/* Single reply queue, only one irq to free */
8267 		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
8268 		h->q[h->intr_mode] = 0;
8269 		return;
8270 	}
8271 
8272 	for (i = 0; i < h->msix_vectors; i++) {
8273 		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
8274 		h->q[i] = 0;
8275 	}
8276 	for (; i < MAX_REPLY_QUEUES; i++)
8277 		h->q[i] = 0;
8278 }
8279 
8280 /* returns 0 on success; cleans up and returns -Enn on error */
8281 static int hpsa_request_irqs(struct ctlr_info *h,
8282 	irqreturn_t (*msixhandler)(int, void *),
8283 	irqreturn_t (*intxhandler)(int, void *))
8284 {
8285 	int rc, i;
8286 
8287 	/*
8288 	 * initialize h->q[x] = x so that interrupt handlers know which
8289 	 * queue to process.
8290 	 */
8291 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
8292 		h->q[i] = (u8) i;
8293 
8294 	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
8295 		/* If performant mode and MSI-X, use multiple reply queues */
8296 		for (i = 0; i < h->msix_vectors; i++) {
8297 			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
8298 			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
8299 					0, h->intrname[i],
8300 					&h->q[i]);
8301 			if (rc) {
8302 				int j;
8303 
8304 				dev_err(&h->pdev->dev,
8305 					"failed to get irq %d for %s\n",
8306 				       pci_irq_vector(h->pdev, i), h->devname);
8307 				for (j = 0; j < i; j++) {
8308 					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
8309 					h->q[j] = 0;
8310 				}
8311 				for (; j < MAX_REPLY_QUEUES; j++)
8312 					h->q[j] = 0;
8313 				return rc;
8314 			}
8315 		}
8316 	} else {
8317 		/* Use single reply pool */
8318 		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
8319 			sprintf(h->intrname[0], "%s-msi%s", h->devname,
8320 				h->msix_vectors ? "x" : "");
8321 			rc = request_irq(pci_irq_vector(h->pdev, 0),
8322 				msixhandler, 0,
8323 				h->intrname[0],
8324 				&h->q[h->intr_mode]);
8325 		} else {
8326 			sprintf(h->intrname[h->intr_mode],
8327 				"%s-intx", h->devname);
8328 			rc = request_irq(pci_irq_vector(h->pdev, 0),
8329 				intxhandler, IRQF_SHARED,
8330 				h->intrname[h->intr_mode],
8331 				&h->q[h->intr_mode]);
8332 		}
8333 	}
8334 	if (rc) {
8335 		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
8336 		       pci_irq_vector(h->pdev, 0), h->devname);
8337 		hpsa_free_irqs(h);
8338 		return -ENODEV;
8339 	}
8340 	return 0;
8341 }
8342 
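/*
 * Ask the controller to reset itself, then confirm the reset actually
 * happened by waiting for the board to leave and re-enter READY state.
 */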
8343 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
8344 {
8345 	int rc;
8346 	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
8347 
8348 	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
8349 	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
8350 	if (rc) {
8351 		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
8352 		return rc;
8353 	}
8354 
8355 	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
8356 	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
8357 	if (rc) {
8358 		dev_warn(&h->pdev->dev, "Board failed to become ready "
8359 			"after soft reset.\n");
8360 		return rc;
8361 	}
8362 
8363 	return 0;
8364 }
8365 
8366 static void hpsa_free_reply_queues(struct ctlr_info *h)
8367 {
8368 	int i;
8369 
8370 	for (i = 0; i < h->nreply_queues; i++) {
8371 		if (!h->reply_queue[i].head)
8372 			continue;
8373 		pci_free_consistent(h->pdev,
8374 					h->reply_queue_size,
8375 					h->reply_queue[i].head,
8376 					h->reply_queue[i].busaddr);
8377 		h->reply_queue[i].head = NULL;
8378 		h->reply_queue[i].busaddr = 0;
8379 	}
8380 	h->reply_queue_size = 0;
8381 }
8382 
8383 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
8384 {
8385 	hpsa_free_performant_mode(h);		/* init_one 7 */
8386 	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
8387 	hpsa_free_cmd_pool(h);			/* init_one 5 */
8388 	hpsa_free_irqs(h);			/* init_one 4 */
8389 	scsi_host_put(h->scsi_host);		/* init_one 3 */
8390 	h->scsi_host = NULL;			/* init_one 3 */
8391 	hpsa_free_pci_init(h);			/* init_one 2_5 */
8392 	free_percpu(h->lockup_detected);	/* init_one 2 */
8393 	h->lockup_detected = NULL;		/* init_one 2 */
8394 	if (h->resubmit_wq) {
8395 		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
8396 		h->resubmit_wq = NULL;
8397 	}
8398 	if (h->rescan_ctlr_wq) {
8399 		destroy_workqueue(h->rescan_ctlr_wq);
8400 		h->rescan_ctlr_wq = NULL;
8401 	}
8402 	kfree(h);				/* init_one 1 */
8403 }
8404 
8405 /* Called when controller lockup detected. */
8406 static void fail_all_outstanding_cmds(struct ctlr_info *h)
8407 {
8408 	int i, refcount;
8409 	struct CommandList *c;
8410 	int failcount = 0;
8411 
8412 	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
8413 	for (i = 0; i < h->nr_cmds; i++) {
8414 		c = h->cmd_pool + i;
8415 		refcount = atomic_inc_return(&c->refcount);
8416 		if (refcount > 1) {
8417 			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
8418 			finish_cmd(c);
8419 			atomic_dec(&h->commands_outstanding);
8420 			failcount++;
8421 		}
8422 		cmd_free(h, c);
8423 	}
8424 	dev_warn(&h->pdev->dev,
8425 		"failed %d commands in fail_all\n", failcount);
8426 }
8427 
8428 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
8429 {
8430 	int cpu;
8431 
8432 	for_each_online_cpu(cpu) {
8433 		u32 *lockup_detected;
8434 		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
8435 		*lockup_detected = value;
8436 	}
8437 	wmb(); /* be sure the per-cpu variables are out to memory */
8438 }
8439 
8440 static void controller_lockup_detected(struct ctlr_info *h)
8441 {
8442 	unsigned long flags;
8443 	u32 lockup_detected;
8444 
8445 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8446 	spin_lock_irqsave(&h->lock, flags);
8447 	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
8448 	if (!lockup_detected) {
8449 		/* no heartbeat, but controller gave us a zero. */
8450 		dev_warn(&h->pdev->dev,
8451 			"lockup detected after %d seconds but scratchpad register is zero\n",
8452 			h->heartbeat_sample_interval / HZ);
8453 		lockup_detected = 0xffffffff;
8454 	}
8455 	set_lockup_detected_for_all_cpus(h, lockup_detected);
8456 	spin_unlock_irqrestore(&h->lock, flags);
8457 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
8458 			lockup_detected, h->heartbeat_sample_interval / HZ);
8459 	pci_disable_device(h->pdev);
8460 	fail_all_outstanding_cmds(h);
8461 }
8462 
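/*
 * Lockup detection: if no interrupt has arrived within the sampling
 * interval and the firmware heartbeat counter in the config table has
 * stopped advancing, declare the controller locked up.
 */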
8463 static int detect_controller_lockup(struct ctlr_info *h)
8464 {
8465 	u64 now;
8466 	u32 heartbeat;
8467 	unsigned long flags;
8468 
8469 	now = get_jiffies_64();
8470 	/* If we've received an interrupt recently, we're ok. */
8471 	if (time_after64(h->last_intr_timestamp +
8472 				(h->heartbeat_sample_interval), now))
8473 		return false;
8474 
8475 	/*
8476 	 * If we've already checked the heartbeat recently, we're ok.
8477 	 * This could happen if someone sends us a signal. We
8478 	 * otherwise don't care about signals in this thread.
8479 	 */
8480 	if (time_after64(h->last_heartbeat_timestamp +
8481 				(h->heartbeat_sample_interval), now))
8482 		return false;
8483 
8484 	/* If heartbeat has not changed since we last looked, we're not ok. */
8485 	spin_lock_irqsave(&h->lock, flags);
8486 	heartbeat = readl(&h->cfgtable->HeartBeat);
8487 	spin_unlock_irqrestore(&h->lock, flags);
8488 	if (h->last_heartbeat == heartbeat) {
8489 		controller_lockup_detected(h);
8490 		return true;
8491 	}
8492 
8493 	/* We're ok. */
8494 	h->last_heartbeat = heartbeat;
8495 	h->last_heartbeat_timestamp = now;
8496 	return false;
8497 }
8498 
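/*
 * Acknowledge firmware event notifications.  For ioaccel path state or
 * configuration changes, quiesce first: block new requests, disable
 * offload on every device, and drain outstanding accelerated commands.
 */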
8499 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
8500 {
8501 	int i;
8502 	char *event_type;
8503 
8504 	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8505 		return;
8506 
8507 	/* Ask the controller to clear the events we're handling. */
8508 	if ((h->transMethod & (CFGTBL_Trans_io_accel1
8509 			| CFGTBL_Trans_io_accel2)) &&
8510 		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
8511 		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
8512 
8513 		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
8514 			event_type = "state change";
8515 		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
8516 			event_type = "configuration change";
8517 		/* Stop sending new RAID offload reqs via the IO accelerator */
8518 		scsi_block_requests(h->scsi_host);
8519 		for (i = 0; i < h->ndevices; i++) {
8520 			h->dev[i]->offload_enabled = 0;
8521 			h->dev[i]->offload_to_be_enabled = 0;
8522 		}
8523 		hpsa_drain_accel_commands(h);
8524 		/* Write the events being handled into 'clear_event_notify' */
8525 		dev_warn(&h->pdev->dev,
8526 			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
8527 			h->events, event_type);
8528 		writel(h->events, &(h->cfgtable->clear_event_notify));
8529 		/* Set the "clear event notify field update" bit 6 */
8530 		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8531 		/* Wait until ctlr clears 'clear event notify field', bit 6 */
8532 		hpsa_wait_for_clear_event_notify_ack(h);
8533 		scsi_unblock_requests(h->scsi_host);
8534 	} else {
8535 		/* Acknowledge controller notification events. */
8536 		writel(h->events, &(h->cfgtable->clear_event_notify));
8537 		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
8538 		hpsa_wait_for_clear_event_notify_ack(h);
8539 #if 0
8540 		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
8541 		hpsa_wait_for_mode_change_ack(h);
8542 #endif
8543 	}
8544 	return;
8545 }
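
/*
 * Both branches above use the same two-step acknowledge handshake: write
 * the event bits into cfgtable->clear_event_notify, ring SA5_DOORBELL
 * with DOORBELL_CLEAR_EVENTS, then poll in
 * hpsa_wait_for_clear_event_notify_ack() until the controller clears the
 * doorbell bit.  The ioaccel branch additionally quiesces accelerated I/O
 * first, since the event means the ioaccel path state is stale.
 */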
8546 
8547 /* Check a register on the controller to see if there are configuration
8548  * changes (added/changed/removed logical drives, etc.) which mean that
8549  * we should rescan the controller for devices.
8550  * Also check flag for driver-initiated rescan.
8551  */
8552 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
8553 {
8554 	if (h->drv_req_rescan) {
8555 		h->drv_req_rescan = 0;
8556 		return 1;
8557 	}
8558 
8559 	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
8560 		return 0;
8561 
8562 	h->events = readl(&(h->cfgtable->event_notify));
8563 	return h->events & RESCAN_REQUIRED_EVENT_BITS;
8564 }
8565 
8566 /*
8567  * Check if any of the offline devices have become ready
8568  */
8569 static int hpsa_offline_devices_ready(struct ctlr_info *h)
8570 {
8571 	unsigned long flags;
8572 	struct offline_device_entry *d;
8573 	struct list_head *this, *tmp;
8574 
8575 	spin_lock_irqsave(&h->offline_device_lock, flags);
8576 	list_for_each_safe(this, tmp, &h->offline_device_list) {
8577 		d = list_entry(this, struct offline_device_entry,
8578 				offline_list);
8579 		spin_unlock_irqrestore(&h->offline_device_lock, flags);
8580 		if (!hpsa_volume_offline(h, d->scsi3addr)) {
8581 			spin_lock_irqsave(&h->offline_device_lock, flags);
8582 			list_del(&d->offline_list);
8583 			spin_unlock_irqrestore(&h->offline_device_lock, flags);
8584 			return 1;
8585 		}
8586 		spin_lock_irqsave(&h->offline_device_lock, flags);
8587 	}
8588 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
8589 	return 0;
8590 }
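
/*
 * hpsa_volume_offline() sends commands to the device and may sleep, which
 * is why the loop above drops offline_device_lock (a spinlock) around the
 * call and retakes it before touching the list again.
 */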
8591 
8592 static int hpsa_luns_changed(struct ctlr_info *h)
8593 {
8594 	int rc = 1; /* assume there are changes */
8595 	struct ReportLUNdata *logdev = NULL;
8596 
8597 	/* if we can't find out if lun data has changed,
8598 	 * assume that it has.
8599 	 */
8600 
8601 	if (!h->lastlogicals)
8602 		return rc;
8603 
8604 	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
8605 	if (!logdev)
8606 		return rc;
8607 
8608 	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
8609 		dev_warn(&h->pdev->dev,
8610 			"report luns failed, can't track lun changes.\n");
8611 		goto out;
8612 	}
8613 	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
8614 		dev_info(&h->pdev->dev,
8615 			"Lun changes detected.\n");
8616 		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
8617 		goto out;
8618 	} else
8619 		rc = 0; /* no changes detected. */
8620 out:
8621 	kfree(logdev);
8622 	return rc;
8623 }
8624 
8625 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8626 {
8627 	unsigned long flags;
8628 	struct ctlr_info *h = container_of(to_delayed_work(work),
8629 					struct ctlr_info, rescan_ctlr_work);
8630 
8632 	if (h->remove_in_progress)
8633 		return;
8634 
8635 	/*
8636 	 * Do the scan after the reset
8637 	 */
8638 	if (h->reset_in_progress) {
8639 		h->drv_req_rescan = 1;
8640 		return;
8641 	}
8642 
8643 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8644 		scsi_host_get(h->scsi_host);
8645 		hpsa_ack_ctlr_events(h);
8646 		hpsa_scan_start(h->scsi_host);
8647 		scsi_host_put(h->scsi_host);
8648 	} else if (h->discovery_polling) {
8649 		hpsa_disable_rld_caching(h);
8650 		if (hpsa_luns_changed(h)) {
8651 			struct Scsi_Host *sh = NULL;
8652 
8653 			dev_info(&h->pdev->dev,
8654 				"driver discovery polling rescan.\n");
8655 			sh = scsi_host_get(h->scsi_host);
8656 			if (sh != NULL) {
8657 				hpsa_scan_start(sh);
8658 				scsi_host_put(sh);
8659 			}
8660 		}
8661 	}
8662 	spin_lock_irqsave(&h->lock, flags);
8663 	if (!h->remove_in_progress)
8664 		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8665 				h->heartbeat_sample_interval);
8666 	spin_unlock_irqrestore(&h->lock, flags);
8667 }
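
/*
 * This worker and hpsa_monitor_ctlr_worker below are self-rearming: each
 * pass requeues itself one heartbeat_sample_interval later unless a
 * remove is in progress.  Taking h->lock around the requeue closes the
 * race with hpsa_remove_one() setting remove_in_progress before it
 * cancels the work.
 */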
8668 
8669 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8670 {
8671 	unsigned long flags;
8672 	struct ctlr_info *h = container_of(to_delayed_work(work),
8673 					struct ctlr_info, monitor_ctlr_work);
8674 
8675 	detect_controller_lockup(h);
8676 	if (lockup_detected(h))
8677 		return;
8678 
8679 	spin_lock_irqsave(&h->lock, flags);
8680 	if (!h->remove_in_progress)
8681 		schedule_delayed_work(&h->monitor_ctlr_work,
8682 				h->heartbeat_sample_interval);
8683 	spin_unlock_irqrestore(&h->lock, flags);
8684 }
8685 
8686 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8687 						char *name)
8688 {
8689 	struct workqueue_struct *wq = NULL;
8690 
8691 	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8692 	if (!wq)
8693 		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8694 
8695 	return wq;
8696 }
8697 
8698 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8699 {
8700 	int dac, rc;
8701 	struct ctlr_info *h;
8702 	int try_soft_reset = 0;
8703 	unsigned long flags;
8704 	u32 board_id;
8705 
8706 	if (number_of_controllers == 0)
8707 		printk(KERN_INFO DRIVER_NAME "\n");
8708 
8709 	rc = hpsa_lookup_board_id(pdev, &board_id);
8710 	if (rc < 0) {
8711 		dev_warn(&pdev->dev, "Board ID not found\n");
8712 		return rc;
8713 	}
8714 
8715 	rc = hpsa_init_reset_devices(pdev, board_id);
8716 	if (rc) {
8717 		if (rc != -ENOTSUPP)
8718 			return rc;
8719 		/* If the reset fails in a particular way (it has no way to do
8720 		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8721 		 * a soft reset once we get the controller configured up to the
8722 		 * point that it can accept a command.
8723 		 */
8724 		try_soft_reset = 1;
8725 		rc = 0;
8726 	}
8727 
8728 reinit_after_soft_reset:
8729 
8730 	/* Command structures must be aligned on a 32-byte boundary because
8731 	 * the 5 lower bits of the address are used by the hardware and by
8732 	 * the driver.  See comments in hpsa.h for more info.
8733 	 */
8734 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8735 	h = kzalloc(sizeof(*h), GFP_KERNEL);
8736 	if (!h) {
8737 		dev_err(&pdev->dev, "Failed to allocate controller head\n");
8738 		return -ENOMEM;
8739 	}
8740 
8741 	h->pdev = pdev;
8742 
8743 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8744 	INIT_LIST_HEAD(&h->offline_device_list);
8745 	spin_lock_init(&h->lock);
8746 	spin_lock_init(&h->offline_device_lock);
8747 	spin_lock_init(&h->scan_lock);
8748 	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8749 	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8750 
8751 	/* Allocate and clear per-cpu variable lockup_detected */
8752 	h->lockup_detected = alloc_percpu(u32);
8753 	if (!h->lockup_detected) {
8754 		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8755 		rc = -ENOMEM;
8756 		goto clean1;	/* aer/h */
8757 	}
8758 	set_lockup_detected_for_all_cpus(h, 0);
8759 
8760 	rc = hpsa_pci_init(h);
8761 	if (rc)
8762 		goto clean2;	/* lu, aer/h */
8763 
8764 	/* relies on h-> settings made by hpsa_pci_init, including
8765 	 * interrupt_mode and h->intr */
8766 	rc = hpsa_scsi_host_alloc(h);
8767 	if (rc)
8768 		goto clean2_5;	/* pci, lu, aer/h */
8769 
8770 	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8771 	h->ctlr = number_of_controllers;
8772 	number_of_controllers++;
8773 
8774 	/* configure PCI DMA stuff */
8775 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8776 	if (rc == 0) {
8777 		dac = 1;
8778 	} else {
8779 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8780 		if (rc == 0) {
8781 			dac = 0;
8782 		} else {
8783 			dev_err(&pdev->dev, "no suitable DMA available\n");
8784 			goto clean3;	/* shost, pci, lu, aer/h */
8785 		}
8786 	}
8787 
8788 	/* make sure the board interrupts are off */
8789 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8790 
8791 	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8792 	if (rc)
8793 		goto clean3;	/* shost, pci, lu, aer/h */
8794 	rc = hpsa_alloc_cmd_pool(h);
8795 	if (rc)
8796 		goto clean4;	/* irq, shost, pci, lu, aer/h */
8797 	rc = hpsa_alloc_sg_chain_blocks(h);
8798 	if (rc)
8799 		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
8800 	init_waitqueue_head(&h->scan_wait_queue);
8801 	init_waitqueue_head(&h->abort_cmd_wait_queue);
8802 	init_waitqueue_head(&h->event_sync_wait_queue);
8803 	mutex_init(&h->reset_mutex);
8804 	h->scan_finished = 1; /* no scan currently in progress */
8805 	h->scan_waiting = 0;
8806 
8807 	pci_set_drvdata(pdev, h);
8808 	h->ndevices = 0;
8809 
8810 	spin_lock_init(&h->devlock);
8811 	rc = hpsa_put_ctlr_into_performant_mode(h);
8812 	if (rc)
8813 		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8814 
8815 	/* create the rescan and resubmit workqueues */
8816 	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8817 	if (!h->rescan_ctlr_wq) {
8818 		rc = -ENOMEM;
8819 		goto clean7;
8820 	}
8821 
8822 	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8823 	if (!h->resubmit_wq) {
8824 		rc = -ENOMEM;
8825 		goto clean7;	/* aer/h */
8826 	}
8827 
8828 	/*
8829 	 * At this point, the controller is ready to take commands.
8830 	 * Now, if reset_devices and the hard reset didn't work, try
8831 	 * the soft reset and see if that works.
8832 	 */
8833 	if (try_soft_reset) {
8834 
8835 		/* This is kind of gross.  We may or may not get a completion
8836 		 * from the soft reset command, and if we do, then the value
8837 		 * from the fifo may or may not be valid.  So, we wait 10 secs
8838 		 * after the reset throwing away any completions we get during
8839 		 * that time.  Unregister the interrupt handler and register
8840 		 * fake ones to scoop up any residual completions.
8841 		 */
8842 		spin_lock_irqsave(&h->lock, flags);
8843 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
8844 		spin_unlock_irqrestore(&h->lock, flags);
8845 		hpsa_free_irqs(h);
8846 		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8847 					hpsa_intx_discard_completions);
8848 		if (rc) {
8849 			dev_warn(&h->pdev->dev,
8850 				"Failed to request_irq after soft reset.\n");
8851 			/*
8852 			 * cannot goto clean7 or free_irqs will be called
8853 			 * again. Instead, do its work
8854 			 */
8855 			hpsa_free_performant_mode(h);	/* clean7 */
8856 			hpsa_free_sg_chain_blocks(h);	/* clean6 */
8857 			hpsa_free_cmd_pool(h);		/* clean5 */
8858 			/*
8859 			 * skip hpsa_free_irqs(h) clean4 since that
8860 			 * was just called before request_irqs failed
8861 			 */
8862 			goto clean3;
8863 		}
8864 
8865 		rc = hpsa_kdump_soft_reset(h);
8866 		if (rc)
8867 			/* Neither hard nor soft reset worked, we're hosed. */
8868 			goto clean7;
8869 
8870 		dev_info(&h->pdev->dev, "Board READY.\n");
8871 		dev_info(&h->pdev->dev,
8872 			"Waiting for stale completions to drain.\n");
8873 		h->access.set_intr_mask(h, HPSA_INTR_ON);
8874 		msleep(10000);
8875 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
8876 
8877 		rc = controller_reset_failed(h->cfgtable);
8878 		if (rc)
8879 			dev_info(&h->pdev->dev,
8880 				"Soft reset appears to have failed.\n");
8881 
8882 		/* since the controller's reset, we have to go back and re-init
8883 		 * everything.  Easiest to just forget what we've done and do it
8884 		 * all over again.
8885 		 */
8886 		hpsa_undo_allocations_after_kdump_soft_reset(h);
8887 		try_soft_reset = 0;
8888 		if (rc)
8889 			/* don't goto clean, we already unallocated */
8890 			return -ENODEV;
8891 
8892 		goto reinit_after_soft_reset;
8893 	}
8894 
8895 	/* Enable Accelerated IO path at driver layer */
8896 	h->acciopath_status = 1;
8897 	/* Disable discovery polling. */
8898 	h->discovery_polling = 0;
8899 
8901 	/* Turn the interrupts on so we can service requests */
8902 	h->access.set_intr_mask(h, HPSA_INTR_ON);
8903 
8904 	hpsa_hba_inquiry(h);
8905 
8906 	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
8907 	if (!h->lastlogicals)
8908 		dev_info(&h->pdev->dev,
8909 			"Can't track change to report lun data\n");
8910 
8911 	/* hook into SCSI subsystem */
8912 	rc = hpsa_scsi_add_host(h);
8913 	if (rc)
8914 		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8915 
8916 	/* Monitor the controller for firmware lockups */
8917 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8918 	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8919 	schedule_delayed_work(&h->monitor_ctlr_work,
8920 				h->heartbeat_sample_interval);
8921 	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8922 	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8923 				h->heartbeat_sample_interval);
8924 	return 0;
8925 
8926 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8927 	hpsa_free_performant_mode(h);
8928 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
8929 clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
8930 	hpsa_free_sg_chain_blocks(h);
8931 clean5: /* cmd, irq, shost, pci, lu, aer/h */
8932 	hpsa_free_cmd_pool(h);
8933 clean4: /* irq, shost, pci, lu, aer/h */
8934 	hpsa_free_irqs(h);
8935 clean3: /* shost, pci, lu, aer/h */
8936 	scsi_host_put(h->scsi_host);
8937 	h->scsi_host = NULL;
8938 clean2_5: /* pci, lu, aer/h */
8939 	hpsa_free_pci_init(h);
8940 clean2: /* lu, aer/h */
8941 	if (h->lockup_detected) {
8942 		free_percpu(h->lockup_detected);
8943 		h->lockup_detected = NULL;
8944 	}
8945 clean1:	/* wq/aer/h */
8946 	if (h->resubmit_wq) {
8947 		destroy_workqueue(h->resubmit_wq);
8948 		h->resubmit_wq = NULL;
8949 	}
8950 	if (h->rescan_ctlr_wq) {
8951 		destroy_workqueue(h->rescan_ctlr_wq);
8952 		h->rescan_ctlr_wq = NULL;
8953 	}
8954 	kfree(h);
8955 	return rc;
8956 }
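
/*
 * The clean<N> labels above unwind allocations in reverse order of
 * acquisition, and the comment trailing each label lists what is still
 * held at that point.  hpsa_remove_one() below releases the same
 * resources, keyed back to this function by its "init_one <N>" comments.
 */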
8957 
8958 static void hpsa_flush_cache(struct ctlr_info *h)
8959 {
8960 	char *flush_buf;
8961 	struct CommandList *c;
8962 	int rc;
8963 
8964 	if (unlikely(lockup_detected(h)))
8965 		return;
8966 	flush_buf = kzalloc(4, GFP_KERNEL);
8967 	if (!flush_buf)
8968 		return;
8969 
8970 	c = cmd_alloc(h);
8971 
8972 	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8973 		RAID_CTLR_LUNID, TYPE_CMD)) {
8974 		goto out;
8975 	}
8976 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8977 					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
8978 	if (rc)
8979 		goto out;
8980 	if (c->err_info->CommandStatus != 0)
8981 out:
8982 		dev_warn(&h->pdev->dev,
8983 			"error flushing cache on controller\n");
8984 	cmd_free(h, c);
8985 	kfree(flush_buf);
8986 }
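
/*
 * Note the unusual placement of the "out:" label above: it sits on the
 * dev_warn() inside the if-statement, so a fill_cmd() failure, a submit
 * failure (both via goto), and a non-zero CommandStatus all funnel
 * through the same warning before the command and buffer are released.
 */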
8987 
8988 /* Make controller gather fresh report lun data each time we
8989  * send down a report luns request
8990  */
8991 static void hpsa_disable_rld_caching(struct ctlr_info *h)
8992 {
8993 	u32 *options;
8994 	struct CommandList *c;
8995 	int rc;
8996 
8997 	/* Don't bother trying to set diag options if locked up */
8998 	if (unlikely(lockup_detected(h)))
8999 		return;
9000 
9001 	options = kzalloc(sizeof(*options), GFP_KERNEL);
9002 	if (!options)
9003 		return;
9004 
9005 	c = cmd_alloc(h);
9006 
9007 	/* first, get the current diag options settings */
9008 	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9009 		RAID_CTLR_LUNID, TYPE_CMD))
9010 		goto errout;
9011 
9012 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
9013 		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
9014 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
9015 		goto errout;
9016 
9017 	/* Now, set the bit for disabling the RLD caching */
9018 	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
9019 
9020 	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
9021 		RAID_CTLR_LUNID, TYPE_CMD))
9022 		goto errout;
9023 
9024 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
9025 		PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
9026 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
9027 		goto errout;
9028 
9029 	/* Now verify that it got set: */
9030 	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
9031 		RAID_CTLR_LUNID, TYPE_CMD))
9032 		goto errout;
9033 
9034 	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
9035 		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
9036 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
9037 		goto errout;
9038 
9039 	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
9040 		goto out;
9041 
9042 errout:
9043 	dev_err(&h->pdev->dev,
9044 			"Error: failed to disable report lun data caching.\n");
9045 out:
9046 	cmd_free(h, c);
9047 	kfree(options);
9048 }
9049 
9050 static void hpsa_shutdown(struct pci_dev *pdev)
9051 {
9052 	struct ctlr_info *h;
9053 
9054 	h = pci_get_drvdata(pdev);
9055 	/* Flush the write cache and turn board interrupts off.  The flush
9056 	 * commits all data in the battery-backed cache to disk before
9057 	 * power is removed.
9058 	 */
9059 	hpsa_flush_cache(h);
9060 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
9061 	hpsa_free_irqs(h);			/* init_one 4 */
9062 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
9063 }
9064 
9065 static void hpsa_free_device_info(struct ctlr_info *h)
9066 {
9067 	int i;
9068 
9069 	for (i = 0; i < h->ndevices; i++) {
9070 		kfree(h->dev[i]);
9071 		h->dev[i] = NULL;
9072 	}
9073 }
9074 
9075 static void hpsa_remove_one(struct pci_dev *pdev)
9076 {
9077 	struct ctlr_info *h;
9078 	unsigned long flags;
9079 
9080 	if (pci_get_drvdata(pdev) == NULL) {
9081 		dev_err(&pdev->dev, "unable to remove device\n");
9082 		return;
9083 	}
9084 	h = pci_get_drvdata(pdev);
9085 
9086 	/* Get rid of any controller monitoring work items */
9087 	spin_lock_irqsave(&h->lock, flags);
9088 	h->remove_in_progress = 1;
9089 	spin_unlock_irqrestore(&h->lock, flags);
9090 	cancel_delayed_work_sync(&h->monitor_ctlr_work);
9091 	cancel_delayed_work_sync(&h->rescan_ctlr_work);
9092 	destroy_workqueue(h->rescan_ctlr_wq);
9093 	destroy_workqueue(h->resubmit_wq);
9094 
9095 	/*
9096 	 * Call before disabling interrupts.
9097 	 * scsi_remove_host can trigger I/O operations especially
9098 	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
9099 	 * operations which cannot complete and will hang the system.
9100 	 */
9101 	if (h->scsi_host)
9102 		scsi_remove_host(h->scsi_host);		/* init_one 8 */
9103 	/* includes hpsa_free_irqs - init_one 4 */
9104 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
9105 	hpsa_shutdown(pdev);
9106 
9107 	hpsa_free_device_info(h);		/* scan */
9108 
9109 	kfree(h->hba_inquiry_data);			/* init_one 10 */
9110 	h->hba_inquiry_data = NULL;			/* init_one 10 */
9111 	hpsa_free_ioaccel2_sg_chain_blocks(h);
9112 	hpsa_free_performant_mode(h);			/* init_one 7 */
9113 	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
9114 	hpsa_free_cmd_pool(h);				/* init_one 5 */
9115 	kfree(h->lastlogicals);
9116 
9117 	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
9118 
9119 	scsi_host_put(h->scsi_host);			/* init_one 3 */
9120 	h->scsi_host = NULL;				/* init_one 3 */
9121 
9122 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
9123 	hpsa_free_pci_init(h);				/* init_one 2.5 */
9124 
9125 	free_percpu(h->lockup_detected);		/* init_one 2 */
9126 	h->lockup_detected = NULL;			/* init_one 2 */
9127 	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
9128 
9129 	hpsa_delete_sas_host(h);
9130 
9131 	kfree(h);					/* init_one 1 */
9132 }
9133 
9134 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
9135 	__attribute__((unused)) pm_message_t state)
9136 {
9137 	return -ENOSYS;
9138 }
9139 
9140 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
9141 {
9142 	return -ENOSYS;
9143 }
9144 
9145 static struct pci_driver hpsa_pci_driver = {
9146 	.name = HPSA,
9147 	.probe = hpsa_init_one,
9148 	.remove = hpsa_remove_one,
9149 	.id_table = hpsa_pci_device_id,
9150 	.shutdown = hpsa_shutdown,
9151 	.suspend = hpsa_suspend,
9152 	.resume = hpsa_resume,
9153 };
9154 
9155 /* Fill in bucket_map[], given nsgs (the max number of
9156  * scatter gather elements supported) and bucket[],
9157  * which is an array of 8 integers.  The bucket[] array
9158  * contains 8 different DMA transfer sizes (in 16
9159  * byte increments) which the controller uses to fetch
9160  * commands.  This function fills in bucket_map[], which
9161  * maps a given number of scatter gather elements to one of
9162  * the 8 DMA transfer sizes.  The point of it is to allow the
9163  * controller to only do as much DMA as needed to fetch the
9164  * command, with the DMA transfer size encoded in the lower
9165  * bits of the command address.
9166  */
9167 static void  calc_bucket_map(int bucket[], int num_buckets,
9168 	int nsgs, int min_blocks, u32 *bucket_map)
9169 {
9170 	int i, j, b, size;
9171 
9172 	/* Note, bucket_map must have nsgs+1 entries. */
9173 	for (i = 0; i <= nsgs; i++) {
9174 		/* Compute size of a command with i SG entries */
9175 		size = i + min_blocks;
9176 		b = num_buckets; /* Assume the biggest bucket */
9177 		/* Find the bucket that is just big enough */
9178 		for (j = 0; j < num_buckets; j++) {
9179 			if (bucket[j] >= size) {
9180 				b = j;
9181 				break;
9182 			}
9183 		}
9184 		/* for a command with i SG entries, use bucket b. */
9185 		bucket_map[i] = b;
9186 	}
9187 }
9188 
9189 /*
9190  * return -ENODEV on err, 0 on success (or no action)
9191  * allocates numerous items that must be freed later
9192  */
9193 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
9194 {
9195 	int i;
9196 	unsigned long register_value;
9197 	unsigned long transMethod = CFGTBL_Trans_Performant |
9198 			(trans_support & CFGTBL_Trans_use_short_tags) |
9199 				CFGTBL_Trans_enable_directed_msix |
9200 			(trans_support & (CFGTBL_Trans_io_accel1 |
9201 				CFGTBL_Trans_io_accel2));
9202 	struct access_method access = SA5_performant_access;
9203 
9204 	/* This is a bit complicated.  There are 8 registers on
9205 	 * the controller which we write to in order to tell it 8 different
9206 	 * sizes of commands which there may be.  It's a way of
9207 	 * reducing the DMA done to fetch each command.  Encoded into
9208 	 * each command's tag are 3 bits which communicate to the controller
9209 	 * which of the eight sizes that command fits within.  The size of
9210 	 * each command depends on how many scatter gather entries there are.
9211 	 * Each SG entry requires 16 bytes.  The eight registers are programmed
9212 	 * with the number of 16-byte blocks a command of that size requires.
9213 	 * The smallest command possible requires 5 such 16 byte blocks.
9214 	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
9215 	 * blocks.  Note, this only extends to the SG entries contained
9216 	 * within the command block, and does not extend to chained blocks
9217 	 * of SG elements.   bft[] contains the eight values we write to
9218 	 * the registers.  They are not evenly distributed, but have more
9219 	 * sizes for small commands, and fewer sizes for larger commands.
9220 	 */
9221 	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
9222 #define MIN_IOACCEL2_BFT_ENTRY 5
9223 #define HPSA_IOACCEL2_HEADER_SZ 4
9224 	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
9225 			13, 14, 15, 16, 17, 18, 19,
9226 			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
9227 	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
9228 	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
9229 	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
9230 				 16 * MIN_IOACCEL2_BFT_ENTRY);
9231 	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
9232 	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
9233 	/*  5 = 1 s/g entry or 4k
9234 	 *  6 = 2 s/g entry or 8k
9235 	 *  8 = 4 s/g entry or 16k
9236 	 * 10 = 6 s/g entry or 24k
9237 	 */
9238 
9239 	/* If the controller supports either ioaccel method then
9240 	 * we can also use the RAID stack submit path that does not
9241 	 * perform the superfluous readl() after each command submission.
9242 	 */
9243 	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
9244 		access = SA5_performant_access_no_read;
9245 
9246 	/* Controller spec: zero out this buffer. */
9247 	for (i = 0; i < h->nreply_queues; i++)
9248 		memset(h->reply_queue[i].head, 0, h->reply_queue_size);
9249 
9250 	bft[7] = SG_ENTRIES_IN_CMD + 4;
9251 	calc_bucket_map(bft, ARRAY_SIZE(bft),
9252 				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
9253 	for (i = 0; i < 8; i++)
9254 		writel(bft[i], &h->transtable->BlockFetch[i]);
9255 
9256 	/* size of controller ring buffer */
9257 	writel(h->max_commands, &h->transtable->RepQSize);
9258 	writel(h->nreply_queues, &h->transtable->RepQCount);
9259 	writel(0, &h->transtable->RepQCtrAddrLow32);
9260 	writel(0, &h->transtable->RepQCtrAddrHigh32);
9261 
9262 	for (i = 0; i < h->nreply_queues; i++) {
9263 		writel(0, &h->transtable->RepQAddr[i].upper);
9264 		writel(h->reply_queue[i].busaddr,
9265 			&h->transtable->RepQAddr[i].lower);
9266 	}
9267 
9268 	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
9269 	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
9270 	/*
9271 	 * Enable outbound interrupt coalescing in accelerator mode.
9272 	 */
9273 	if (trans_support & CFGTBL_Trans_io_accel1) {
9274 		access = SA5_ioaccel_mode1_access;
9275 		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
9276 		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
9277 	} else
9278 		if (trans_support & CFGTBL_Trans_io_accel2)
9279 			access = SA5_ioaccel_mode2_access;
9280 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9281 	if (hpsa_wait_for_mode_change_ack(h)) {
9282 		dev_err(&h->pdev->dev,
9283 			"performant mode problem - doorbell timeout\n");
9284 		return -ENODEV;
9285 	}
9286 	register_value = readl(&(h->cfgtable->TransportActive));
9287 	if (!(register_value & CFGTBL_Trans_Performant)) {
9288 		dev_err(&h->pdev->dev,
9289 			"performant mode problem - transport not active\n");
9290 		return -ENODEV;
9291 	}
9292 	/* Change the access methods to the performant access methods */
9293 	h->access = access;
9294 	h->transMethod = transMethod;
9295 
9296 	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
9297 		(trans_support & CFGTBL_Trans_io_accel2)))
9298 		return 0;
9299 
9300 	if (trans_support & CFGTBL_Trans_io_accel1) {
9301 		/* Set up I/O accelerator mode */
9302 		for (i = 0; i < h->nreply_queues; i++) {
9303 			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
9304 			h->reply_queue[i].current_entry =
9305 				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
9306 		}
9307 		bft[7] = h->ioaccel_maxsg + 8;
9308 		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
9309 				h->ioaccel1_blockFetchTable);
9310 
9311 		/* initialize all reply queue entries to unused */
9312 		for (i = 0; i < h->nreply_queues; i++)
9313 			memset(h->reply_queue[i].head,
9314 				(u8) IOACCEL_MODE1_REPLY_UNUSED,
9315 				h->reply_queue_size);
9316 
9317 		/* set all the constant fields in the accelerator command
9318 		 * frames once at init time to save CPU cycles later.
9319 		 */
9320 		for (i = 0; i < h->nr_cmds; i++) {
9321 			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
9322 
9323 			cp->function = IOACCEL1_FUNCTION_SCSIIO;
9324 			cp->err_info = (u32) (h->errinfo_pool_dhandle +
9325 					(i * sizeof(struct ErrorInfo)));
9326 			cp->err_info_len = sizeof(struct ErrorInfo);
9327 			cp->sgl_offset = IOACCEL1_SGLOFFSET;
9328 			cp->host_context_flags =
9329 				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
9330 			cp->timeout_sec = 0;
9331 			cp->ReplyQueue = 0;
9332 			cp->tag =
9333 				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
9334 			cp->host_addr =
9335 				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
9336 					(i * sizeof(struct io_accel1_cmd)));
9337 		}
9338 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
9339 		u64 cfg_offset, cfg_base_addr_index;
9340 		u32 bft2_offset, cfg_base_addr;
9341 		int rc;
9342 
9343 		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
9344 			&cfg_base_addr_index, &cfg_offset);
		if (rc)
			return rc;
9345 		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
9346 		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
9347 		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
9348 				4, h->ioaccel2_blockFetchTable);
9349 		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
9350 		BUILD_BUG_ON(offsetof(struct CfgTable,
9351 				io_accel_request_size_offset) != 0xb8);
9352 		h->ioaccel2_bft2_regs =
9353 			remap_pci_mem(pci_resource_start(h->pdev,
9354 					cfg_base_addr_index) +
9355 					cfg_offset + bft2_offset,
9356 					ARRAY_SIZE(bft2) *
9357 					sizeof(*h->ioaccel2_bft2_regs));
		if (!h->ioaccel2_bft2_regs)
			return -ENOMEM;
9358 		for (i = 0; i < ARRAY_SIZE(bft2); i++)
9359 			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
9360 	}
9361 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
9362 	if (hpsa_wait_for_mode_change_ack(h)) {
9363 		dev_err(&h->pdev->dev,
9364 			"performant mode problem - enabling ioaccel mode\n");
9365 		return -ENODEV;
9366 	}
9367 	return 0;
9368 }
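
/*
 * Both mode switches in the function above follow the same config-table
 * handshake: stage the request (HostWrite.TransportRequest or the BFT
 * registers), ring SA5_DOORBELL with CFGTBL_ChangeReq, wait for the
 * controller to acknowledge via hpsa_wait_for_mode_change_ack(), and only
 * then trust readbacks such as TransportActive.
 */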
9369 
9370 /* Free ioaccel1 mode command blocks and block fetch table */
9371 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9372 {
9373 	if (h->ioaccel_cmd_pool) {
9374 		pci_free_consistent(h->pdev,
9375 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9376 			h->ioaccel_cmd_pool,
9377 			h->ioaccel_cmd_pool_dhandle);
9378 		h->ioaccel_cmd_pool = NULL;
9379 		h->ioaccel_cmd_pool_dhandle = 0;
9380 	}
9381 	kfree(h->ioaccel1_blockFetchTable);
9382 	h->ioaccel1_blockFetchTable = NULL;
9383 }
9384 
9385 /* Allocate ioaccel1 mode command blocks and block fetch table */
9386 static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
9387 {
9388 	h->ioaccel_maxsg =
9389 		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9390 	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
9391 		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
9392 
9393 	/* Command structures must be aligned on a 128-byte boundary
9394 	 * because the 7 lower bits of the address are used by the
9395 	 * hardware.
9396 	 */
9397 	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
9398 			IOACCEL1_COMMANDLIST_ALIGNMENT);
9399 	h->ioaccel_cmd_pool =
9400 		pci_alloc_consistent(h->pdev,
9401 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
9402 			&(h->ioaccel_cmd_pool_dhandle));
9403 
9404 	h->ioaccel1_blockFetchTable =
9405 		kmalloc(((h->ioaccel_maxsg + 1) *
9406 				sizeof(u32)), GFP_KERNEL);
9407 
9408 	if ((h->ioaccel_cmd_pool == NULL) ||
9409 		(h->ioaccel1_blockFetchTable == NULL))
9410 		goto clean_up;
9411 
9412 	memset(h->ioaccel_cmd_pool, 0,
9413 		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
9414 	return 0;
9415 
9416 clean_up:
9417 	hpsa_free_ioaccel1_cmd_and_bft(h);
9418 	return -ENOMEM;
9419 }
9420 
9421 /* Free ioaccel2 mode command blocks and block fetch table */
9422 static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9423 {
9424 	hpsa_free_ioaccel2_sg_chain_blocks(h);
9425 
9426 	if (h->ioaccel2_cmd_pool) {
9427 		pci_free_consistent(h->pdev,
9428 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9429 			h->ioaccel2_cmd_pool,
9430 			h->ioaccel2_cmd_pool_dhandle);
9431 		h->ioaccel2_cmd_pool = NULL;
9432 		h->ioaccel2_cmd_pool_dhandle = 0;
9433 	}
9434 	kfree(h->ioaccel2_blockFetchTable);
9435 	h->ioaccel2_blockFetchTable = NULL;
9436 }
9437 
9438 /* Allocate ioaccel2 mode command blocks and block fetch table */
9439 static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
9440 {
9441 	int rc;
9442 
9443 	/* Allocate ioaccel2 mode command blocks and block fetch table */
9444 
9445 	h->ioaccel_maxsg =
9446 		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
9447 	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
9448 		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
9449 
9450 	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
9451 			IOACCEL2_COMMANDLIST_ALIGNMENT);
9452 	h->ioaccel2_cmd_pool =
9453 		pci_alloc_consistent(h->pdev,
9454 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
9455 			&(h->ioaccel2_cmd_pool_dhandle));
9456 
9457 	h->ioaccel2_blockFetchTable =
9458 		kmalloc(((h->ioaccel_maxsg + 1) *
9459 				sizeof(u32)), GFP_KERNEL);
9460 
9461 	if ((h->ioaccel2_cmd_pool == NULL) ||
9462 		(h->ioaccel2_blockFetchTable == NULL)) {
9463 		rc = -ENOMEM;
9464 		goto clean_up;
9465 	}
9466 
9467 	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
9468 	if (rc)
9469 		goto clean_up;
9470 
9471 	memset(h->ioaccel2_cmd_pool, 0,
9472 		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
9473 	return 0;
9474 
9475 clean_up:
9476 	hpsa_free_ioaccel2_cmd_and_bft(h);
9477 	return rc;
9478 }
9479 
9480 /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
9481 static void hpsa_free_performant_mode(struct ctlr_info *h)
9482 {
9483 	kfree(h->blockFetchTable);
9484 	h->blockFetchTable = NULL;
9485 	hpsa_free_reply_queues(h);
9486 	hpsa_free_ioaccel1_cmd_and_bft(h);
9487 	hpsa_free_ioaccel2_cmd_and_bft(h);
9488 }
9489 
9490 /* return -ENODEV on error, 0 on success (or no action)
9491  * allocates numerous items that must be freed later
9492  */
9493 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
9494 {
9495 	u32 trans_support;
9496 	unsigned long transMethod = CFGTBL_Trans_Performant |
9497 					CFGTBL_Trans_use_short_tags;
9498 	int i, rc;
9499 
9500 	if (hpsa_simple_mode)
9501 		return 0;
9502 
9503 	trans_support = readl(&(h->cfgtable->TransportSupport));
9504 	if (!(trans_support & PERFORMANT_MODE))
9505 		return 0;
9506 
9507 	/* Check for I/O accelerator mode support */
9508 	if (trans_support & CFGTBL_Trans_io_accel1) {
9509 		transMethod |= CFGTBL_Trans_io_accel1 |
9510 				CFGTBL_Trans_enable_directed_msix;
9511 		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
9512 		if (rc)
9513 			return rc;
9514 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
9515 		transMethod |= CFGTBL_Trans_io_accel2 |
9516 				CFGTBL_Trans_enable_directed_msix;
9517 		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
9518 		if (rc)
9519 			return rc;
9520 	}
9521 
9522 	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
9523 	hpsa_get_max_perf_mode_cmds(h);
9524 	/* Performant mode ring buffer and supporting data structures */
9525 	h->reply_queue_size = h->max_commands * sizeof(u64);
9526 
9527 	for (i = 0; i < h->nreply_queues; i++) {
9528 		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
9529 						h->reply_queue_size,
9530 						&(h->reply_queue[i].busaddr));
9531 		if (!h->reply_queue[i].head) {
9532 			rc = -ENOMEM;
9533 			goto clean1;	/* rq, ioaccel */
9534 		}
9535 		h->reply_queue[i].size = h->max_commands;
9536 		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
9537 		h->reply_queue[i].current_entry = 0;
9538 	}
9539 
9540 	/* Need a block fetch table for performant mode */
9541 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
9542 				sizeof(u32)), GFP_KERNEL);
9543 	if (!h->blockFetchTable) {
9544 		rc = -ENOMEM;
9545 		goto clean1;	/* rq, ioaccel */
9546 	}
9547 
9548 	rc = hpsa_enter_performant_mode(h, trans_support);
9549 	if (rc)
9550 		goto clean2;	/* bft, rq, ioaccel */
9551 	return 0;
9552 
9553 clean2:	/* bft, rq, ioaccel */
9554 	kfree(h->blockFetchTable);
9555 	h->blockFetchTable = NULL;
9556 clean1:	/* rq, ioaccel */
9557 	hpsa_free_reply_queues(h);
9558 	hpsa_free_ioaccel1_cmd_and_bft(h);
9559 	hpsa_free_ioaccel2_cmd_and_bft(h);
9560 	return rc;
9561 }
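
/*
 * Each reply queue entry is a single 64-bit completed-command tag, hence
 * reply_queue_size = max_commands * sizeof(u64) above: no ring can
 * overflow even if every outstanding command completes to the same
 * queue.
 */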
9562 
9563 static int is_accelerated_cmd(struct CommandList *c)
9564 {
9565 	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
9566 }
9567 
9568 static void hpsa_drain_accel_commands(struct ctlr_info *h)
9569 {
9570 	struct CommandList *c = NULL;
9571 	int i, accel_cmds_out;
9572 	int refcount;
9573 
9574 	do { /* wait for all outstanding ioaccel commands to drain out */
9575 		accel_cmds_out = 0;
9576 		for (i = 0; i < h->nr_cmds; i++) {
9577 			c = h->cmd_pool + i;
9578 			refcount = atomic_inc_return(&c->refcount);
9579 			if (refcount > 1) /* Command is allocated */
9580 				accel_cmds_out += is_accelerated_cmd(c);
9581 			cmd_free(h, c);
9582 		}
9583 		if (accel_cmds_out <= 0)
9584 			break;
9585 		msleep(100);
9586 	} while (1);
9587 }
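
/*
 * The refcount probe above is the trick here: atomic_inc_return() > 1
 * means another path already holds the command, i.e. it is in flight, so
 * it is counted; the matching cmd_free() merely drops the reference this
 * function took and frees nothing that was busy.
 */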
9588 
9589 static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
9590 				struct hpsa_sas_port *hpsa_sas_port)
9591 {
9592 	struct hpsa_sas_phy *hpsa_sas_phy;
9593 	struct sas_phy *phy;
9594 
9595 	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
9596 	if (!hpsa_sas_phy)
9597 		return NULL;
9598 
9599 	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
9600 		hpsa_sas_port->next_phy_index);
9601 	if (!phy) {
9602 		kfree(hpsa_sas_phy);
9603 		return NULL;
9604 	}
9605 
9606 	hpsa_sas_port->next_phy_index++;
9607 	hpsa_sas_phy->phy = phy;
9608 	hpsa_sas_phy->parent_port = hpsa_sas_port;
9609 
9610 	return hpsa_sas_phy;
9611 }
9612 
9613 static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9614 {
9615 	struct sas_phy *phy = hpsa_sas_phy->phy;
9616 
9617 	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
9618 	sas_phy_free(phy);
9619 	if (hpsa_sas_phy->added_to_port)
9620 		list_del(&hpsa_sas_phy->phy_list_entry);
9621 	kfree(hpsa_sas_phy);
9622 }
9623 
9624 static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
9625 {
9626 	int rc;
9627 	struct hpsa_sas_port *hpsa_sas_port;
9628 	struct sas_phy *phy;
9629 	struct sas_identify *identify;
9630 
9631 	hpsa_sas_port = hpsa_sas_phy->parent_port;
9632 	phy = hpsa_sas_phy->phy;
9633 
9634 	identify = &phy->identify;
9635 	memset(identify, 0, sizeof(*identify));
9636 	identify->sas_address = hpsa_sas_port->sas_address;
9637 	identify->device_type = SAS_END_DEVICE;
9638 	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9639 	identify->target_port_protocols = SAS_PROTOCOL_STP;
9640 	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9641 	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
9642 	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
9643 	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
9644 	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
9645 
9646 	rc = sas_phy_add(hpsa_sas_phy->phy);
9647 	if (rc)
9648 		return rc;
9649 
9650 	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
9651 	list_add_tail(&hpsa_sas_phy->phy_list_entry,
9652 			&hpsa_sas_port->phy_list_head);
9653 	hpsa_sas_phy->added_to_port = true;
9654 
9655 	return 0;
9656 }
9657 
9658 static int
9659 	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
9660 				struct sas_rphy *rphy)
9661 {
9662 	struct sas_identify *identify;
9663 
9664 	identify = &rphy->identify;
9665 	identify->sas_address = hpsa_sas_port->sas_address;
9666 	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
9667 	identify->target_port_protocols = SAS_PROTOCOL_STP;
9668 
9669 	return sas_rphy_add(rphy);
9670 }
9671 
9672 static struct hpsa_sas_port
9673 	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
9674 				u64 sas_address)
9675 {
9676 	int rc;
9677 	struct hpsa_sas_port *hpsa_sas_port;
9678 	struct sas_port *port;
9679 
9680 	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
9681 	if (!hpsa_sas_port)
9682 		return NULL;
9683 
9684 	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
9685 	hpsa_sas_port->parent_node = hpsa_sas_node;
9686 
9687 	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
9688 	if (!port)
9689 		goto free_hpsa_port;
9690 
9691 	rc = sas_port_add(port);
9692 	if (rc)
9693 		goto free_sas_port;
9694 
9695 	hpsa_sas_port->port = port;
9696 	hpsa_sas_port->sas_address = sas_address;
9697 	list_add_tail(&hpsa_sas_port->port_list_entry,
9698 			&hpsa_sas_node->port_list_head);
9699 
9700 	return hpsa_sas_port;
9701 
9702 free_sas_port:
9703 	sas_port_free(port);
9704 free_hpsa_port:
9705 	kfree(hpsa_sas_port);
9706 
9707 	return NULL;
9708 }
9709 
9710 static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
9711 {
9712 	struct hpsa_sas_phy *hpsa_sas_phy;
9713 	struct hpsa_sas_phy *next;
9714 
9715 	list_for_each_entry_safe(hpsa_sas_phy, next,
9716 			&hpsa_sas_port->phy_list_head, phy_list_entry)
9717 		hpsa_free_sas_phy(hpsa_sas_phy);
9718 
9719 	sas_port_delete(hpsa_sas_port->port);
9720 	list_del(&hpsa_sas_port->port_list_entry);
9721 	kfree(hpsa_sas_port);
9722 }
9723 
9724 static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
9725 {
9726 	struct hpsa_sas_node *hpsa_sas_node;
9727 
9728 	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
9729 	if (hpsa_sas_node) {
9730 		hpsa_sas_node->parent_dev = parent_dev;
9731 		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
9732 	}
9733 
9734 	return hpsa_sas_node;
9735 }
9736 
9737 static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
9738 {
9739 	struct hpsa_sas_port *hpsa_sas_port;
9740 	struct hpsa_sas_port *next;
9741 
9742 	if (!hpsa_sas_node)
9743 		return;
9744 
9745 	list_for_each_entry_safe(hpsa_sas_port, next,
9746 			&hpsa_sas_node->port_list_head, port_list_entry)
9747 		hpsa_free_sas_port(hpsa_sas_port);
9748 
9749 	kfree(hpsa_sas_node);
9750 }
9751 
9752 static struct hpsa_scsi_dev_t
9753 	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
9754 					struct sas_rphy *rphy)
9755 {
9756 	int i;
9757 	struct hpsa_scsi_dev_t *device;
9758 
9759 	for (i = 0; i < h->ndevices; i++) {
9760 		device = h->dev[i];
9761 		if (!device->sas_port)
9762 			continue;
9763 		if (device->sas_port->rphy == rphy)
9764 			return device;
9765 	}
9766 
9767 	return NULL;
9768 }
9769 
9770 static int hpsa_add_sas_host(struct ctlr_info *h)
9771 {
9772 	int rc;
9773 	struct device *parent_dev;
9774 	struct hpsa_sas_node *hpsa_sas_node;
9775 	struct hpsa_sas_port *hpsa_sas_port;
9776 	struct hpsa_sas_phy *hpsa_sas_phy;
9777 
9778 	parent_dev = &h->scsi_host->shost_gendev;
9779 
9780 	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
9781 	if (!hpsa_sas_node)
9782 		return -ENOMEM;
9783 
9784 	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
9785 	if (!hpsa_sas_port) {
9786 		rc = -ENODEV;
9787 		goto free_sas_node;
9788 	}
9789 
9790 	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
9791 	if (!hpsa_sas_phy) {
9792 		rc = -ENODEV;
9793 		goto free_sas_port;
9794 	}
9795 
9796 	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
9797 	if (rc)
9798 		goto free_sas_phy;
9799 
9800 	h->sas_host = hpsa_sas_node;
9801 
9802 	return 0;
9803 
9804 free_sas_phy:
9805 	hpsa_free_sas_phy(hpsa_sas_phy);
9806 free_sas_port:
9807 	hpsa_free_sas_port(hpsa_sas_port);
9808 free_sas_node:
9809 	hpsa_free_sas_node(hpsa_sas_node);
9810 
9811 	return rc;
9812 }
9813 
9814 static void hpsa_delete_sas_host(struct ctlr_info *h)
9815 {
9816 	hpsa_free_sas_node(h->sas_host);
9817 }
9818 
9819 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
9820 				struct hpsa_scsi_dev_t *device)
9821 {
9822 	int rc;
9823 	struct hpsa_sas_port *hpsa_sas_port;
9824 	struct sas_rphy *rphy;
9825 
9826 	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
9827 	if (!hpsa_sas_port)
9828 		return -ENOMEM;
9829 
9830 	rphy = sas_end_device_alloc(hpsa_sas_port->port);
9831 	if (!rphy) {
9832 		rc = -ENODEV;
9833 		goto free_sas_port;
9834 	}
9835 
9836 	hpsa_sas_port->rphy = rphy;
9837 	device->sas_port = hpsa_sas_port;
9838 
9839 	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
9840 	if (rc)
9841 		goto free_sas_port;
9842 
9843 	return 0;
9844 
9845 free_sas_port:
9846 	hpsa_free_sas_port(hpsa_sas_port);
9847 	device->sas_port = NULL;
9848 
9849 	return rc;
9850 }
9851 
9852 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
9853 {
9854 	if (device->sas_port) {
9855 		hpsa_free_sas_port(device->sas_port);
9856 		device->sas_port = NULL;
9857 	}
9858 }
9859 
9860 static int
9861 hpsa_sas_get_linkerrors(struct sas_phy *phy)
9862 {
9863 	return 0;
9864 }
9865 
9866 static int
9867 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
9868 {
9869 	*identifier = 0;
9870 	return 0;
9871 }
9872 
9873 static int
9874 hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
9875 {
9876 	return -ENXIO;
9877 }
9878 
9879 static int
9880 hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
9881 {
9882 	return 0;
9883 }
9884 
9885 static int
9886 hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
9887 {
9888 	return 0;
9889 }
9890 
9891 static int
9892 hpsa_sas_phy_setup(struct sas_phy *phy)
9893 {
9894 	return 0;
9895 }
9896 
9897 static void
9898 hpsa_sas_phy_release(struct sas_phy *phy)
9899 {
9900 }
9901 
9902 static int
9903 hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
9904 {
9905 	return -EINVAL;
9906 }
9907 
9908 /* SMP = Serial Management Protocol */
9909 static int
9910 hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
9911 			struct request *req)
9912 {
9913 	return -EINVAL;
9914 }
9915 
9916 static struct sas_function_template hpsa_sas_transport_functions = {
9917 	.get_linkerrors = hpsa_sas_get_linkerrors,
9918 	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
9919 	.get_bay_identifier = hpsa_sas_get_bay_identifier,
9920 	.phy_reset = hpsa_sas_phy_reset,
9921 	.phy_enable = hpsa_sas_phy_enable,
9922 	.phy_setup = hpsa_sas_phy_setup,
9923 	.phy_release = hpsa_sas_phy_release,
9924 	.set_phy_speed = hpsa_sas_phy_speed,
9925 	.smp_handler = hpsa_sas_smp_handler,
9926 };
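
/*
 * Most of the callbacks above are stubs.  The controller hides the
 * physical phys behind its firmware, so the driver fabricates a minimal
 * SAS topology (node -> port -> phy/rphy, built by the helpers above),
 * just enough for the SAS transport class to publish devices by SAS
 * address in sysfs.
 */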
9927 
9928 /*
9929  *  This is it.  Register the PCI driver information for the cards we control;
9930  *  the OS will call our registered routines when it finds one of our cards.
9931  */
9932 static int __init hpsa_init(void)
9933 {
9934 	int rc;
9935 
9936 	hpsa_sas_transport_template =
9937 		sas_attach_transport(&hpsa_sas_transport_functions);
9938 	if (!hpsa_sas_transport_template)
9939 		return -ENODEV;
9940 
9941 	rc = pci_register_driver(&hpsa_pci_driver);
9942 
9943 	if (rc)
9944 		sas_release_transport(hpsa_sas_transport_template);
9945 
9946 	return rc;
9947 }
9948 
9949 static void __exit hpsa_cleanup(void)
9950 {
9951 	pci_unregister_driver(&hpsa_pci_driver);
9952 	sas_release_transport(hpsa_sas_transport_template);
9953 }
9954 
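/*
 * verify_offsets() is never called; it exists so the BUILD_BUG_ON()
 * checks below are evaluated at compile time, turning any drift between
 * these structures and the controller's expected wire layout into a
 * build failure instead of runtime corruption.
 */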
9955 static void __attribute__((unused)) verify_offsets(void)
9956 {
9957 #define VERIFY_OFFSET(member, offset) \
9958 	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
9959 
9960 	VERIFY_OFFSET(structure_size, 0);
9961 	VERIFY_OFFSET(volume_blk_size, 4);
9962 	VERIFY_OFFSET(volume_blk_cnt, 8);
9963 	VERIFY_OFFSET(phys_blk_shift, 16);
9964 	VERIFY_OFFSET(parity_rotation_shift, 17);
9965 	VERIFY_OFFSET(strip_size, 18);
9966 	VERIFY_OFFSET(disk_starting_blk, 20);
9967 	VERIFY_OFFSET(disk_blk_cnt, 28);
9968 	VERIFY_OFFSET(data_disks_per_row, 36);
9969 	VERIFY_OFFSET(metadata_disks_per_row, 38);
9970 	VERIFY_OFFSET(row_cnt, 40);
9971 	VERIFY_OFFSET(layout_map_count, 42);
9972 	VERIFY_OFFSET(flags, 44);
9973 	VERIFY_OFFSET(dekindex, 46);
9974 	/* VERIFY_OFFSET(reserved, 48); */
9975 	VERIFY_OFFSET(data, 64);
9976 
9977 #undef VERIFY_OFFSET
9978 
9979 #define VERIFY_OFFSET(member, offset) \
9980 	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
9981 
9982 	VERIFY_OFFSET(IU_type, 0);
9983 	VERIFY_OFFSET(direction, 1);
9984 	VERIFY_OFFSET(reply_queue, 2);
9985 	/* VERIFY_OFFSET(reserved1, 3);  */
9986 	VERIFY_OFFSET(scsi_nexus, 4);
9987 	VERIFY_OFFSET(Tag, 8);
9988 	VERIFY_OFFSET(cdb, 16);
9989 	VERIFY_OFFSET(cciss_lun, 32);
9990 	VERIFY_OFFSET(data_len, 40);
9991 	VERIFY_OFFSET(cmd_priority_task_attr, 44);
9992 	VERIFY_OFFSET(sg_count, 45);
9993 	/* VERIFY_OFFSET(reserved3 */
9994 	VERIFY_OFFSET(err_ptr, 48);
9995 	VERIFY_OFFSET(err_len, 56);
9996 	/* VERIFY_OFFSET(reserved4  */
9997 	VERIFY_OFFSET(sg, 64);
9998 
9999 #undef VERIFY_OFFSET
10000 
10001 #define VERIFY_OFFSET(member, offset) \
10002 	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
10003 
10004 	VERIFY_OFFSET(dev_handle, 0x00);
10005 	VERIFY_OFFSET(reserved1, 0x02);
10006 	VERIFY_OFFSET(function, 0x03);
10007 	VERIFY_OFFSET(reserved2, 0x04);
10008 	VERIFY_OFFSET(err_info, 0x0C);
10009 	VERIFY_OFFSET(reserved3, 0x10);
10010 	VERIFY_OFFSET(err_info_len, 0x12);
10011 	VERIFY_OFFSET(reserved4, 0x13);
10012 	VERIFY_OFFSET(sgl_offset, 0x14);
10013 	VERIFY_OFFSET(reserved5, 0x15);
10014 	VERIFY_OFFSET(transfer_len, 0x1C);
10015 	VERIFY_OFFSET(reserved6, 0x20);
10016 	VERIFY_OFFSET(io_flags, 0x24);
10017 	VERIFY_OFFSET(reserved7, 0x26);
10018 	VERIFY_OFFSET(LUN, 0x34);
10019 	VERIFY_OFFSET(control, 0x3C);
10020 	VERIFY_OFFSET(CDB, 0x40);
10021 	VERIFY_OFFSET(reserved8, 0x50);
10022 	VERIFY_OFFSET(host_context_flags, 0x60);
10023 	VERIFY_OFFSET(timeout_sec, 0x62);
10024 	VERIFY_OFFSET(ReplyQueue, 0x64);
10025 	VERIFY_OFFSET(reserved9, 0x65);
10026 	VERIFY_OFFSET(tag, 0x68);
10027 	VERIFY_OFFSET(host_addr, 0x70);
10028 	VERIFY_OFFSET(CISS_LUN, 0x78);
10029 	VERIFY_OFFSET(SG, 0x78 + 8);
10030 #undef VERIFY_OFFSET
10031 }
10032 
10033 module_init(hpsa_init);
10034 module_exit(hpsa_cleanup);
10035