/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be three byte-sized values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will retry a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
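/* VPD_PAGE is OR'd into fill_cmd()'s page_code argument to request a
 * vital product data (EVPD) inquiry page; the low byte carries the
 * page code itself.
 */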
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
		    "cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy\n");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

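/* In the 8-byte CISS LUN address, the top two bits of byte 3 encode the
 * addressing mode; 01b means logical volume mode, which is what the
 * 0xC0/0x40 mask-and-compare below tests for.
 */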
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
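/* The HPSA_RAID_* values double as indices into raid_label[] above and
 * mirror the raid_level byte the controller reports for each volume.
 */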

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

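/* next_command() pulls the next completed command tag off a reply queue.
 * The queue is a ring with no head/tail pointers: the low bit of each
 * entry is compared against rq->wraparound, which the driver flips each
 * time current_entry wraps, so stale entries from the previous pass are
 * distinguishable from freshly posted ones.
 */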
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
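
/* Worked example (illustrative only): a normal performant-mode command
 * whose SG count selects block fetch table entry 2 would have its low
 * tag bits set as
 *
 *	busaddr |= 1 | (2 << 1);	// binary ...0000101
 *
 * i.e. bit 0 = performant mode, bits 1-3 = fetch entry 2, bits 4-6 = 0,
 * matching what set_performant_mode() computes below.
 */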

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	h->access.submit_command(h, c);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
			new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

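/* Byte-wise equality test for two 8-byte CISS LUN addresses, open-coded
 * rather than calling memcmp().
 */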
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
				h->scsi_host->host_no,
				sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify the scsi mid layer of any changes the first time
	 * through (or if there are no changes); scsi_scan_host will do it
	 * later, the first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * Future cmds to this device will get a selection
			 * timeout as if the device were gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d"
				" for removal.\n", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

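/* When a request needs more scatter-gather entries than fit in the
 * CommandList itself, the last embedded descriptor becomes a chain
 * pointer (HPSA_SG_CHAIN) to one of the per-command blocks allocated by
 * hpsa_allocate_sg_chain_blocks() above, and the overflow entries are
 * written there.
 */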
1480 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1481 	struct CommandList *c)
1482 {
1483 	struct SGDescriptor *chain_sg, *chain_block;
1484 	u64 temp64;
1485 	u32 chain_len;
1486 
1487 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1488 	chain_block = h->cmd_sg_list[c->cmdindex];
1489 	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1490 	chain_len = sizeof(*chain_sg) *
1491 		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1492 	chain_sg->Len = cpu_to_le32(chain_len);
1493 	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1494 				PCI_DMA_TODEVICE);
1495 	if (dma_mapping_error(&h->pdev->dev, temp64)) {
1496 		/* prevent subsequent unmapping */
1497 		chain_sg->Addr = cpu_to_le64(0);
1498 		return -1;
1499 	}
1500 	chain_sg->Addr = cpu_to_le64(temp64);
1501 	return 0;
1502 }
1503 
1504 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1505 	struct CommandList *c)
1506 {
1507 	struct SGDescriptor *chain_sg;
1508 
1509 	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1510 		return;
1511 
1512 	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1513 	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1514 			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1515 }
1516 
1517 
1518 /* Decode the various types of errors on ioaccel2 path.
1519  * Return 1 for any error that should generate a RAID path retry.
1520  * Return 0 for errors that don't require a RAID path retry.
1521  */
1522 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1523 					struct CommandList *c,
1524 					struct scsi_cmnd *cmd,
1525 					struct io_accel2_cmd *c2)
1526 {
1527 	int data_len;
1528 	int retry = 0;
1529 
1530 	switch (c2->error_data.serv_response) {
1531 	case IOACCEL2_SERV_RESPONSE_COMPLETE:
1532 		switch (c2->error_data.status) {
1533 		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1534 			break;
1535 		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1536 			dev_warn(&h->pdev->dev,
1537 				"%s: task complete with check condition.\n",
1538 				"HP SSD Smart Path");
1539 			cmd->result |= SAM_STAT_CHECK_CONDITION;
1540 			if (c2->error_data.data_present !=
1541 					IOACCEL2_SENSE_DATA_PRESENT) {
1542 				memset(cmd->sense_buffer, 0,
1543 					SCSI_SENSE_BUFFERSIZE);
1544 				break;
1545 			}
1546 			/* copy the sense data */
1547 			data_len = c2->error_data.sense_data_len;
1548 			if (data_len > SCSI_SENSE_BUFFERSIZE)
1549 				data_len = SCSI_SENSE_BUFFERSIZE;
1550 			if (data_len > sizeof(c2->error_data.sense_data_buff))
1551 				data_len =
1552 					sizeof(c2->error_data.sense_data_buff);
1553 			memcpy(cmd->sense_buffer,
1554 				c2->error_data.sense_data_buff, data_len);
1555 			retry = 1;
1556 			break;
1557 		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1558 			dev_warn(&h->pdev->dev,
1559 				"%s: task complete with BUSY status.\n",
1560 				"HP SSD Smart Path");
1561 			retry = 1;
1562 			break;
1563 		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1564 			dev_warn(&h->pdev->dev,
1565 				"%s: task complete with reservation conflict.\n",
1566 				"HP SSD Smart Path");
1567 			retry = 1;
1568 			break;
1569 		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1570 			/* Make scsi midlayer do unlimited retries */
1571 			cmd->result = DID_IMM_RETRY << 16;
1572 			break;
1573 		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1574 			dev_warn(&h->pdev->dev,
1575 				"%s: task complete with aborted status.\n",
1576 				"HP SSD Smart Path");
1577 			retry = 1;
1578 			break;
1579 		default:
1580 			dev_warn(&h->pdev->dev,
1581 				"%s: task complete with unrecognized status: 0x%02x\n",
1582 				"HP SSD Smart Path", c2->error_data.status);
1583 			retry = 1;
1584 			break;
1585 		}
1586 		break;
1587 	case IOACCEL2_SERV_RESPONSE_FAILURE:
1588 		/* don't expect to get here. */
1589 		dev_warn(&h->pdev->dev,
1590 			"unexpected delivery or target failure, status = 0x%02x\n",
1591 			c2->error_data.status);
1592 		retry = 1;
1593 		break;
1594 	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1595 		break;
1596 	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1597 		break;
1598 	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1599 		dev_warn(&h->pdev->dev, "task management function rejected.\n");
1600 		retry = 1;
1601 		break;
1602 	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1603 		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1604 		break;
1605 	default:
1606 		dev_warn(&h->pdev->dev,
1607 			"%s: Unrecognized server response: 0x%02x\n",
1608 			"HP SSD Smart Path",
1609 			c2->error_data.serv_response);
1610 		retry = 1;
1611 		break;
1612 	}
1613 
1614 	return retry;	/* retry on raid path? */
1615 }
1616 
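/*
 * Completion handling for commands that went down the ioaccel2 path.
 * A clean completion is passed straight back to the midlayer; offload
 * errors disable HP SSD Smart Path for the device and force a retry
 * on the standard RAID path.
 */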
1617 static void process_ioaccel2_completion(struct ctlr_info *h,
1618 		struct CommandList *c, struct scsi_cmnd *cmd,
1619 		struct hpsa_scsi_dev_t *dev)
1620 {
1621 	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1622 	int raid_retry = 0;
1623 
1624 	/* check for good status */
1625 	if (likely(c2->error_data.serv_response == 0 &&
1626 			c2->error_data.status == 0)) {
1627 		cmd_free(h, c);
1628 		cmd->scsi_done(cmd);
1629 		return;
1630 	}
1631 
1632 	/* Any RAID offload error results in retry which will use
1633 	 * the normal I/O path so the controller can handle whatever's
1634 	 * wrong.
1635 	 */
1636 	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1637 		c2->error_data.serv_response ==
1638 			IOACCEL2_SERV_RESPONSE_FAILURE) {
1639 		dev->offload_enabled = 0;
1640 		cmd->result = DID_SOFT_ERROR << 16;
1641 		cmd_free(h, c);
1642 		cmd->scsi_done(cmd);
1643 		return;
1644 	}
1645 	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1646 	/* If error found, disable Smart Path,
1647 	 * force a retry on the standard path.
1648 	 */
1649 	if (raid_retry) {
1650 		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1651 			"HP SSD Smart Path");
1652 		dev->offload_enabled = 0; /* Disable Smart Path */
1653 		cmd->result = DID_SOFT_ERROR << 16;
1654 	}
1655 	cmd_free(h, c);
1656 	cmd->scsi_done(cmd);
1657 }
1658 
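/*
 * Main command completion routine: undo the DMA mappings, translate
 * the controller's error info into a SCSI midlayer result, and hand
 * the command back via scsi_done.
 */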
1659 static void complete_scsi_command(struct CommandList *cp)
1660 {
1661 	struct scsi_cmnd *cmd;
1662 	struct ctlr_info *h;
1663 	struct ErrorInfo *ei;
1664 	struct hpsa_scsi_dev_t *dev;
1665 
1666 	unsigned char sense_key;
1667 	unsigned char asc;      /* additional sense code */
1668 	unsigned char ascq;     /* additional sense code qualifier */
1669 	unsigned long sense_data_size;
1670 
1671 	ei = cp->err_info;
1672 	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1673 	h = cp->h;
1674 	dev = cmd->device->hostdata;
1675 
1676 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
1677 	if ((cp->cmd_type == CMD_SCSI) &&
1678 		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1679 		hpsa_unmap_sg_chain_block(h, cp);
1680 
1681 	cmd->result = (DID_OK << 16); 		/* host byte */
1682 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
1683 
1684 	if (cp->cmd_type == CMD_IOACCEL2)
1685 		return process_ioaccel2_completion(h, cp, cmd, dev);
1686 
1687 	cmd->result |= ei->ScsiStatus;
1688 
1689 	scsi_set_resid(cmd, ei->ResidualCnt);
1690 	if (ei->CommandStatus == 0) {
1691 		cmd_free(h, cp);
1692 		cmd->scsi_done(cmd);
1693 		return;
1694 	}
1695 
1696 	/* copy the sense data */
1697 	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1698 		sense_data_size = SCSI_SENSE_BUFFERSIZE;
1699 	else
1700 		sense_data_size = sizeof(ei->SenseInfo);
1701 	if (ei->SenseLen < sense_data_size)
1702 		sense_data_size = ei->SenseLen;
1703 
1704 	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1705 
1706 	/* For I/O accelerator commands, copy over some fields to the normal
1707 	 * CISS header used below for error handling.
1708 	 */
1709 	if (cp->cmd_type == CMD_IOACCEL1) {
1710 		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1711 		cp->Header.SGList = scsi_sg_count(cmd);
1712 		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1713 		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1714 			IOACCEL1_IOFLAGS_CDBLEN_MASK;
1715 		cp->Header.tag = c->tag;
1716 		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1717 		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1718 
1719 		/* Any RAID offload error results in retry which will use
1720 		 * the normal I/O path so the controller can handle whatever's
1721 		 * wrong.
1722 		 */
1723 		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1724 			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1725 				dev->offload_enabled = 0;
1726 			cmd->result = DID_SOFT_ERROR << 16;
1727 			cmd_free(h, cp);
1728 			cmd->scsi_done(cmd);
1729 			return;
1730 		}
1731 	}
1732 
1733 	/* an error has occurred */
1734 	switch (ei->CommandStatus) {
1735 
1736 	case CMD_TARGET_STATUS:
1737 		if (ei->ScsiStatus) {
1738 			/* Get sense key */
1739 			sense_key = 0xf & ei->SenseInfo[2];
1740 			/* Get additional sense code */
1741 			asc = ei->SenseInfo[12];
1742 			/* Get additional sense code qualifier */
1743 			ascq = ei->SenseInfo[13];
1744 		}
1745 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1746 			if (sense_key == ABORTED_COMMAND) {
1747 				cmd->result |= DID_SOFT_ERROR << 16;
1748 				break;
1749 			}
1750 			break;
1751 		}
1752 		/* Problem was not a check condition
1753 		 * Pass it up to the upper layers...
1754 		 */
1755 		if (ei->ScsiStatus) {
1756 			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1757 				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1758 				"Returning result: 0x%x\n",
1759 				cp, ei->ScsiStatus,
1760 				sense_key, asc, ascq,
1761 				cmd->result);
1762 		} else {  /* scsi status is zero??? How??? */
1763 			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1764 				"Returning no connection.\n", cp);
1765 
1766 			/* Ordinarily, this case should never happen,
1767 			 * but there is a bug in some released firmware
1768 			 * revisions that allows it to happen if, for
1769 			 * example, a 4100 backplane loses power and
1770 			 * the tape drive is in it.  We assume that
1771 			 * it's a fatal error of some kind because we
1772 			 * can't show that it wasn't. We will make it
1773 			 * look like selection timeout since that is
1774 			 * the most common reason for this to occur,
1775 			 * and it's severe enough.
1776 			 */
1777 
1778 			cmd->result = DID_NO_CONNECT << 16;
1779 		}
1780 		break;
1781 
1782 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1783 		break;
1784 	case CMD_DATA_OVERRUN:
1785 		dev_warn(&h->pdev->dev,
1786 			"cp %p has completed with data overrun reported\n", cp);
1788 		break;
1789 	case CMD_INVALID: {
1790 		/* print_bytes(cp, sizeof(*cp), 1, 0);
1791 		print_cmd(cp); */
1792 		/* We get CMD_INVALID if you address a non-existent device
1793 		 * instead of a selection timeout (no response).  You will
1794 		 * see this if you yank out a drive, then try to access it.
1795 		 * This is kind of a shame because it means that any other
1796 		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1797 		 * missing target. */
1798 		cmd->result = DID_NO_CONNECT << 16;
1799 	}
1800 		break;
1801 	case CMD_PROTOCOL_ERR:
1802 		cmd->result = DID_ERROR << 16;
1803 		dev_warn(&h->pdev->dev, "cp %p has protocol error\n", cp);
1805 		break;
1806 	case CMD_HARDWARE_ERR:
1807 		cmd->result = DID_ERROR << 16;
1808 		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1809 		break;
1810 	case CMD_CONNECTION_LOST:
1811 		cmd->result = DID_ERROR << 16;
1812 		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1813 		break;
1814 	case CMD_ABORTED:
1815 		cmd->result = DID_ABORT << 16;
1816 		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1817 				cp, ei->ScsiStatus);
1818 		break;
1819 	case CMD_ABORT_FAILED:
1820 		cmd->result = DID_ERROR << 16;
1821 		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1822 		break;
1823 	case CMD_UNSOLICITED_ABORT:
1824 		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1825 		dev_warn(&h->pdev->dev,
1826 			"cp %p aborted due to an unsolicited abort\n", cp);
1827 		break;
1828 	case CMD_TIMEOUT:
1829 		cmd->result = DID_TIME_OUT << 16;
1830 		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1831 		break;
1832 	case CMD_UNABORTABLE:
1833 		cmd->result = DID_ERROR << 16;
1834 		dev_warn(&h->pdev->dev, "Command unabortable\n");
1835 		break;
1836 	case CMD_IOACCEL_DISABLED:
1837 		/* This only handles the direct pass-through case since RAID
1838 		 * offload is handled above.  Just attempt a retry.
1839 		 */
1840 		cmd->result = DID_SOFT_ERROR << 16;
1841 		dev_warn(&h->pdev->dev,
1842 				"cp %p had HP SSD Smart Path error\n", cp);
1843 		break;
1844 	default:
1845 		cmd->result = DID_ERROR << 16;
1846 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1847 				cp, ei->CommandStatus);
1848 	}
1849 	cmd_free(h, cp);
1850 	cmd->scsi_done(cmd);
1851 }
1852 
1853 static void hpsa_pci_unmap(struct pci_dev *pdev,
1854 	struct CommandList *c, int sg_used, int data_direction)
1855 {
1856 	int i;
1857 
1858 	for (i = 0; i < sg_used; i++)
1859 		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1860 				le32_to_cpu(c->SG[i].Len),
1861 				data_direction);
1862 }
1863 
1864 static int hpsa_map_one(struct pci_dev *pdev,
1865 		struct CommandList *cp,
1866 		unsigned char *buf,
1867 		size_t buflen,
1868 		int data_direction)
1869 {
1870 	u64 addr64;
1871 
1872 	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1873 		cp->Header.SGList = 0;
1874 		cp->Header.SGTotal = cpu_to_le16(0);
1875 		return 0;
1876 	}
1877 
1878 	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
1879 	if (dma_mapping_error(&pdev->dev, addr64)) {
1880 		/* Prevent subsequent unmap of something never mapped */
1881 		cp->Header.SGList = 0;
1882 		cp->Header.SGTotal = cpu_to_le16(0);
1883 		return -1;
1884 	}
1885 	cp->SG[0].Addr = cpu_to_le64(addr64);
1886 	cp->SG[0].Len = cpu_to_le32(buflen);
1887 	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1888 	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
1889 	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
1890 	return 0;
1891 }
1892 
1893 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1894 	struct CommandList *c)
1895 {
1896 	DECLARE_COMPLETION_ONSTACK(wait);
1897 
1898 	c->waiting = &wait;
1899 	enqueue_cmd_and_start_io(h, c);
1900 	wait_for_completion(&wait);
1901 }
1902 
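/*
 * Return this CPU's copy of the controller lockup flag (nonzero once
 * a lockup has been detected).
 */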
1903 static u32 lockup_detected(struct ctlr_info *h)
1904 {
1905 	int cpu;
1906 	u32 rc, *lockup_detected;
1907 
1908 	cpu = get_cpu();
1909 	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1910 	rc = *lockup_detected;
1911 	put_cpu();
1912 	return rc;
1913 }
1914 
1915 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1916 	struct CommandList *c)
1917 {
1918 	/* If controller lockup detected, fake a hardware error. */
1919 	if (unlikely(lockup_detected(h)))
1920 		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
1921 	else
1922 		hpsa_scsi_do_simple_cmd_core(h, c);
1923 }
1924 
1925 #define MAX_DRIVER_CMD_RETRIES 25
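/*
 * Issue a simple command, retrying while the target reports unit
 * attention or busy.  After the first few attempts, sleep between
 * retries with exponential backoff, capped at one second.
 */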
1926 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1927 	struct CommandList *c, int data_direction)
1928 {
1929 	int backoff_time = 10, retry_count = 0;
1930 
1931 	do {
1932 		memset(c->err_info, 0, sizeof(*c->err_info));
1933 		hpsa_scsi_do_simple_cmd_core(h, c);
1934 		retry_count++;
1935 		if (retry_count > 3) {
1936 			msleep(backoff_time);
1937 			if (backoff_time < 1000)
1938 				backoff_time *= 2;
1939 		}
1940 	} while ((check_for_unit_attention(h, c) ||
1941 			check_for_busy(h, c)) &&
1942 			retry_count <= MAX_DRIVER_CMD_RETRIES);
1943 	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1944 }
1945 
1946 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
1947 				struct CommandList *c)
1948 {
1949 	const u8 *cdb = c->Request.CDB;
1950 	const u8 *lun = c->Header.LUN.LunAddrBytes;
1951 
1952 	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
1953 	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
1954 		txt, lun[0], lun[1], lun[2], lun[3],
1955 		lun[4], lun[5], lun[6], lun[7],
1956 		cdb[0], cdb[1], cdb[2], cdb[3],
1957 		cdb[4], cdb[5], cdb[6], cdb[7],
1958 		cdb[8], cdb[9], cdb[10], cdb[11],
1959 		cdb[12], cdb[13], cdb[14], cdb[15]);
1960 }
1961 
1962 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
1963 			struct CommandList *cp)
1964 {
1965 	const struct ErrorInfo *ei = cp->err_info;
1966 	struct device *d = &cp->h->pdev->dev;
1967 	const u8 *sd = ei->SenseInfo;
1968 
1969 	switch (ei->CommandStatus) {
1970 	case CMD_TARGET_STATUS:
1971 		hpsa_print_cmd(h, "SCSI status", cp);
1972 		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
1973 			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
1974 				sd[2] & 0x0f, sd[12], sd[13]);
1975 		else
1976 			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
1977 		if (ei->ScsiStatus == 0)
1978 			dev_warn(d, "SCSI status is abnormally zero.  "
1979 			"(probably indicates selection timeout "
1980 			"reported incorrectly due to a known "
1981 			"firmware bug, circa July, 2001.)\n");
1982 		break;
1983 	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1984 		break;
1985 	case CMD_DATA_OVERRUN:
1986 		hpsa_print_cmd(h, "overrun condition", cp);
1987 		break;
1988 	case CMD_INVALID: {
1989 		/* controller unfortunately reports SCSI passthru's
1990 		 * to non-existent targets as invalid commands.
1991 		 */
1992 		hpsa_print_cmd(h, "invalid command", cp);
1993 		dev_warn(d, "probably means device no longer present\n");
1994 		}
1995 		break;
1996 	case CMD_PROTOCOL_ERR:
1997 		hpsa_print_cmd(h, "protocol error", cp);
1998 		break;
1999 	case CMD_HARDWARE_ERR:
2000 		hpsa_print_cmd(h, "hardware error", cp);
2001 		break;
2002 	case CMD_CONNECTION_LOST:
2003 		hpsa_print_cmd(h, "connection lost", cp);
2004 		break;
2005 	case CMD_ABORTED:
2006 		hpsa_print_cmd(h, "aborted", cp);
2007 		break;
2008 	case CMD_ABORT_FAILED:
2009 		hpsa_print_cmd(h, "abort failed", cp);
2010 		break;
2011 	case CMD_UNSOLICITED_ABORT:
2012 		hpsa_print_cmd(h, "unsolicited abort", cp);
2013 		break;
2014 	case CMD_TIMEOUT:
2015 		hpsa_print_cmd(h, "timed out", cp);
2016 		break;
2017 	case CMD_UNABORTABLE:
2018 		hpsa_print_cmd(h, "unabortable", cp);
2019 		break;
2020 	default:
2021 		hpsa_print_cmd(h, "unknown status", cp);
2022 		dev_warn(d, "Unknown command status %x\n",
2023 				ei->CommandStatus);
2024 	}
2025 }
2026 
2027 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2028 			u16 page, unsigned char *buf,
2029 			unsigned char bufsize)
2030 {
2031 	int rc = IO_OK;
2032 	struct CommandList *c;
2033 	struct ErrorInfo *ei;
2034 
2035 	c = cmd_alloc(h);
2036 
2037 	if (c == NULL) {			/* trouble... */
2038 		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2039 		return -ENOMEM;
2040 	}
2041 
2042 	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2043 			page, scsi3addr, TYPE_CMD)) {
2044 		rc = -1;
2045 		goto out;
2046 	}
2047 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2048 	ei = c->err_info;
2049 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2050 		hpsa_scsi_interpret_error(h, c);
2051 		rc = -1;
2052 	}
2053 out:
2054 	cmd_free(h, c);
2055 	return rc;
2056 }
2057 
2058 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2059 		unsigned char *scsi3addr, unsigned char page,
2060 		struct bmic_controller_parameters *buf, size_t bufsize)
2061 {
2062 	int rc = IO_OK;
2063 	struct CommandList *c;
2064 	struct ErrorInfo *ei;
2065 
2066 	c = cmd_alloc(h);
2067 	if (c == NULL) {			/* trouble... */
2068 		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2069 		return -ENOMEM;
2070 	}
2071 
2072 	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2073 			page, scsi3addr, TYPE_CMD)) {
2074 		rc = -1;
2075 		goto out;
2076 	}
2077 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2078 	ei = c->err_info;
2079 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2080 		hpsa_scsi_interpret_error(h, c);
2081 		rc = -1;
2082 	}
2083 out:
2084 	cmd_free(h, c);
2085 	return rc;
2086 }
2087 
2088 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2089 	u8 reset_type)
2090 {
2091 	int rc = IO_OK;
2092 	struct CommandList *c;
2093 	struct ErrorInfo *ei;
2094 
2095 	c = cmd_alloc(h);
2096 
2097 	if (c == NULL) {			/* trouble... */
2098 		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2099 		return -ENOMEM;
2100 	}
2101 
2102 	/* fill_cmd can't fail here, no data buffer to map. */
2103 	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2104 			scsi3addr, TYPE_MSG);
2105 	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2106 	hpsa_scsi_do_simple_cmd_core(h, c);
2107 	/* no unmap needed here because no data xfer. */
2108 
2109 	ei = c->err_info;
2110 	if (ei->CommandStatus != 0) {
2111 		hpsa_scsi_interpret_error(h, c);
2112 		rc = -1;
2113 	}
2114 	cmd_free(h, c);
2115 	return rc;
2116 }
2117 
2118 static void hpsa_get_raid_level(struct ctlr_info *h,
2119 	unsigned char *scsi3addr, unsigned char *raid_level)
2120 {
2121 	int rc;
2122 	unsigned char *buf;
2123 
2124 	*raid_level = RAID_UNKNOWN;
2125 	buf = kzalloc(64, GFP_KERNEL);
2126 	if (!buf)
2127 		return;
2128 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2129 	if (rc == 0)
2130 		*raid_level = buf[8];
2131 	if (*raid_level > RAID_UNKNOWN)
2132 		*raid_level = RAID_UNKNOWN;
2133 	kfree(buf);
2135 }
2136 
2137 #define HPSA_MAP_DEBUG
2138 #ifdef HPSA_MAP_DEBUG
2139 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2140 				struct raid_map_data *map_buff)
2141 {
2142 	struct raid_map_disk_data *dd = &map_buff->data[0];
2143 	int map, row, col;
2144 	u16 map_cnt, row_cnt, disks_per_row;
2145 
2146 	if (rc != 0)
2147 		return;
2148 
2149 	/* Show details only if debugging has been activated. */
2150 	if (h->raid_offload_debug < 2)
2151 		return;
2152 
2153 	dev_info(&h->pdev->dev, "structure_size = %u\n",
2154 				le32_to_cpu(map_buff->structure_size));
2155 	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2156 			le32_to_cpu(map_buff->volume_blk_size));
2157 	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2158 			le64_to_cpu(map_buff->volume_blk_cnt));
2159 	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2160 			map_buff->phys_blk_shift);
2161 	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2162 			map_buff->parity_rotation_shift);
2163 	dev_info(&h->pdev->dev, "strip_size = %u\n",
2164 			le16_to_cpu(map_buff->strip_size));
2165 	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2166 			le64_to_cpu(map_buff->disk_starting_blk));
2167 	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2168 			le64_to_cpu(map_buff->disk_blk_cnt));
2169 	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2170 			le16_to_cpu(map_buff->data_disks_per_row));
2171 	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2172 			le16_to_cpu(map_buff->metadata_disks_per_row));
2173 	dev_info(&h->pdev->dev, "row_cnt = %u\n",
2174 			le16_to_cpu(map_buff->row_cnt));
2175 	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2176 			le16_to_cpu(map_buff->layout_map_count));
2177 	dev_info(&h->pdev->dev, "flags = 0x%x\n",
2178 			le16_to_cpu(map_buff->flags));
2179 	dev_info(&h->pdev->dev, "encryption = %s\n",
2180 			le16_to_cpu(map_buff->flags) &
2181 			RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2182 	dev_info(&h->pdev->dev, "dekindex = %u\n",
2183 			le16_to_cpu(map_buff->dekindex));
2184 	map_cnt = le16_to_cpu(map_buff->layout_map_count);
2185 	for (map = 0; map < map_cnt; map++) {
2186 		dev_info(&h->pdev->dev, "Map%u:\n", map);
2187 		row_cnt = le16_to_cpu(map_buff->row_cnt);
2188 		for (row = 0; row < row_cnt; row++) {
2189 			dev_info(&h->pdev->dev, "  Row%u:\n", row);
2190 			disks_per_row =
2191 				le16_to_cpu(map_buff->data_disks_per_row);
2192 			for (col = 0; col < disks_per_row; col++, dd++)
2193 				dev_info(&h->pdev->dev,
2194 					"    D%02u: h=0x%04x xor=%u,%u\n",
2195 					col, dd->ioaccel_handle,
2196 					dd->xor_mult[0], dd->xor_mult[1]);
2197 			disks_per_row =
2198 				le16_to_cpu(map_buff->metadata_disks_per_row);
2199 			for (col = 0; col < disks_per_row; col++, dd++)
2200 				dev_info(&h->pdev->dev,
2201 					"    M%02u: h=0x%04x xor=%u,%u\n",
2202 					col, dd->ioaccel_handle,
2203 					dd->xor_mult[0], dd->xor_mult[1]);
2204 		}
2205 	}
2206 }
2207 #else
2208 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2209 			__attribute__((unused)) int rc,
2210 			__attribute__((unused)) struct raid_map_data *map_buff)
2211 {
2212 }
2213 #endif
2214 
2215 static int hpsa_get_raid_map(struct ctlr_info *h,
2216 	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2217 {
2218 	int rc = 0;
2219 	struct CommandList *c;
2220 	struct ErrorInfo *ei;
2221 
2222 	c = cmd_alloc(h);
2223 	if (c == NULL) {
2224 		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2225 		return -ENOMEM;
2226 	}
2227 	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2228 			sizeof(this_device->raid_map), 0,
2229 			scsi3addr, TYPE_CMD)) {
2230 		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2231 		cmd_free(h, c);
2232 		return -ENOMEM;
2233 	}
2234 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2235 	ei = c->err_info;
2236 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2237 		hpsa_scsi_interpret_error(h, c);
2238 		cmd_free(h, c);
2239 		return -1;
2240 	}
2241 	cmd_free(h, c);
2242 
2243 	/* @todo in the future, dynamically allocate RAID map memory */
2244 	if (le32_to_cpu(this_device->raid_map.structure_size) >
2245 				sizeof(this_device->raid_map)) {
2246 		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2247 		rc = -1;
2248 	}
2249 	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2250 	return rc;
2251 }
2252 
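/*
 * Check whether the device supports a given VPD page: fetch the
 * supported-pages list once to learn its length, then in full, and
 * scan it for the requested page.
 */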
2253 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2254 	unsigned char scsi3addr[], u8 page)
2255 {
2256 	int rc;
2257 	int i;
2258 	int pages;
2259 	unsigned char *buf, bufsize;
2260 
2261 	buf = kzalloc(256, GFP_KERNEL);
2262 	if (!buf)
2263 		return 0;
2264 
2265 	/* Get the size of the page list first */
2266 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2267 				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2268 				buf, HPSA_VPD_HEADER_SZ);
2269 	if (rc != 0)
2270 		goto exit_unsupported;
2271 	pages = buf[3];
2272 	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2273 		bufsize = pages + HPSA_VPD_HEADER_SZ;
2274 	else
2275 		bufsize = 255;
2276 
2277 	/* Get the whole VPD page list */
2278 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2279 				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2280 				buf, bufsize);
2281 	if (rc != 0)
2282 		goto exit_unsupported;
2283 
2284 	pages = buf[3];
2285 	for (i = 1; i <= pages; i++)
2286 		if (buf[3 + i] == page)
2287 			goto exit_supported;
2288 exit_unsupported:
2289 	kfree(buf);
2290 	return 0;
2291 exit_supported:
2292 	kfree(buf);
2293 	return 1;
2294 }
2295 
2296 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2297 	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2298 {
2299 	int rc;
2300 	unsigned char *buf;
2301 	u8 ioaccel_status;
2302 
2303 	this_device->offload_config = 0;
2304 	this_device->offload_enabled = 0;
2305 
2306 	buf = kzalloc(64, GFP_KERNEL);
2307 	if (!buf)
2308 		return;
2309 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2310 		goto out;
2311 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2312 			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2313 	if (rc != 0)
2314 		goto out;
2315 
2316 #define IOACCEL_STATUS_BYTE 4
2317 #define OFFLOAD_CONFIGURED_BIT 0x01
2318 #define OFFLOAD_ENABLED_BIT 0x02
2319 	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2320 	this_device->offload_config =
2321 		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2322 	if (this_device->offload_config) {
2323 		this_device->offload_enabled =
2324 			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2325 		if (hpsa_get_raid_map(h, scsi3addr, this_device))
2326 			this_device->offload_enabled = 0;
2327 	}
2328 out:
2329 	kfree(buf);
2331 }
2332 
2333 /* Get the device id from inquiry page 0x83 */
2334 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2335 	unsigned char *device_id, int buflen)
2336 {
2337 	int rc;
2338 	unsigned char *buf;
2339 
2340 	if (buflen > 16)
2341 		buflen = 16;
2342 	buf = kzalloc(64, GFP_KERNEL);
2343 	if (!buf)
2344 		return -ENOMEM;
2345 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2346 	if (rc == 0)
2347 		memcpy(device_id, &buf[8], buflen);
2348 	kfree(buf);
2349 	return rc != 0;
2350 }
2351 
2352 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2353 		struct ReportLUNdata *buf, int bufsize,
2354 		int extended_response)
2355 {
2356 	int rc = IO_OK;
2357 	struct CommandList *c;
2358 	unsigned char scsi3addr[8];
2359 	struct ErrorInfo *ei;
2360 
2361 	c = cmd_alloc(h);
2362 	if (c == NULL) {			/* trouble... */
2363 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2364 		return -1;
2365 	}
2366 	/* address the controller */
2367 	memset(scsi3addr, 0, sizeof(scsi3addr));
2368 	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2369 		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2370 		rc = -1;
2371 		goto out;
2372 	}
2373 	if (extended_response)
2374 		c->Request.CDB[1] = extended_response;
2375 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2376 	ei = c->err_info;
2377 	if (ei->CommandStatus != 0 &&
2378 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
2379 		hpsa_scsi_interpret_error(h, c);
2380 		rc = -1;
2381 	} else {
2382 		if (buf->extended_response_flag != extended_response) {
2383 			dev_err(&h->pdev->dev,
2384 				"report luns requested format %u, got %u\n",
2385 				extended_response,
2386 				buf->extended_response_flag);
2387 			rc = -1;
2388 		}
2389 	}
2390 out:
2391 	cmd_free(h, c);
2392 	return rc;
2393 }
2394 
2395 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2396 		struct ReportLUNdata *buf,
2397 		int bufsize, int extended_response)
2398 {
2399 	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2400 }
2401 
2402 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2403 		struct ReportLUNdata *buf, int bufsize)
2404 {
2405 	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2406 }
2407 
2408 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2409 	int bus, int target, int lun)
2410 {
2411 	device->bus = bus;
2412 	device->target = target;
2413 	device->lun = lun;
2414 }
2415 
2416 /* Use VPD inquiry to get details of volume status */
2417 static int hpsa_get_volume_status(struct ctlr_info *h,
2418 					unsigned char scsi3addr[])
2419 {
2420 	int rc;
2421 	int status;
2422 	int size;
2423 	unsigned char *buf;
2424 
2425 	buf = kzalloc(64, GFP_KERNEL);
2426 	if (!buf)
2427 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2428 
2429 	/* Does controller have VPD for logical volume status? */
2430 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2431 		goto exit_failed;
2432 
2433 	/* Get the size of the VPD return buffer */
2434 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2435 					buf, HPSA_VPD_HEADER_SZ);
2436 	if (rc != 0)
2437 		goto exit_failed;
2438 	size = buf[3];
2439 
2440 	/* Now get the whole VPD buffer */
2441 	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2442 					buf, size + HPSA_VPD_HEADER_SZ);
2443 	if (rc != 0)
2444 		goto exit_failed;
2445 	status = buf[4]; /* status byte */
2446 
2447 	kfree(buf);
2448 	return status;
2449 exit_failed:
2450 	kfree(buf);
2451 	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2452 }
2453 
2454 /* Determine offline status of a volume.
2455  * Return either:
2456  *  0 (not offline)
2457  *  0xff (offline for unknown reasons)
2458  *  # (integer code indicating one of several NOT READY states
2459  *     describing why a volume is to be kept offline)
2460  */
2461 static int hpsa_volume_offline(struct ctlr_info *h,
2462 					unsigned char scsi3addr[])
2463 {
2464 	struct CommandList *c;
2465 	unsigned char *sense, sense_key, asc, ascq;
2466 	int ldstat = 0;
2467 	u16 cmd_status;
2468 	u8 scsi_status;
2469 #define ASC_LUN_NOT_READY 0x04
2470 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2471 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2472 
2473 	c = cmd_alloc(h);
2474 	if (!c)
2475 		return 0;
2476 	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2477 	hpsa_scsi_do_simple_cmd_core(h, c);
2478 	sense = c->err_info->SenseInfo;
2479 	sense_key = sense[2];
2480 	asc = sense[12];
2481 	ascq = sense[13];
2482 	cmd_status = c->err_info->CommandStatus;
2483 	scsi_status = c->err_info->ScsiStatus;
2484 	cmd_free(h, c);
2485 	/* Is the volume 'not ready'? */
2486 	if (cmd_status != CMD_TARGET_STATUS ||
2487 		scsi_status != SAM_STAT_CHECK_CONDITION ||
2488 		sense_key != NOT_READY ||
2489 		asc != ASC_LUN_NOT_READY)  {
2490 		return 0;
2491 	}
2492 
2493 	/* Determine the reason for not ready state */
2494 	ldstat = hpsa_get_volume_status(h, scsi3addr);
2495 
2496 	/* Keep volume offline in certain cases: */
2497 	switch (ldstat) {
2498 	case HPSA_LV_UNDERGOING_ERASE:
2499 	case HPSA_LV_UNDERGOING_RPI:
2500 	case HPSA_LV_PENDING_RPI:
2501 	case HPSA_LV_ENCRYPTED_NO_KEY:
2502 	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2503 	case HPSA_LV_UNDERGOING_ENCRYPTION:
2504 	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2505 	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2506 		return ldstat;
2507 	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2508 		/* If VPD status page isn't available,
2509 		 * use ASC/ASCQ to determine state
2510 		 */
2511 		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2512 			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2513 			return ldstat;
2514 		break;
2515 	default:
2516 		break;
2517 	}
2518 	return 0;
2519 }
2520 
2521 static int hpsa_update_device_info(struct ctlr_info *h,
2522 	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2523 	unsigned char *is_OBDR_device)
2524 {
2525 
2526 #define OBDR_SIG_OFFSET 43
2527 #define OBDR_TAPE_SIG "$DR-10"
2528 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2529 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2530 
2531 	unsigned char *inq_buff;
2532 	unsigned char *obdr_sig;
2533 
2534 	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2535 	if (!inq_buff)
2536 		goto bail_out;
2537 
2538 	/* Do an inquiry to the device to see what it is. */
2539 	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2540 		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2541 		/* Inquiry failed (msg printed already) */
2542 		dev_err(&h->pdev->dev,
2543 			"hpsa_update_device_info: inquiry failed\n");
2544 		goto bail_out;
2545 	}
2546 
2547 	this_device->devtype = (inq_buff[0] & 0x1f);
2548 	memcpy(this_device->scsi3addr, scsi3addr, 8);
2549 	memcpy(this_device->vendor, &inq_buff[8],
2550 		sizeof(this_device->vendor));
2551 	memcpy(this_device->model, &inq_buff[16],
2552 		sizeof(this_device->model));
2553 	memset(this_device->device_id, 0,
2554 		sizeof(this_device->device_id));
2555 	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2556 		sizeof(this_device->device_id));
2557 
2558 	if (this_device->devtype == TYPE_DISK &&
2559 		is_logical_dev_addr_mode(scsi3addr)) {
2560 		int volume_offline;
2561 
2562 		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2563 		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2564 			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2565 		volume_offline = hpsa_volume_offline(h, scsi3addr);
2566 		if (volume_offline < 0 || volume_offline > 0xff)
2567 			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2568 		this_device->volume_offline = volume_offline & 0xff;
2569 	} else {
2570 		this_device->raid_level = RAID_UNKNOWN;
2571 		this_device->offload_config = 0;
2572 		this_device->offload_enabled = 0;
2573 		this_device->volume_offline = 0;
2574 	}
2575 
2576 	if (is_OBDR_device) {
2577 		/* See if this is a One-Button-Disaster-Recovery device
2578 		 * by looking for "$DR-10" at offset 43 in inquiry data.
2579 		 */
2580 		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2581 		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2582 					strncmp(obdr_sig, OBDR_TAPE_SIG,
2583 						OBDR_SIG_LEN) == 0);
2584 	}
2585 
2586 	kfree(inq_buff);
2587 	return 0;
2588 
2589 bail_out:
2590 	kfree(inq_buff);
2591 	return 1;
2592 }
2593 
2594 static unsigned char *ext_target_model[] = {
2595 	"MSA2012",
2596 	"MSA2024",
2597 	"MSA2312",
2598 	"MSA2324",
2599 	"P2000 G3 SAS",
2600 	"MSA 2040 SAS",
2601 	NULL,
2602 };
2603 
2604 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2605 {
2606 	int i;
2607 
2608 	for (i = 0; ext_target_model[i]; i++)
2609 		if (strncmp(device->model, ext_target_model[i],
2610 			strlen(ext_target_model[i])) == 0)
2611 			return 1;
2612 	return 0;
2613 }
2614 
2615 /* Helper function to assign bus, target, lun mapping of devices.
2616  * Puts non-external target logical volumes on bus 0, external target logical
2617  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2618  * Logical drive target and lun are assigned at this time, but
2619  * physical device lun and target assignment are deferred (assigned
2620  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2621  */
2622 static void figure_bus_target_lun(struct ctlr_info *h,
2623 	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2624 {
2625 	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2626 
2627 	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2628 		/* physical device, target and lun filled in later */
2629 		if (is_hba_lunid(lunaddrbytes))
2630 			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2631 		else
2632 			/* defer target, lun assignment for physical devices */
2633 			hpsa_set_bus_target_lun(device, 2, -1, -1);
2634 		return;
2635 	}
2636 	/* It's a logical device */
2637 	if (is_ext_target(h, device)) {
2638 		/* external target way, put logicals on bus 1
2639 		 * and match the target/lun numbers the box reports; other
2640 		 * smart arrays use bus 0, target 0, and match the lunid.
2641 		 */
2642 		hpsa_set_bus_target_lun(device,
2643 			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2644 		return;
2645 	}
2646 	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
2647 }
2648 
2649 /*
2650  * If there is no lun 0 on a target, linux won't find any devices.
2651  * For the external targets (arrays), we have to manually detect the enclosure
2652  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2653  * it for some reason.  *tmpdevice is the target we're adding,
2654  * this_device is a pointer into the current element of currentsd[]
2655  * that we're building up in update_scsi_devices(), below.
2656  * lunzerobits is a bitmap that tracks which targets already have a
2657  * lun 0 assigned.
2658  * Returns 1 if an enclosure was added, 0 if not.
2659  */
2660 static int add_ext_target_dev(struct ctlr_info *h,
2661 	struct hpsa_scsi_dev_t *tmpdevice,
2662 	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2663 	unsigned long lunzerobits[], int *n_ext_target_devs)
2664 {
2665 	unsigned char scsi3addr[8];
2666 
2667 	if (test_bit(tmpdevice->target, lunzerobits))
2668 		return 0; /* There is already a lun 0 on this target. */
2669 
2670 	if (!is_logical_dev_addr_mode(lunaddrbytes))
2671 		return 0; /* It's the logical targets that may lack lun 0. */
2672 
2673 	if (!is_ext_target(h, tmpdevice))
2674 		return 0; /* Only external target devices have this problem. */
2675 
2676 	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2677 		return 0;
2678 
2679 	memset(scsi3addr, 0, 8);
2680 	scsi3addr[3] = tmpdevice->target;
2681 	if (is_hba_lunid(scsi3addr))
2682 		return 0; /* Don't add the RAID controller here. */
2683 
2684 	if (is_scsi_rev_5(h))
2685 		return 0; /* p1210m doesn't need to do this. */
2686 
2687 	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2688 		dev_warn(&h->pdev->dev, "Maximum number of external "
2689 			"target devices exceeded.  Check your hardware "
2690 			"configuration.\n");
2691 		return 0;
2692 	}
2693 
2694 	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2695 		return 0;
2696 	(*n_ext_target_devs)++;
2697 	hpsa_set_bus_target_lun(this_device,
2698 				tmpdevice->bus, tmpdevice->target, 0);
2699 	set_bit(tmpdevice->target, lunzerobits);
2700 	return 1;
2701 }
2702 
2703 /*
2704  * Get address of physical disk used for an ioaccel2 mode command:
2705  *	1. Extract ioaccel2 handle from the command.
2706  *	2. Find a matching ioaccel2 handle from list of physical disks.
2707  *	3. Return:
2708  *		1 and set scsi3addr to address of matching physical
2709  *		0 if no matching physical disk was found.
2710  */
2711 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2712 	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2713 {
2714 	struct ReportExtendedLUNdata *physicals = NULL;
2715 	int responsesize = 24;	/* size of physical extended response */
2716 	int extended = 2;	/* flag forces reporting 'other dev info'. */
2717 	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2718 	u32 nphysicals = 0;	/* number of reported physical devs */
2719 	int found = 0;		/* found match (1) or not (0) */
2720 	u32 find;		/* handle we need to match */
2721 	int i;
2722 	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */
2723 	struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2724 	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2725 	__le32 it_nexus;	/* 4 byte device handle for the ioaccel2 cmd */
2726 	__le32 scsi_nexus;	/* 4 byte device handle for the ioaccel2 cmd */
2727 
2728 	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2729 		return 0; /* no match */
2730 
2731 	/* point to the ioaccel2 device handle */
2732 	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2733 	if (c2a == NULL)
2734 		return 0; /* no match */
2735 
2736 	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2737 	if (scmd == NULL)
2738 		return 0; /* no match */
2739 
2740 	d = scmd->device->hostdata;
2741 	if (d == NULL)
2742 		return 0; /* no match */
2743 
2744 	it_nexus = cpu_to_le32(d->ioaccel_handle);
2745 	scsi_nexus = c2a->scsi_nexus;
2746 	find = le32_to_cpu(c2a->scsi_nexus);
2747 
2748 	if (h->raid_offload_debug > 0)
2749 		dev_info(&h->pdev->dev,
2750 			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2751 			__func__, scsi_nexus,
2752 			d->device_id[0], d->device_id[1], d->device_id[2],
2753 			d->device_id[3], d->device_id[4], d->device_id[5],
2754 			d->device_id[6], d->device_id[7], d->device_id[8],
2755 			d->device_id[9], d->device_id[10], d->device_id[11],
2756 			d->device_id[12], d->device_id[13], d->device_id[14],
2757 			d->device_id[15]);
2758 
2759 	/* Get the list of physical devices */
2760 	physicals = kzalloc(reportsize, GFP_KERNEL);
2761 	if (physicals == NULL)
2762 		return 0;
2763 	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2764 		reportsize, extended)) {
2765 		dev_err(&h->pdev->dev,
2766 			"Can't lookup %s device handle: report physical LUNs failed.\n",
2767 			"HP SSD Smart Path");
2768 		kfree(physicals);
2769 		return 0;
2770 	}
2771 	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2772 							responsesize;
2773 
2774 	/* find ioaccel2 handle in list of physicals: */
2775 	for (i = 0; i < nphysicals; i++) {
2776 		struct ext_report_lun_entry *entry = &physicals->LUN[i];
2777 
2778 		/* handle is in bytes 28-31 of each lun */
2779 		if (entry->ioaccel_handle != find)
2780 			continue; /* didn't match */
2781 		found = 1;
2782 		memcpy(scsi3addr, entry->lunid, 8);
2783 		if (h->raid_offload_debug > 0)
2784 			dev_info(&h->pdev->dev,
2785 				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2786 				__func__, find,
2787 				entry->ioaccel_handle, scsi3addr);
2788 		break; /* found it */
2789 	}
2790 
2791 	kfree(physicals);
2792 	return found;
2797 }
2798 /*
2799  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
2800  * logdev.  The number of luns in physdev and logdev are returned in
2801  * *nphysicals and *nlogicals, respectively.
2802  * Returns 0 on success, -1 otherwise.
2803  */
2804 static int hpsa_gather_lun_info(struct ctlr_info *h,
2805 	int reportphyslunsize, int reportloglunsize,
2806 	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
2807 	struct ReportLUNdata *logdev, u32 *nlogicals)
2808 {
2809 	int physical_entry_size = 8;
2810 
2811 	*physical_mode = 0;
2812 
2813 	/* For I/O accelerator mode we need to read physical device handles */
2814 	if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2815 		h->transMethod & CFGTBL_Trans_io_accel2) {
2816 		*physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2817 		physical_entry_size = 24;
2818 	}
2819 	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportphyslunsize,
2820 							*physical_mode)) {
2821 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2822 		return -1;
2823 	}
2824 	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2825 							physical_entry_size;
2826 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2827 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2828 			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2829 			*nphysicals - HPSA_MAX_PHYS_LUN);
2830 		*nphysicals = HPSA_MAX_PHYS_LUN;
2831 	}
2832 	if (hpsa_scsi_do_report_log_luns(h, logdev, reportloglunsize)) {
2833 		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2834 		return -1;
2835 	}
2836 	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2837 	/* Reject Logicals in excess of our max capability. */
2838 	if (*nlogicals > HPSA_MAX_LUN) {
2839 		dev_warn(&h->pdev->dev,
2840 			"maximum logical LUNs (%d) exceeded.  "
2841 			"%d LUNs ignored.\n", HPSA_MAX_LUN,
2842 			*nlogicals - HPSA_MAX_LUN);
2843 		*nlogicals = HPSA_MAX_LUN;
2844 	}
2845 	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2846 		dev_warn(&h->pdev->dev,
2847 			"maximum logical + physical LUNs (%d) exceeded. "
2848 			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2849 			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2850 		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2851 	}
2852 	return 0;
2853 }
2854 
2855 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2856 	int i, int nphysicals, int nlogicals,
2857 	struct ReportExtendedLUNdata *physdev_list,
2858 	struct ReportLUNdata *logdev_list)
2859 {
2860 	/* Helper function, figure out where the LUN ID info is coming from
2861 	 * given index i, lists of physical and logical devices, where in
2862 	 * the list the raid controller is supposed to appear (first or last)
2863 	 */
2864 
2865 	int logicals_start = nphysicals + (raid_ctlr_position == 0);
2866 	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2867 
2868 	if (i == raid_ctlr_position)
2869 		return RAID_CTLR_LUNID;
2870 
2871 	if (i < logicals_start)
2872 		return &physdev_list->LUN[i -
2873 				(raid_ctlr_position == 0)].lunid[0];
2874 
2875 	if (i < last_device)
2876 		return &logdev_list->LUN[i - nphysicals -
2877 			(raid_ctlr_position == 0)][0];
2878 	BUG();
2879 	return NULL;
2880 }
2881 
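/*
 * Ask the controller, via a BMIC mode sense, whether HBA mode is
 * enabled in its NVRAM flags.
 */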
2882 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2883 {
2884 	int rc;
2885 	int hba_mode_enabled;
2886 	struct bmic_controller_parameters *ctlr_params;

2887 	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2888 		GFP_KERNEL);
2889 
2890 	if (!ctlr_params)
2891 		return -ENOMEM;
2892 	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2893 		sizeof(struct bmic_controller_parameters));
2894 	if (rc) {
2895 		kfree(ctlr_params);
2896 		return rc;
2897 	}
2898 
2899 	hba_mode_enabled =
2900 		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2901 	kfree(ctlr_params);
2902 	return hba_mode_enabled;
2903 }
2904 
2905 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2906 {
2907 	/* the idea here is we could get notified
2908 	 * that some devices have changed, so we do a report
2909 	 * physical luns and report logical luns cmd, and adjust
2910 	 * our list of devices accordingly.
2911 	 *
2912 	 * The scsi3addr's of devices won't change so long as the
2913 	 * adapter is not reset.  That means we can rescan and
2914 	 * tell which devices we already know about, vs. new
2915 	 * devices, vs.  disappearing devices.
2916 	 */
2917 	struct ReportExtendedLUNdata *physdev_list = NULL;
2918 	struct ReportLUNdata *logdev_list = NULL;
2919 	u32 nphysicals = 0;
2920 	u32 nlogicals = 0;
2921 	int physical_mode = 0;
2922 	u32 ndev_allocated = 0;
2923 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
2924 	int ncurrent = 0;
2925 	int i, n_ext_target_devs, ndevs_to_allocate;
2926 	int raid_ctlr_position;
2927 	int rescan_hba_mode;
2928 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
2929 
2930 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
2931 	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
2932 	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
2933 	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
2934 
2935 	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
2936 		dev_err(&h->pdev->dev, "out of memory\n");
2937 		goto out;
2938 	}
2939 	memset(lunzerobits, 0, sizeof(lunzerobits));
2940 
2941 	rescan_hba_mode = hpsa_hba_mode_enabled(h);
2942 	if (rescan_hba_mode < 0)
2943 		goto out;
2944 
2945 	if (!h->hba_mode_enabled && rescan_hba_mode)
2946 		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
2947 	else if (h->hba_mode_enabled && !rescan_hba_mode)
2948 		dev_warn(&h->pdev->dev, "HBA mode disabled\n");
2949 
2950 	h->hba_mode_enabled = rescan_hba_mode;
2951 
2952 	if (hpsa_gather_lun_info(h,
2953 			sizeof(*physdev_list), sizeof(*logdev_list),
2954 			(struct ReportLUNdata *) physdev_list, &nphysicals,
2955 			&physical_mode, logdev_list, &nlogicals))
2956 		goto out;
2957 
2958 	/* We might see up to the maximum number of logical and physical disks
2959 	 * plus external target devices, and a device for the local RAID
2960 	 * controller.
2961 	 */
2962 	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
2963 
2964 	/* Allocate the per device structures */
2965 	for (i = 0; i < ndevs_to_allocate; i++) {
2966 		if (i >= HPSA_MAX_DEVICES) {
2967 			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
2968 				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
2969 				ndevs_to_allocate - HPSA_MAX_DEVICES);
2970 			break;
2971 		}
2972 
2973 		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
2974 		if (!currentsd[i]) {
2975 			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
2976 				__FILE__, __LINE__);
2977 			goto out;
2978 		}
2979 		ndev_allocated++;
2980 	}
2981 
2982 	if (is_scsi_rev_5(h))
2983 		raid_ctlr_position = 0;
2984 	else
2985 		raid_ctlr_position = nphysicals + nlogicals;
2986 
2987 	/* adjust our table of devices */
2988 	n_ext_target_devs = 0;
2989 	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
2990 		u8 *lunaddrbytes, is_OBDR = 0;
2991 
2992 		/* Figure out where the LUN ID info is coming from */
2993 		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
2994 			i, nphysicals, nlogicals, physdev_list, logdev_list);
2995 		/* skip masked physical devices. */
2996 		if (lunaddrbytes[3] & 0xC0 &&
2997 			i < nphysicals + (raid_ctlr_position == 0))
2998 			continue;
2999 
3000 		/* Get device type, vendor, model, device id */
3001 		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3002 							&is_OBDR))
3003 			continue; /* skip it if we can't talk to it. */
3004 		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3005 		this_device = currentsd[ncurrent];
3006 
3007 		/*
3008 		 * For external target devices, we have to insert a LUN 0 which
3009 		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3010 		 * is nonetheless an enclosure device there.  We have to
3011 		 * present that otherwise linux won't find anything if
3012 		 * there is no lun 0.
3013 		 */
3014 		if (add_ext_target_dev(h, tmpdevice, this_device,
3015 				lunaddrbytes, lunzerobits,
3016 				&n_ext_target_devs)) {
3017 			ncurrent++;
3018 			this_device = currentsd[ncurrent];
3019 		}
3020 
3021 		*this_device = *tmpdevice;
3022 
3023 		switch (this_device->devtype) {
3024 		case TYPE_ROM:
3025 			/* We don't *really* support actual CD-ROM devices,
3026 			 * just "One Button Disaster Recovery" tape drive
3027 			 * which temporarily pretends to be a CD-ROM drive.
3028 			 * So we check that the device is really an OBDR tape
3029 			 * device by checking for "$DR-10" in bytes 43-48 of
3030 			 * the inquiry data.
3031 			 */
3032 			if (is_OBDR)
3033 				ncurrent++;
3034 			break;
3035 		case TYPE_DISK:
3036 			if (h->hba_mode_enabled) {
3037 				/* never use raid mapper in HBA mode */
3038 				this_device->offload_enabled = 0;
3039 				ncurrent++;
3040 				break;
3041 			} else if (h->acciopath_status) {
3042 				if (i >= nphysicals) {
3043 					ncurrent++;
3044 					break;
3045 				}
3046 			} else {
3047 				if (i < nphysicals)
3048 					break;
3049 				ncurrent++;
3050 				break;
3051 			}
3052 			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3053 				memcpy(&this_device->ioaccel_handle,
3054 					&lunaddrbytes[20],
3055 					sizeof(this_device->ioaccel_handle));
3056 				ncurrent++;
3057 			}
3058 			break;
3059 		case TYPE_TAPE:
3060 		case TYPE_MEDIUM_CHANGER:
3061 			ncurrent++;
3062 			break;
3063 		case TYPE_RAID:
3064 			/* Only present the Smart Array HBA as a RAID controller.
3065 			 * If it's a RAID controller other than the HBA itself
3066 			 * (an external RAID controller, MSA500 or similar)
3067 			 * don't present it.
3068 			 */
3069 			if (!is_hba_lunid(lunaddrbytes))
3070 				break;
3071 			ncurrent++;
3072 			break;
3073 		default:
3074 			break;
3075 		}
3076 		if (ncurrent >= HPSA_MAX_DEVICES)
3077 			break;
3078 	}
3079 	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3080 out:
3081 	kfree(tmpdevice);
3082 	for (i = 0; i < ndev_allocated; i++)
3083 		kfree(currentsd[i]);
3084 	kfree(currentsd);
3085 	kfree(physdev_list);
3086 	kfree(logdev_list);
3087 }
3088 
3089 /*
3090  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3091  * dma mapping and fills in the scatter gather entries of the
3092  * hpsa command, cp.
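 * Requests with more segments than fit in the command spill over into
 * a chained SG block (see hpsa_map_sg_chain_block above).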
3093  */
3094 static int hpsa_scatter_gather(struct ctlr_info *h,
3095 		struct CommandList *cp,
3096 		struct scsi_cmnd *cmd)
3097 {
3098 	unsigned int len;
3099 	struct scatterlist *sg;
3100 	u64 addr64;
3101 	int use_sg, i, sg_index, chained;
3102 	struct SGDescriptor *curr_sg;
3103 
3104 	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3105 
3106 	use_sg = scsi_dma_map(cmd);
3107 	if (use_sg < 0)
3108 		return use_sg;
3109 
3110 	if (!use_sg)
3111 		goto sglist_finished;
3112 
3113 	curr_sg = cp->SG;
3114 	chained = 0;
3115 	sg_index = 0;
3116 	scsi_for_each_sg(cmd, sg, use_sg, i) {
3117 		if (i == h->max_cmd_sg_entries - 1 &&
3118 			use_sg > h->max_cmd_sg_entries) {
3119 			chained = 1;
3120 			curr_sg = h->cmd_sg_list[cp->cmdindex];
3121 			sg_index = 0;
3122 		}
3123 		addr64 = (u64) sg_dma_address(sg);
3124 		len  = sg_dma_len(sg);
3125 		curr_sg->Addr = cpu_to_le64(addr64);
3126 		curr_sg->Len = cpu_to_le32(len);
3127 		curr_sg->Ext = cpu_to_le32(0);
3128 		curr_sg++;
3129 	}
3130 	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3131 
3132 	if (use_sg + chained > h->maxSG)
3133 		h->maxSG = use_sg + chained;
3134 
3135 	if (chained) {
3136 		cp->Header.SGList = h->max_cmd_sg_entries;
3137 		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3138 		if (hpsa_map_sg_chain_block(h, cp)) {
3139 			scsi_dma_unmap(cmd);
3140 			return -1;
3141 		}
3142 		return 0;
3143 	}
3144 
3145 sglist_finished:
3146 
3147 	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
3148 	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3149 	return 0;
3150 }
3151 
3152 #define IO_ACCEL_INELIGIBLE (1)
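/*
 * 6- and 12-byte read/write CDBs are rewritten in place as 10-byte
 * equivalents before going down the accelerated path; a 12-byte
 * request moving more than 0xffff blocks cannot be expressed in 10
 * bytes and is declared ineligible for acceleration.
 */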
3153 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3154 {
3155 	int is_write = 0;
3156 	u32 block;
3157 	u32 block_cnt;
3158 
3159 	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
3160 	switch (cdb[0]) {
3161 	case WRITE_6:
3162 	case WRITE_12:
3163 		is_write = 1;
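		/* fall through */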
3164 	case READ_6:
3165 	case READ_12:
3166 		if (*cdb_len == 6) {
3167 			block = (((u32) cdb[2]) << 8) | cdb[3];
3168 			block_cnt = cdb[4];
3169 		} else {
3170 			BUG_ON(*cdb_len != 12);
3171 			block = (((u32) cdb[2]) << 24) |
3172 				(((u32) cdb[3]) << 16) |
3173 				(((u32) cdb[4]) << 8) |
3174 				cdb[5];
3175 			block_cnt =
3176 				(((u32) cdb[6]) << 24) |
3177 				(((u32) cdb[7]) << 16) |
3178 				(((u32) cdb[8]) << 8) |
3179 				cdb[9];
3180 		}
3181 		if (block_cnt > 0xffff)
3182 			return IO_ACCEL_INELIGIBLE;
3183 
3184 		cdb[0] = is_write ? WRITE_10 : READ_10;
3185 		cdb[1] = 0;
3186 		cdb[2] = (u8) (block >> 24);
3187 		cdb[3] = (u8) (block >> 16);
3188 		cdb[4] = (u8) (block >> 8);
3189 		cdb[5] = (u8) (block);
3190 		cdb[6] = 0;
3191 		cdb[7] = (u8) (block_cnt >> 8);
3192 		cdb[8] = (u8) (block_cnt);
3193 		cdb[9] = 0;
3194 		*cdb_len = 10;
3195 		break;
3196 	}
3197 	return 0;
3198 }
3199 
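/*
 * Build and submit an ioaccel1 request: translate the SCSI command
 * into the accelerated command format and issue it directly against
 * the device handle.
 */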
3200 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3201 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3202 	u8 *scsi3addr)
3203 {
3204 	struct scsi_cmnd *cmd = c->scsi_cmd;
3205 	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3206 	unsigned int len;
3207 	unsigned int total_len = 0;
3208 	struct scatterlist *sg;
3209 	u64 addr64;
3210 	int use_sg, i;
3211 	struct SGDescriptor *curr_sg;
3212 	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3213 
3214 	/* TODO: implement chaining support */
3215 	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3216 		return IO_ACCEL_INELIGIBLE;
3217 
3218 	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3219 
3220 	if (fixup_ioaccel_cdb(cdb, &cdb_len))
3221 		return IO_ACCEL_INELIGIBLE;
3222 
3223 	c->cmd_type = CMD_IOACCEL1;
3224 
3225 	/* Adjust the DMA address to point to the accelerated command buffer */
3226 	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3227 				(c->cmdindex * sizeof(*cp));
3228 	BUG_ON(c->busaddr & 0x0000007F);
3229 
3230 	use_sg = scsi_dma_map(cmd);
3231 	if (use_sg < 0)
3232 		return use_sg;
3233 
3234 	if (use_sg) {
3235 		curr_sg = cp->SG;
3236 		scsi_for_each_sg(cmd, sg, use_sg, i) {
3237 			addr64 = (u64) sg_dma_address(sg);
3238 			len  = sg_dma_len(sg);
3239 			total_len += len;
3240 			curr_sg->Addr = cpu_to_le64(addr64);
3241 			curr_sg->Len = cpu_to_le32(len);
3242 			curr_sg->Ext = cpu_to_le32(0);
3243 			curr_sg++;
3244 		}
3245 		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3246 
3247 		switch (cmd->sc_data_direction) {
3248 		case DMA_TO_DEVICE:
3249 			control |= IOACCEL1_CONTROL_DATA_OUT;
3250 			break;
3251 		case DMA_FROM_DEVICE:
3252 			control |= IOACCEL1_CONTROL_DATA_IN;
3253 			break;
3254 		case DMA_NONE:
3255 			control |= IOACCEL1_CONTROL_NODATAXFER;
3256 			break;
3257 		default:
3258 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3259 				cmd->sc_data_direction);
3260 			BUG();
3261 			break;
3262 		}
3263 	} else {
3264 		control |= IOACCEL1_CONTROL_NODATAXFER;
3265 	}
3266 
3267 	c->Header.SGList = use_sg;
3268 	/* Fill out the command structure to submit */
3269 	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3270 	cp->transfer_len = cpu_to_le32(total_len);
3271 	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3272 			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3273 	cp->control = cpu_to_le32(control);
3274 	memcpy(cp->CDB, cdb, cdb_len);
3275 	memcpy(cp->CISS_LUN, scsi3addr, 8);
3276 	/* Tag was already set at init time. */
3277 	enqueue_cmd_and_start_io(h, c);
3278 	return 0;
3279 }
3280 
3281 /*
3282  * Queue a command directly to a device behind the controller using the
3283  * I/O accelerator path.
3284  */
3285 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3286 	struct CommandList *c)
3287 {
3288 	struct scsi_cmnd *cmd = c->scsi_cmd;
3289 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3290 
3291 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3292 		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3293 }
3294 
3295 /*
3296  * Set encryption parameters for the ioaccel2 request
3297  */
3298 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3299 	struct CommandList *c, struct io_accel2_cmd *cp)
3300 {
3301 	struct scsi_cmnd *cmd = c->scsi_cmd;
3302 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3303 	struct raid_map_data *map = &dev->raid_map;
3304 	u64 first_block;
3305 
3306 	BUG_ON(!(dev->offload_config && dev->offload_enabled));
3307 
3308 	/* Are we doing encryption on this device? */
3309 	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3310 		return;
3311 	/* Set the data encryption key index. */
3312 	cp->dekindex = map->dekindex;
3313 
3314 	/* Set the encryption enable flag, encoded into direction field. */
3315 	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3316 
3317 	/* Set encryption tweak values based on logical block address
3318 	 * If block size is 512, tweak value is LBA.
3319 	 * For other block sizes, tweak is (LBA * block size) / 512.
3320 	 */
3321 	switch (cmd->cmnd[0]) {
3322 	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3323 	case WRITE_6:
3324 	case READ_6:
3325 		first_block = get_unaligned_be16(&cmd->cmnd[2]);
3326 		break;
3327 	case WRITE_10:
3328 	case READ_10:
3329 	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3330 	case WRITE_12:
3331 	case READ_12:
3332 		first_block = get_unaligned_be32(&cmd->cmnd[2]);
3333 		break;
3334 	case WRITE_16:
3335 	case READ_16:
3336 		first_block = get_unaligned_be64(&cmd->cmnd[2]);
3337 		break;
3338 	default:
3339 		dev_err(&h->pdev->dev,
3340 			"ERROR: %s: size (0x%x) not supported for encryption\n",
3341 			__func__, cmd->cmnd[0]);
3342 		BUG();
3343 		break;
3344 	}
3345 
3346 	if (le32_to_cpu(map->volume_blk_size) != 512)
3347 		first_block = first_block *
3348 				le32_to_cpu(map->volume_blk_size)/512;
3349 
3350 	cp->tweak_lower = cpu_to_le32(first_block);
3351 	cp->tweak_upper = cpu_to_le32(first_block >> 32);
3352 }
3353 
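/*
 * Illustrative sketch, not part of the driver: the tweak scaling done
 * above, in isolation.  For a 512-byte volume block size the tweak is the
 * LBA itself; for a 4096-byte volume, LBA n yields tweak n * 8.  The
 * helper name is hypothetical.
 */
static u64 __maybe_unused example_encryption_tweak(u64 lba, u32 volume_blk_size)
{
	if (volume_blk_size == 512)
		return lba;
	return lba * volume_blk_size / 512;
}
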
3354 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3355 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3356 	u8 *scsi3addr)
3357 {
3358 	struct scsi_cmnd *cmd = c->scsi_cmd;
3359 	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3360 	struct ioaccel2_sg_element *curr_sg;
3361 	int use_sg, i;
3362 	struct scatterlist *sg;
3363 	u64 addr64;
3364 	u32 len;
3365 	u32 total_len = 0;
3366 
3367 	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3368 		return IO_ACCEL_INELIGIBLE;
3369 
3370 	if (fixup_ioaccel_cdb(cdb, &cdb_len))
3371 		return IO_ACCEL_INELIGIBLE;
3372 	c->cmd_type = CMD_IOACCEL2;
3373 	/* Adjust the DMA address to point to the accelerated command buffer */
3374 	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3375 				(c->cmdindex * sizeof(*cp));
3376 	BUG_ON(c->busaddr & 0x0000007F);
3377 
3378 	memset(cp, 0, sizeof(*cp));
3379 	cp->IU_type = IOACCEL2_IU_TYPE;
3380 
3381 	use_sg = scsi_dma_map(cmd);
3382 	if (use_sg < 0)
3383 		return use_sg;
3384 
3385 	if (use_sg) {
3386 		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3387 		curr_sg = cp->sg;
3388 		scsi_for_each_sg(cmd, sg, use_sg, i) {
3389 			addr64 = (u64) sg_dma_address(sg);
3390 			len  = sg_dma_len(sg);
3391 			total_len += len;
3392 			curr_sg->address = cpu_to_le64(addr64);
3393 			curr_sg->length = cpu_to_le32(len);
3394 			curr_sg->reserved[0] = 0;
3395 			curr_sg->reserved[1] = 0;
3396 			curr_sg->reserved[2] = 0;
3397 			curr_sg->chain_indicator = 0;
3398 			curr_sg++;
3399 		}
3400 
3401 		switch (cmd->sc_data_direction) {
3402 		case DMA_TO_DEVICE:
3403 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3404 			cp->direction |= IOACCEL2_DIR_DATA_OUT;
3405 			break;
3406 		case DMA_FROM_DEVICE:
3407 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3408 			cp->direction |= IOACCEL2_DIR_DATA_IN;
3409 			break;
3410 		case DMA_NONE:
3411 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3412 			cp->direction |= IOACCEL2_DIR_NO_DATA;
3413 			break;
3414 		default:
3415 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3416 				cmd->sc_data_direction);
3417 			BUG();
3418 			break;
3419 		}
3420 	} else {
3421 		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3422 		cp->direction |= IOACCEL2_DIR_NO_DATA;
3423 	}
3424 
3425 	/* Set encryption parameters, if necessary */
3426 	set_encrypt_ioaccel2(h, c, cp);
3427 
3428 	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3429 	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3430 	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3431 
3432 	/* fill in sg elements */
3433 	cp->sg_count = (u8) use_sg;
3434 
3435 	cp->data_len = cpu_to_le32(total_len);
3436 	cp->err_ptr = cpu_to_le64(c->busaddr +
3437 			offsetof(struct io_accel2_cmd, error_data));
3438 	cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3439 
3440 	enqueue_cmd_and_start_io(h, c);
3441 	return 0;
3442 }
3443 
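/*
 * Illustrative sketch, not part of the driver: the ioaccel2 error buffer
 * is embedded in the command structure itself, so its DMA address is the
 * command's bus address plus the member offset, as set up for err_ptr
 * above.  The helper name is hypothetical.
 */
static u64 __maybe_unused example_ioaccel2_err_addr(u32 cmd_busaddr)
{
	return (u64) cmd_busaddr +
		offsetof(struct io_accel2_cmd, error_data);
}
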
3444 /*
3445  * Queue a command to the correct I/O accelerator path.
3446  */
3447 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3448 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3449 	u8 *scsi3addr)
3450 {
3451 	if (h->transMethod & CFGTBL_Trans_io_accel1)
3452 		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3453 						cdb, cdb_len, scsi3addr);
3454 	else
3455 		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3456 						cdb, cdb_len, scsi3addr);
3457 }
3458 
3459 static void raid_map_helper(struct raid_map_data *map,
3460 		int offload_to_mirror, u32 *map_index, u32 *current_group)
3461 {
3462 	if (offload_to_mirror == 0)  {
3463 		/* use physical disk in the first mirrored group. */
3464 		*map_index %= le16_to_cpu(map->data_disks_per_row);
3465 		return;
3466 	}
3467 	do {
3468 		/* determine mirror group that *map_index indicates */
3469 		*current_group = *map_index /
3470 			le16_to_cpu(map->data_disks_per_row);
3471 		if (offload_to_mirror == *current_group)
3472 			continue;
3473 		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
3474 			/* select map index from next group */
3475 			*map_index += le16_to_cpu(map->data_disks_per_row);
3476 			(*current_group)++;
3477 		} else {
3478 			/* select map index from first group */
3479 			*map_index %= le16_to_cpu(map->data_disks_per_row);
3480 			*current_group = 0;
3481 		}
3482 	} while (offload_to_mirror != *current_group);
3483 }
3484 
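/*
 * Illustrative sketch, not part of the driver: with three data disks per
 * row and three mirror groups (the R1-ADM case), asking raid_map_helper()
 * for mirror group 2 moves map_index 1 (group 0) to map_index 7, the same
 * column two groups further on.  The map contents below are made up for
 * the example.
 */
static void __maybe_unused example_raid_map_helper(void)
{
	static struct raid_map_data example_map = {
		.data_disks_per_row = cpu_to_le16(3),
		.layout_map_count = cpu_to_le16(3),
	};
	u32 map_index = 1, current_group = 0;

	raid_map_helper(&example_map, 2, &map_index, &current_group);
	/* map_index is now 7, current_group is now 2 */
}
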
3485 /*
3486  * Attempt to perform offload RAID mapping for a logical volume I/O.
3487  */
3488 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3489 	struct CommandList *c)
3490 {
3491 	struct scsi_cmnd *cmd = c->scsi_cmd;
3492 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3493 	struct raid_map_data *map = &dev->raid_map;
3494 	struct raid_map_disk_data *dd = &map->data[0];
3495 	int is_write = 0;
3496 	u32 map_index;
3497 	u64 first_block, last_block;
3498 	u32 block_cnt;
3499 	u32 blocks_per_row;
3500 	u64 first_row, last_row;
3501 	u32 first_row_offset, last_row_offset;
3502 	u32 first_column, last_column;
3503 	u64 r0_first_row, r0_last_row;
3504 	u32 r5or6_blocks_per_row;
3505 	u64 r5or6_first_row, r5or6_last_row;
3506 	u32 r5or6_first_row_offset, r5or6_last_row_offset;
3507 	u32 r5or6_first_column, r5or6_last_column;
3508 	u32 total_disks_per_row;
3509 	u32 stripesize;
3510 	u32 first_group, last_group, current_group;
3511 	u32 map_row;
3512 	u32 disk_handle;
3513 	u64 disk_block;
3514 	u32 disk_block_cnt;
3515 	u8 cdb[16];
3516 	u8 cdb_len;
3517 	u16 strip_size;
3518 #if BITS_PER_LONG == 32
3519 	u64 tmpdiv;
3520 #endif
3521 	int offload_to_mirror;
3522 
3523 	BUG_ON(!(dev->offload_config && dev->offload_enabled));
3524 
3525 	/* check for valid opcode, get LBA and block count */
3526 	switch (cmd->cmnd[0]) {
3527 	case WRITE_6:
3528 		is_write = 1;
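		/* fall through */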
3529 	case READ_6:
3530 		first_block =
3531 			(((u64) cmd->cmnd[2]) << 8) |
3532 			cmd->cmnd[3];
3533 		block_cnt = cmd->cmnd[4];
3534 		if (block_cnt == 0)
3535 			block_cnt = 256;
3536 		break;
3537 	case WRITE_10:
3538 		is_write = 1;
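		/* fall through */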
3539 	case READ_10:
3540 		first_block =
3541 			(((u64) cmd->cmnd[2]) << 24) |
3542 			(((u64) cmd->cmnd[3]) << 16) |
3543 			(((u64) cmd->cmnd[4]) << 8) |
3544 			cmd->cmnd[5];
3545 		block_cnt =
3546 			(((u32) cmd->cmnd[7]) << 8) |
3547 			cmd->cmnd[8];
3548 		break;
3549 	case WRITE_12:
3550 		is_write = 1;
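		/* fall through */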
3551 	case READ_12:
3552 		first_block =
3553 			(((u64) cmd->cmnd[2]) << 24) |
3554 			(((u64) cmd->cmnd[3]) << 16) |
3555 			(((u64) cmd->cmnd[4]) << 8) |
3556 			cmd->cmnd[5];
3557 		block_cnt =
3558 			(((u32) cmd->cmnd[6]) << 24) |
3559 			(((u32) cmd->cmnd[7]) << 16) |
3560 			(((u32) cmd->cmnd[8]) << 8) |
3561 			cmd->cmnd[9];
3562 		break;
3563 	case WRITE_16:
3564 		is_write = 1;
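		/* fall through */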
3565 	case READ_16:
3566 		first_block =
3567 			(((u64) cmd->cmnd[2]) << 56) |
3568 			(((u64) cmd->cmnd[3]) << 48) |
3569 			(((u64) cmd->cmnd[4]) << 40) |
3570 			(((u64) cmd->cmnd[5]) << 32) |
3571 			(((u64) cmd->cmnd[6]) << 24) |
3572 			(((u64) cmd->cmnd[7]) << 16) |
3573 			(((u64) cmd->cmnd[8]) << 8) |
3574 			cmd->cmnd[9];
3575 		block_cnt =
3576 			(((u32) cmd->cmnd[10]) << 24) |
3577 			(((u32) cmd->cmnd[11]) << 16) |
3578 			(((u32) cmd->cmnd[12]) << 8) |
3579 			cmd->cmnd[13];
3580 		break;
3581 	default:
3582 		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3583 	}
3584 	last_block = first_block + block_cnt - 1;
3585 
3586 	/* check for write to non-RAID-0 */
3587 	if (is_write && dev->raid_level != 0)
3588 		return IO_ACCEL_INELIGIBLE;
3589 
3590 	/* check for invalid block or wraparound */
3591 	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
3592 		last_block < first_block)
3593 		return IO_ACCEL_INELIGIBLE;
3594 
3595 	/* calculate stripe information for the request */
3596 	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
3597 				le16_to_cpu(map->strip_size);
3598 	strip_size = le16_to_cpu(map->strip_size);
3599 #if BITS_PER_LONG == 32
3600 	tmpdiv = first_block;
3601 	(void) do_div(tmpdiv, blocks_per_row);
3602 	first_row = tmpdiv;
3603 	tmpdiv = last_block;
3604 	(void) do_div(tmpdiv, blocks_per_row);
3605 	last_row = tmpdiv;
3606 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3607 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3608 	tmpdiv = first_row_offset;
3609 	(void) do_div(tmpdiv, strip_size);
3610 	first_column = tmpdiv;
3611 	tmpdiv = last_row_offset;
3612 	(void) do_div(tmpdiv, strip_size);
3613 	last_column = tmpdiv;
3614 #else
3615 	first_row = first_block / blocks_per_row;
3616 	last_row = last_block / blocks_per_row;
3617 	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3618 	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3619 	first_column = first_row_offset / strip_size;
3620 	last_column = last_row_offset / strip_size;
3621 #endif
3622 
3623 	/* if this isn't a single row/column then give to the controller */
3624 	if ((first_row != last_row) || (first_column != last_column))
3625 		return IO_ACCEL_INELIGIBLE;
3626 
3627 	/* proceeding with driver mapping */
3628 	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
3629 				le16_to_cpu(map->metadata_disks_per_row);
3630 	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3631 				le16_to_cpu(map->row_cnt);
3632 	map_index = (map_row * total_disks_per_row) + first_column;
3633 
3634 	switch (dev->raid_level) {
3635 	case HPSA_RAID_0:
3636 		break; /* nothing special to do */
3637 	case HPSA_RAID_1:
3638 		/* Handles load balancing across RAID 1 members.
3639 		 * (2-drive R1 and R10 with even # of drives.)
3640 		 * Appropriate for SSDs, not optimal for HDDs.
3641 		 */
3642 		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
3643 		if (dev->offload_to_mirror)
3644 			map_index += le16_to_cpu(map->data_disks_per_row);
3645 		dev->offload_to_mirror = !dev->offload_to_mirror;
3646 		break;
3647 	case HPSA_RAID_ADM:
3648 		/* Handles N-way mirrors (R1-ADM)
3649 		 * and R10 with # of drives divisible by 3.
3650 		 */
3651 		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
3652 
3653 		offload_to_mirror = dev->offload_to_mirror;
3654 		raid_map_helper(map, offload_to_mirror,
3655 				&map_index, &current_group);
3656 		/* set mirror group to use next time */
3657 		offload_to_mirror =
3658 			(offload_to_mirror >=
3659 			le16_to_cpu(map->layout_map_count) - 1)
3660 			? 0 : offload_to_mirror + 1;
3661 		dev->offload_to_mirror = offload_to_mirror;
3662 		/* Avoid direct use of dev->offload_to_mirror within this
3663 		 * function since multiple threads might simultaneously
3664 		 * increment it beyond the range of map->layout_map_count - 1.
3665 		 */
3666 		break;
3667 	case HPSA_RAID_5:
3668 	case HPSA_RAID_6:
3669 		if (le16_to_cpu(map->layout_map_count) <= 1)
3670 			break;
3671 
3672 		/* Verify first and last block are in same RAID group */
3673 		r5or6_blocks_per_row =
3674 			le16_to_cpu(map->strip_size) *
3675 			le16_to_cpu(map->data_disks_per_row);
3676 		BUG_ON(r5or6_blocks_per_row == 0);
3677 		stripesize = r5or6_blocks_per_row *
3678 			le16_to_cpu(map->layout_map_count);
3679 #if BITS_PER_LONG == 32
3680 		tmpdiv = first_block;
3681 		first_group = do_div(tmpdiv, stripesize);
3682 		tmpdiv = first_group;
3683 		(void) do_div(tmpdiv, r5or6_blocks_per_row);
3684 		first_group = tmpdiv;
3685 		tmpdiv = last_block;
3686 		last_group = do_div(tmpdiv, stripesize);
3687 		tmpdiv = last_group;
3688 		(void) do_div(tmpdiv, r5or6_blocks_per_row);
3689 		last_group = tmpdiv;
3690 #else
3691 		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3692 		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3693 #endif
3694 		if (first_group != last_group)
3695 			return IO_ACCEL_INELIGIBLE;
3696 
3697 		/* Verify request is in a single row of RAID 5/6 */
3698 #if BITS_PER_LONG == 32
3699 		tmpdiv = first_block;
3700 		(void) do_div(tmpdiv, stripesize);
3701 		first_row = r5or6_first_row = r0_first_row = tmpdiv;
3702 		tmpdiv = last_block;
3703 		(void) do_div(tmpdiv, stripesize);
3704 		r5or6_last_row = r0_last_row = tmpdiv;
3705 #else
3706 		first_row = r5or6_first_row = r0_first_row =
3707 						first_block / stripesize;
3708 		r5or6_last_row = r0_last_row = last_block / stripesize;
3709 #endif
3710 		if (r5or6_first_row != r5or6_last_row)
3711 			return IO_ACCEL_INELIGIBLE;
3712 
3714 		/* Verify request is in a single column */
3715 #if BITS_PER_LONG == 32
3716 		tmpdiv = first_block;
3717 		first_row_offset = do_div(tmpdiv, stripesize);
3718 		tmpdiv = first_row_offset;
3719 		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3720 		r5or6_first_row_offset = first_row_offset;
3721 		tmpdiv = last_block;
3722 		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3723 		tmpdiv = r5or6_last_row_offset;
3724 		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3725 		tmpdiv = r5or6_first_row_offset;
3726 		(void) do_div(tmpdiv, map->strip_size);
3727 		first_column = r5or6_first_column = tmpdiv;
3728 		tmpdiv = r5or6_last_row_offset;
3729 		(void) do_div(tmpdiv, map->strip_size);
3730 		r5or6_last_column = tmpdiv;
3731 #else
3732 		first_row_offset = r5or6_first_row_offset =
3733 			(u32)((first_block % stripesize) %
3734 						r5or6_blocks_per_row);
3735 
3736 		r5or6_last_row_offset =
3737 			(u32)((last_block % stripesize) %
3738 						r5or6_blocks_per_row);
3739 
3740 		first_column = r5or6_first_column =
3741 			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
3742 		r5or6_last_column =
3743 			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
3744 #endif
3745 		if (r5or6_first_column != r5or6_last_column)
3746 			return IO_ACCEL_INELIGIBLE;
3747 
3748 		/* Request is eligible */
3749 		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3750 			le16_to_cpu(map->row_cnt);
3751 
3752 		map_index = (first_group *
3753 			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
3754 			(map_row * total_disks_per_row) + first_column;
3755 		break;
3756 	default:
3757 		return IO_ACCEL_INELIGIBLE;
3758 	}
3759 
3760 	disk_handle = dd[map_index].ioaccel_handle;
3761 	disk_block = le64_to_cpu(map->disk_starting_blk) +
3762 			first_row * le16_to_cpu(map->strip_size) +
3763 			(first_row_offset - first_column *
3764 			le16_to_cpu(map->strip_size));
3765 	disk_block_cnt = block_cnt;
3766 
3767 	/* handle differing logical/physical block sizes */
3768 	if (map->phys_blk_shift) {
3769 		disk_block <<= map->phys_blk_shift;
3770 		disk_block_cnt <<= map->phys_blk_shift;
3771 	}
3772 	BUG_ON(disk_block_cnt > 0xffff);
3773 
3774 	/* build the new CDB for the physical disk I/O */
3775 	if (disk_block > 0xffffffff) {
3776 		cdb[0] = is_write ? WRITE_16 : READ_16;
3777 		cdb[1] = 0;
3778 		cdb[2] = (u8) (disk_block >> 56);
3779 		cdb[3] = (u8) (disk_block >> 48);
3780 		cdb[4] = (u8) (disk_block >> 40);
3781 		cdb[5] = (u8) (disk_block >> 32);
3782 		cdb[6] = (u8) (disk_block >> 24);
3783 		cdb[7] = (u8) (disk_block >> 16);
3784 		cdb[8] = (u8) (disk_block >> 8);
3785 		cdb[9] = (u8) (disk_block);
3786 		cdb[10] = (u8) (disk_block_cnt >> 24);
3787 		cdb[11] = (u8) (disk_block_cnt >> 16);
3788 		cdb[12] = (u8) (disk_block_cnt >> 8);
3789 		cdb[13] = (u8) (disk_block_cnt);
3790 		cdb[14] = 0;
3791 		cdb[15] = 0;
3792 		cdb_len = 16;
3793 	} else {
3794 		cdb[0] = is_write ? WRITE_10 : READ_10;
3795 		cdb[1] = 0;
3796 		cdb[2] = (u8) (disk_block >> 24);
3797 		cdb[3] = (u8) (disk_block >> 16);
3798 		cdb[4] = (u8) (disk_block >> 8);
3799 		cdb[5] = (u8) (disk_block);
3800 		cdb[6] = 0;
3801 		cdb[7] = (u8) (disk_block_cnt >> 8);
3802 		cdb[8] = (u8) (disk_block_cnt);
3803 		cdb[9] = 0;
3804 		cdb_len = 10;
3805 	}
3806 	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3807 						dev->scsi3addr);
3808 }
3809 
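/*
 * Illustrative sketch, not part of the driver: the RAID-0 geometry
 * arithmetic above, reduced to its core.  With 4 data disks and a
 * 128-block strip (512 blocks per row), LBA 1000 falls in row 1 at row
 * offset 488, column 3, and maps to physical disk block
 * 1 * 128 + (488 - 3 * 128) = 232.  The helper name is hypothetical.
 */
static u64 __maybe_unused example_raid0_disk_block(u64 lba, u16 data_disks,
						u16 strip_size)
{
	u32 blocks_per_row = data_disks * strip_size;
	u64 row = lba;
	u32 row_offset, column;

	row_offset = do_div(row, blocks_per_row); /* row becomes quotient */
	column = row_offset / strip_size;
	return row * strip_size + (row_offset - column * strip_size);
}
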
3810 /* Running in struct Scsi_Host->host_lock less mode */
3811 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
3812 {
3813 	struct ctlr_info *h;
3814 	struct hpsa_scsi_dev_t *dev;
3815 	unsigned char scsi3addr[8];
3816 	struct CommandList *c;
3817 	int rc = 0;
3818 
3819 	/* Get the ptr to our adapter structure out of cmd->host. */
3820 	h = sdev_to_hba(cmd->device);
3821 	dev = cmd->device->hostdata;
3822 	if (!dev) {
3823 		cmd->result = DID_NO_CONNECT << 16;
3824 		cmd->scsi_done(cmd);
3825 		return 0;
3826 	}
3827 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3828 
3829 	if (unlikely(lockup_detected(h))) {
3830 		cmd->result = DID_ERROR << 16;
3831 		cmd->scsi_done(cmd);
3832 		return 0;
3833 	}
3834 	c = cmd_alloc(h);
3835 	if (c == NULL) {			/* trouble... */
3836 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3837 		return SCSI_MLQUEUE_HOST_BUSY;
3838 	}
3839 
3840 	/* Fill in the command list header */
3841 	/* save c in case we have to abort it  */
3842 	cmd->host_scribble = (unsigned char *) c;
3843 
3844 	c->cmd_type = CMD_SCSI;
3845 	c->scsi_cmd = cmd;
3846 
3847 	/* Call alternate submit routine for I/O accelerated commands.
3848 	 * Retries always go down the normal I/O path.
3849 	 */
3850 	if (likely(cmd->retries == 0 &&
3851 		cmd->request->cmd_type == REQ_TYPE_FS &&
3852 		h->acciopath_status)) {
3853 		if (dev->offload_enabled) {
3854 			rc = hpsa_scsi_ioaccel_raid_map(h, c);
3855 			if (rc == 0)
3856 				return 0; /* Sent on ioaccel path */
3857 			if (rc < 0) {   /* scsi_dma_map failed. */
3858 				cmd_free(h, c);
3859 				return SCSI_MLQUEUE_HOST_BUSY;
3860 			}
3861 		} else if (dev->ioaccel_handle) {
3862 			rc = hpsa_scsi_ioaccel_direct_map(h, c);
3863 			if (rc == 0)
3864 				return 0; /* Sent on direct map path */
3865 			if (rc < 0) {   /* scsi_dma_map failed. */
3866 				cmd_free(h, c);
3867 				return SCSI_MLQUEUE_HOST_BUSY;
3868 			}
3869 		}
3870 	}
3871 
3872 	c->Header.ReplyQueue = 0;  /* unused in simple mode */
3873 	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
3874 	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
3875 
3876 	/* Fill in the request block... */
3877 
3878 	c->Request.Timeout = 0;
3879 	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
3880 	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
3881 	c->Request.CDBLen = cmd->cmd_len;
3882 	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
3883 	switch (cmd->sc_data_direction) {
3884 	case DMA_TO_DEVICE:
3885 		c->Request.type_attr_dir =
3886 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
3887 		break;
3888 	case DMA_FROM_DEVICE:
3889 		c->Request.type_attr_dir =
3890 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
3891 		break;
3892 	case DMA_NONE:
3893 		c->Request.type_attr_dir =
3894 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
3895 		break;
3896 	case DMA_BIDIRECTIONAL:
3897 		/* This can happen if a buggy application does a scsi passthru
3898 		 * and sets both inlen and outlen to non-zero. ( see
3899 		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
3900 		 */
3901 
3902 		c->Request.type_attr_dir =
3903 			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
3904 		/* This is technically wrong, and hpsa controllers should
3905 		 * reject it with CMD_INVALID, which is the most correct
3906 		 * response, but non-fibre backends appear to let it
3907 		 * slide by, and give the same results as if this field
3908 		 * were set correctly.  Either way is acceptable for
3909 		 * our purposes here.
3910 		 */
3911 
3912 		break;
3913 
3914 	default:
3915 		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3916 			cmd->sc_data_direction);
3917 		BUG();
3918 		break;
3919 	}
3920 
3921 	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
3922 		cmd_free(h, c);
3923 		return SCSI_MLQUEUE_HOST_BUSY;
3924 	}
3925 	enqueue_cmd_and_start_io(h, c);
3926 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
3927 	return 0;
3928 }
3929 
3930 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
3931 {
3932 	unsigned long flags;
3933 
3934 	/*
3935 	 * Don't let rescans be initiated on a controller known
3936 	 * to be locked up.  If the controller locks up *during*
3937 	 * a rescan, that thread is probably hosed, but at least
3938 	 * we can prevent new rescan threads from piling up on a
3939 	 * locked up controller.
3940 	 */
3941 	if (unlikely(lockup_detected(h))) {
3942 		spin_lock_irqsave(&h->scan_lock, flags);
3943 		h->scan_finished = 1;
3944 		wake_up_all(&h->scan_wait_queue);
3945 		spin_unlock_irqrestore(&h->scan_lock, flags);
3946 		return 1;
3947 	}
3948 	return 0;
3949 }
3950 
3951 static void hpsa_scan_start(struct Scsi_Host *sh)
3952 {
3953 	struct ctlr_info *h = shost_to_hba(sh);
3954 	unsigned long flags;
3955 
3956 	if (do_not_scan_if_controller_locked_up(h))
3957 		return;
3958 
3959 	/* wait until any scan already in progress is finished. */
3960 	while (1) {
3961 		spin_lock_irqsave(&h->scan_lock, flags);
3962 		if (h->scan_finished)
3963 			break;
3964 		spin_unlock_irqrestore(&h->scan_lock, flags);
3965 		wait_event(h->scan_wait_queue, h->scan_finished);
3966 		/* Note: We don't need to worry about a race between this
3967 		 * thread and driver unload because the midlayer will
3968 		 * have incremented the reference count, so unload won't
3969 		 * happen if we're in here.
3970 		 */
3971 	}
3972 	h->scan_finished = 0; /* mark scan as in progress */
3973 	spin_unlock_irqrestore(&h->scan_lock, flags);
3974 
3975 	if (do_not_scan_if_controller_locked_up(h))
3976 		return;
3977 
3978 	hpsa_update_scsi_devices(h, h->scsi_host->host_no);
3979 
3980 	spin_lock_irqsave(&h->scan_lock, flags);
3981 	h->scan_finished = 1; /* mark scan as finished. */
3982 	wake_up_all(&h->scan_wait_queue);
3983 	spin_unlock_irqrestore(&h->scan_lock, flags);
3984 }
3985 
3986 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
3987 {
3988 	struct ctlr_info *h = sdev_to_hba(sdev);
3989 
3990 	if (qdepth < 1)
3991 		qdepth = 1;
3992 	else
3993 		if (qdepth > h->nr_cmds)
3994 			qdepth = h->nr_cmds;
3995 	scsi_change_queue_depth(sdev, qdepth);
3996 	return sdev->queue_depth;
3997 }
3998 
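/*
 * Illustrative sketch, not part of the driver: the bounds check above is
 * just a clamp of the requested depth into [1, h->nr_cmds]; the kernel's
 * clamp_t() would express the same thing in one line.
 */
static int __maybe_unused example_clamp_qdepth(int qdepth, int nr_cmds)
{
	return clamp_t(int, qdepth, 1, nr_cmds);
}
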
3999 static int hpsa_scan_finished(struct Scsi_Host *sh,
4000 	unsigned long elapsed_time)
4001 {
4002 	struct ctlr_info *h = shost_to_hba(sh);
4003 	unsigned long flags;
4004 	int finished;
4005 
4006 	spin_lock_irqsave(&h->scan_lock, flags);
4007 	finished = h->scan_finished;
4008 	spin_unlock_irqrestore(&h->scan_lock, flags);
4009 	return finished;
4010 }
4011 
4012 static void hpsa_unregister_scsi(struct ctlr_info *h)
4013 {
4014 	/* we are being forcibly unloaded, and may not refuse. */
4015 	scsi_remove_host(h->scsi_host);
4016 	scsi_host_put(h->scsi_host);
4017 	h->scsi_host = NULL;
4018 }
4019 
4020 static int hpsa_register_scsi(struct ctlr_info *h)
4021 {
4022 	struct Scsi_Host *sh;
4023 	int error;
4024 
4025 	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4026 	if (sh == NULL)
4027 		goto fail;
4028 
4029 	sh->io_port = 0;
4030 	sh->n_io_port = 0;
4031 	sh->this_id = -1;
4032 	sh->max_channel = 3;
4033 	sh->max_cmd_len = MAX_COMMAND_SIZE;
4034 	sh->max_lun = HPSA_MAX_LUN;
4035 	sh->max_id = HPSA_MAX_LUN;
4036 	sh->can_queue = h->nr_cmds -
4037 			HPSA_CMDS_RESERVED_FOR_ABORTS -
4038 			HPSA_CMDS_RESERVED_FOR_DRIVER -
4039 			HPSA_MAX_CONCURRENT_PASSTHRUS;
4040 	if (h->hba_mode_enabled)
4041 		sh->cmd_per_lun = 7;
4042 	else
4043 		sh->cmd_per_lun = sh->can_queue;
4044 	sh->sg_tablesize = h->maxsgentries;
4045 	h->scsi_host = sh;
4046 	sh->hostdata[0] = (unsigned long) h;
4047 	sh->irq = h->intr[h->intr_mode];
4048 	sh->unique_id = sh->irq;
4049 	error = scsi_add_host(sh, &h->pdev->dev);
4050 	if (error)
4051 		goto fail_host_put;
4052 	scsi_scan_host(sh);
4053 	return 0;
4054 
4055  fail_host_put:
4056 	dev_err(&h->pdev->dev, "%s: scsi_add_host"
4057 		" failed for controller %d\n", __func__, h->ctlr);
4058 	scsi_host_put(sh);
4059 	return error;
4060  fail:
4061 	dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4062 		" failed for controller %d\n", __func__, h->ctlr);
4063 	return -ENOMEM;
4064 }
4065 
4066 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4067 	unsigned char lunaddr[])
4068 {
4069 	int rc;
4070 	int count = 0;
4071 	int waittime = 1; /* seconds */
4072 	struct CommandList *c;
4073 
4074 	c = cmd_alloc(h);
4075 	if (!c) {
4076 		dev_warn(&h->pdev->dev, "out of memory in "
4077 			"wait_for_device_to_become_ready.\n");
4078 		return IO_ERROR;
4079 	}
4080 
4081 	/* Send test unit ready until device ready, or give up. */
4082 	while (count < HPSA_TUR_RETRY_LIMIT) {
4083 
4084 		/* Wait for a bit.  Do this first, because if we send
4085 		 * the TUR right away, the reset will just abort it.
4086 		 */
4087 		msleep(1000 * waittime);
4088 		count++;
4089 		rc = 0; /* Device ready. */
4090 
4091 		/* Increase wait time with each try, up to a point. */
4092 		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4093 			waittime = waittime * 2;
4094 
4095 		/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4096 		(void) fill_cmd(c, TEST_UNIT_READY, h,
4097 				NULL, 0, 0, lunaddr, TYPE_CMD);
4098 		hpsa_scsi_do_simple_cmd_core(h, c);
4099 		/* no unmap needed here because no data xfer. */
4100 
4101 		if (c->err_info->CommandStatus == CMD_SUCCESS)
4102 			break;
4103 
4104 		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4105 			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4106 			(c->err_info->SenseInfo[2] == NO_SENSE ||
4107 			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4108 			break;
4109 
4110 		dev_warn(&h->pdev->dev, "waiting %d secs "
4111 			"for device to become ready.\n", waittime);
4112 		rc = 1; /* device not ready. */
4113 	}
4114 
4115 	if (rc)
4116 		dev_warn(&h->pdev->dev, "giving up on device.\n");
4117 	else
4118 		dev_warn(&h->pdev->dev, "device is ready.\n");
4119 
4120 	cmd_free(h, c);
4121 	return rc;
4122 }
4123 
4124 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4125  * complaining.  Doing a host- or bus-reset can't do anything good here.
4126  */
4127 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4128 {
4129 	int rc;
4130 	struct ctlr_info *h;
4131 	struct hpsa_scsi_dev_t *dev;
4132 
4133 	/* find the controller to which the command to be aborted was sent */
4134 	h = sdev_to_hba(scsicmd->device);
4135 	if (h == NULL) /* paranoia */
4136 		return FAILED;
4137 	dev = scsicmd->device->hostdata;
4138 	if (!dev) {
4139 		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4140 			"device lookup failed.\n");
4141 		return FAILED;
4142 	}
4143 	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4144 		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4145 	/* send a reset to the SCSI LUN which the command was sent to */
4146 	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4147 	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4148 		return SUCCESS;
4149 
4150 	dev_warn(&h->pdev->dev, "resetting device failed.\n");
4151 	return FAILED;
4152 }
4153 
4154 static void swizzle_abort_tag(u8 *tag)
4155 {
4156 	u8 original_tag[8];
4157 
4158 	memcpy(original_tag, tag, 8);
4159 	tag[0] = original_tag[3];
4160 	tag[1] = original_tag[2];
4161 	tag[2] = original_tag[1];
4162 	tag[3] = original_tag[0];
4163 	tag[4] = original_tag[7];
4164 	tag[5] = original_tag[6];
4165 	tag[6] = original_tag[5];
4166 	tag[7] = original_tag[4];
4167 }
4168 
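/*
 * Illustrative sketch, not part of the driver: a concrete swizzle.  The
 * permutation above byte-reverses each 32-bit half of the 8-byte tag.
 */
static void __maybe_unused example_swizzle(void)
{
	u8 tag[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };

	swizzle_abort_tag(tag);
	/* tag is now { 0x04, 0x03, 0x02, 0x01, 0x08, 0x07, 0x06, 0x05 } */
}
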
4169 static void hpsa_get_tag(struct ctlr_info *h,
4170 	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4171 {
4172 	u64 tag;
4173 	if (c->cmd_type == CMD_IOACCEL1) {
4174 		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4175 			&h->ioaccel_cmd_pool[c->cmdindex];
4176 		tag = le64_to_cpu(cm1->tag);
4177 		*tagupper = cpu_to_le32(tag >> 32);
4178 		*taglower = cpu_to_le32(tag);
4179 		return;
4180 	}
4181 	if (c->cmd_type == CMD_IOACCEL2) {
4182 		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4183 			&h->ioaccel2_cmd_pool[c->cmdindex];
4184 		/* upper tag not used in ioaccel2 mode */
4185 		memset(tagupper, 0, sizeof(*tagupper));
4186 		*taglower = cm2->Tag;
4187 		return;
4188 	}
4189 	tag = le64_to_cpu(c->Header.tag);
4190 	*tagupper = cpu_to_le32(tag >> 32);
4191 	*taglower = cpu_to_le32(tag);
4192 }
4193 
4194 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4195 	struct CommandList *abort, int swizzle)
4196 {
4197 	int rc = IO_OK;
4198 	struct CommandList *c;
4199 	struct ErrorInfo *ei;
4200 	__le32 tagupper, taglower;
4201 
4202 	c = cmd_alloc(h);
4203 	if (c == NULL) {	/* trouble... */
4204 		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4205 		return -ENOMEM;
4206 	}
4207 
4208 	/* fill_cmd can't fail here, no buffer to map */
4209 	(void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4210 		0, 0, scsi3addr, TYPE_MSG);
4211 	if (swizzle)
4212 		swizzle_abort_tag(&c->Request.CDB[4]);
4213 	hpsa_scsi_do_simple_cmd_core(h, c);
4214 	hpsa_get_tag(h, abort, &taglower, &tagupper);
4215 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4216 		__func__, tagupper, taglower);
4217 	/* no unmap needed here because no data xfer. */
4218 
4219 	ei = c->err_info;
4220 	switch (ei->CommandStatus) {
4221 	case CMD_SUCCESS:
4222 		break;
4223 	case CMD_UNABORTABLE: /* Very common, don't make noise. */
4224 		rc = -1;
4225 		break;
4226 	default:
4227 		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4228 			__func__, tagupper, taglower);
4229 		hpsa_scsi_interpret_error(h, c);
4230 		rc = -1;
4231 		break;
4232 	}
4233 	cmd_free(h, c);
4234 	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4235 		__func__, tagupper, taglower);
4236 	return rc;
4237 }
4238 
4239 /* ioaccel2 path firmware cannot handle abort task requests.
4240  * Change abort requests to physical target reset, and send to the
4241  * address of the physical disk used for the ioaccel 2 command.
4242  * Return 0 on success (IO_OK)
4243  *	 -1 on failure
4244  */
4246 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4247 	unsigned char *scsi3addr, struct CommandList *abort)
4248 {
4249 	int rc = IO_OK;
4250 	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4251 	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4252 	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4253 	unsigned char *psa = &phys_scsi3addr[0];
4254 
4255 	/* Get a pointer to the hpsa logical device. */
4256 	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4257 	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4258 	if (dev == NULL) {
4259 		dev_warn(&h->pdev->dev,
4260 			"Cannot abort: no device pointer for command.\n");
4261 		return -1; /* not abortable */
4262 	}
4263 
4264 	if (h->raid_offload_debug > 0)
4265 		dev_info(&h->pdev->dev,
4266 			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4267 			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4268 			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4269 			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4270 
4271 	if (!dev->offload_enabled) {
4272 		dev_warn(&h->pdev->dev,
4273 			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4274 		return -1; /* not abortable */
4275 	}
4276 
4277 	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
4278 	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4279 		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4280 		return -1; /* not abortable */
4281 	}
4282 
4283 	/* send the reset */
4284 	if (h->raid_offload_debug > 0)
4285 		dev_info(&h->pdev->dev,
4286 			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4287 			psa[0], psa[1], psa[2], psa[3],
4288 			psa[4], psa[5], psa[6], psa[7]);
4289 	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4290 	if (rc != 0) {
4291 		dev_warn(&h->pdev->dev,
4292 			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4293 			psa[0], psa[1], psa[2], psa[3],
4294 			psa[4], psa[5], psa[6], psa[7]);
4295 		return rc; /* failed to reset */
4296 	}
4297 
4298 	/* wait for device to recover */
4299 	if (wait_for_device_to_become_ready(h, psa) != 0) {
4300 		dev_warn(&h->pdev->dev,
4301 			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4302 			psa[0], psa[1], psa[2], psa[3],
4303 			psa[4], psa[5], psa[6], psa[7]);
4304 		return -1;  /* failed to recover */
4305 	}
4306 
4307 	/* device recovered */
4308 	dev_info(&h->pdev->dev,
4309 		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4310 		psa[0], psa[1], psa[2], psa[3],
4311 		psa[4], psa[5], psa[6], psa[7]);
4312 
4313 	return rc; /* success */
4314 }
4315 
4316 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
4317  * tell which kind we're dealing with, so we send the abort both ways.  There
4318  * shouldn't be any collisions between swizzled and unswizzled tags due to the
4319  * way we construct our tags but we check anyway in case the assumptions which
4320  * make this true someday become false.
4321  */
4322 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4323 	unsigned char *scsi3addr, struct CommandList *abort)
4324 {
4325 	/* I/O accelerator mode 2 commands should be aborted via the
4326 	 * accelerated path, since RAID path is unaware of these commands,
4327 	 * but underlying firmware can't handle abort TMF.
4328 	 * Change abort to physical device reset.
4329 	 */
4330 	if (abort->cmd_type == CMD_IOACCEL2)
4331 		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4332 
4333 	return hpsa_send_abort(h, scsi3addr, abort, 0) &&
4334 			hpsa_send_abort(h, scsi3addr, abort, 1);
4335 }
4336 
4337 /* Send an abort for the specified command.
4338  *	If the device and controller support it,
4339  *		send a task abort request.
4340  */
4341 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4342 {
4344 	int i, rc;
4345 	struct ctlr_info *h;
4346 	struct hpsa_scsi_dev_t *dev;
4347 	struct CommandList *abort; /* pointer to command to be aborted */
4348 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
4349 	char msg[256];		/* For debug messaging. */
4350 	int ml = 0;
4351 	__le32 tagupper, taglower;
4352 
4353 	/* Find the controller of the command to be aborted */
4354 	h = sdev_to_hba(sc->device);
4355 	if (WARN(h == NULL,
4356 			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
4357 		return FAILED;
4358 
4359 	/* Check that controller supports some kind of task abort */
4360 	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4361 		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4362 		return FAILED;
4363 
4364 	memset(msg, 0, sizeof(msg));
4365 	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
4366 		h->scsi_host->host_no, sc->device->channel,
4367 		sc->device->id, sc->device->lun);
4368 
4369 	/* Find the device of the command to be aborted */
4370 	dev = sc->device->hostdata;
4371 	if (!dev) {
4372 		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4373 				msg);
4374 		return FAILED;
4375 	}
4376 
4377 	/* Get SCSI command to be aborted */
4378 	abort = (struct CommandList *) sc->host_scribble;
4379 	if (abort == NULL) {
4380 		dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4381 				msg);
4382 		return FAILED;
4383 	}
4384 	hpsa_get_tag(h, abort, &taglower, &tagupper);
4385 	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4386 	as  = (struct scsi_cmnd *) abort->scsi_cmd;
4387 	if (as != NULL)
4388 		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4389 			as->cmnd[0], as->serial_number);
4390 	dev_dbg(&h->pdev->dev, "%s\n", msg);
4391 	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4392 		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4393 	/*
4394 	 * Command is in flight, or possibly already completed
4395 	 * by the firmware (but not to the scsi mid layer) but we can't
4396 	 * distinguish which.  Send the abort down.
4397 	 */
4398 	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4399 	if (rc != 0) {
4400 		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4401 		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4402 			h->scsi_host->host_no,
4403 			dev->bus, dev->target, dev->lun);
4404 		return FAILED;
4405 	}
4406 	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4407 
4408 	/* If the abort(s) above completed and actually aborted the
4409 	 * command, then the command to be aborted should already be
4410 	 * completed.  If not, wait around a bit more to see if they
4411 	 * manage to complete normally.
4412 	 */
4413 #define ABORT_COMPLETE_WAIT_SECS 30
4414 	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4415 		if (test_bit(abort->cmdindex & (BITS_PER_LONG - 1),
4416 				h->cmd_pool_bits +
4417 				(abort->cmdindex / BITS_PER_LONG)))
4418 			msleep(100);
4419 		else
4420 			return SUCCESS;
4421 	}
4422 	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4423 		msg, ABORT_COMPLETE_WAIT_SECS);
4424 	return FAILED;
4425 }
4426 
4428 /*
4429  * For operations that cannot sleep, a command block is allocated at init,
4430  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4431  * which ones are free or in use.  Lock must be held when calling this.
4432  * cmd_free() is the complement.
4433  */
4434 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4435 {
4436 	struct CommandList *c;
4437 	int i;
4439 	dma_addr_t cmd_dma_handle, err_dma_handle;
4440 	int loopcount;
4441 
4442 	/* There is some *extremely* small but non-zero chance that
4443 	 * multiple threads could get in here, and one thread could
4444 	 * be scanning through the list of bits looking for a free
4445 	 * one, but the free ones are always behind him, and other
4446 	 * threads sneak in behind him and eat them before he can
4447 	 * get to them, so that while there is always a free one, a
4448 	 * very unlucky thread might be starved anyway, never able to
4449 	 * beat the other threads.  In reality, this happens so
4450 	 * infrequently as to be indistinguishable from never.
4451 	 */
4452 
4453 	loopcount = 0;
4454 	do {
4455 		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
4456 		if (i == h->nr_cmds)
4457 			i = 0;
4458 		loopcount++;
4459 	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
4460 		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0 &&
4461 		loopcount < 10);
4462 
4463 	/* Thread got starved?  We do not expect this to ever happen. */
4464 	if (loopcount >= 10)
4465 		return NULL;
4466 
4467 	c = h->cmd_pool + i;
4468 	memset(c, 0, sizeof(*c));
4469 	c->Header.tag = cpu_to_le64((u64) i << DIRECT_LOOKUP_SHIFT);
4470 	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
4471 	c->err_info = h->errinfo_pool + i;
4472 	memset(c->err_info, 0, sizeof(*c->err_info));
4473 	err_dma_handle = h->errinfo_pool_dhandle
4474 	    + i * sizeof(*c->err_info);
4475 
4476 	c->cmdindex = i;
4477 
4478 	c->busaddr = (u32) cmd_dma_handle;
4480 	c->ErrDesc.Addr = cpu_to_le64(err_dma_handle);
4481 	c->ErrDesc.Len = cpu_to_le32(sizeof(*c->err_info));
4482 
4483 	c->h = h;
4484 	return c;
4485 }
4486 
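/*
 * Illustrative sketch, not part of the driver: the lock-free slot
 * allocation used by cmd_alloc() above.  find_first_zero_bit() proposes a
 * slot and test_and_set_bit() atomically claims it; losing the race just
 * means scanning again.  The starvation handling is simplified here.
 */
static int __maybe_unused example_alloc_slot(unsigned long *bits, int nr_slots)
{
	int i;

	do {
		i = find_first_zero_bit(bits, nr_slots);
		if (i == nr_slots)
			return -1;	/* pool exhausted */
	} while (test_and_set_bit(i, bits));	/* raced with another thread */
	return i;
}
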
4487 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4488 {
4489 	int i;
4490 
4491 	i = c - h->cmd_pool;
4492 	clear_bit(i & (BITS_PER_LONG - 1),
4493 		  h->cmd_pool_bits + (i / BITS_PER_LONG));
4494 }
4495 
4496 #ifdef CONFIG_COMPAT
4497 
4498 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4499 	void __user *arg)
4500 {
4501 	IOCTL32_Command_struct __user *arg32 =
4502 	    (IOCTL32_Command_struct __user *) arg;
4503 	IOCTL_Command_struct arg64;
4504 	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4505 	int err;
4506 	u32 cp;
4507 
4508 	memset(&arg64, 0, sizeof(arg64));
4509 	err = 0;
4510 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4511 			   sizeof(arg64.LUN_info));
4512 	err |= copy_from_user(&arg64.Request, &arg32->Request,
4513 			   sizeof(arg64.Request));
4514 	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4515 			   sizeof(arg64.error_info));
4516 	err |= get_user(arg64.buf_size, &arg32->buf_size);
4517 	err |= get_user(cp, &arg32->buf);
4518 	arg64.buf = compat_ptr(cp);
4519 	err |= copy_to_user(p, &arg64, sizeof(arg64));
4520 
4521 	if (err)
4522 		return -EFAULT;
4523 
4524 	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4525 	if (err)
4526 		return err;
4527 	err |= copy_in_user(&arg32->error_info, &p->error_info,
4528 			 sizeof(arg32->error_info));
4529 	if (err)
4530 		return -EFAULT;
4531 	return err;
4532 }
4533 
4534 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4535 	int cmd, void __user *arg)
4536 {
4537 	BIG_IOCTL32_Command_struct __user *arg32 =
4538 	    (BIG_IOCTL32_Command_struct __user *) arg;
4539 	BIG_IOCTL_Command_struct arg64;
4540 	BIG_IOCTL_Command_struct __user *p =
4541 	    compat_alloc_user_space(sizeof(arg64));
4542 	int err;
4543 	u32 cp;
4544 
4545 	memset(&arg64, 0, sizeof(arg64));
4546 	err = 0;
4547 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4548 			   sizeof(arg64.LUN_info));
4549 	err |= copy_from_user(&arg64.Request, &arg32->Request,
4550 			   sizeof(arg64.Request));
4551 	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4552 			   sizeof(arg64.error_info));
4553 	err |= get_user(arg64.buf_size, &arg32->buf_size);
4554 	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4555 	err |= get_user(cp, &arg32->buf);
4556 	arg64.buf = compat_ptr(cp);
4557 	err |= copy_to_user(p, &arg64, sizeof(arg64));
4558 
4559 	if (err)
4560 		return -EFAULT;
4561 
4562 	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4563 	if (err)
4564 		return err;
4565 	err |= copy_in_user(&arg32->error_info, &p->error_info,
4566 			 sizeof(arg32->error_info));
4567 	if (err)
4568 		return -EFAULT;
4569 	return err;
4570 }
4571 
4572 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4573 {
4574 	switch (cmd) {
4575 	case CCISS_GETPCIINFO:
4576 	case CCISS_GETINTINFO:
4577 	case CCISS_SETINTINFO:
4578 	case CCISS_GETNODENAME:
4579 	case CCISS_SETNODENAME:
4580 	case CCISS_GETHEARTBEAT:
4581 	case CCISS_GETBUSTYPES:
4582 	case CCISS_GETFIRMVER:
4583 	case CCISS_GETDRIVVER:
4584 	case CCISS_REVALIDVOLS:
4585 	case CCISS_DEREGDISK:
4586 	case CCISS_REGNEWDISK:
4587 	case CCISS_REGNEWD:
4588 	case CCISS_RESCANDISK:
4589 	case CCISS_GETLUNINFO:
4590 		return hpsa_ioctl(dev, cmd, arg);
4591 
4592 	case CCISS_PASSTHRU32:
4593 		return hpsa_ioctl32_passthru(dev, cmd, arg);
4594 	case CCISS_BIG_PASSTHRU32:
4595 		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4596 
4597 	default:
4598 		return -ENOIOCTLCMD;
4599 	}
4600 }
4601 #endif
4602 
4603 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4604 {
4605 	struct hpsa_pci_info pciinfo;
4606 
4607 	if (!argp)
4608 		return -EINVAL;
4609 	pciinfo.domain = pci_domain_nr(h->pdev->bus);
4610 	pciinfo.bus = h->pdev->bus->number;
4611 	pciinfo.dev_fn = h->pdev->devfn;
4612 	pciinfo.board_id = h->board_id;
4613 	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4614 		return -EFAULT;
4615 	return 0;
4616 }
4617 
4618 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4619 {
4620 	DriverVer_type DriverVer;
4621 	unsigned char vmaj, vmin, vsubmin;
4622 	int rc;
4623 
4624 	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4625 		&vmaj, &vmin, &vsubmin);
4626 	if (rc != 3) {
4627 		dev_info(&h->pdev->dev, "driver version string '%s' "
4628 			"unrecognized.\n", HPSA_DRIVER_VERSION);
4629 		vmaj = 0;
4630 		vmin = 0;
4631 		vsubmin = 0;
4632 	}
4633 	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4634 	if (!argp)
4635 		return -EINVAL;
4636 	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4637 		return -EFAULT;
4638 	return 0;
4639 }
4640 
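/*
 * Illustrative sketch, not part of the driver: the DriverVer packing used
 * above stores one byte per component, so "3.4.4" becomes 0x030404.
 */
static u32 __maybe_unused example_pack_driver_version(u8 vmaj, u8 vmin,
						u8 vsubmin)
{
	return (vmaj << 16) | (vmin << 8) | vsubmin;
}
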
4641 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4642 {
4643 	IOCTL_Command_struct iocommand;
4644 	struct CommandList *c;
4645 	char *buff = NULL;
4646 	u64 temp64;
4647 	int rc = 0;
4648 
4649 	if (!argp)
4650 		return -EINVAL;
4651 	if (!capable(CAP_SYS_RAWIO))
4652 		return -EPERM;
4653 	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4654 		return -EFAULT;
4655 	if ((iocommand.buf_size < 1) &&
4656 	    (iocommand.Request.Type.Direction != XFER_NONE)) {
4657 		return -EINVAL;
4658 	}
4659 	if (iocommand.buf_size > 0) {
4660 		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4661 		if (buff == NULL)
4662 			return -ENOMEM;
4663 		if (iocommand.Request.Type.Direction & XFER_WRITE) {
4664 			/* Copy the data into the buffer we created */
4665 			if (copy_from_user(buff, iocommand.buf,
4666 				iocommand.buf_size)) {
4667 				rc = -EFAULT;
4668 				goto out_kfree;
4669 			}
4670 		} else {
4671 			memset(buff, 0, iocommand.buf_size);
4672 		}
4673 	}
4674 	c = cmd_alloc(h);
4675 	if (c == NULL) {
4676 		rc = -ENOMEM;
4677 		goto out_kfree;
4678 	}
4679 	/* Fill in the command type */
4680 	c->cmd_type = CMD_IOCTL_PEND;
4681 	/* Fill in Command Header */
4682 	c->Header.ReplyQueue = 0; /* unused in simple mode */
4683 	if (iocommand.buf_size > 0) {	/* buffer to fill */
4684 		c->Header.SGList = 1;
4685 		c->Header.SGTotal = cpu_to_le16(1);
4686 	} else	{ /* no buffers to fill */
4687 		c->Header.SGList = 0;
4688 		c->Header.SGTotal = cpu_to_le16(0);
4689 	}
4690 	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4691 
4692 	/* Fill in Request block */
4693 	memcpy(&c->Request, &iocommand.Request,
4694 		sizeof(c->Request));
4695 
4696 	/* Fill in the scatter gather information */
4697 	if (iocommand.buf_size > 0) {
4698 		temp64 = pci_map_single(h->pdev, buff,
4699 			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4700 		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
4701 			c->SG[0].Addr = cpu_to_le64(0);
4702 			c->SG[0].Len = cpu_to_le32(0);
4703 			rc = -ENOMEM;
4704 			goto out;
4705 		}
4706 		c->SG[0].Addr = cpu_to_le64(temp64);
4707 		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
4708 		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
4709 	}
4710 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
4711 	if (iocommand.buf_size > 0)
4712 		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
4713 	check_ioctl_unit_attention(h, c);
4714 
4715 	/* Copy the error information out */
4716 	memcpy(&iocommand.error_info, c->err_info,
4717 		sizeof(iocommand.error_info));
4718 	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
4719 		rc = -EFAULT;
4720 		goto out;
4721 	}
4722 	if ((iocommand.Request.Type.Direction & XFER_READ) &&
4723 		iocommand.buf_size > 0) {
4724 		/* Copy the data out of the buffer we created */
4725 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
4726 			rc = -EFAULT;
4727 			goto out;
4728 		}
4729 	}
4730 out:
4731 	cmd_free(h, c);
4732 out_kfree:
4733 	kfree(buff);
4734 	return rc;
4735 }
4736 
4737 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4738 {
4739 	BIG_IOCTL_Command_struct *ioc;
4740 	struct CommandList *c;
4741 	unsigned char **buff = NULL;
4742 	int *buff_size = NULL;
4743 	u64 temp64;
4744 	BYTE sg_used = 0;
4745 	int status = 0;
4746 	u32 left;
4747 	u32 sz;
4748 	BYTE __user *data_ptr;
4749 
4750 	if (!argp)
4751 		return -EINVAL;
4752 	if (!capable(CAP_SYS_RAWIO))
4753 		return -EPERM;
4754 	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
4756 	if (!ioc) {
4757 		status = -ENOMEM;
4758 		goto cleanup1;
4759 	}
4760 	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
4761 		status = -EFAULT;
4762 		goto cleanup1;
4763 	}
4764 	if ((ioc->buf_size < 1) &&
4765 	    (ioc->Request.Type.Direction != XFER_NONE)) {
4766 		status = -EINVAL;
4767 		goto cleanup1;
4768 	}
4769 	/* Check kmalloc limits  using all SGs */
4770 	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
4771 		status = -EINVAL;
4772 		goto cleanup1;
4773 	}
4774 	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
4775 		status = -EINVAL;
4776 		goto cleanup1;
4777 	}
4778 	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
4779 	if (!buff) {
4780 		status = -ENOMEM;
4781 		goto cleanup1;
4782 	}
4783 	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
4784 	if (!buff_size) {
4785 		status = -ENOMEM;
4786 		goto cleanup1;
4787 	}
4788 	left = ioc->buf_size;
4789 	data_ptr = ioc->buf;
4790 	while (left) {
4791 		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
4792 		buff_size[sg_used] = sz;
4793 		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
4794 		if (buff[sg_used] == NULL) {
4795 			status = -ENOMEM;
4796 			goto cleanup1;
4797 		}
4798 		if (ioc->Request.Type.Direction & XFER_WRITE) {
4799 			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
4800 				status = -EFAULT;
4801 				goto cleanup1;
4802 			}
4803 		} else
4804 			memset(buff[sg_used], 0, sz);
4805 		left -= sz;
4806 		data_ptr += sz;
4807 		sg_used++;
4808 	}
4809 	c = cmd_alloc(h);
4810 	if (c == NULL) {
4811 		status = -ENOMEM;
4812 		goto cleanup1;
4813 	}
4814 	c->cmd_type = CMD_IOCTL_PEND;
4815 	c->Header.ReplyQueue = 0;
4816 	c->Header.SGList = (u8) sg_used;
4817 	c->Header.SGTotal = cpu_to_le16(sg_used);
4818 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
4819 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
4820 	if (ioc->buf_size > 0) {
4821 		int i;
4822 		for (i = 0; i < sg_used; i++) {
4823 			temp64 = pci_map_single(h->pdev, buff[i],
4824 				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
4825 			if (dma_mapping_error(&h->pdev->dev,
4826 							(dma_addr_t) temp64)) {
4827 				c->SG[i].Addr = cpu_to_le64(0);
4828 				c->SG[i].Len = cpu_to_le32(0);
4829 				hpsa_pci_unmap(h->pdev, c, i,
4830 					PCI_DMA_BIDIRECTIONAL);
4831 				status = -ENOMEM;
4832 				goto cleanup0;
4833 			}
4834 			c->SG[i].Addr = cpu_to_le64(temp64);
4835 			c->SG[i].Len = cpu_to_le32(buff_size[i]);
4836 			c->SG[i].Ext = cpu_to_le32(0);
4837 		}
4838 		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
4839 	}
4840 	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
4841 	if (sg_used)
4842 		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
4843 	check_ioctl_unit_attention(h, c);
4844 	/* Copy the error information out */
4845 	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
4846 	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
4847 		status = -EFAULT;
4848 		goto cleanup0;
4849 	}
4850 	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
4851 		int i;
4852 
4853 		/* Copy the data out of the buffer we created */
4854 		BYTE __user *ptr = ioc->buf;
4855 		for (i = 0; i < sg_used; i++) {
4856 			if (copy_to_user(ptr, buff[i], buff_size[i])) {
4857 				status = -EFAULT;
4858 				goto cleanup0;
4859 			}
4860 			ptr += buff_size[i];
4861 		}
4862 	}
4863 	status = 0;
4864 cleanup0:
4865 	cmd_free(h, c);
4866 cleanup1:
4867 	if (buff) {
4868 		int i;
4869 
4870 		for (i = 0; i < sg_used; i++)
4871 			kfree(buff[i]);
4872 		kfree(buff);
4873 	}
4874 	kfree(buff_size);
4875 	kfree(ioc);
4876 	return status;
4877 }
4878 
4879 static void check_ioctl_unit_attention(struct ctlr_info *h,
4880 	struct CommandList *c)
4881 {
4882 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4883 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
4884 		(void) check_for_unit_attention(h, c);
4885 }
4886 
4887 static int increment_passthru_count(struct ctlr_info *h)
4888 {
4889 	unsigned long flags;
4890 
4891 	spin_lock_irqsave(&h->passthru_count_lock, flags);
4892 	if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
4893 		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
4894 		return -1;
4895 	}
4896 	h->passthru_count++;
4897 	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
4898 	return 0;
4899 }
4900 
4901 static void decrement_passthru_count(struct ctlr_info *h)
4902 {
4903 	unsigned long flags;
4904 
4905 	spin_lock_irqsave(&h->passthru_count_lock, flags);
4906 	if (h->passthru_count <= 0) {
4907 		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
4908 		/* not expecting to get here. */
4909 		dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
4910 		return;
4911 	}
4912 	h->passthru_count--;
4913 	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
4914 }
4915 
4916 /*
4917  * ioctl
4918  */
4919 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4920 {
4921 	struct ctlr_info *h;
4922 	void __user *argp = (void __user *)arg;
4923 	int rc;
4924 
4925 	h = sdev_to_hba(dev);
4926 
4927 	switch (cmd) {
4928 	case CCISS_DEREGDISK:
4929 	case CCISS_REGNEWDISK:
4930 	case CCISS_REGNEWD:
4931 		hpsa_scan_start(h->scsi_host);
4932 		return 0;
4933 	case CCISS_GETPCIINFO:
4934 		return hpsa_getpciinfo_ioctl(h, argp);
4935 	case CCISS_GETDRIVVER:
4936 		return hpsa_getdrivver_ioctl(h, argp);
4937 	case CCISS_PASSTHRU:
4938 		if (increment_passthru_count(h))
4939 			return -EAGAIN;
4940 		rc = hpsa_passthru_ioctl(h, argp);
4941 		decrement_passthru_count(h);
4942 		return rc;
4943 	case CCISS_BIG_PASSTHRU:
4944 		if (increment_passthru_count(h))
4945 			return -EAGAIN;
4946 		rc = hpsa_big_passthru_ioctl(h, argp);
4947 		decrement_passthru_count(h);
4948 		return rc;
4949 	default:
4950 		return -ENOTTY;
4951 	}
4952 }
4953 
4954 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
4955 				u8 reset_type)
4956 {
4957 	struct CommandList *c;
4958 
4959 	c = cmd_alloc(h);
4960 	if (!c)
4961 		return -ENOMEM;
4962 	/* fill_cmd can't fail here, no data buffer to map */
4963 	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
4964 		RAID_CTLR_LUNID, TYPE_MSG);
4965 	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
4966 	c->waiting = NULL;
4967 	enqueue_cmd_and_start_io(h, c);
4968 	/* Don't wait for completion, the reset won't complete.  Don't free
4969 	 * the command either.  This is the last command we will send before
4970 	 * re-initializing everything, so it doesn't matter and won't leak.
4971 	 */
4972 	return 0;
4973 }
4974 
4975 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
4976 	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
4977 	int cmd_type)
4978 {
4979 	int pci_dir = XFER_NONE;
4980 	struct CommandList *a; /* for commands to be aborted */
4981 
4982 	c->cmd_type = CMD_IOCTL_PEND;
4983 	c->Header.ReplyQueue = 0;
4984 	if (buff != NULL && size > 0) {
4985 		c->Header.SGList = 1;
4986 		c->Header.SGTotal = cpu_to_le16(1);
4987 	} else {
4988 		c->Header.SGList = 0;
4989 		c->Header.SGTotal = cpu_to_le16(0);
4990 	}
4991 	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
4992 
4993 	if (cmd_type == TYPE_CMD) {
4994 		switch (cmd) {
4995 		case HPSA_INQUIRY:
4996 			/* are we trying to read a vital product page */
4997 			if (page_code & VPD_PAGE) {
4998 				c->Request.CDB[1] = 0x01;
4999 				c->Request.CDB[2] = (page_code & 0xff);
5000 			}
5001 			c->Request.CDBLen = 6;
5002 			c->Request.type_attr_dir =
5003 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5004 			c->Request.Timeout = 0;
5005 			c->Request.CDB[0] = HPSA_INQUIRY;
5006 			c->Request.CDB[4] = size & 0xFF;
5007 			break;
5008 		case HPSA_REPORT_LOG:
5009 		case HPSA_REPORT_PHYS:
			/* Talking to the controller, so it's a physical
			 * command: mode = 00, target = 0.  Nothing to write.
5012 			 */
5013 			c->Request.CDBLen = 12;
5014 			c->Request.type_attr_dir =
5015 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5016 			c->Request.Timeout = 0;
5017 			c->Request.CDB[0] = cmd;
5018 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5019 			c->Request.CDB[7] = (size >> 16) & 0xFF;
5020 			c->Request.CDB[8] = (size >> 8) & 0xFF;
5021 			c->Request.CDB[9] = size & 0xFF;
5022 			break;
5023 		case HPSA_CACHE_FLUSH:
5024 			c->Request.CDBLen = 12;
5025 			c->Request.type_attr_dir =
5026 					TYPE_ATTR_DIR(cmd_type,
5027 						ATTR_SIMPLE, XFER_WRITE);
5028 			c->Request.Timeout = 0;
5029 			c->Request.CDB[0] = BMIC_WRITE;
5030 			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5031 			c->Request.CDB[7] = (size >> 8) & 0xFF;
5032 			c->Request.CDB[8] = size & 0xFF;
5033 			break;
5034 		case TEST_UNIT_READY:
5035 			c->Request.CDBLen = 6;
5036 			c->Request.type_attr_dir =
5037 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5038 			c->Request.Timeout = 0;
5039 			break;
5040 		case HPSA_GET_RAID_MAP:
5041 			c->Request.CDBLen = 12;
5042 			c->Request.type_attr_dir =
5043 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5044 			c->Request.Timeout = 0;
5045 			c->Request.CDB[0] = HPSA_CISS_READ;
5046 			c->Request.CDB[1] = cmd;
5047 			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5048 			c->Request.CDB[7] = (size >> 16) & 0xFF;
5049 			c->Request.CDB[8] = (size >> 8) & 0xFF;
5050 			c->Request.CDB[9] = size & 0xFF;
5051 			break;
5052 		case BMIC_SENSE_CONTROLLER_PARAMETERS:
5053 			c->Request.CDBLen = 10;
5054 			c->Request.type_attr_dir =
5055 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5056 			c->Request.Timeout = 0;
5057 			c->Request.CDB[0] = BMIC_READ;
5058 			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5059 			c->Request.CDB[7] = (size >> 16) & 0xFF;
5060 			c->Request.CDB[8] = (size >> 8) & 0xFF;
5061 			break;
5062 		default:
5063 			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
5064 			BUG();
5065 			return -1;
5066 		}
5067 	} else if (cmd_type == TYPE_MSG) {
5068 		switch (cmd) {
5069 
5070 		case  HPSA_DEVICE_RESET_MSG:
5071 			c->Request.CDBLen = 16;
5072 			c->Request.type_attr_dir =
5073 				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5074 			c->Request.Timeout = 0; /* Don't time out */
5075 			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5076 			c->Request.CDB[0] =  cmd;
5077 			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the
			 * LunID device.
			 */
5080 			c->Request.CDB[4] = 0x00;
5081 			c->Request.CDB[5] = 0x00;
5082 			c->Request.CDB[6] = 0x00;
5083 			c->Request.CDB[7] = 0x00;
5084 			break;
5085 		case  HPSA_ABORT_MSG:
5086 			a = buff;       /* point to command to be aborted */
5087 			dev_dbg(&h->pdev->dev,
5088 				"Abort Tag:0x%016llx request Tag:0x%016llx",
5089 				a->Header.tag, c->Header.tag);
5090 			c->Request.CDBLen = 16;
5091 			c->Request.type_attr_dir =
5092 					TYPE_ATTR_DIR(cmd_type,
5093 						ATTR_SIMPLE, XFER_WRITE);
5094 			c->Request.Timeout = 0; /* Don't time out */
5095 			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5096 			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5097 			c->Request.CDB[2] = 0x00; /* reserved */
5098 			c->Request.CDB[3] = 0x00; /* reserved */
5099 			/* Tag to abort goes in CDB[4]-CDB[11] */
5100 			memcpy(&c->Request.CDB[4], &a->Header.tag,
5101 				sizeof(a->Header.tag));
5102 			c->Request.CDB[12] = 0x00; /* reserved */
5103 			c->Request.CDB[13] = 0x00; /* reserved */
5104 			c->Request.CDB[14] = 0x00; /* reserved */
5105 			c->Request.CDB[15] = 0x00; /* reserved */
			break;
5107 		default:
5108 			dev_warn(&h->pdev->dev, "unknown message type %d\n",
5109 				cmd);
5110 			BUG();
5111 		}
5112 	} else {
5113 		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5114 		BUG();
5115 	}
5116 
5117 	switch (GET_DIR(c->Request.type_attr_dir)) {
5118 	case XFER_READ:
5119 		pci_dir = PCI_DMA_FROMDEVICE;
5120 		break;
5121 	case XFER_WRITE:
5122 		pci_dir = PCI_DMA_TODEVICE;
5123 		break;
5124 	case XFER_NONE:
5125 		pci_dir = PCI_DMA_NONE;
5126 		break;
5127 	default:
5128 		pci_dir = PCI_DMA_BIDIRECTIONAL;
5129 	}
5130 	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5131 		return -1;
5132 	return 0;
5133 }
5134 
5135 /*
5136  * Map (physical) PCI mem into (virtual) kernel space
5137  */
5138 static void __iomem *remap_pci_mem(ulong base, ulong size)
5139 {
5140 	ulong page_base = ((ulong) base) & PAGE_MASK;
5141 	ulong page_offs = ((ulong) base) - page_base;
5142 	void __iomem *page_remapped = ioremap_nocache(page_base,
5143 		page_offs + size);
5144 
5145 	return page_remapped ? (page_remapped + page_offs) : NULL;
5146 }
5147 
5148 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5149 {
5150 	return h->access.command_completed(h, q);
5151 }
5152 
5153 static inline bool interrupt_pending(struct ctlr_info *h)
5154 {
5155 	return h->access.intr_pending(h);
5156 }
5157 
5158 static inline long interrupt_not_for_us(struct ctlr_info *h)
5159 {
5160 	return (h->access.intr_pending(h) == 0) ||
5161 		(h->interrupts_enabled == 0);
5162 }
5163 
5164 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5165 	u32 raw_tag)
5166 {
5167 	if (unlikely(tag_index >= h->nr_cmds)) {
5168 		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5169 		return 1;
5170 	}
5171 	return 0;
5172 }
5173 
5174 static inline void finish_cmd(struct CommandList *c)
5175 {
5176 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5177 	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5178 			|| c->cmd_type == CMD_IOACCEL2))
5179 		complete_scsi_command(c);
5180 	else if (c->cmd_type == CMD_IOCTL_PEND)
5181 		complete(c->waiting);
5182 }
5183 
5184 
5185 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5186 {
5187 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5188 #define HPSA_SIMPLE_ERROR_BITS 0x03
5189 	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5190 		return tag & ~HPSA_SIMPLE_ERROR_BITS;
5191 	return tag & ~HPSA_PERF_ERROR_BITS;
5192 }
5193 
5194 /* process completion of an indexed ("direct lookup") command */
5195 static inline void process_indexed_cmd(struct ctlr_info *h,
5196 	u32 raw_tag)
5197 {
5198 	u32 tag_index;
5199 	struct CommandList *c;
5200 
5201 	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5202 	if (!bad_tag(h, tag_index, raw_tag)) {
5203 		c = h->cmd_pool + tag_index;
5204 		finish_cmd(c);
5205 	}
5206 }
5207 
/* Some controllers, like the P400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * We only need to check for this in the hpsa_xxx_discard_completions
 * functions.
5212  */
5213 static int ignore_bogus_interrupt(struct ctlr_info *h)
5214 {
5215 	if (likely(!reset_devices))
5216 		return 0;
5217 
5218 	if (likely(h->interrupts_enabled))
5219 		return 0;
5220 
5221 	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5222 		"(known firmware bug.)  Ignoring.\n");
5223 
5224 	return 1;
5225 }
5226 
5227 /*
5228  * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
5230  * 0 <= x < MAX_REPLY_QUEUES.
5231  */
5232 static struct ctlr_info *queue_to_hba(u8 *queue)
5233 {
5234 	return container_of((queue - *queue), struct ctlr_info, q[0]);
5235 }
5236 
5237 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5238 {
5239 	struct ctlr_info *h = queue_to_hba(queue);
5240 	u8 q = *(u8 *) queue;
5241 	u32 raw_tag;
5242 
5243 	if (ignore_bogus_interrupt(h))
5244 		return IRQ_NONE;
5245 
5246 	if (interrupt_not_for_us(h))
5247 		return IRQ_NONE;
5248 	h->last_intr_timestamp = get_jiffies_64();
5249 	while (interrupt_pending(h)) {
5250 		raw_tag = get_next_completion(h, q);
5251 		while (raw_tag != FIFO_EMPTY)
5252 			raw_tag = next_command(h, q);
5253 	}
5254 	return IRQ_HANDLED;
5255 }
5256 
5257 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5258 {
5259 	struct ctlr_info *h = queue_to_hba(queue);
5260 	u32 raw_tag;
5261 	u8 q = *(u8 *) queue;
5262 
5263 	if (ignore_bogus_interrupt(h))
5264 		return IRQ_NONE;
5265 
5266 	h->last_intr_timestamp = get_jiffies_64();
5267 	raw_tag = get_next_completion(h, q);
5268 	while (raw_tag != FIFO_EMPTY)
5269 		raw_tag = next_command(h, q);
5270 	return IRQ_HANDLED;
5271 }
5272 
5273 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5274 {
5275 	struct ctlr_info *h = queue_to_hba((u8 *) queue);
5276 	u32 raw_tag;
5277 	u8 q = *(u8 *) queue;
5278 
5279 	if (interrupt_not_for_us(h))
5280 		return IRQ_NONE;
5281 	h->last_intr_timestamp = get_jiffies_64();
5282 	while (interrupt_pending(h)) {
5283 		raw_tag = get_next_completion(h, q);
5284 		while (raw_tag != FIFO_EMPTY) {
5285 			process_indexed_cmd(h, raw_tag);
5286 			raw_tag = next_command(h, q);
5287 		}
5288 	}
5289 	return IRQ_HANDLED;
5290 }
5291 
5292 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5293 {
5294 	struct ctlr_info *h = queue_to_hba(queue);
5295 	u32 raw_tag;
5296 	u8 q = *(u8 *) queue;
5297 
5298 	h->last_intr_timestamp = get_jiffies_64();
5299 	raw_tag = get_next_completion(h, q);
5300 	while (raw_tag != FIFO_EMPTY) {
5301 		process_indexed_cmd(h, raw_tag);
5302 		raw_tag = next_command(h, q);
5303 	}
5304 	return IRQ_HANDLED;
5305 }
5306 
5307 /* Send a message CDB to the firmware. Careful, this only works
5308  * in simple mode, not performant mode due to the tag lookup.
5309  * We only ever use this immediately after a controller reset.
5310  */
5311 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5312 			unsigned char type)
5313 {
5314 	struct Command {
5315 		struct CommandListHeader CommandHeader;
5316 		struct RequestBlock Request;
5317 		struct ErrDescriptor ErrorDescriptor;
5318 	};
5319 	struct Command *cmd;
5320 	static const size_t cmd_sz = sizeof(*cmd) +
5321 					sizeof(cmd->ErrorDescriptor);
5322 	dma_addr_t paddr64;
5323 	__le32 paddr32;
5324 	u32 tag;
5325 	void __iomem *vaddr;
5326 	int i, err;
5327 
5328 	vaddr = pci_ioremap_bar(pdev, 0);
5329 	if (vaddr == NULL)
5330 		return -ENOMEM;
5331 
5332 	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
5333 	 * CCISS commands, so they must be allocated from the lower 4GiB of
5334 	 * memory.
5335 	 */
5336 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5337 	if (err) {
5338 		iounmap(vaddr);
5339 		return err;
5340 	}
5341 
5342 	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5343 	if (cmd == NULL) {
5344 		iounmap(vaddr);
5345 		return -ENOMEM;
5346 	}
5347 
5348 	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
5349 	 * although there's no guarantee, we assume that the address is at
5350 	 * least 4-byte aligned (most likely, it's page-aligned).
5351 	 */
5352 	paddr32 = cpu_to_le32(paddr64);
5353 
5354 	cmd->CommandHeader.ReplyQueue = 0;
5355 	cmd->CommandHeader.SGList = 0;
5356 	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5357 	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
5358 	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5359 
5360 	cmd->Request.CDBLen = 16;
5361 	cmd->Request.type_attr_dir =
5362 			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5363 	cmd->Request.Timeout = 0; /* Don't time out */
5364 	cmd->Request.CDB[0] = opcode;
5365 	cmd->Request.CDB[1] = type;
5366 	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5367 	cmd->ErrorDescriptor.Addr =
5368 			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
5369 	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5370 
5371 	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
5372 
5373 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5374 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5375 		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
5376 			break;
5377 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5378 	}
5379 
5380 	iounmap(vaddr);
5381 
5382 	/* we leak the DMA buffer here ... no choice since the controller could
5383 	 *  still complete the command.
5384 	 */
5385 	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5386 		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5387 			opcode, type);
5388 		return -ETIMEDOUT;
5389 	}
5390 
5391 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5392 
5393 	if (tag & HPSA_ERROR_BIT) {
5394 		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5395 			opcode, type);
5396 		return -EIO;
5397 	}
5398 
5399 	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5400 		opcode, type);
5401 	return 0;
5402 }
5403 
5404 #define hpsa_noop(p) hpsa_message(p, 3, 0)
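
/*
 * Message opcode 3 with type 0 is a no-op; hpsa_init_reset_devices()
 * sends it after a reset purely to verify the controller responds.
 */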
5405 
5406 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5407 	void __iomem *vaddr, u32 use_doorbell)
5408 {
5409 
5410 	if (use_doorbell) {
5411 		/* For everything after the P600, the PCI power state method
5412 		 * of resetting the controller doesn't work, so we have this
5413 		 * other way using the doorbell register.
5414 		 */
5415 		dev_info(&pdev->dev, "using doorbell to reset controller\n");
5416 		writel(use_doorbell, vaddr + SA5_DOORBELL);
5417 
5418 		/* PMC hardware guys tell us we need a 10 second delay after
5419 		 * doorbell reset and before any attempt to talk to the board
5420 		 * at all to ensure that this actually works and doesn't fall
5421 		 * over in some weird corner cases.
5422 		 */
5423 		msleep(10000);
5424 	} else { /* Try to do it the PCI power state way */
5425 
5426 		/* Quoting from the Open CISS Specification: "The Power
5427 		 * Management Control/Status Register (CSR) controls the power
5428 		 * state of the device.  The normal operating state is D0,
5429 		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
5430 		 * the controller, place the interface device in D3 then to D0,
5431 		 * this causes a secondary PCI reset which will reset the
5432 		 * controller." */
5433 
5434 		int rc = 0;
5435 
5436 		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5437 
5438 		/* enter the D3hot power management state */
5439 		rc = pci_set_power_state(pdev, PCI_D3hot);
5440 		if (rc)
5441 			return rc;
5442 
5443 		msleep(500);
5444 
5445 		/* enter the D0 power management state */
5446 		rc = pci_set_power_state(pdev, PCI_D0);
5447 		if (rc)
5448 			return rc;
5449 
5450 		/*
5451 		 * The P600 requires a small delay when changing states.
5452 		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
5454 		 */
5455 		msleep(500);
5456 	}
5457 	return 0;
5458 }
5459 
5460 static void init_driver_version(char *driver_version, int len)
5461 {
5462 	memset(driver_version, 0, len);
5463 	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5464 }
5465 
5466 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5467 {
5468 	char *driver_version;
5469 	int i, size = sizeof(cfgtable->driver_version);
5470 
5471 	driver_version = kmalloc(size, GFP_KERNEL);
5472 	if (!driver_version)
5473 		return -ENOMEM;
5474 
5475 	init_driver_version(driver_version, size);
5476 	for (i = 0; i < size; i++)
5477 		writeb(driver_version[i], &cfgtable->driver_version[i]);
5478 	kfree(driver_version);
5479 	return 0;
5480 }
5481 
5482 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5483 					  unsigned char *driver_ver)
5484 {
5485 	int i;
5486 
5487 	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5488 		driver_ver[i] = readb(&cfgtable->driver_version[i]);
5489 }
5490 
5491 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5492 {
5493 
5494 	char *driver_ver, *old_driver_ver;
5495 	int rc, size = sizeof(cfgtable->driver_version);
5496 
5497 	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5498 	if (!old_driver_ver)
5499 		return -ENOMEM;
5500 	driver_ver = old_driver_ver + size;
5501 
5502 	/* After a reset, the 32 bytes of "driver version" in the cfgtable
5503 	 * should have been changed, otherwise we know the reset failed.
5504 	 */
5505 	init_driver_version(old_driver_ver, size);
5506 	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5507 	rc = !memcmp(driver_ver, old_driver_ver, size);
5508 	kfree(old_driver_ver);
5509 	return rc;
5510 }
5511 /* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
5513  */
5514 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5515 {
5516 	u64 cfg_offset;
5517 	u32 cfg_base_addr;
5518 	u64 cfg_base_addr_index;
5519 	void __iomem *vaddr;
5520 	unsigned long paddr;
5521 	u32 misc_fw_support;
5522 	int rc;
5523 	struct CfgTable __iomem *cfgtable;
5524 	u32 use_doorbell;
5525 	u32 board_id;
5526 	u16 command_register;
5527 
5528 	/* For controllers as old as the P600, this is very nearly
5529 	 * the same thing as
5530 	 *
5531 	 * pci_save_state(pci_dev);
5532 	 * pci_set_power_state(pci_dev, PCI_D3hot);
5533 	 * pci_set_power_state(pci_dev, PCI_D0);
5534 	 * pci_restore_state(pci_dev);
5535 	 *
5536 	 * For controllers newer than the P600, the pci power state
5537 	 * method of resetting doesn't work so we have another way
5538 	 * using the doorbell register.
5539 	 */
5540 
5541 	rc = hpsa_lookup_board_id(pdev, &board_id);
5542 	if (rc < 0) {
5543 		dev_warn(&pdev->dev, "Board ID not found\n");
5544 		return rc;
5545 	}
5546 	if (!ctlr_is_resettable(board_id)) {
5547 		dev_warn(&pdev->dev, "Controller not resettable\n");
5548 		return -ENODEV;
5549 	}
5550 
5551 	/* if controller is soft- but not hard resettable... */
5552 	if (!ctlr_is_hard_resettable(board_id))
5553 		return -ENOTSUPP; /* try soft reset later. */
5554 
5555 	/* Save the PCI command register */
5556 	pci_read_config_word(pdev, 4, &command_register);
5557 	pci_save_state(pdev);
5558 
5559 	/* find the first memory BAR, so we can find the cfg table */
5560 	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5561 	if (rc)
5562 		return rc;
5563 	vaddr = remap_pci_mem(paddr, 0x250);
5564 	if (!vaddr)
5565 		return -ENOMEM;
5566 
5567 	/* find cfgtable in order to check if reset via doorbell is supported */
5568 	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5569 					&cfg_base_addr_index, &cfg_offset);
5570 	if (rc)
5571 		goto unmap_vaddr;
5572 	cfgtable = remap_pci_mem(pci_resource_start(pdev,
5573 		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5574 	if (!cfgtable) {
5575 		rc = -ENOMEM;
5576 		goto unmap_vaddr;
5577 	}
5578 	rc = write_driver_ver_to_cfgtable(cfgtable);
5579 	if (rc)
5580 		goto unmap_cfgtable;
5581 
5582 	/* If reset via doorbell register is supported, use that.
5583 	 * There are two such methods.  Favor the newest method.
5584 	 */
5585 	misc_fw_support = readl(&cfgtable->misc_fw_support);
5586 	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5587 	if (use_doorbell) {
5588 		use_doorbell = DOORBELL_CTLR_RESET2;
5589 	} else {
5590 		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5591 		if (use_doorbell) {
5592 			dev_warn(&pdev->dev,
5593 				"Soft reset not supported. Firmware update is required.\n");
5594 			rc = -ENOTSUPP; /* try soft reset */
5595 			goto unmap_cfgtable;
5596 		}
5597 	}
5598 
5599 	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5600 	if (rc)
5601 		goto unmap_cfgtable;
5602 
5603 	pci_restore_state(pdev);
5604 	pci_write_config_word(pdev, 4, command_register);
5605 
5606 	/* Some devices (notably the HP Smart Array 5i Controller)
5607 	   need a little pause here */
5608 	msleep(HPSA_POST_RESET_PAUSE_MSECS);
5609 
5610 	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
5611 	if (rc) {
5612 		dev_warn(&pdev->dev,
5613 			"Failed waiting for board to become ready after hard reset\n");
5614 		goto unmap_cfgtable;
5615 	}
5616 
5617 	rc = controller_reset_failed(vaddr);
5618 	if (rc < 0)
5619 		goto unmap_cfgtable;
5620 	if (rc) {
5621 		dev_warn(&pdev->dev, "Unable to successfully reset "
5622 			"controller. Will try soft reset.\n");
5623 		rc = -ENOTSUPP;
5624 	} else {
5625 		dev_info(&pdev->dev, "board ready after hard reset.\n");
5626 	}
5627 
5628 unmap_cfgtable:
5629 	iounmap(cfgtable);
5630 
5631 unmap_vaddr:
5632 	iounmap(vaddr);
5633 	return rc;
5634 }
5635 
5636 /*
5637  *  We cannot read the structure directly, for portability we must use
5638  *   the io functions.
5639  *   This is for debug only.
5640  */
5641 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
5642 {
5643 #ifdef HPSA_DEBUG
5644 	int i;
5645 	char temp_name[17];
5646 
5647 	dev_info(dev, "Controller Configuration information\n");
5648 	dev_info(dev, "------------------------------------\n");
5649 	for (i = 0; i < 4; i++)
5650 		temp_name[i] = readb(&(tb->Signature[i]));
5651 	temp_name[4] = '\0';
5652 	dev_info(dev, "   Signature = %s\n", temp_name);
5653 	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
5654 	dev_info(dev, "   Transport methods supported = 0x%x\n",
5655 	       readl(&(tb->TransportSupport)));
5656 	dev_info(dev, "   Transport methods active = 0x%x\n",
5657 	       readl(&(tb->TransportActive)));
5658 	dev_info(dev, "   Requested transport Method = 0x%x\n",
5659 	       readl(&(tb->HostWrite.TransportRequest)));
5660 	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
5661 	       readl(&(tb->HostWrite.CoalIntDelay)));
5662 	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
5663 	       readl(&(tb->HostWrite.CoalIntCount)));
5664 	dev_info(dev, "   Max outstanding commands = %d\n",
5665 	       readl(&(tb->CmdsOutMax)));
5666 	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
5667 	for (i = 0; i < 16; i++)
5668 		temp_name[i] = readb(&(tb->ServerName[i]));
5669 	temp_name[16] = '\0';
5670 	dev_info(dev, "   Server Name = %s\n", temp_name);
5671 	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
5672 		readl(&(tb->HeartBeat)));
5673 #endif				/* HPSA_DEBUG */
5674 }
5675 
5676 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
5677 {
5678 	int i, offset, mem_type, bar_type;
5679 
5680 	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
5681 		return 0;
5682 	offset = 0;
5683 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5684 		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
5685 		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
5686 			offset += 4;
5687 		else {
5688 			mem_type = pci_resource_flags(pdev, i) &
5689 			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
5690 			switch (mem_type) {
5691 			case PCI_BASE_ADDRESS_MEM_TYPE_32:
5692 			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
5693 				offset += 4;	/* 32 bit */
5694 				break;
5695 			case PCI_BASE_ADDRESS_MEM_TYPE_64:
5696 				offset += 8;
5697 				break;
5698 			default:	/* reserved in PCI 2.2 */
5699 				dev_warn(&pdev->dev,
5700 				       "base address is invalid\n");
				return -1;
5703 			}
5704 		}
5705 		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
5706 			return i + 1;
5707 	}
5708 	return -1;
5709 }
5710 
5711 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
5712  * controllers that are capable. If not, we use legacy INTx mode.
5713  */
5714 
5715 static void hpsa_interrupt_mode(struct ctlr_info *h)
5716 {
5717 #ifdef CONFIG_PCI_MSI
5718 	int err, i;
5719 	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
5720 
5721 	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
5722 		hpsa_msix_entries[i].vector = 0;
5723 		hpsa_msix_entries[i].entry = i;
5724 	}
5725 
5726 	/* Some boards advertise MSI but don't really support it */
5727 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
5728 	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
5729 		goto default_int_mode;
5730 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
5731 		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
5732 		h->msix_vector = MAX_REPLY_QUEUES;
5733 		if (h->msix_vector > num_online_cpus())
5734 			h->msix_vector = num_online_cpus();
5735 		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
5736 					    1, h->msix_vector);
5737 		if (err < 0) {
5738 			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
5739 			h->msix_vector = 0;
5740 			goto single_msi_mode;
5741 		} else if (err < h->msix_vector) {
5742 			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
5743 			       "available\n", err);
5744 		}
5745 		h->msix_vector = err;
5746 		for (i = 0; i < h->msix_vector; i++)
5747 			h->intr[i] = hpsa_msix_entries[i].vector;
5748 		return;
5749 	}
5750 single_msi_mode:
5751 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
5752 		dev_info(&h->pdev->dev, "MSI capable controller\n");
5753 		if (!pci_enable_msi(h->pdev))
5754 			h->msi_vector = 1;
5755 		else
5756 			dev_warn(&h->pdev->dev, "MSI init failed\n");
5757 	}
5758 default_int_mode:
5759 #endif				/* CONFIG_PCI_MSI */
5760 	/* if we get here we're going to use the default interrupt mode */
5761 	h->intr[h->intr_mode] = h->pdev->irq;
5762 }
5763 
5764 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
5765 {
5766 	int i;
5767 	u32 subsystem_vendor_id, subsystem_device_id;
5768 
5769 	subsystem_vendor_id = pdev->subsystem_vendor;
5770 	subsystem_device_id = pdev->subsystem_device;
5771 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
5772 		    subsystem_vendor_id;
5773 
5774 	for (i = 0; i < ARRAY_SIZE(products); i++)
5775 		if (*board_id == products[i].board_id)
5776 			return i;
5777 
5778 	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
5779 		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
5780 		!hpsa_allow_any) {
5781 		dev_warn(&pdev->dev, "unrecognized board ID: "
5782 			"0x%08x, ignoring.\n", *board_id);
5783 			return -ENODEV;
5784 	}
5785 	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
5786 }
5787 
5788 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
5789 				    unsigned long *memory_bar)
5790 {
5791 	int i;
5792 
5793 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
5794 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
5795 			/* addressing mode bits already removed */
5796 			*memory_bar = pci_resource_start(pdev, i);
5797 			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
5798 				*memory_bar);
5799 			return 0;
5800 		}
5801 	dev_warn(&pdev->dev, "no memory BAR found\n");
5802 	return -ENODEV;
5803 }
5804 
5805 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
5806 				     int wait_for_ready)
5807 {
5808 	int i, iterations;
5809 	u32 scratchpad;
5810 	if (wait_for_ready)
5811 		iterations = HPSA_BOARD_READY_ITERATIONS;
5812 	else
5813 		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
5814 
5815 	for (i = 0; i < iterations; i++) {
5816 		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
5817 		if (wait_for_ready) {
5818 			if (scratchpad == HPSA_FIRMWARE_READY)
5819 				return 0;
5820 		} else {
5821 			if (scratchpad != HPSA_FIRMWARE_READY)
5822 				return 0;
5823 		}
5824 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
5825 	}
5826 	dev_warn(&pdev->dev, "board not ready, timed out.\n");
5827 	return -ENODEV;
5828 }
5829 
5830 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
5831 			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
5832 			       u64 *cfg_offset)
5833 {
5834 	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
5835 	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
5836 	*cfg_base_addr &= (u32) 0x0000ffff;
5837 	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
5838 	if (*cfg_base_addr_index == -1) {
5839 		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
5840 		return -ENODEV;
5841 	}
5842 	return 0;
5843 }
5844 
5845 static int hpsa_find_cfgtables(struct ctlr_info *h)
5846 {
5847 	u64 cfg_offset;
5848 	u32 cfg_base_addr;
5849 	u64 cfg_base_addr_index;
5850 	u32 trans_offset;
5851 	int rc;
5852 
5853 	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
5854 		&cfg_base_addr_index, &cfg_offset);
5855 	if (rc)
5856 		return rc;
5857 	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
5858 		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
5859 	if (!h->cfgtable) {
5860 		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
5861 		return -ENOMEM;
5862 	}
5863 	rc = write_driver_ver_to_cfgtable(h->cfgtable);
5864 	if (rc)
5865 		return rc;
5866 	/* Find performant mode table. */
5867 	trans_offset = readl(&h->cfgtable->TransMethodOffset);
5868 	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
5869 				cfg_base_addr_index)+cfg_offset+trans_offset,
5870 				sizeof(*h->transtable));
5871 	if (!h->transtable)
5872 		return -ENOMEM;
5873 	return 0;
5874 }
5875 
5876 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
5877 {
5878 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
5879 
5880 	/* Limit commands in memory limited kdump scenario. */
5881 	if (reset_devices && h->max_commands > 32)
5882 		h->max_commands = 32;
5883 
5884 	if (h->max_commands < 16) {
5885 		dev_warn(&h->pdev->dev, "Controller reports "
5886 			"max supported commands of %d, an obvious lie. "
5887 			"Using 16.  Ensure that firmware is up to date.\n",
5888 			h->max_commands);
5889 		h->max_commands = 16;
5890 	}
5891 }
5892 
5893 /* If the controller reports that the total max sg entries is greater than 512,
5894  * then we know that chained SG blocks work.  (Original smart arrays did not
5895  * support chained SG blocks and would return zero for max sg entries.)
5896  */
5897 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
5898 {
5899 	return h->maxsgentries > 512;
5900 }
5901 
5902 /* Interrogate the hardware for some limits:
5903  * max commands, max SG elements without chaining, and with chaining,
5904  * SG chain block size, etc.
5905  */
5906 static void hpsa_find_board_params(struct ctlr_info *h)
5907 {
5908 	hpsa_get_max_perf_mode_cmds(h);
5909 	h->nr_cmds = h->max_commands;
5910 	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
5911 	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
5912 	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
5914 		h->max_cmd_sg_entries = 32;
5915 		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
5916 		h->maxsgentries--; /* save one for chain pointer */
5917 	} else {
5918 		/*
5919 		 * Original smart arrays supported at most 31 s/g entries
5920 		 * embedded inline in the command (trying to use more
5921 		 * would lock up the controller)
5922 		 */
5923 		h->max_cmd_sg_entries = 31;
5924 		h->maxsgentries = 31; /* default to traditional values */
5925 		h->chainsize = 0;
5926 	}
5927 
5928 	/* Find out what task management functions are supported and cache */
5929 	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
5930 	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
5931 		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
5932 	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5933 		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
5934 }
5935 
5936 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
5937 {
5938 	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
5939 		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
5940 		return false;
5941 	}
5942 	return true;
5943 }
5944 
5945 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
5946 {
5947 	u32 driver_support;
5948 
5949 	driver_support = readl(&(h->cfgtable->driver_support));
5950 	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
5951 #ifdef CONFIG_X86
5952 	driver_support |= ENABLE_SCSI_PREFETCH;
5953 #endif
5954 	driver_support |= ENABLE_UNIT_ATTN;
5955 	writel(driver_support, &(h->cfgtable->driver_support));
5956 }
5957 
5958 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
5959  * in a prefetch beyond physical memory.
5960  */
5961 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
5962 {
5963 	u32 dma_prefetch;
5964 
5965 	if (h->board_id != 0x3225103C)
5966 		return;
5967 	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
5968 	dma_prefetch |= 0x8000;
5969 	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
5970 }
5971 
5972 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
5973 {
5974 	int i;
5975 	u32 doorbell_value;
5976 	unsigned long flags;
5977 	/* wait until the clear_event_notify bit 6 is cleared by controller. */
5978 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
5979 		spin_lock_irqsave(&h->lock, flags);
5980 		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
5981 		spin_unlock_irqrestore(&h->lock, flags);
5982 		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
5983 			break;
5984 		/* delay and try again */
5985 		msleep(20);
5986 	}
5987 }
5988 
5989 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
5990 {
5991 	int i;
5992 	u32 doorbell_value;
5993 	unsigned long flags;
5994 
	/* under certain very rare conditions, this can take a while.
5996 	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
5997 	 * as we enter this code.)
5998 	 */
5999 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6000 		spin_lock_irqsave(&h->lock, flags);
6001 		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6002 		spin_unlock_irqrestore(&h->lock, flags);
6003 		if (!(doorbell_value & CFGTBL_ChangeReq))
6004 			break;
6005 		/* delay and try again */
6006 		usleep_range(10000, 20000);
6007 	}
6008 }
6009 
6010 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6011 {
6012 	u32 trans_support;
6013 
6014 	trans_support = readl(&(h->cfgtable->TransportSupport));
6015 	if (!(trans_support & SIMPLE_MODE))
6016 		return -ENOTSUPP;
6017 
6018 	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6019 
6020 	/* Update the field, and then ring the doorbell */
6021 	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6022 	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6023 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6024 	hpsa_wait_for_mode_change_ack(h);
6025 	print_cfg_table(&h->pdev->dev, h->cfgtable);
6026 	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6027 		goto error;
6028 	h->transMethod = CFGTBL_Trans_Simple;
6029 	return 0;
6030 error:
6031 	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
6032 	return -ENODEV;
6033 }
6034 
6035 static int hpsa_pci_init(struct ctlr_info *h)
6036 {
6037 	int prod_index, err;
6038 
6039 	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6040 	if (prod_index < 0)
6041 		return prod_index;
6042 	h->product_name = products[prod_index].product_name;
6043 	h->access = *(products[prod_index].access);
6044 
6045 	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6046 			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6047 
6048 	err = pci_enable_device(h->pdev);
6049 	if (err) {
6050 		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6051 		return err;
6052 	}
6053 
6054 	err = pci_request_regions(h->pdev, HPSA);
6055 	if (err) {
6056 		dev_err(&h->pdev->dev,
6057 			"cannot obtain PCI resources, aborting\n");
6058 		return err;
6059 	}
6060 
6061 	pci_set_master(h->pdev);
6062 
6063 	hpsa_interrupt_mode(h);
6064 	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6065 	if (err)
6066 		goto err_out_free_res;
6067 	h->vaddr = remap_pci_mem(h->paddr, 0x250);
6068 	if (!h->vaddr) {
6069 		err = -ENOMEM;
6070 		goto err_out_free_res;
6071 	}
6072 	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6073 	if (err)
6074 		goto err_out_free_res;
6075 	err = hpsa_find_cfgtables(h);
6076 	if (err)
6077 		goto err_out_free_res;
6078 	hpsa_find_board_params(h);
6079 
6080 	if (!hpsa_CISS_signature_present(h)) {
6081 		err = -ENODEV;
6082 		goto err_out_free_res;
6083 	}
6084 	hpsa_set_driver_support_bits(h);
6085 	hpsa_p600_dma_prefetch_quirk(h);
6086 	err = hpsa_enter_simple_mode(h);
6087 	if (err)
6088 		goto err_out_free_res;
6089 	return 0;
6090 
6091 err_out_free_res:
6092 	if (h->transtable)
6093 		iounmap(h->transtable);
6094 	if (h->cfgtable)
6095 		iounmap(h->cfgtable);
6096 	if (h->vaddr)
6097 		iounmap(h->vaddr);
6098 	pci_disable_device(h->pdev);
6099 	pci_release_regions(h->pdev);
6100 	return err;
6101 }
6102 
6103 static void hpsa_hba_inquiry(struct ctlr_info *h)
6104 {
6105 	int rc;
6106 
6107 #define HBA_INQUIRY_BYTE_COUNT 64
6108 	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6109 	if (!h->hba_inquiry_data)
6110 		return;
6111 	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6112 		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6113 	if (rc != 0) {
6114 		kfree(h->hba_inquiry_data);
6115 		h->hba_inquiry_data = NULL;
6116 	}
6117 }
6118 
6119 static int hpsa_init_reset_devices(struct pci_dev *pdev)
6120 {
6121 	int rc, i;
6122 	void __iomem *vaddr;
6123 
6124 	if (!reset_devices)
6125 		return 0;
6126 
	/* The kdump kernel is loading; we don't know what state the PCI
	 * interface is in.  dev->enable_cnt is zero, so we call
	 * enable+disable, wait a while, and switch it back on.
6130 	 */
6131 	rc = pci_enable_device(pdev);
6132 	if (rc) {
6133 		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6134 		return -ENODEV;
6135 	}
6136 	pci_disable_device(pdev);
6137 	msleep(260);			/* a randomly chosen number */
6138 	rc = pci_enable_device(pdev);
6139 	if (rc) {
6140 		dev_warn(&pdev->dev, "failed to enable device.\n");
6141 		return -ENODEV;
6142 	}
6143 
6144 	pci_set_master(pdev);
6145 
6146 	vaddr = pci_ioremap_bar(pdev, 0);
6147 	if (vaddr == NULL) {
6148 		rc = -ENOMEM;
6149 		goto out_disable;
6150 	}
6151 	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6152 	iounmap(vaddr);
6153 
6154 	/* Reset the controller with a PCI power-cycle or via doorbell */
6155 	rc = hpsa_kdump_hard_reset_controller(pdev);
6156 
6157 	/* -ENOTSUPP here means we cannot reset the controller
6158 	 * but it's already (and still) up and running in
6159 	 * "performant mode".  Or, it might be 640x, which can't reset
6160 	 * due to concerns about shared bbwc between 6402/6404 pair.
6161 	 */
6162 	if (rc)
6163 		goto out_disable;
6164 
6165 	/* Now try to get the controller to respond to a no-op */
6166 	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
6167 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6168 		if (hpsa_noop(pdev) == 0)
6169 			break;
6170 		else
6171 			dev_warn(&pdev->dev, "no-op failed%s\n",
6172 					(i < 11 ? "; re-trying" : ""));
6173 	}
6174 
6175 out_disable:
6176 
6177 	pci_disable_device(pdev);
6178 	return rc;
6179 }
6180 
6181 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6182 {
6183 	h->cmd_pool_bits = kzalloc(
6184 		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6185 		sizeof(unsigned long), GFP_KERNEL);
6186 	h->cmd_pool = pci_alloc_consistent(h->pdev,
6187 		    h->nr_cmds * sizeof(*h->cmd_pool),
6188 		    &(h->cmd_pool_dhandle));
6189 	h->errinfo_pool = pci_alloc_consistent(h->pdev,
6190 		    h->nr_cmds * sizeof(*h->errinfo_pool),
6191 		    &(h->errinfo_pool_dhandle));
6192 	if ((h->cmd_pool_bits == NULL)
6193 	    || (h->cmd_pool == NULL)
6194 	    || (h->errinfo_pool == NULL)) {
6195 		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
6196 		goto clean_up;
6197 	}
6198 	return 0;
6199 clean_up:
6200 	hpsa_free_cmd_pool(h);
6201 	return -ENOMEM;
6202 }
6203 
6204 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6205 {
6206 	kfree(h->cmd_pool_bits);
6207 	if (h->cmd_pool)
6208 		pci_free_consistent(h->pdev,
6209 			    h->nr_cmds * sizeof(struct CommandList),
6210 			    h->cmd_pool, h->cmd_pool_dhandle);
6211 	if (h->ioaccel2_cmd_pool)
6212 		pci_free_consistent(h->pdev,
6213 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6214 			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6215 	if (h->errinfo_pool)
6216 		pci_free_consistent(h->pdev,
6217 			    h->nr_cmds * sizeof(struct ErrorInfo),
6218 			    h->errinfo_pool,
6219 			    h->errinfo_pool_dhandle);
6220 	if (h->ioaccel_cmd_pool)
6221 		pci_free_consistent(h->pdev,
6222 			h->nr_cmds * sizeof(struct io_accel1_cmd),
6223 			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6224 }
6225 
6226 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6227 {
6228 	int i, cpu;
6229 
6230 	cpu = cpumask_first(cpu_online_mask);
6231 	for (i = 0; i < h->msix_vector; i++) {
6232 		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6233 		cpu = cpumask_next(cpu, cpu_online_mask);
6234 	}
6235 }
6236 
6237 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
6238 static void hpsa_free_irqs(struct ctlr_info *h)
6239 {
6240 	int i;
6241 
6242 	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6243 		/* Single reply queue, only one irq to free */
6244 		i = h->intr_mode;
6245 		irq_set_affinity_hint(h->intr[i], NULL);
6246 		free_irq(h->intr[i], &h->q[i]);
6247 		return;
6248 	}
6249 
6250 	for (i = 0; i < h->msix_vector; i++) {
6251 		irq_set_affinity_hint(h->intr[i], NULL);
6252 		free_irq(h->intr[i], &h->q[i]);
6253 	}
6254 	for (; i < MAX_REPLY_QUEUES; i++)
6255 		h->q[i] = 0;
6256 }
6257 
6258 /* returns 0 on success; cleans up and returns -Enn on error */
6259 static int hpsa_request_irqs(struct ctlr_info *h,
6260 	irqreturn_t (*msixhandler)(int, void *),
6261 	irqreturn_t (*intxhandler)(int, void *))
6262 {
6263 	int rc, i;
6264 
6265 	/*
6266 	 * initialize h->q[x] = x so that interrupt handlers know which
6267 	 * queue to process.
6268 	 */
6269 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
6270 		h->q[i] = (u8) i;
6271 
6272 	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6273 		/* If performant mode and MSI-X, use multiple reply queues */
6274 		for (i = 0; i < h->msix_vector; i++) {
6275 			rc = request_irq(h->intr[i], msixhandler,
6276 					0, h->devname,
6277 					&h->q[i]);
6278 			if (rc) {
6279 				int j;
6280 
6281 				dev_err(&h->pdev->dev,
6282 					"failed to get irq %d for %s\n",
6283 				       h->intr[i], h->devname);
6284 				for (j = 0; j < i; j++) {
6285 					free_irq(h->intr[j], &h->q[j]);
6286 					h->q[j] = 0;
6287 				}
6288 				for (; j < MAX_REPLY_QUEUES; j++)
6289 					h->q[j] = 0;
6290 				return rc;
6291 			}
6292 		}
6293 		hpsa_irq_affinity_hints(h);
6294 	} else {
6295 		/* Use single reply pool */
6296 		if (h->msix_vector > 0 || h->msi_vector) {
6297 			rc = request_irq(h->intr[h->intr_mode],
6298 				msixhandler, 0, h->devname,
6299 				&h->q[h->intr_mode]);
6300 		} else {
6301 			rc = request_irq(h->intr[h->intr_mode],
6302 				intxhandler, IRQF_SHARED, h->devname,
6303 				&h->q[h->intr_mode]);
6304 		}
6305 	}
6306 	if (rc) {
6307 		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6308 		       h->intr[h->intr_mode], h->devname);
6309 		return -ENODEV;
6310 	}
6311 	return 0;
6312 }
6313 
6314 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6315 {
6316 	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6317 		HPSA_RESET_TYPE_CONTROLLER)) {
6318 		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6319 		return -EIO;
6320 	}
6321 
6322 	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6323 	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6324 		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6325 		return -1;
6326 	}
6327 
6328 	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6329 	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6330 		dev_warn(&h->pdev->dev, "Board failed to become ready "
6331 			"after soft reset.\n");
6332 		return -1;
6333 	}
6334 
6335 	return 0;
6336 }
6337 
6338 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6339 {
6340 	hpsa_free_irqs(h);
6341 #ifdef CONFIG_PCI_MSI
6342 	if (h->msix_vector) {
6343 		if (h->pdev->msix_enabled)
6344 			pci_disable_msix(h->pdev);
6345 	} else if (h->msi_vector) {
6346 		if (h->pdev->msi_enabled)
6347 			pci_disable_msi(h->pdev);
6348 	}
6349 #endif /* CONFIG_PCI_MSI */
6350 }
6351 
6352 static void hpsa_free_reply_queues(struct ctlr_info *h)
6353 {
6354 	int i;
6355 
6356 	for (i = 0; i < h->nreply_queues; i++) {
6357 		if (!h->reply_queue[i].head)
6358 			continue;
6359 		pci_free_consistent(h->pdev, h->reply_queue_size,
6360 			h->reply_queue[i].head, h->reply_queue[i].busaddr);
6361 		h->reply_queue[i].head = NULL;
6362 		h->reply_queue[i].busaddr = 0;
6363 	}
6364 }
6365 
6366 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6367 {
6368 	hpsa_free_irqs_and_disable_msix(h);
6369 	hpsa_free_sg_chain_blocks(h);
6370 	hpsa_free_cmd_pool(h);
6371 	kfree(h->ioaccel1_blockFetchTable);
6372 	kfree(h->blockFetchTable);
6373 	hpsa_free_reply_queues(h);
6374 	if (h->vaddr)
6375 		iounmap(h->vaddr);
6376 	if (h->transtable)
6377 		iounmap(h->transtable);
6378 	if (h->cfgtable)
6379 		iounmap(h->cfgtable);
6380 	pci_disable_device(h->pdev);
6381 	pci_release_regions(h->pdev);
6382 	kfree(h);
6383 }
6384 
6385 /* Called when controller lockup detected. */
6386 static void fail_all_outstanding_cmds(struct ctlr_info *h)
6387 {
6388 	int i;
6389 	struct CommandList *c = NULL;
6390 
6391 	for (i = 0; i < h->nr_cmds; i++) {
6392 		if (!test_bit(i & (BITS_PER_LONG - 1),
6393 				h->cmd_pool_bits + (i / BITS_PER_LONG)))
6394 			continue;
6395 		c = h->cmd_pool + i;
6396 		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
6397 		finish_cmd(c);
6398 	}
6399 }
6400 
6401 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6402 {
6403 	int i, cpu;
6404 
6405 	cpu = cpumask_first(cpu_online_mask);
6406 	for (i = 0; i < num_online_cpus(); i++) {
6407 		u32 *lockup_detected;
6408 		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6409 		*lockup_detected = value;
6410 		cpu = cpumask_next(cpu, cpu_online_mask);
6411 	}
6412 	wmb(); /* be sure the per-cpu variables are out to memory */
6413 }
6414 
6415 static void controller_lockup_detected(struct ctlr_info *h)
6416 {
6417 	unsigned long flags;
6418 	u32 lockup_detected;
6419 
6420 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
6421 	spin_lock_irqsave(&h->lock, flags);
6422 	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6423 	if (!lockup_detected) {
6424 		/* no heartbeat, but controller gave us a zero. */
6425 		dev_warn(&h->pdev->dev,
6426 			"lockup detected but scratchpad register is zero\n");
6427 		lockup_detected = 0xffffffff;
6428 	}
6429 	set_lockup_detected_for_all_cpus(h, lockup_detected);
6430 	spin_unlock_irqrestore(&h->lock, flags);
6431 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6432 			lockup_detected);
6433 	pci_disable_device(h->pdev);
6434 	spin_lock_irqsave(&h->lock, flags);
6435 	fail_all_outstanding_cmds(h);
6436 	spin_unlock_irqrestore(&h->lock, flags);
6437 }
6438 
6439 static void detect_controller_lockup(struct ctlr_info *h)
6440 {
6441 	u64 now;
6442 	u32 heartbeat;
6443 	unsigned long flags;
6444 
6445 	now = get_jiffies_64();
6446 	/* If we've received an interrupt recently, we're ok. */
6447 	if (time_after64(h->last_intr_timestamp +
6448 				(h->heartbeat_sample_interval), now))
6449 		return;
6450 
6451 	/*
6452 	 * If we've already checked the heartbeat recently, we're ok.
6453 	 * This could happen if someone sends us a signal. We
6454 	 * otherwise don't care about signals in this thread.
6455 	 */
6456 	if (time_after64(h->last_heartbeat_timestamp +
6457 				(h->heartbeat_sample_interval), now))
6458 		return;
6459 
6460 	/* If heartbeat has not changed since we last looked, we're not ok. */
6461 	spin_lock_irqsave(&h->lock, flags);
6462 	heartbeat = readl(&h->cfgtable->HeartBeat);
6463 	spin_unlock_irqrestore(&h->lock, flags);
6464 	if (h->last_heartbeat == heartbeat) {
6465 		controller_lockup_detected(h);
6466 		return;
6467 	}
6468 
6469 	/* We're ok. */
6470 	h->last_heartbeat = heartbeat;
6471 	h->last_heartbeat_timestamp = now;
6472 }
6473 
6474 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6475 {
6476 	int i;
6477 	char *event_type;
6478 
6479 	/* Ask the controller to clear the events we're handling. */
6480 	if ((h->transMethod & (CFGTBL_Trans_io_accel1
6481 			| CFGTBL_Trans_io_accel2)) &&
6482 		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6483 		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6484 
6485 		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6486 			event_type = "state change";
6487 		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6488 			event_type = "configuration change";
6489 		/* Stop sending new RAID offload reqs via the IO accelerator */
6490 		scsi_block_requests(h->scsi_host);
6491 		for (i = 0; i < h->ndevices; i++)
6492 			h->dev[i]->offload_enabled = 0;
6493 		hpsa_drain_accel_commands(h);
6494 		/* Set 'accelerator path config change' bit */
6495 		dev_warn(&h->pdev->dev,
6496 			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6497 			h->events, event_type);
6498 		writel(h->events, &(h->cfgtable->clear_event_notify));
6499 		/* Set the "clear event notify field update" bit 6 */
6500 		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6501 		/* Wait until ctlr clears 'clear event notify field', bit 6 */
6502 		hpsa_wait_for_clear_event_notify_ack(h);
6503 		scsi_unblock_requests(h->scsi_host);
6504 	} else {
6505 		/* Acknowledge controller notification events. */
6506 		writel(h->events, &(h->cfgtable->clear_event_notify));
6507 		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6508 		hpsa_wait_for_clear_event_notify_ack(h);
6509 #if 0
6510 		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6511 		hpsa_wait_for_mode_change_ack(h);
6512 #endif
6513 	}
6515 }
6516 
6517 /* Check a register on the controller to see if there are configuration
6518  * changes (added/changed/removed logical drives, etc.) which mean that
6519  * we should rescan the controller for devices.
6520  * Also check flag for driver-initiated rescan.
6521  */
6522 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6523 {
6524 	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6525 		return 0;
6526 
6527 	h->events = readl(&(h->cfgtable->event_notify));
6528 	return h->events & RESCAN_REQUIRED_EVENT_BITS;
6529 }
6530 
6531 /*
6532  * Check if any of the offline devices have become ready
6533  */
6534 static int hpsa_offline_devices_ready(struct ctlr_info *h)
6535 {
6536 	unsigned long flags;
6537 	struct offline_device_entry *d;
6538 	struct list_head *this, *tmp;
6539 
6540 	spin_lock_irqsave(&h->offline_device_lock, flags);
6541 	list_for_each_safe(this, tmp, &h->offline_device_list) {
6542 		d = list_entry(this, struct offline_device_entry,
6543 				offline_list);
6544 		spin_unlock_irqrestore(&h->offline_device_lock, flags);
6545 		if (!hpsa_volume_offline(h, d->scsi3addr)) {
6546 			spin_lock_irqsave(&h->offline_device_lock, flags);
6547 			list_del(&d->offline_list);
6548 			spin_unlock_irqrestore(&h->offline_device_lock, flags);
6549 			return 1;
6550 		}
6551 		spin_lock_irqsave(&h->offline_device_lock, flags);
6552 	}
6553 	spin_unlock_irqrestore(&h->offline_device_lock, flags);
6554 	return 0;
6555 }
6556 
6557 
6558 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6559 {
6560 	unsigned long flags;
6561 	struct ctlr_info *h = container_of(to_delayed_work(work),
6562 					struct ctlr_info, monitor_ctlr_work);
6563 	detect_controller_lockup(h);
6564 	if (lockup_detected(h))
6565 		return;
6566 
6567 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6568 		scsi_host_get(h->scsi_host);
6569 		hpsa_ack_ctlr_events(h);
6570 		hpsa_scan_start(h->scsi_host);
6571 		scsi_host_put(h->scsi_host);
6572 	}
6573 
6574 	spin_lock_irqsave(&h->lock, flags);
6575 	if (h->remove_in_progress) {
6576 		spin_unlock_irqrestore(&h->lock, flags);
6577 		return;
6578 	}
6579 	schedule_delayed_work(&h->monitor_ctlr_work,
6580 				h->heartbeat_sample_interval);
6581 	spin_unlock_irqrestore(&h->lock, flags);
6582 }
6583 
6584 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6585 {
6586 	int dac, rc;
6587 	struct ctlr_info *h;
6588 	int try_soft_reset = 0;
6589 	unsigned long flags;
6590 
6591 	if (number_of_controllers == 0)
6592 		printk(KERN_INFO DRIVER_NAME "\n");
6593 
6594 	rc = hpsa_init_reset_devices(pdev);
6595 	if (rc) {
6596 		if (rc != -ENOTSUPP)
6597 			return rc;
6598 		/* If the reset fails in a particular way (it has no way to do
6599 		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
6600 		 * a soft reset once we get the controller configured up to the
6601 		 * point that it can accept a command.
6602 		 */
6603 		try_soft_reset = 1;
6604 		rc = 0;
6605 	}
6606 
6607 reinit_after_soft_reset:
6608 
6609 	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
6612 	 */
6613 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
6614 	h = kzalloc(sizeof(*h), GFP_KERNEL);
6615 	if (!h)
6616 		return -ENOMEM;
6617 
6618 	h->pdev = pdev;
6619 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
6620 	INIT_LIST_HEAD(&h->offline_device_list);
6621 	spin_lock_init(&h->lock);
6622 	spin_lock_init(&h->offline_device_lock);
6623 	spin_lock_init(&h->scan_lock);
6624 	spin_lock_init(&h->passthru_count_lock);
6625 
6626 	/* Allocate and clear per-cpu variable lockup_detected */
6627 	h->lockup_detected = alloc_percpu(u32);
6628 	if (!h->lockup_detected) {
6629 		rc = -ENOMEM;
6630 		goto clean1;
6631 	}
6632 	set_lockup_detected_for_all_cpus(h, 0);
6633 
6634 	rc = hpsa_pci_init(h);
6635 	if (rc != 0)
6636 		goto clean1;
6637 
6638 	sprintf(h->devname, HPSA "%d", number_of_controllers);
6639 	h->ctlr = number_of_controllers;
6640 	number_of_controllers++;
6641 
6642 	/* configure PCI DMA stuff */
6643 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
6644 	if (rc == 0) {
6645 		dac = 1;
6646 	} else {
6647 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6648 		if (rc == 0) {
6649 			dac = 0;
6650 		} else {
6651 			dev_err(&pdev->dev, "no suitable DMA available\n");
6652 			goto clean1;
6653 		}
6654 	}
6655 
6656 	/* make sure the board interrupts are off */
6657 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
6658 
6659 	if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
6660 		goto clean2;
6661 	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
6662 	       h->devname, pdev->device,
6663 	       h->intr[h->intr_mode], dac ? "" : " not");
6664 	rc = hpsa_allocate_cmd_pool(h);
6665 	if (rc)
6666 		goto clean2_and_free_irqs;
6667 	if (hpsa_allocate_sg_chain_blocks(h))
6668 		goto clean4;
6669 	init_waitqueue_head(&h->scan_wait_queue);
6670 	h->scan_finished = 1; /* no scan currently in progress */
6671 
6672 	pci_set_drvdata(pdev, h);
6673 	h->ndevices = 0;
6674 	h->hba_mode_enabled = 0;
6675 	h->scsi_host = NULL;
6676 	spin_lock_init(&h->devlock);
6677 	hpsa_put_ctlr_into_performant_mode(h);
6678 
6679 	/* At this point, the controller is ready to take commands.
6680 	 * Now, if reset_devices and the hard reset didn't work, try
6681 	 * the soft reset and see if that works.
6682 	 */
6683 	if (try_soft_reset) {
6684 
6685 		/* This is kind of gross.  We may or may not get a completion
6686 		 * from the soft reset command, and if we do, then the value
6687 		 * from the fifo may or may not be valid.  So, we wait 10 secs
6688 		 * after the reset throwing away any completions we get during
6689 		 * that time.  Unregister the interrupt handler and register
6690 		 * fake ones to scoop up any residual completions.
6691 		 */
6692 		spin_lock_irqsave(&h->lock, flags);
6693 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
6694 		spin_unlock_irqrestore(&h->lock, flags);
6695 		hpsa_free_irqs(h);
6696 		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
6697 					hpsa_intx_discard_completions);
6698 		if (rc) {
6699 			dev_warn(&h->pdev->dev,
6700 				"Failed to request_irq after soft reset.\n");
6701 			goto clean4;
6702 		}
6703 
6704 		rc = hpsa_kdump_soft_reset(h);
6705 		if (rc)
6706 			/* Neither hard nor soft reset worked, we're hosed. */
6707 			goto clean4;
6708 
6709 		dev_info(&h->pdev->dev, "Board READY.\n");
6710 		dev_info(&h->pdev->dev,
6711 			"Waiting for stale completions to drain.\n");
6712 		h->access.set_intr_mask(h, HPSA_INTR_ON);
6713 		msleep(10000);
6714 		h->access.set_intr_mask(h, HPSA_INTR_OFF);
6715 
6716 		rc = controller_reset_failed(h->cfgtable);
6717 		if (rc)
6718 			dev_info(&h->pdev->dev,
6719 				"Soft reset appears to have failed.\n");
6720 
6721 		/* Since the controller has been reset, we have to go back and
6722 		 * re-init everything.  Easiest to just forget what we've done
6723 		 * and do it all over again.
6724 		 */
6725 		hpsa_undo_allocations_after_kdump_soft_reset(h);
6726 		try_soft_reset = 0;
6727 		if (rc)
6728 			/* don't go to clean4, we already unallocated */
6729 			return -ENODEV;
6730 
6731 		goto reinit_after_soft_reset;
6732 	}
6733 
6734 	/* Enable accelerated I/O path at the driver layer */
6735 	h->acciopath_status = 1;
6736 
6738 	/* Turn the interrupts on so we can service requests */
6739 	h->access.set_intr_mask(h, HPSA_INTR_ON);
6740 
6741 	hpsa_hba_inquiry(h);
6742 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
6743 
6744 	/* Monitor the controller for firmware lockups */
6745 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
6746 	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
6747 	schedule_delayed_work(&h->monitor_ctlr_work,
6748 				h->heartbeat_sample_interval);
6749 	return 0;
6750 
6751 clean4:
6752 	hpsa_free_sg_chain_blocks(h);
6753 	hpsa_free_cmd_pool(h);
6754 clean2_and_free_irqs:
6755 	hpsa_free_irqs(h);
6756 clean2:
6757 clean1:
6758 	if (h->lockup_detected)
6759 		free_percpu(h->lockup_detected);
6760 	kfree(h);
6761 	return rc;
6762 }
6763 
6764 static void hpsa_flush_cache(struct ctlr_info *h)
6765 {
6766 	char *flush_buf;
6767 	struct CommandList *c;
6768 
6769 	/* Don't bother trying to flush the cache if locked up */
6770 	if (unlikely(lockup_detected(h)))
6771 		return;
6772 	flush_buf = kzalloc(4, GFP_KERNEL);
6773 	if (!flush_buf)
6774 		return;
6775 
6776 	c = cmd_alloc(h);
6777 	if (!c) {
6778 		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
6779 		goto out_of_memory;
6780 	}
6781 	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
6782 		RAID_CTLR_LUNID, TYPE_CMD))
6783 		goto out;
6784 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
6785 	if (c->err_info->CommandStatus == 0)
6786 		goto free_cmd;
6787 out:
6788 	dev_warn(&h->pdev->dev, "error flushing cache on controller\n");
6789 free_cmd:
6790 	cmd_free(h, c);
6791 out_of_memory:
6792 	kfree(flush_buf);
6793 }
6794 
6795 static void hpsa_shutdown(struct pci_dev *pdev)
6796 {
6797 	struct ctlr_info *h;
6798 
6799 	h = pci_get_drvdata(pdev);
6800 	/* Turn board interrupts off and send the flush-cache command so
6801 	 * that all data in the battery-backed cache is written out to disk.
6802 	 */
6804 	hpsa_flush_cache(h);
6805 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
6806 	hpsa_free_irqs_and_disable_msix(h);
6807 }
6808 
6809 static void hpsa_free_device_info(struct ctlr_info *h)
6810 {
6811 	int i;
6812 
6813 	for (i = 0; i < h->ndevices; i++)
6814 		kfree(h->dev[i]);
6815 }
6816 
6817 static void hpsa_remove_one(struct pci_dev *pdev)
6818 {
6819 	struct ctlr_info *h;
6820 	unsigned long flags;
6821 
6822 	if (pci_get_drvdata(pdev) == NULL) {
6823 		dev_err(&pdev->dev, "unable to remove device\n");
6824 		return;
6825 	}
6826 	h = pci_get_drvdata(pdev);
6827 
6828 	/* Get rid of any controller monitoring work items */
6829 	spin_lock_irqsave(&h->lock, flags);
6830 	h->remove_in_progress = 1;
6831 	cancel_delayed_work(&h->monitor_ctlr_work);
6832 	spin_unlock_irqrestore(&h->lock, flags);
6833 
6834 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
6835 	hpsa_shutdown(pdev);
6836 	iounmap(h->vaddr);
6837 	iounmap(h->transtable);
6838 	iounmap(h->cfgtable);
6839 	hpsa_free_device_info(h);
6840 	hpsa_free_sg_chain_blocks(h);
6841 	pci_free_consistent(h->pdev,
6842 		h->nr_cmds * sizeof(struct CommandList),
6843 		h->cmd_pool, h->cmd_pool_dhandle);
6844 	pci_free_consistent(h->pdev,
6845 		h->nr_cmds * sizeof(struct ErrorInfo),
6846 		h->errinfo_pool, h->errinfo_pool_dhandle);
6847 	hpsa_free_reply_queues(h);
6848 	kfree(h->cmd_pool_bits);
6849 	kfree(h->blockFetchTable);
6850 	kfree(h->ioaccel1_blockFetchTable);
6851 	kfree(h->ioaccel2_blockFetchTable);
6852 	kfree(h->hba_inquiry_data);
6853 	pci_disable_device(pdev);
6854 	pci_release_regions(pdev);
6855 	free_percpu(h->lockup_detected);
6856 	kfree(h);
6857 }
6858 
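/* Suspend and resume are deliberately not implemented; returning -ENOSYS
 * ("function not implemented") rejects any power-management request.
 */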
6859 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
6860 	__attribute__((unused)) pm_message_t state)
6861 {
6862 	return -ENOSYS;
6863 }
6864 
6865 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
6866 {
6867 	return -ENOSYS;
6868 }
6869 
6870 static struct pci_driver hpsa_pci_driver = {
6871 	.name = HPSA,
6872 	.probe = hpsa_init_one,
6873 	.remove = hpsa_remove_one,
6874 	.id_table = hpsa_pci_device_id,
6875 	.shutdown = hpsa_shutdown,
6876 	.suspend = hpsa_suspend,
6877 	.resume = hpsa_resume,
6878 };
6879 
6880 /* Fill in bucket_map[], given nsgs (the max number of
6881  * scatter gather elements supported) and bucket[],
6882  * which is an array of 8 integers.  The bucket[] array
6883  * contains 8 different DMA transfer sizes (in 16
6884  * byte increments) which the controller uses to fetch
6885  * commands.  This function fills in bucket_map[], which
6886  * maps a given number of scatter gather elements to one of
6887  * the 8 DMA transfer sizes.  The point of it is to allow the
6888  * controller to only do as much DMA as needed to fetch the
6889  * command, with the DMA transfer size encoded in the lower
6890  * bits of the command address.
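 *
 * Worked example (illustrative): with bucket[] = {5, 6, 8, 10, 12, 20,
 * 28, SG_ENTRIES_IN_CMD + 4} and min_blocks = 4, a command with i = 3
 * SG entries has size 3 + 4 = 7 blocks; the first bucket >= 7 is
 * bucket[2] = 8, so bucket_map[3] = 2.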
6891  */
6892 static void calc_bucket_map(int bucket[], int num_buckets,
6893 	int nsgs, int min_blocks, u32 *bucket_map)
6894 {
6895 	int i, j, b, size;
6896 
6897 	/* Note, bucket_map must have nsgs+1 entries. */
6898 	for (i = 0; i <= nsgs; i++) {
6899 		/* Compute size of a command with i SG entries */
6900 		size = i + min_blocks;
6901 		b = num_buckets; /* Assume the biggest bucket */
6902 		/* Find the bucket that is just big enough */
6903 		for (j = 0; j < num_buckets; j++) {
6904 			if (bucket[j] >= size) {
6905 				b = j;
6906 				break;
6907 			}
6908 		}
6909 		/* for a command with i SG entries, use bucket b. */
6910 		bucket_map[i] = b;
6911 	}
6912 }
6913 
6914 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
6915 {
6916 	int i;
6917 	unsigned long register_value;
6918 	unsigned long transMethod = CFGTBL_Trans_Performant |
6919 			(trans_support & CFGTBL_Trans_use_short_tags) |
6920 				CFGTBL_Trans_enable_directed_msix |
6921 			(trans_support & (CFGTBL_Trans_io_accel1 |
6922 				CFGTBL_Trans_io_accel2));
6923 	struct access_method access = SA5_performant_access;
6924 
6925 	/* This is a bit complicated.  There are 8 registers on
6926 	 * the controller which we write to in order to tell it the 8
6927 	 * different sizes of commands that may occur.  It's a way of
6928 	 * reducing the DMA done to fetch each command.  Encoded into
6929 	 * each command's tag are 3 bits which communicate to the controller
6930 	 * which of the eight sizes that command fits within.  The size of
6931 	 * each command depends on how many scatter gather entries there are.
6932 	 * Each SG entry requires 16 bytes.  The eight registers are programmed
6933 	 * with the number of 16-byte blocks a command of that size requires.
6934 	 * The smallest command possible requires 5 such 16-byte blocks;
6935 	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
6936 	 * blocks.  Note, this only extends to the SG entries contained
6937 	 * within the command block, and does not extend to chained blocks
6938 	 * of SG elements.   bft[] contains the eight values we write to
6939 	 * the registers.  They are not evenly distributed, but have more
6940 	 * sizes for small commands, and fewer sizes for larger commands.
6941 	 */
6942 	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
6943 #define MIN_IOACCEL2_BFT_ENTRY 5
6944 #define HPSA_IOACCEL2_HEADER_SZ 4
6945 	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
6946 			13, 14, 15, 16, 17, 18, 19,
6947 			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
6948 	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
6949 	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
6950 	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
6951 				 16 * MIN_IOACCEL2_BFT_ENTRY);
6952 	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
6953 	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
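	/* The check above guarantees bft[7] (SG_ENTRIES_IN_CMD + 4) is at
	 * least bft[6] (28), so bft[] stays sorted in increasing order.
	 */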
6954 	/*  5 = 1 s/g entry or 4k
6955 	 *  6 = 2 s/g entry or 8k
6956 	 *  8 = 4 s/g entry or 16k
6957 	 * 10 = 6 s/g entry or 24k
6958 	 */
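	/* Worked example (illustrative): a command with 16 SG entries needs
	 * 16 + 4 = 20 sixteen-byte blocks, so it maps to bft[5] (= 20) and
	 * the controller fetches 20 * 16 = 320 bytes for that command.
	 */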
6959 
6960 	/* If the controller supports either ioaccel method then
6961 	 * we can also use the RAID stack submit path that does not
6962 	 * perform the superfluous readl() after each command submission.
6963 	 */
6964 	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
6965 		access = SA5_performant_access_no_read;
6966 
6967 	/* Controller spec: zero out this buffer. */
6968 	for (i = 0; i < h->nreply_queues; i++)
6969 		memset(h->reply_queue[i].head, 0, h->reply_queue_size);
6970 
6971 	bft[7] = SG_ENTRIES_IN_CMD + 4;
6972 	calc_bucket_map(bft, ARRAY_SIZE(bft),
6973 				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
6974 	for (i = 0; i < 8; i++)
6975 		writel(bft[i], &h->transtable->BlockFetch[i]);
6976 
6977 	/* size of controller ring buffer */
6978 	writel(h->max_commands, &h->transtable->RepQSize);
6979 	writel(h->nreply_queues, &h->transtable->RepQCount);
6980 	writel(0, &h->transtable->RepQCtrAddrLow32);
6981 	writel(0, &h->transtable->RepQCtrAddrHigh32);
6982 
6983 	for (i = 0; i < h->nreply_queues; i++) {
6984 		writel(0, &h->transtable->RepQAddr[i].upper);
6985 		writel(h->reply_queue[i].busaddr,
6986 			&h->transtable->RepQAddr[i].lower);
6987 	}
6988 
6989 	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6990 	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
6991 	/*
6992 	 * Enable outbound interrupt coalescing in accelerator mode.
6993 	 */
6994 	if (trans_support & CFGTBL_Trans_io_accel1) {
6995 		access = SA5_ioaccel_mode1_access;
6996 		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
6997 		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
6998 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
6999 		access = SA5_ioaccel_mode2_access;
7000 		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7001 		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7002 	}
7005 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7006 	hpsa_wait_for_mode_change_ack(h);
7007 	register_value = readl(&(h->cfgtable->TransportActive));
7008 	if (!(register_value & CFGTBL_Trans_Performant)) {
7009 		dev_err(&h->pdev->dev,
7010 			"performant mode problem - transport not active\n");
7011 		return;
7012 	}
7013 	/* Change the access methods to the performant access methods */
7014 	h->access = access;
7015 	h->transMethod = transMethod;
7016 
7017 	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7018 		(trans_support & CFGTBL_Trans_io_accel2)))
7019 		return;
7020 
7021 	if (trans_support & CFGTBL_Trans_io_accel1) {
7022 		/* Set up I/O accelerator mode */
7023 		for (i = 0; i < h->nreply_queues; i++) {
7024 			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7025 			h->reply_queue[i].current_entry =
7026 				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7027 		}
7028 		bft[7] = h->ioaccel_maxsg + 8;
7029 		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7030 				h->ioaccel1_blockFetchTable);
7031 
7032 		/* initialize all reply queue entries to unused */
7033 		for (i = 0; i < h->nreply_queues; i++)
7034 			memset(h->reply_queue[i].head,
7035 				(u8) IOACCEL_MODE1_REPLY_UNUSED,
7036 				h->reply_queue_size);
7037 
7038 		/* set all the constant fields in the accelerator command
7039 		 * frames once at init time to save CPU cycles later.
7040 		 */
7041 		for (i = 0; i < h->nr_cmds; i++) {
7042 			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7043 
7044 			cp->function = IOACCEL1_FUNCTION_SCSIIO;
7045 			cp->err_info = (u32) (h->errinfo_pool_dhandle +
7046 					(i * sizeof(struct ErrorInfo)));
7047 			cp->err_info_len = sizeof(struct ErrorInfo);
7048 			cp->sgl_offset = IOACCEL1_SGLOFFSET;
7049 			cp->host_context_flags =
7050 				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
7051 			cp->timeout_sec = 0;
7052 			cp->ReplyQueue = 0;
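			/* Encode the pool index i in the tag so the command
			 * can be recovered from a completed tag by shifting
			 * right by DIRECT_LOOKUP_SHIFT.
			 */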
7053 			cp->tag =
7054 				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
7055 			cp->host_addr =
7056 				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
7057 					(i * sizeof(struct io_accel1_cmd)));
7058 		}
7059 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
7060 		u64 cfg_offset, cfg_base_addr_index;
7061 		u32 bft2_offset, cfg_base_addr;
7062 		int rc;
7063 
7064 		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7065 			&cfg_base_addr_index, &cfg_offset);
7066 		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7067 		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7068 		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7069 				4, h->ioaccel2_blockFetchTable);
7070 		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7071 		BUILD_BUG_ON(offsetof(struct CfgTable,
7072 				io_accel_request_size_offset) != 0xb8);
7073 		h->ioaccel2_bft2_regs =
7074 			remap_pci_mem(pci_resource_start(h->pdev,
7075 					cfg_base_addr_index) +
7076 					cfg_offset + bft2_offset,
7077 					ARRAY_SIZE(bft2) *
7078 					sizeof(*h->ioaccel2_bft2_regs));
7079 		for (i = 0; i < ARRAY_SIZE(bft2); i++)
7080 			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7081 	}
7082 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7083 	hpsa_wait_for_mode_change_ack(h);
7084 }
7085 
7086 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7087 {
7088 	h->ioaccel_maxsg =
7089 		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7090 	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7091 		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7092 
7093 	/* Command structures must be aligned on a 128-byte boundary
7094 	 * because the 7 lower bits of the address are used by the
7095 	 * hardware.
7096 	 */
7097 	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7098 			IOACCEL1_COMMANDLIST_ALIGNMENT);
7099 	h->ioaccel_cmd_pool =
7100 		pci_alloc_consistent(h->pdev,
7101 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7102 			&(h->ioaccel_cmd_pool_dhandle));
7103 
7104 	h->ioaccel1_blockFetchTable =
7105 		kmalloc(((h->ioaccel_maxsg + 1) *
7106 				sizeof(u32)), GFP_KERNEL);
7107 
7108 	if ((h->ioaccel_cmd_pool == NULL) ||
7109 		(h->ioaccel1_blockFetchTable == NULL))
7110 		goto clean_up;
7111 
7112 	memset(h->ioaccel_cmd_pool, 0,
7113 		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7114 	return 0;
7115 
7116 clean_up:
7117 	if (h->ioaccel_cmd_pool)
7118 		pci_free_consistent(h->pdev,
7119 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7120 			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7121 	kfree(h->ioaccel1_blockFetchTable);
7122 	return 1;
7123 }
7124 
7125 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7126 {
7127 	/* Allocate ioaccel2 mode command blocks and block fetch table */
7128 
7129 	h->ioaccel_maxsg =
7130 		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7131 	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7132 		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7133 
7134 	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7135 			IOACCEL2_COMMANDLIST_ALIGNMENT);
7136 	h->ioaccel2_cmd_pool =
7137 		pci_alloc_consistent(h->pdev,
7138 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7139 			&(h->ioaccel2_cmd_pool_dhandle));
7140 
7141 	h->ioaccel2_blockFetchTable =
7142 		kmalloc(((h->ioaccel_maxsg + 1) *
7143 				sizeof(u32)), GFP_KERNEL);
7144 
7145 	if ((h->ioaccel2_cmd_pool == NULL) ||
7146 		(h->ioaccel2_blockFetchTable == NULL))
7147 		goto clean_up;
7148 
7149 	memset(h->ioaccel2_cmd_pool, 0,
7150 		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7151 	return 0;
7152 
7153 clean_up:
7154 	if (h->ioaccel2_cmd_pool)
7155 		pci_free_consistent(h->pdev,
7156 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7157 			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7158 	kfree(h->ioaccel2_blockFetchTable);
7159 	return 1;
7160 }
7161 
7162 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7163 {
7164 	u32 trans_support;
7165 	unsigned long transMethod = CFGTBL_Trans_Performant |
7166 					CFGTBL_Trans_use_short_tags;
7167 	int i;
7168 
7169 	if (hpsa_simple_mode)
7170 		return;
7171 
7172 	trans_support = readl(&(h->cfgtable->TransportSupport));
7173 	if (!(trans_support & PERFORMANT_MODE))
7174 		return;
7175 
7176 	/* Check for I/O accelerator mode support */
7177 	if (trans_support & CFGTBL_Trans_io_accel1) {
7178 		transMethod |= CFGTBL_Trans_io_accel1 |
7179 				CFGTBL_Trans_enable_directed_msix;
7180 		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7181 			goto clean_up;
7182 	} else if (trans_support & CFGTBL_Trans_io_accel2) {
7183 		transMethod |= CFGTBL_Trans_io_accel2 |
7184 				CFGTBL_Trans_enable_directed_msix;
7185 		if (ioaccel2_alloc_cmds_and_bft(h))
7186 			goto clean_up;
7187 	}
7190 
7191 	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7192 	hpsa_get_max_perf_mode_cmds(h);
7193 	/* Performant mode ring buffer and supporting data structures */
7194 	h->reply_queue_size = h->max_commands * sizeof(u64);
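	/* (each reply-queue entry is one 8-byte tag, and the ring must hold
	 *  one entry per possible outstanding command, hence the size above)
	 */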
7195 
7196 	for (i = 0; i < h->nreply_queues; i++) {
7197 		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7198 						h->reply_queue_size,
7199 						&(h->reply_queue[i].busaddr));
7200 		if (!h->reply_queue[i].head)
7201 			goto clean_up;
7202 		h->reply_queue[i].size = h->max_commands;
7203 		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
7204 		h->reply_queue[i].current_entry = 0;
7205 	}
7206 
7207 	/* Need a block fetch table for performant mode */
7208 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7209 				sizeof(u32)), GFP_KERNEL);
7210 	if (!h->blockFetchTable)
7211 		goto clean_up;
7212 
7213 	hpsa_enter_performant_mode(h, trans_support);
7214 	return;
7215 
7216 clean_up:
7217 	hpsa_free_reply_queues(h);
7218 	kfree(h->blockFetchTable);
7219 }
7220 
7221 static int is_accelerated_cmd(struct CommandList *c)
7222 {
7223 	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7224 }
7225 
7226 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7227 {
7228 	struct CommandList *c = NULL;
7229 	int i, accel_cmds_out;
7230 
7231 	do { /* wait for all outstanding ioaccel commands to drain out */
7232 		accel_cmds_out = 0;
7233 		for (i = 0; i < h->nr_cmds; i++) {
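			/* Skip pool slots whose allocation bit is clear;
			 * no command is outstanding in those slots.
			 */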
7234 			if (!test_bit(i & (BITS_PER_LONG - 1),
7235 					h->cmd_pool_bits + (i / BITS_PER_LONG)))
7236 				continue;
7237 			c = h->cmd_pool + i;
7238 			accel_cmds_out += is_accelerated_cmd(c);
7239 		}
7240 		if (accel_cmds_out <= 0)
7241 			break;
7242 		msleep(100);
7243 	} while (1);
7244 }
7245 
7246 /*
7247  *  This is it.  Register the PCI driver information for the cards we control;
7248  *  the OS will call our registered routines when it finds one of our cards.
7249  */
7250 static int __init hpsa_init(void)
7251 {
7252 	return pci_register_driver(&hpsa_pci_driver);
7253 }
7254 
7255 static void __exit hpsa_cleanup(void)
7256 {
7257 	pci_unregister_driver(&hpsa_pci_driver);
7258 }
7259 
7260 static void __attribute__((unused)) verify_offsets(void)
7261 {
7262 #define VERIFY_OFFSET(member, offset) \
7263 	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
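/* BUILD_BUG_ON() fails the compile when its condition is true, so each
 * VERIFY_OFFSET() below turns any drift between these structure layouts
 * and the offsets the controller firmware expects into a build error.
 */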
7264 
7265 	VERIFY_OFFSET(structure_size, 0);
7266 	VERIFY_OFFSET(volume_blk_size, 4);
7267 	VERIFY_OFFSET(volume_blk_cnt, 8);
7268 	VERIFY_OFFSET(phys_blk_shift, 16);
7269 	VERIFY_OFFSET(parity_rotation_shift, 17);
7270 	VERIFY_OFFSET(strip_size, 18);
7271 	VERIFY_OFFSET(disk_starting_blk, 20);
7272 	VERIFY_OFFSET(disk_blk_cnt, 28);
7273 	VERIFY_OFFSET(data_disks_per_row, 36);
7274 	VERIFY_OFFSET(metadata_disks_per_row, 38);
7275 	VERIFY_OFFSET(row_cnt, 40);
7276 	VERIFY_OFFSET(layout_map_count, 42);
7277 	VERIFY_OFFSET(flags, 44);
7278 	VERIFY_OFFSET(dekindex, 46);
7279 	/* VERIFY_OFFSET(reserved, 48); */
7280 	VERIFY_OFFSET(data, 64);
7281 
7282 #undef VERIFY_OFFSET
7283 
7284 #define VERIFY_OFFSET(member, offset) \
7285 	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7286 
7287 	VERIFY_OFFSET(IU_type, 0);
7288 	VERIFY_OFFSET(direction, 1);
7289 	VERIFY_OFFSET(reply_queue, 2);
7290 	/* VERIFY_OFFSET(reserved1, 3);  */
7291 	VERIFY_OFFSET(scsi_nexus, 4);
7292 	VERIFY_OFFSET(Tag, 8);
7293 	VERIFY_OFFSET(cdb, 16);
7294 	VERIFY_OFFSET(cciss_lun, 32);
7295 	VERIFY_OFFSET(data_len, 40);
7296 	VERIFY_OFFSET(cmd_priority_task_attr, 44);
7297 	VERIFY_OFFSET(sg_count, 45);
7298 	/* VERIFY_OFFSET(reserved3 */
7299 	VERIFY_OFFSET(err_ptr, 48);
7300 	VERIFY_OFFSET(err_len, 56);
7301 	/* VERIFY_OFFSET(reserved4  */
7302 	VERIFY_OFFSET(sg, 64);
7303 
7304 #undef VERIFY_OFFSET
7305 
7306 #define VERIFY_OFFSET(member, offset) \
7307 	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7308 
7309 	VERIFY_OFFSET(dev_handle, 0x00);
7310 	VERIFY_OFFSET(reserved1, 0x02);
7311 	VERIFY_OFFSET(function, 0x03);
7312 	VERIFY_OFFSET(reserved2, 0x04);
7313 	VERIFY_OFFSET(err_info, 0x0C);
7314 	VERIFY_OFFSET(reserved3, 0x10);
7315 	VERIFY_OFFSET(err_info_len, 0x12);
7316 	VERIFY_OFFSET(reserved4, 0x13);
7317 	VERIFY_OFFSET(sgl_offset, 0x14);
7318 	VERIFY_OFFSET(reserved5, 0x15);
7319 	VERIFY_OFFSET(transfer_len, 0x1C);
7320 	VERIFY_OFFSET(reserved6, 0x20);
7321 	VERIFY_OFFSET(io_flags, 0x24);
7322 	VERIFY_OFFSET(reserved7, 0x26);
7323 	VERIFY_OFFSET(LUN, 0x34);
7324 	VERIFY_OFFSET(control, 0x3C);
7325 	VERIFY_OFFSET(CDB, 0x40);
7326 	VERIFY_OFFSET(reserved8, 0x50);
7327 	VERIFY_OFFSET(host_context_flags, 0x60);
7328 	VERIFY_OFFSET(timeout_sec, 0x62);
7329 	VERIFY_OFFSET(ReplyQueue, 0x64);
7330 	VERIFY_OFFSET(reserved9, 0x65);
7331 	VERIFY_OFFSET(tag, 0x68);
7332 	VERIFY_OFFSET(host_addr, 0x70);
7333 	VERIFY_OFFSET(CISS_LUN, 0x78);
7334 	VERIFY_OFFSET(SG, 0x78 + 8);
7335 #undef VERIFY_OFFSET
7336 }
7337 
7338 module_init(hpsa_init);
7339 module_exit(hpsa_cleanup);
7340