1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2003-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  Authors: Avago Technologies
21  *           Sreenivas Bagalkote
22  *           Sumant Patro
23  *           Bo Yang
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53 
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_tcq.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61 
62 /*
63  * Number of sectors per IO command
64  * Will be set in megasas_init_mfi if user does not provide
65  */
66 static unsigned int max_sectors;
67 module_param_named(max_sectors, max_sectors, int, 0);
68 MODULE_PARM_DESC(max_sectors,
69 	"Maximum number of sectors per IO command");
70 
71 static int msix_disable;
72 module_param(msix_disable, int, S_IRUGO);
73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
74 
75 static unsigned int msix_vectors;
76 module_param(msix_vectors, int, S_IRUGO);
77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
78 
79 static int allow_vf_ioctls;
80 module_param(allow_vf_ioctls, int, S_IRUGO);
81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
82 
83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
84 module_param(throttlequeuedepth, int, S_IRUGO);
85 MODULE_PARM_DESC(throttlequeuedepth,
86 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
87 
88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89 module_param(resetwaittime, int, S_IRUGO);
90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
91 		 "before resetting adapter. Default: 180");
92 
93 int smp_affinity_enable = 1;
94 module_param(smp_affinity_enable, int, S_IRUGO);
95 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
96 
97 int rdpq_enable = 1;
98 module_param(rdpq_enable, int, S_IRUGO);
99 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
100 
101 unsigned int dual_qdepth_disable;
102 module_param(dual_qdepth_disable, int, S_IRUGO);
103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
104 
105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
106 module_param(scmd_timeout, int, S_IRUGO);
107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
108 
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(MEGASAS_VERSION);
111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
113 
114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115 static int megasas_get_pd_list(struct megasas_instance *instance);
116 static int megasas_ld_list_query(struct megasas_instance *instance,
117 				 u8 query_type);
118 static int megasas_issue_init_mfi(struct megasas_instance *instance);
119 static int megasas_register_aen(struct megasas_instance *instance,
120 				u32 seq_num, u32 class_locale_word);
121 static void megasas_get_pd_info(struct megasas_instance *instance,
122 				struct scsi_device *sdev);
123 static int megasas_get_target_prop(struct megasas_instance *instance,
124 				   struct scsi_device *sdev);
125 /*
126  * PCI ID table for all supported controllers
127  */
128 static struct pci_device_id megasas_pci_table[] = {
129 
130 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
131 	/* xscale IOP */
132 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
133 	/* ppc IOP */
134 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
135 	/* ppc IOP */
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
137 	/* gen2*/
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
139 	/* gen2*/
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
141 	/* skinny*/
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
143 	/* skinny*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
145 	/* xscale IOP, vega */
146 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
147 	/* xscale IOP */
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
149 	/* Fusion */
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
151 	/* Plasma */
152 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
153 	/* Invader */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
155 	/* Fury */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
157 	/* Intruder */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
159 	/* Intruder 24 port*/
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
162 	/* VENTURA */
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
168 	{}
169 };
170 
171 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
172 
173 static int megasas_mgmt_majorno;
174 struct megasas_mgmt_info megasas_mgmt_info;
175 static struct fasync_struct *megasas_async_queue;
176 static DEFINE_MUTEX(megasas_async_queue_mutex);
177 
178 static int megasas_poll_wait_aen;
179 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
180 static u32 support_poll_for_event;
181 u32 megasas_dbg_lvl;
182 static u32 support_device_change;
183 
184 /* define lock for aen poll */
185 spinlock_t poll_aen_lock;
186 
187 void
188 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
189 		     u8 alt_status);
190 static u32
191 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
192 static int
193 megasas_adp_reset_gen2(struct megasas_instance *instance,
194 		       struct megasas_register_set __iomem *reg_set);
195 static irqreturn_t megasas_isr(int irq, void *devp);
196 static u32
197 megasas_init_adapter_mfi(struct megasas_instance *instance);
198 u32
199 megasas_build_and_issue_cmd(struct megasas_instance *instance,
200 			    struct scsi_cmnd *scmd);
201 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
202 int
203 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
204 	int seconds);
205 void megasas_fusion_ocr_wq(struct work_struct *work);
206 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
207 					 int initial);
208 
209 void
210 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
211 {
212 	instance->instancet->fire_cmd(instance,
213 		cmd->frame_phys_addr, 0, instance->reg_set);
214 	return;
215 }
216 
217 /**
218  * megasas_get_cmd -	Get a command from the free pool
219  * @instance:		Adapter soft state
220  *
221  * Returns a free command from the pool
222  */
223 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
224 						  *instance)
225 {
226 	unsigned long flags;
227 	struct megasas_cmd *cmd = NULL;
228 
229 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
230 
231 	if (!list_empty(&instance->cmd_pool)) {
232 		cmd = list_entry((&instance->cmd_pool)->next,
233 				 struct megasas_cmd, list);
234 		list_del_init(&cmd->list);
235 	} else {
236 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
237 	}
238 
239 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
240 	return cmd;
241 }
242 
243 /**
244  * megasas_return_cmd -	Return a cmd to free command pool
245  * @instance:		Adapter soft state
246  * @cmd:		Command packet to be returned to free command pool
247  */
248 void
249 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
250 {
251 	unsigned long flags;
252 	u32 blk_tags;
253 	struct megasas_cmd_fusion *cmd_fusion;
254 	struct fusion_context *fusion = instance->ctrl_context;
255 
256 	/* This flag is used only for fusion adapter.
257 	 * Wait for Interrupt for Polled mode DCMD
258 	 */
259 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
260 		return;
261 
262 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
263 
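	/*
	 * On Fusion adapters every MFI command is paired with a Fusion
	 * command placed just above the SCSI command range in cmd_list;
	 * return that paired command to its pool as well.
	 */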
264 	if (fusion) {
265 		blk_tags = instance->max_scsi_cmds + cmd->index;
266 		cmd_fusion = fusion->cmd_list[blk_tags];
267 		megasas_return_cmd_fusion(instance, cmd_fusion);
268 	}
269 	cmd->scmd = NULL;
270 	cmd->frame_count = 0;
271 	cmd->flags = 0;
272 	memset(cmd->frame, 0, instance->mfi_frame_size);
273 	cmd->frame->io.context = cpu_to_le32(cmd->index);
274 	if (!fusion && reset_devices)
275 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
276 	list_add(&cmd->list, (&instance->cmd_pool)->next);
277 
278 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
279 
280 }
281 
282 static const char *
283 format_timestamp(uint32_t timestamp)
284 {
285 	static char buffer[32];
286 
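	/*
	 * FW timestamps with the top byte set to 0xff are relative to boot;
	 * the low 24 bits hold the seconds-since-boot value.
	 */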
287 	if ((timestamp & 0xff000000) == 0xff000000)
288 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
289 		0x00ffffff);
290 	else
291 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
292 	return buffer;
293 }
294 
295 static const char *
296 format_class(int8_t class)
297 {
298 	static char buffer[6];
299 
300 	switch (class) {
301 	case MFI_EVT_CLASS_DEBUG:
302 		return "debug";
303 	case MFI_EVT_CLASS_PROGRESS:
304 		return "progress";
305 	case MFI_EVT_CLASS_INFO:
306 		return "info";
307 	case MFI_EVT_CLASS_WARNING:
308 		return "WARN";
309 	case MFI_EVT_CLASS_CRITICAL:
310 		return "CRIT";
311 	case MFI_EVT_CLASS_FATAL:
312 		return "FATAL";
313 	case MFI_EVT_CLASS_DEAD:
314 		return "DEAD";
315 	default:
316 		snprintf(buffer, sizeof(buffer), "%d", class);
317 		return buffer;
318 	}
319 }
320 
321 /**
322  * megasas_decode_evt -	Decode FW AEN event and print critical event
323  *				for information.
324  * @instance:			Adapter soft state
325  */
326 static void
327 megasas_decode_evt(struct megasas_instance *instance)
328 {
329 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
330 	union megasas_evt_class_locale class_locale;
331 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
332 
333 	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
334 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
335 			le32_to_cpu(evt_detail->seq_num),
336 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
337 			(class_locale.members.locale),
338 			format_class(class_locale.members.class),
339 			evt_detail->description);
340 }
341 
342 /**
343 *	The following functions are defined for xscale
344 *	(deviceid : 1064R, PERC5) controllers
345 */
346 
347 /**
348  * megasas_enable_intr_xscale -	Enables interrupts
349  * @instance:			Adapter soft state
350  */
351 static inline void
352 megasas_enable_intr_xscale(struct megasas_instance *instance)
353 {
354 	struct megasas_register_set __iomem *regs;
355 
356 	regs = instance->reg_set;
357 	writel(0, &(regs)->outbound_intr_mask);
358 
359 	/* Dummy readl to force pci flush */
360 	readl(&regs->outbound_intr_mask);
361 }
362 
363 /**
364  * megasas_disable_intr_xscale -Disables interrupt
365  * @instance:			Adapter soft state
366  */
367 static inline void
368 megasas_disable_intr_xscale(struct megasas_instance *instance)
369 {
370 	struct megasas_register_set __iomem *regs;
371 	u32 mask = 0x1f;
372 
373 	regs = instance->reg_set;
374 	writel(mask, &regs->outbound_intr_mask);
375 	/* Dummy readl to force pci flush */
376 	readl(&regs->outbound_intr_mask);
377 }
378 
379 /**
380  * megasas_read_fw_status_reg_xscale - returns the current FW status value
381  * @regs:			MFI register set
382  */
383 static u32
384 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
385 {
386 	return readl(&(regs)->outbound_msg_0);
387 }
388 /**
389  * megasas_clear_intr_xscale -	Check & clear interrupt
390  * @regs:				MFI register set
391  */
392 static int
393 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
394 {
395 	u32 status;
396 	u32 mfiStatus = 0;
397 
398 	/*
399 	 * Check if it is our interrupt
400 	 */
401 	status = readl(&regs->outbound_intr_status);
402 
403 	if (status & MFI_OB_INTR_STATUS_MASK)
404 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
405 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
406 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
407 
408 	/*
409 	 * Clear the interrupt by writing back the same value
410 	 */
411 	if (mfiStatus)
412 		writel(status, &regs->outbound_intr_status);
413 
414 	/* Dummy readl to force pci flush */
415 	readl(&regs->outbound_intr_status);
416 
417 	return mfiStatus;
418 }
419 
420 /**
421  * megasas_fire_cmd_xscale -	Sends command to the FW
422  * @frame_phys_addr :		Physical address of cmd
423  * @frame_count :		Number of frames for the command
424  * @regs :			MFI register set
425  */
426 static inline void
427 megasas_fire_cmd_xscale(struct megasas_instance *instance,
428 		dma_addr_t frame_phys_addr,
429 		u32 frame_count,
430 		struct megasas_register_set __iomem *regs)
431 {
432 	unsigned long flags;
433 
434 	spin_lock_irqsave(&instance->hba_lock, flags);
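	/*
	 * Post the command: the frame count is packed into the low bits of
	 * the frame physical address shifted down by 3.
	 */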
435 	writel((frame_phys_addr >> 3)|(frame_count),
436 	       &(regs)->inbound_queue_port);
437 	spin_unlock_irqrestore(&instance->hba_lock, flags);
438 }
439 
440 /**
441  * megasas_adp_reset_xscale -  For controller reset
442  * @regs:                              MFI register set
443  */
444 static int
445 megasas_adp_reset_xscale(struct megasas_instance *instance,
446 	struct megasas_register_set __iomem *regs)
447 {
448 	u32 i;
449 	u32 pcidata;
450 
451 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
452 
453 	for (i = 0; i < 3; i++)
454 		msleep(1000); /* sleep for 3 secs */
455 	pcidata  = 0;
456 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
457 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
458 	if (pcidata & 0x2) {
459 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
460 		pcidata &= ~0x2;
461 		pci_write_config_dword(instance->pdev,
462 				MFI_1068_PCSR_OFFSET, pcidata);
463 
464 		for (i = 0; i < 2; i++)
465 			msleep(1000); /* need to wait 2 secs again */
466 
467 		pcidata  = 0;
468 		pci_read_config_dword(instance->pdev,
469 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
470 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
471 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
472 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
473 			pcidata = 0;
474 			pci_write_config_dword(instance->pdev,
475 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
476 		}
477 	}
478 	return 0;
479 }
480 
481 /**
482  * megasas_check_reset_xscale -	For controller reset check
483  * @regs:				MFI register set
484  */
485 static int
486 megasas_check_reset_xscale(struct megasas_instance *instance,
487 		struct megasas_register_set __iomem *regs)
488 {
489 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
490 	    (le32_to_cpu(*instance->consumer) ==
491 		MEGASAS_ADPRESET_INPROG_SIGN))
492 		return 1;
493 	return 0;
494 }
495 
496 static struct megasas_instance_template megasas_instance_template_xscale = {
497 
498 	.fire_cmd = megasas_fire_cmd_xscale,
499 	.enable_intr = megasas_enable_intr_xscale,
500 	.disable_intr = megasas_disable_intr_xscale,
501 	.clear_intr = megasas_clear_intr_xscale,
502 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
503 	.adp_reset = megasas_adp_reset_xscale,
504 	.check_reset = megasas_check_reset_xscale,
505 	.service_isr = megasas_isr,
506 	.tasklet = megasas_complete_cmd_dpc,
507 	.init_adapter = megasas_init_adapter_mfi,
508 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
509 	.issue_dcmd = megasas_issue_dcmd,
510 };
511 
512 /**
513 *	This is the end of set of functions & definitions specific
514 *	to xscale (deviceid : 1064R, PERC5) controllers
515 */
516 
517 /**
518 *	The following functions are defined for ppc (deviceid : 0x60)
519 *	controllers
520 */
521 
522 /**
523  * megasas_enable_intr_ppc -	Enables interrupts
524  * @instance:			Adapter soft state
525  */
526 static inline void
527 megasas_enable_intr_ppc(struct megasas_instance *instance)
528 {
529 	struct megasas_register_set __iomem *regs;
530 
531 	regs = instance->reg_set;
532 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
533 
534 	writel(~0x80000000, &(regs)->outbound_intr_mask);
535 
536 	/* Dummy readl to force pci flush */
537 	readl(&regs->outbound_intr_mask);
538 }
539 
540 /**
541  * megasas_disable_intr_ppc -	Disable interrupt
542  * @instance:			Adapter soft state
543  */
544 static inline void
545 megasas_disable_intr_ppc(struct megasas_instance *instance)
546 {
547 	struct megasas_register_set __iomem *regs;
548 	u32 mask = 0xFFFFFFFF;
549 
550 	regs = instance->reg_set;
551 	writel(mask, &regs->outbound_intr_mask);
552 	/* Dummy readl to force pci flush */
553 	readl(&regs->outbound_intr_mask);
554 }
555 
556 /**
557  * megasas_read_fw_status_reg_ppc - returns the current FW status value
558  * @regs:			MFI register set
559  */
560 static u32
561 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
562 {
563 	return readl(&(regs)->outbound_scratch_pad);
564 }
565 
566 /**
567  * megasas_clear_intr_ppc -	Check & clear interrupt
568  * @regs:				MFI register set
569  */
570 static int
571 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
572 {
573 	u32 status, mfiStatus = 0;
574 
575 	/*
576 	 * Check if it is our interrupt
577 	 */
578 	status = readl(&regs->outbound_intr_status);
579 
580 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
581 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
582 
583 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
584 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
585 
586 	/*
587 	 * Clear the interrupt by writing back the same value
588 	 */
589 	writel(status, &regs->outbound_doorbell_clear);
590 
591 	/* Dummy readl to force pci flush */
592 	readl(&regs->outbound_doorbell_clear);
593 
594 	return mfiStatus;
595 }
596 
597 /**
598  * megasas_fire_cmd_ppc -	Sends command to the FW
599  * @frame_phys_addr :		Physical address of cmd
600  * @frame_count :		Number of frames for the command
601  * @regs :			MFI register set
602  */
603 static inline void
604 megasas_fire_cmd_ppc(struct megasas_instance *instance,
605 		dma_addr_t frame_phys_addr,
606 		u32 frame_count,
607 		struct megasas_register_set __iomem *regs)
608 {
609 	unsigned long flags;
610 
611 	spin_lock_irqsave(&instance->hba_lock, flags);
612 	writel((frame_phys_addr | (frame_count<<1))|1,
613 			&(regs)->inbound_queue_port);
614 	spin_unlock_irqrestore(&instance->hba_lock, flags);
615 }
616 
617 /**
618  * megasas_check_reset_ppc -	For controller reset check
619  * @regs:				MFI register set
620  */
621 static int
622 megasas_check_reset_ppc(struct megasas_instance *instance,
623 			struct megasas_register_set __iomem *regs)
624 {
625 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
626 		return 1;
627 
628 	return 0;
629 }
630 
631 static struct megasas_instance_template megasas_instance_template_ppc = {
632 
633 	.fire_cmd = megasas_fire_cmd_ppc,
634 	.enable_intr = megasas_enable_intr_ppc,
635 	.disable_intr = megasas_disable_intr_ppc,
636 	.clear_intr = megasas_clear_intr_ppc,
637 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
638 	.adp_reset = megasas_adp_reset_xscale,
639 	.check_reset = megasas_check_reset_ppc,
640 	.service_isr = megasas_isr,
641 	.tasklet = megasas_complete_cmd_dpc,
642 	.init_adapter = megasas_init_adapter_mfi,
643 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
644 	.issue_dcmd = megasas_issue_dcmd,
645 };
646 
647 /**
648  * megasas_enable_intr_skinny -	Enables interrupts
649  * @instance:			Adapter soft state
650  */
651 static inline void
652 megasas_enable_intr_skinny(struct megasas_instance *instance)
653 {
654 	struct megasas_register_set __iomem *regs;
655 
656 	regs = instance->reg_set;
657 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
658 
659 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
660 
661 	/* Dummy readl to force pci flush */
662 	readl(&regs->outbound_intr_mask);
663 }
664 
665 /**
666  * megasas_disable_intr_skinny -	Disables interrupt
667  * @instance:			Adapter soft state
668  */
669 static inline void
670 megasas_disable_intr_skinny(struct megasas_instance *instance)
671 {
672 	struct megasas_register_set __iomem *regs;
673 	u32 mask = 0xFFFFFFFF;
674 
675 	regs = instance->reg_set;
676 	writel(mask, &regs->outbound_intr_mask);
677 	/* Dummy readl to force pci flush */
678 	readl(&regs->outbound_intr_mask);
679 }
680 
681 /**
682  * megasas_read_fw_status_reg_skinny - returns the current FW status value
683  * @regs:			MFI register set
684  */
685 static u32
686 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
687 {
688 	return readl(&(regs)->outbound_scratch_pad);
689 }
690 
691 /**
692  * megasas_clear_intr_skinny -	Check & clear interrupt
693  * @regs:				MFI register set
694  */
695 static int
696 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
697 {
698 	u32 status;
699 	u32 mfiStatus = 0;
700 
701 	/*
702 	 * Check if it is our interrupt
703 	 */
704 	status = readl(&regs->outbound_intr_status);
705 
706 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
707 		return 0;
708 	}
709 
710 	/*
711 	 * Check if it is our interrupt
712 	 */
713 	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
714 	    MFI_STATE_FAULT) {
715 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
716 	} else
717 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
718 
719 	/*
720 	 * Clear the interrupt by writing back the same value
721 	 */
722 	writel(status, &regs->outbound_intr_status);
723 
724 	/*
725 	 * dummy read to flush PCI
726 	 */
727 	readl(&regs->outbound_intr_status);
728 
729 	return mfiStatus;
730 }
731 
732 /**
733  * megasas_fire_cmd_skinny -	Sends command to the FW
734  * @frame_phys_addr :		Physical address of cmd
735  * @frame_count :		Number of frames for the command
736  * @regs :			MFI register set
737  */
738 static inline void
739 megasas_fire_cmd_skinny(struct megasas_instance *instance,
740 			dma_addr_t frame_phys_addr,
741 			u32 frame_count,
742 			struct megasas_register_set __iomem *regs)
743 {
744 	unsigned long flags;
745 
746 	spin_lock_irqsave(&instance->hba_lock, flags);
747 	writel(upper_32_bits(frame_phys_addr),
748 	       &(regs)->inbound_high_queue_port);
749 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
750 	       &(regs)->inbound_low_queue_port);
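	/*
	 * mmiowb() keeps the two queue-port writes ordered ahead of any
	 * posts issued by the next holder of hba_lock.
	 */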
751 	mmiowb();
752 	spin_unlock_irqrestore(&instance->hba_lock, flags);
753 }
754 
755 /**
756  * megasas_check_reset_skinny -	For controller reset check
757  * @regs:				MFI register set
758  */
759 static int
760 megasas_check_reset_skinny(struct megasas_instance *instance,
761 				struct megasas_register_set __iomem *regs)
762 {
763 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
764 		return 1;
765 
766 	return 0;
767 }
768 
769 static struct megasas_instance_template megasas_instance_template_skinny = {
770 
771 	.fire_cmd = megasas_fire_cmd_skinny,
772 	.enable_intr = megasas_enable_intr_skinny,
773 	.disable_intr = megasas_disable_intr_skinny,
774 	.clear_intr = megasas_clear_intr_skinny,
775 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
776 	.adp_reset = megasas_adp_reset_gen2,
777 	.check_reset = megasas_check_reset_skinny,
778 	.service_isr = megasas_isr,
779 	.tasklet = megasas_complete_cmd_dpc,
780 	.init_adapter = megasas_init_adapter_mfi,
781 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
782 	.issue_dcmd = megasas_issue_dcmd,
783 };
784 
785 
786 /**
787 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
788 *	controllers
789 */
790 
791 /**
792  * megasas_enable_intr_gen2 -  Enables interrupts
793  * @instance:                     Adapter soft state
794  */
795 static inline void
796 megasas_enable_intr_gen2(struct megasas_instance *instance)
797 {
798 	struct megasas_register_set __iomem *regs;
799 
800 	regs = instance->reg_set;
801 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
802 
803 	/* write ~0x00000005 (bits 2 and 0) to the intr mask */
804 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
805 
806 	/* Dummy readl to force pci flush */
807 	readl(&regs->outbound_intr_mask);
808 }
809 
810 /**
811  * megasas_disable_intr_gen2 - Disables interrupt
812  * @instance:                     Adapter soft state
813  */
814 static inline void
815 megasas_disable_intr_gen2(struct megasas_instance *instance)
816 {
817 	struct megasas_register_set __iomem *regs;
818 	u32 mask = 0xFFFFFFFF;
819 
820 	regs = instance->reg_set;
821 	writel(mask, &regs->outbound_intr_mask);
822 	/* Dummy readl to force pci flush */
823 	readl(&regs->outbound_intr_mask);
824 }
825 
826 /**
827  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
828  * @regs:                      MFI register set
829  */
830 static u32
831 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
832 {
833 	return readl(&(regs)->outbound_scratch_pad);
834 }
835 
836 /**
837  * megasas_clear_intr_gen2 -           Check & clear interrupt
838  * @regs:                              MFI register set
839  */
840 static int
841 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
842 {
843 	u32 status;
844 	u32 mfiStatus = 0;
845 
846 	/*
847 	 * Check if it is our interrupt
848 	 */
849 	status = readl(&regs->outbound_intr_status);
850 
851 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
852 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
853 	}
854 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
855 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
856 	}
857 
858 	/*
859 	 * Clear the interrupt by writing back the same value
860 	 */
861 	if (mfiStatus)
862 		writel(status, &regs->outbound_doorbell_clear);
863 
864 	/* Dummy readl to force pci flush */
865 	readl(&regs->outbound_intr_status);
866 
867 	return mfiStatus;
868 }
869 /**
870  * megasas_fire_cmd_gen2 -     Sends command to the FW
871  * @frame_phys_addr :          Physical address of cmd
872  * @frame_count :              Number of frames for the command
873  * @regs :                     MFI register set
874  */
875 static inline void
876 megasas_fire_cmd_gen2(struct megasas_instance *instance,
877 			dma_addr_t frame_phys_addr,
878 			u32 frame_count,
879 			struct megasas_register_set __iomem *regs)
880 {
881 	unsigned long flags;
882 
883 	spin_lock_irqsave(&instance->hba_lock, flags);
884 	writel((frame_phys_addr | (frame_count<<1))|1,
885 			&(regs)->inbound_queue_port);
886 	spin_unlock_irqrestore(&instance->hba_lock, flags);
887 }
888 
889 /**
890  * megasas_adp_reset_gen2 -	For controller reset
891  * @regs:				MFI register set
892  */
893 static int
894 megasas_adp_reset_gen2(struct megasas_instance *instance,
895 			struct megasas_register_set __iomem *reg_set)
896 {
897 	u32 retry = 0 ;
898 	u32 HostDiag;
899 	u32 __iomem *seq_offset = &reg_set->seq_offset;
900 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
901 
902 	if (instance->instancet == &megasas_instance_template_skinny) {
903 		seq_offset = &reg_set->fusion_seq_offset;
904 		hostdiag_offset = &reg_set->fusion_host_diag;
905 	}
906 
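	/*
	 * Write the diagnostic unlock sequence so the host diag register
	 * becomes writable (DIAG_WRITE_ENABLE is polled for below).
	 */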
907 	writel(0, seq_offset);
908 	writel(4, seq_offset);
909 	writel(0xb, seq_offset);
910 	writel(2, seq_offset);
911 	writel(7, seq_offset);
912 	writel(0xd, seq_offset);
913 
914 	msleep(1000);
915 
916 	HostDiag = (u32)readl(hostdiag_offset);
917 
918 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
919 		msleep(100);
920 		HostDiag = (u32)readl(hostdiag_offset);
921 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
922 					retry, HostDiag);
923 
924 		if (retry++ >= 100)
925 			return 1;
926 
927 	}
928 
929 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
930 
931 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
932 
933 	ssleep(10);
934 
935 	HostDiag = (u32)readl(hostdiag_offset);
936 	while (HostDiag & DIAG_RESET_ADAPTER) {
937 		msleep(100);
938 		HostDiag = (u32)readl(hostdiag_offset);
939 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
940 				retry, HostDiag);
941 
942 		if (retry++ >= 1000)
943 			return 1;
944 
945 	}
946 	return 0;
947 }
948 
949 /**
950  * megasas_check_reset_gen2 -	For controller reset check
951  * @regs:				MFI register set
952  */
953 static int
954 megasas_check_reset_gen2(struct megasas_instance *instance,
955 		struct megasas_register_set __iomem *regs)
956 {
957 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
958 		return 1;
959 
960 	return 0;
961 }
962 
963 static struct megasas_instance_template megasas_instance_template_gen2 = {
964 
965 	.fire_cmd = megasas_fire_cmd_gen2,
966 	.enable_intr = megasas_enable_intr_gen2,
967 	.disable_intr = megasas_disable_intr_gen2,
968 	.clear_intr = megasas_clear_intr_gen2,
969 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
970 	.adp_reset = megasas_adp_reset_gen2,
971 	.check_reset = megasas_check_reset_gen2,
972 	.service_isr = megasas_isr,
973 	.tasklet = megasas_complete_cmd_dpc,
974 	.init_adapter = megasas_init_adapter_mfi,
975 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
976 	.issue_dcmd = megasas_issue_dcmd,
977 };
978 
979 /**
980 *	This is the end of set of functions & definitions
981 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
982 */
983 
984 /*
985  * Template added for TB (Fusion)
986  */
987 extern struct megasas_instance_template megasas_instance_template_fusion;
988 
989 /**
990  * megasas_issue_polled -	Issues a polling command
991  * @instance:			Adapter soft state
992  * @cmd:			Command packet to be issued
993  *
994  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
995  */
996 int
997 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
998 {
999 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1000 
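	/*
	 * The FW reports completion by updating cmd_status, which
	 * wait_and_poll() watches; the DONT_POST flag keeps the FW from
	 * posting this frame to the reply queue.
	 */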
1001 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1002 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1003 
1004 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1005 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1006 			__func__, __LINE__);
1007 		return DCMD_NOT_FIRED;
1008 	}
1009 
1010 	instance->instancet->issue_dcmd(instance, cmd);
1011 
1012 	return wait_and_poll(instance, cmd, instance->requestorId ?
1013 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1014 }
1015 
1016 /**
1017  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1018  * @instance:			Adapter soft state
1019  * @cmd:			Command to be issued
1020  * @timeout:			Timeout in seconds
1021  *
1022  * This function waits on an event for the command to be returned from ISR.
1023  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1024  * Used to issue ioctl commands.
1025  */
1026 int
1027 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1028 			  struct megasas_cmd *cmd, int timeout)
1029 {
1030 	int ret = 0;
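	/*
	 * cmd_status_drv is updated by the completion path; initialise it
	 * to the sentinel value the wait below checks against.
	 */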
1031 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1032 
1033 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1034 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1035 			__func__, __LINE__);
1036 		return DCMD_NOT_FIRED;
1037 	}
1038 
1039 	instance->instancet->issue_dcmd(instance, cmd);
1040 
1041 	if (timeout) {
1042 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1043 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1044 		if (!ret) {
1045 			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1046 				__func__, __LINE__);
1047 			return DCMD_TIMEOUT;
1048 		}
1049 	} else
1050 		wait_event(instance->int_cmd_wait_q,
1051 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1052 
1053 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1054 		DCMD_SUCCESS : DCMD_FAILED;
1055 }
1056 
1057 /**
1058  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1059  * @instance:				Adapter soft state
1060  * @cmd_to_abort:			Previously issued cmd to be aborted
1061  * @timeout:				Timeout in seconds
1062  *
1063  * MFI firmware can abort previously issued AEN command (automatic event
1064  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1065  * cmd and waits for return status.
1066  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1067  */
1068 static int
1069 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1070 				struct megasas_cmd *cmd_to_abort, int timeout)
1071 {
1072 	struct megasas_cmd *cmd;
1073 	struct megasas_abort_frame *abort_fr;
1074 	int ret = 0;
1075 
1076 	cmd = megasas_get_cmd(instance);
1077 
1078 	if (!cmd)
1079 		return -1;
1080 
1081 	abort_fr = &cmd->frame->abort;
1082 
1083 	/*
1084 	 * Prepare and issue the abort frame
1085 	 */
1086 	abort_fr->cmd = MFI_CMD_ABORT;
1087 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1088 	abort_fr->flags = cpu_to_le16(0);
1089 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1090 	abort_fr->abort_mfi_phys_addr_lo =
1091 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1092 	abort_fr->abort_mfi_phys_addr_hi =
1093 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1094 
1095 	cmd->sync_cmd = 1;
1096 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1097 
1098 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1099 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1100 			__func__, __LINE__);
1101 		return DCMD_NOT_FIRED;
1102 	}
1103 
1104 	instance->instancet->issue_dcmd(instance, cmd);
1105 
1106 	if (timeout) {
1107 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1108 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1109 		if (!ret) {
1110 			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1111 				__func__, __LINE__);
1112 			return DCMD_TIMEOUT;
1113 		}
1114 	} else
1115 		wait_event(instance->abort_cmd_wait_q,
1116 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1117 
1118 	cmd->sync_cmd = 0;
1119 
1120 	megasas_return_cmd(instance, cmd);
1121 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1122 		DCMD_SUCCESS : DCMD_FAILED;
1123 }
1124 
1125 /**
1126  * megasas_make_sgl32 -	Prepares 32-bit SGL
1127  * @instance:		Adapter soft state
1128  * @scp:		SCSI command from the mid-layer
1129  * @mfi_sgl:		SGL to be filled in
1130  *
1131  * If successful, this function returns the number of SG elements. Otherwise,
1132  * it returns -1.
1133  */
1134 static int
1135 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1136 		   union megasas_sgl *mfi_sgl)
1137 {
1138 	int i;
1139 	int sge_count;
1140 	struct scatterlist *os_sgl;
1141 
1142 	sge_count = scsi_dma_map(scp);
1143 	BUG_ON(sge_count < 0);
1144 
1145 	if (sge_count) {
1146 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1147 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1148 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1149 		}
1150 	}
1151 	return sge_count;
1152 }
1153 
1154 /**
1155  * megasas_make_sgl64 -	Prepares 64-bit SGL
1156  * @instance:		Adapter soft state
1157  * @scp:		SCSI command from the mid-layer
1158  * @mfi_sgl:		SGL to be filled in
1159  *
1160  * If successful, this function returns the number of SG elements. Otherwise,
1161  * it returns -1.
1162  */
1163 static int
1164 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1165 		   union megasas_sgl *mfi_sgl)
1166 {
1167 	int i;
1168 	int sge_count;
1169 	struct scatterlist *os_sgl;
1170 
1171 	sge_count = scsi_dma_map(scp);
1172 	BUG_ON(sge_count < 0);
1173 
1174 	if (sge_count) {
1175 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1176 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1177 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1178 		}
1179 	}
1180 	return sge_count;
1181 }
1182 
1183 /**
1184  * megasas_make_sgl_skinny - Prepares IEEE SGL
1185  * @instance:           Adapter soft state
1186  * @scp:                SCSI command from the mid-layer
1187  * @mfi_sgl:            SGL to be filled in
1188  *
1189  * If successful, this function returns the number of SG elements. Otherwise,
1190  * it returns -1.
1191  */
1192 static int
1193 megasas_make_sgl_skinny(struct megasas_instance *instance,
1194 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1195 {
1196 	int i;
1197 	int sge_count;
1198 	struct scatterlist *os_sgl;
1199 
1200 	sge_count = scsi_dma_map(scp);
1201 
1202 	if (sge_count) {
1203 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1204 			mfi_sgl->sge_skinny[i].length =
1205 				cpu_to_le32(sg_dma_len(os_sgl));
1206 			mfi_sgl->sge_skinny[i].phys_addr =
1207 				cpu_to_le64(sg_dma_address(os_sgl));
1208 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1209 		}
1210 	}
1211 	return sge_count;
1212 }
1213 
1214  /**
1215  * megasas_get_frame_count - Computes the number of frames
1216  * @frame_type		: type of frame- io or pthru frame
1217  * @sge_count		: number of sg elements
1218  *
1219  * Returns the number of frames required for the given number of SG elements (sge_count)
1220  */
1221 
1222 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1223 			u8 sge_count, u8 frame_type)
1224 {
1225 	int num_cnt;
1226 	int sge_bytes;
1227 	u32 sge_sz;
1228 	u32 frame_count = 0;
1229 
1230 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1231 	    sizeof(struct megasas_sge32);
1232 
1233 	if (instance->flag_ieee) {
1234 		sge_sz = sizeof(struct megasas_sge_skinny);
1235 	}
1236 
1237 	/*
1238 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1239 	 * 3 SGEs for 32-bit SGLs for ldio &
1240 	 * 1 SGEs for 64-bit SGLs and
1241 	 * 2 SGEs for 32-bit SGLs for pthru frame
1242 	 */
1243 	if (unlikely(frame_type == PTHRU_FRAME)) {
1244 		if (instance->flag_ieee == 1) {
1245 			num_cnt = sge_count - 1;
1246 		} else if (IS_DMA64)
1247 			num_cnt = sge_count - 1;
1248 		else
1249 			num_cnt = sge_count - 2;
1250 	} else {
1251 		if (instance->flag_ieee == 1) {
1252 			num_cnt = sge_count - 1;
1253 		} else if (IS_DMA64)
1254 			num_cnt = sge_count - 2;
1255 		else
1256 			num_cnt = sge_count - 3;
1257 	}
1258 
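	/*
	 * num_cnt is now the number of SGEs that do not fit in the main
	 * frame and must be carried in additional MFI frames.
	 */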
1259 	if (num_cnt > 0) {
1260 		sge_bytes = sge_sz * num_cnt;
1261 
1262 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1263 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1264 	}
1265 	/* Main frame */
1266 	frame_count += 1;
1267 
1268 	if (frame_count > 7)
1269 		frame_count = 8;
1270 	return frame_count;
1271 }
1272 
1273 /**
1274  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1275  * @instance:		Adapter soft state
1276  * @scp:		SCSI command
1277  * @cmd:		Command to be prepared in
1278  *
1279  * This function prepares CDB commands. These are typically pass-through
1280  * commands to the devices.
1281  */
1282 static int
1283 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1284 		   struct megasas_cmd *cmd)
1285 {
1286 	u32 is_logical;
1287 	u32 device_id;
1288 	u16 flags = 0;
1289 	struct megasas_pthru_frame *pthru;
1290 
1291 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1292 	device_id = MEGASAS_DEV_INDEX(scp);
1293 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1294 
1295 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1296 		flags = MFI_FRAME_DIR_WRITE;
1297 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1298 		flags = MFI_FRAME_DIR_READ;
1299 	else if (scp->sc_data_direction == PCI_DMA_NONE)
1300 		flags = MFI_FRAME_DIR_NONE;
1301 
1302 	if (instance->flag_ieee == 1) {
1303 		flags |= MFI_FRAME_IEEE;
1304 	}
1305 
1306 	/*
1307 	 * Prepare the DCDB frame
1308 	 */
1309 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1310 	pthru->cmd_status = 0x0;
1311 	pthru->scsi_status = 0x0;
1312 	pthru->target_id = device_id;
1313 	pthru->lun = scp->device->lun;
1314 	pthru->cdb_len = scp->cmd_len;
1315 	pthru->timeout = 0;
1316 	pthru->pad_0 = 0;
1317 	pthru->flags = cpu_to_le16(flags);
1318 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1319 
1320 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1321 
1322 	/*
1323 	 * If the command is for the tape device, set the
1324 	 * pthru timeout to the os layer timeout value.
1325 	 */
1326 	if (scp->device->type == TYPE_TAPE) {
1327 		if ((scp->request->timeout / HZ) > 0xFFFF)
1328 			pthru->timeout = cpu_to_le16(0xFFFF);
1329 		else
1330 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1331 	}
1332 
1333 	/*
1334 	 * Construct SGL
1335 	 */
1336 	if (instance->flag_ieee == 1) {
1337 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1338 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1339 						      &pthru->sgl);
1340 	} else if (IS_DMA64) {
1341 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1342 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1343 						      &pthru->sgl);
1344 	} else
1345 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1346 						      &pthru->sgl);
1347 
1348 	if (pthru->sge_count > instance->max_num_sge) {
1349 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1350 			pthru->sge_count);
1351 		return 0;
1352 	}
1353 
1354 	/*
1355 	 * Sense info specific
1356 	 */
1357 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1358 	pthru->sense_buf_phys_addr_hi =
1359 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1360 	pthru->sense_buf_phys_addr_lo =
1361 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1362 
1363 	/*
1364 	 * Compute the total number of frames this command consumes. FW uses
1365 	 * this number to pull sufficient number of frames from host memory.
1366 	 */
1367 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1368 							PTHRU_FRAME);
1369 
1370 	return cmd->frame_count;
1371 }
1372 
1373 /**
1374  * megasas_build_ldio -	Prepares IOs to logical devices
1375  * @instance:		Adapter soft state
1376  * @scp:		SCSI command
1377  * @cmd:		Command to be prepared
1378  *
1379  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1380  */
1381 static int
1382 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1383 		   struct megasas_cmd *cmd)
1384 {
1385 	u32 device_id;
1386 	u8 sc = scp->cmnd[0];
1387 	u16 flags = 0;
1388 	struct megasas_io_frame *ldio;
1389 
1390 	device_id = MEGASAS_DEV_INDEX(scp);
1391 	ldio = (struct megasas_io_frame *)cmd->frame;
1392 
1393 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1394 		flags = MFI_FRAME_DIR_WRITE;
1395 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1396 		flags = MFI_FRAME_DIR_READ;
1397 
1398 	if (instance->flag_ieee == 1) {
1399 		flags |= MFI_FRAME_IEEE;
1400 	}
1401 
1402 	/*
1403 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1404 	 */
1405 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1406 	ldio->cmd_status = 0x0;
1407 	ldio->scsi_status = 0x0;
1408 	ldio->target_id = device_id;
1409 	ldio->timeout = 0;
1410 	ldio->reserved_0 = 0;
1411 	ldio->pad_0 = 0;
1412 	ldio->flags = cpu_to_le16(flags);
1413 	ldio->start_lba_hi = 0;
1414 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1415 
1416 	/*
1417 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1418 	 */
1419 	if (scp->cmd_len == 6) {
1420 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1421 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1422 						 ((u32) scp->cmnd[2] << 8) |
1423 						 (u32) scp->cmnd[3]);
1424 
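		/* 6-byte CDBs carry only a 21-bit LBA */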
1425 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1426 	}
1427 
1428 	/*
1429 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1430 	 */
1431 	else if (scp->cmd_len == 10) {
1432 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1433 					      ((u32) scp->cmnd[7] << 8));
1434 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1435 						 ((u32) scp->cmnd[3] << 16) |
1436 						 ((u32) scp->cmnd[4] << 8) |
1437 						 (u32) scp->cmnd[5]);
1438 	}
1439 
1440 	/*
1441 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1442 	 */
1443 	else if (scp->cmd_len == 12) {
1444 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1445 					      ((u32) scp->cmnd[7] << 16) |
1446 					      ((u32) scp->cmnd[8] << 8) |
1447 					      (u32) scp->cmnd[9]);
1448 
1449 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1450 						 ((u32) scp->cmnd[3] << 16) |
1451 						 ((u32) scp->cmnd[4] << 8) |
1452 						 (u32) scp->cmnd[5]);
1453 	}
1454 
1455 	/*
1456 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1457 	 */
1458 	else if (scp->cmd_len == 16) {
1459 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1460 					      ((u32) scp->cmnd[11] << 16) |
1461 					      ((u32) scp->cmnd[12] << 8) |
1462 					      (u32) scp->cmnd[13]);
1463 
1464 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1465 						 ((u32) scp->cmnd[7] << 16) |
1466 						 ((u32) scp->cmnd[8] << 8) |
1467 						 (u32) scp->cmnd[9]);
1468 
1469 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1470 						 ((u32) scp->cmnd[3] << 16) |
1471 						 ((u32) scp->cmnd[4] << 8) |
1472 						 (u32) scp->cmnd[5]);
1473 
1474 	}
1475 
1476 	/*
1477 	 * Construct SGL
1478 	 */
1479 	if (instance->flag_ieee) {
1480 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1481 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1482 					      &ldio->sgl);
1483 	} else if (IS_DMA64) {
1484 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1485 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1486 	} else
1487 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1488 
1489 	if (ldio->sge_count > instance->max_num_sge) {
1490 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1491 			ldio->sge_count);
1492 		return 0;
1493 	}
1494 
1495 	/*
1496 	 * Sense info specific
1497 	 */
1498 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1499 	ldio->sense_buf_phys_addr_hi = 0;
1500 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1501 
1502 	/*
1503 	 * Compute the total number of frames this command consumes. FW uses
1504 	 * this number to pull sufficient number of frames from host memory.
1505 	 */
1506 	cmd->frame_count = megasas_get_frame_count(instance,
1507 			ldio->sge_count, IO_FRAME);
1508 
1509 	return cmd->frame_count;
1510 }
1511 
1512 /**
1513  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1514  *				and whether it's RW or non RW
1515  * @cmd:			SCSI command
1516  *
1517  */
1518 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1519 {
1520 	int ret;
1521 
1522 	switch (cmd->cmnd[0]) {
1523 	case READ_10:
1524 	case WRITE_10:
1525 	case READ_12:
1526 	case WRITE_12:
1527 	case READ_6:
1528 	case WRITE_6:
1529 	case READ_16:
1530 	case WRITE_16:
1531 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1532 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1533 		break;
1534 	default:
1535 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1536 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1537 	}
1538 	return ret;
1539 }
1540 
1541  /**
1542  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1543  *					in FW
1544  * @instance:				Adapter soft state
1545  */
1546 static inline void
1547 megasas_dump_pending_frames(struct megasas_instance *instance)
1548 {
1549 	struct megasas_cmd *cmd;
1550 	int i,n;
1551 	union megasas_sgl *mfi_sgl;
1552 	struct megasas_io_frame *ldio;
1553 	struct megasas_pthru_frame *pthru;
1554 	u32 sgcount;
1555 	u16 max_cmd = instance->max_fw_cmds;
1556 
1557 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1558 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1559 	if (IS_DMA64)
1560 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1561 	else
1562 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1563 
1564 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1565 	for (i = 0; i < max_cmd; i++) {
1566 		cmd = instance->cmd_list[i];
1567 		if (!cmd->scmd)
1568 			continue;
1569 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1570 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1571 			ldio = (struct megasas_io_frame *)cmd->frame;
1572 			mfi_sgl = &ldio->sgl;
1573 			sgcount = ldio->sge_count;
1574 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1575 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1576 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1577 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1578 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1579 		} else {
1580 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1581 			mfi_sgl = &pthru->sgl;
1582 			sgcount = pthru->sge_count;
1583 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1584 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1585 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1586 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1587 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1588 		}
1589 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1590 			for (n = 0; n < sgcount; n++) {
1591 				if (IS_DMA64)
1592 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1593 						le32_to_cpu(mfi_sgl->sge64[n].length),
1594 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1595 				else
1596 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1597 						le32_to_cpu(mfi_sgl->sge32[n].length),
1598 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1599 			}
1600 		}
1601 	} /*for max_cmd*/
1602 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1603 	for (i = 0; i < max_cmd; i++) {
1604 
1605 		cmd = instance->cmd_list[i];
1606 
1607 		if (cmd->sync_cmd == 1)
1608 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1609 	}
1610 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1611 }
1612 
1613 u32
1614 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1615 			    struct scsi_cmnd *scmd)
1616 {
1617 	struct megasas_cmd *cmd;
1618 	u32 frame_count;
1619 
1620 	cmd = megasas_get_cmd(instance);
1621 	if (!cmd)
1622 		return SCSI_MLQUEUE_HOST_BUSY;
1623 
1624 	/*
1625 	 * Logical drive command
1626 	 */
1627 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1628 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1629 	else
1630 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1631 
1632 	if (!frame_count)
1633 		goto out_return_cmd;
1634 
1635 	cmd->scmd = scmd;
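	/*
	 * Cross-link the mid-layer and driver commands; SCp.ptr lets the
	 * driver command be recovered from the scmd later.
	 */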
1636 	scmd->SCp.ptr = (char *)cmd;
1637 
1638 	/*
1639 	 * Issue the command to the FW
1640 	 */
1641 	atomic_inc(&instance->fw_outstanding);
1642 
1643 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1644 				cmd->frame_count-1, instance->reg_set);
1645 
1646 	return 0;
1647 out_return_cmd:
1648 	megasas_return_cmd(instance, cmd);
1649 	return SCSI_MLQUEUE_HOST_BUSY;
1650 }
1651 
1652 
1653 /**
1654  * megasas_queue_command -	Queue entry point
1655  * @shost:			SCSI host from the mid-layer
1656  * @scmd:			SCSI command to be queued
1657  */
1658 static int
1659 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1660 {
1661 	struct megasas_instance *instance;
1662 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1663 
1664 	instance = (struct megasas_instance *)
1665 	    scmd->device->host->hostdata;
1666 
1667 	if (instance->unload == 1) {
1668 		scmd->result = DID_NO_CONNECT << 16;
1669 		scmd->scsi_done(scmd);
1670 		return 0;
1671 	}
1672 
1673 	if (instance->issuepend_done == 0)
1674 		return SCSI_MLQUEUE_HOST_BUSY;
1675 
1676 
1677 	/* Check for an mpio path and adjust behavior */
1678 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1679 		if (megasas_check_mpio_paths(instance, scmd) ==
1680 		    (DID_REQUEUE << 16)) {
1681 			return SCSI_MLQUEUE_HOST_BUSY;
1682 		} else {
1683 			scmd->result = DID_NO_CONNECT << 16;
1684 			scmd->scsi_done(scmd);
1685 			return 0;
1686 		}
1687 	}
1688 
1689 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1690 		scmd->result = DID_NO_CONNECT << 16;
1691 		scmd->scsi_done(scmd);
1692 		return 0;
1693 	}
1694 
1695 	mr_device_priv_data = scmd->device->hostdata;
1696 	if (!mr_device_priv_data) {
1697 		scmd->result = DID_NO_CONNECT << 16;
1698 		scmd->scsi_done(scmd);
1699 		return 0;
1700 	}
1701 
1702 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1703 		return SCSI_MLQUEUE_HOST_BUSY;
1704 
1705 	if (mr_device_priv_data->tm_busy)
1706 		return SCSI_MLQUEUE_DEVICE_BUSY;
1707 
1708 
1709 	scmd->result = 0;
1710 
1711 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1712 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1713 		scmd->device->lun)) {
1714 		scmd->result = DID_BAD_TARGET << 16;
1715 		goto out_done;
1716 	}
1717 
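	/*
	 * If the FW does not expose SYNCHRONIZE_CACHE support for logical
	 * drives, complete the command here instead of sending it down.
	 */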
1718 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1719 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1720 	    (!instance->fw_sync_cache_support)) {
1721 		scmd->result = DID_OK << 16;
1722 		goto out_done;
1723 	}
1724 
1725 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1726 
1727  out_done:
1728 	scmd->scsi_done(scmd);
1729 	return 0;
1730 }
1731 
1732 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1733 {
1734 	int i;
1735 
1736 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1737 
1738 		if ((megasas_mgmt_info.instance[i]) &&
1739 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1740 			return megasas_mgmt_info.instance[i];
1741 	}
1742 
1743 	return NULL;
1744 }
1745 
1746 /*
1747 * megasas_set_dynamic_target_properties -
1748 * Device properties set by the driver may not be static and need to be
1749 * updated after an OCR.
1750 *
1751 * set tm_capable.
1752 * set dma alignment (only for EEDP protection enabled VDs).
1753 *
1754 * @sdev: OS provided scsi device
1755 *
1756 * Returns void
1757 */
1758 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1759 {
1760 	u16 pd_index = 0, ld;
1761 	u32 device_id;
1762 	struct megasas_instance *instance;
1763 	struct fusion_context *fusion;
1764 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1765 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1766 	struct MR_LD_RAID *raid;
1767 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1768 
1769 	instance = megasas_lookup_instance(sdev->host->host_no);
1770 	fusion = instance->ctrl_context;
1771 	mr_device_priv_data = sdev->hostdata;
1772 
1773 	if (!fusion || !mr_device_priv_data)
1774 		return;
1775 
1776 	if (MEGASAS_IS_LOGICAL(sdev)) {
1777 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1778 					+ sdev->id;
1779 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1780 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1781 		if (ld >= instance->fw_supported_vd_count)
1782 			return;
1783 		raid = MR_LdRaidGet(ld, local_map_ptr);
1784 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1787 
1788 		mr_device_priv_data->is_tm_capable =
1789 			raid->capability.tmCapable;
1790 	} else if (instance->use_seqnum_jbod_fp) {
1791 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1792 			sdev->id;
1793 		pd_sync = (void *)fusion->pd_seq_sync
1794 				[(instance->pd_seq_map_id - 1) & 1];
1795 		mr_device_priv_data->is_tm_capable =
1796 			pd_sync->seq[pd_index].capability.tmCapable;
1797 	}
1798 }
1799 
1800 /*
1801  * megasas_set_nvme_device_properties -
1802  * set nomerges=2
1803  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1804  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1805  *
1806  * MR firmware provides value in KB. Caller of this function converts
1807  * kb into bytes.
1808  *
1809  * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1810  * MR firmware provides value 128 as (32 * 4K) = 128K.
1811  *
1812  * @sdev:				scsi device
1813  * @max_io_size:				maximum io transfer size
1814  *
1815  */
1816 static inline void
1817 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1818 {
1819 	struct megasas_instance *instance;
1820 	u32 mr_nvme_pg_size;
1821 
1822 	instance = (struct megasas_instance *)sdev->host->hostdata;
1823 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1824 				MR_DEFAULT_NVME_PAGE_SIZE);
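	/*
	 * max_io_size arrives in bytes (the caller converts the FW's KB
	 * value); dividing by 512 programs the limit in 512-byte sectors,
	 * e.g. 128K -> 256 sectors.
	 */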
1825 
1826 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1827 
1828 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1829 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1830 }
1831 
1832 
1833 /*
1834  * megasas_set_static_target_properties -
1835  * Device property set by driver are static and it is not required to be
1836  * updated after OCR.
1837  *
1838  * set io timeout
1839  * set device queue depth
1840  * set nvme device properties. see - megasas_set_nvme_device_properties
1841  *
1842  * @sdev:				scsi device
1843  * @is_target_prop			true, if fw provided target properties.
1844  */
1845 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1846 						 bool is_target_prop)
1847 {
1848 	u16	target_index = 0;
1849 	u8 interface_type;
1850 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1851 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1852 	u32 tgt_device_qd;
1853 	struct megasas_instance *instance;
1854 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1855 
1856 	instance = megasas_lookup_instance(sdev->host->host_no);
1857 	mr_device_priv_data = sdev->hostdata;
1858 	interface_type  = mr_device_priv_data->interface_type;
1859 
1860 	/*
1861 	 * The RAID firmware may require extended timeouts.
1862 	 */
1863 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1864 
1865 	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1866 
1867 	switch (interface_type) {
1868 	case SAS_PD:
1869 		device_qd = MEGASAS_SAS_QD;
1870 		break;
1871 	case SATA_PD:
1872 		device_qd = MEGASAS_SATA_QD;
1873 		break;
1874 	case NVME_PD:
1875 		device_qd = MEGASAS_NVME_QD;
1876 		break;
1877 	}
1878 
1879 	if (is_target_prop) {
1880 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1881 		if (tgt_device_qd &&
1882 		    (tgt_device_qd <= instance->host->can_queue))
1883 			device_qd = tgt_device_qd;
1884 
1885 		/* max_io_size_kb will be set to non zero for
1886 		 * nvme based vd and syspd.
1887 		 */
1888 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1889 	}
1890 
1891 	if (instance->nvme_page_size && max_io_size_kb)
1892 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1893 
1894 	scsi_change_queue_depth(sdev, device_qd);
1895 
1896 }
1897 
1898 
1899 static int megasas_slave_configure(struct scsi_device *sdev)
1900 {
1901 	u16 pd_index = 0;
1902 	struct megasas_instance *instance;
1903 	int ret_target_prop = DCMD_FAILED;
1904 	bool is_target_prop = false;
1905 
1906 	instance = megasas_lookup_instance(sdev->host->host_no);
1907 	if (instance->pd_list_not_supported) {
1908 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1909 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1910 				sdev->id;
1911 			if (instance->pd_list[pd_index].driveState !=
1912 				MR_PD_STATE_SYSTEM)
1913 				return -ENXIO;
1914 		}
1915 	}
1916 
1917 	mutex_lock(&instance->hba_mutex);
1918 	/* Send DCMD to Firmware and cache the information */
1919 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1920 		megasas_get_pd_info(instance, sdev);
1921 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
	 */
1925 	if ((instance->tgt_prop) && (instance->nvme_page_size))
1926 		ret_target_prop = megasas_get_target_prop(instance, sdev);
1927 
1928 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1929 	megasas_set_static_target_properties(sdev, is_target_prop);
1930 
1931 	mutex_unlock(&instance->hba_mutex);
1932 
1933 	/* This sdev property may change post OCR */
1934 	megasas_set_dynamic_target_properties(sdev);
1935 
1936 	return 0;
1937 }
1938 
1939 static int megasas_slave_alloc(struct scsi_device *sdev)
1940 {
1941 	u16 pd_index = 0;
1942 	struct megasas_instance *instance ;
1943 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1944 
1945 	instance = megasas_lookup_instance(sdev->host->host_no);
1946 	if (!MEGASAS_IS_LOGICAL(sdev)) {
1947 		/*
1948 		 * Open the OS scan to the SYSTEM PD
1949 		 */
1950 		pd_index =
1951 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1952 			sdev->id;
1953 		if ((instance->pd_list_not_supported ||
1954 			instance->pd_list[pd_index].driveState ==
1955 			MR_PD_STATE_SYSTEM)) {
1956 			goto scan_target;
1957 		}
1958 		return -ENXIO;
1959 	}
1960 
1961 scan_target:
1962 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1963 					GFP_KERNEL);
1964 	if (!mr_device_priv_data)
1965 		return -ENOMEM;
1966 	sdev->hostdata = mr_device_priv_data;
1967 
1968 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
1969 		   instance->r1_ldio_hint_default);
1970 	return 0;
1971 }
1972 
1973 static void megasas_slave_destroy(struct scsi_device *sdev)
1974 {
1975 	kfree(sdev->hostdata);
1976 	sdev->hostdata = NULL;
1977 }
1978 
1979 /*
1980 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
1981 *                                       kill adapter
1982 * @instance:				Adapter soft state
1983 *
1984 */
1985 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1986 {
1987 	int i;
1988 	struct megasas_cmd *cmd_mfi;
1989 	struct megasas_cmd_fusion *cmd_fusion;
1990 	struct fusion_context *fusion = instance->ctrl_context;
1991 
1992 	/* Find all outstanding ioctls */
1993 	if (fusion) {
1994 		for (i = 0; i < instance->max_fw_cmds; i++) {
1995 			cmd_fusion = fusion->cmd_list[i];
1996 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1997 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1998 				if (cmd_mfi->sync_cmd &&
1999 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2000 					cmd_mfi->frame->hdr.cmd_status =
2001 							MFI_STAT_WRONG_STATE;
2002 					megasas_complete_cmd(instance,
2003 							     cmd_mfi, DID_OK);
2004 				}
2005 			}
2006 		}
2007 	} else {
2008 		for (i = 0; i < instance->max_fw_cmds; i++) {
2009 			cmd_mfi = instance->cmd_list[i];
2010 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2011 				MFI_CMD_ABORT)
2012 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2013 		}
2014 	}
2015 }
2016 
2017 
2018 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2019 {
2020 	/* Set critical error to block I/O & ioctls in case caller didn't */
2021 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2022 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2023 	msleep(1000);
2024 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2025 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2026 		(instance->ctrl_context)) {
2027 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2028 		/* Flush */
2029 		readl(&instance->reg_set->doorbell);
2030 		if (instance->requestorId && instance->peerIsPresent)
2031 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2032 	} else {
2033 		writel(MFI_STOP_ADP,
2034 			&instance->reg_set->inbound_doorbell);
2035 	}
2036 	/* Complete outstanding ioctls when adapter is killed */
2037 	megasas_complete_outstanding_ioctls(instance);
2038 }
2039 
2040  /**
2041   * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2042   *					restored to max value
2043   * @instance:			Adapter soft state
2044   *
2045   */
2046 void
2047 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2048 {
2049 	unsigned long flags;
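	/*
	 * Lift the throttle only after the adapter has been flagged busy for
	 * at least 5 seconds and outstanding I/O has drained to the throttled
	 * queue depth or below.
	 */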
2050 
2051 	if (instance->flag & MEGASAS_FW_BUSY
2052 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2053 	    && atomic_read(&instance->fw_outstanding) <
2054 	    instance->throttlequeuedepth + 1) {
2055 
2056 		spin_lock_irqsave(instance->host->host_lock, flags);
2057 		instance->flag &= ~MEGASAS_FW_BUSY;
2058 
2059 		instance->host->can_queue = instance->cur_can_queue;
2060 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2061 	}
2062 }
2063 
2064 /**
 * megasas_complete_cmd_dpc	 -	Completes commands from the reply queue
2066  * @instance_addr:			Address of adapter soft state
2067  *
2068  * Tasklet to complete cmds
2069  */
2070 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2071 {
2072 	u32 producer;
2073 	u32 consumer;
2074 	u32 context;
2075 	struct megasas_cmd *cmd;
2076 	struct megasas_instance *instance =
2077 				(struct megasas_instance *)instance_addr;
2078 	unsigned long flags;
2079 
	/* If we have already declared adapter dead, do not complete cmds */
2081 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2082 		return;
2083 
2084 	spin_lock_irqsave(&instance->completion_lock, flags);
2085 
2086 	producer = le32_to_cpu(*instance->producer);
2087 	consumer = le32_to_cpu(*instance->consumer);
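	/*
	 * The reply queue is a ring of max_fw_cmds + 1 context slots; walk
	 * from consumer to producer and complete the MFI command whose index
	 * is stored in each slot, wrapping at the end of the ring.
	 */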
2088 
2089 	while (consumer != producer) {
2090 		context = le32_to_cpu(instance->reply_queue[consumer]);
2091 		if (context >= instance->max_fw_cmds) {
2092 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2093 				context);
2094 			BUG();
2095 		}
2096 
2097 		cmd = instance->cmd_list[context];
2098 
2099 		megasas_complete_cmd(instance, cmd, DID_OK);
2100 
2101 		consumer++;
2102 		if (consumer == (instance->max_fw_cmds + 1)) {
2103 			consumer = 0;
2104 		}
2105 	}
2106 
2107 	*instance->consumer = cpu_to_le32(producer);
2108 
2109 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2110 
2111 	/*
2112 	 * Check if we can restore can_queue
2113 	 */
2114 	megasas_check_and_restore_queue_depth(instance);
2115 }
2116 
2117 /**
2118  * megasas_start_timer - Initializes a timer object
2119  * @instance:		Adapter soft state
2120  * @timer:		timer object to be initialized
2121  * @fn:			timer function
2122  * @interval:		time interval between timer function call
2123  *
2124  */
2125 void megasas_start_timer(struct megasas_instance *instance,
2126 			struct timer_list *timer,
2127 			void *fn, unsigned long interval)
2128 {
2129 	init_timer(timer);
2130 	timer->expires = jiffies + interval;
2131 	timer->data = (unsigned long)instance;
2132 	timer->function = fn;
2133 	add_timer(timer);
2134 }
2135 
2136 static void
2137 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2138 
2139 static void
2140 process_fw_state_change_wq(struct work_struct *work);
2141 
2142 void megasas_do_ocr(struct megasas_instance *instance)
2143 {
2144 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2145 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2146 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2147 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2148 	}
2149 	instance->instancet->disable_intr(instance);
2150 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2151 	instance->issuepend_done = 0;
2152 
2153 	atomic_set(&instance->fw_outstanding, 0);
2154 	megasas_internal_reset_defer_cmds(instance);
2155 	process_fw_state_change_wq(&instance->work_init);
2156 }
2157 
2158 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2159 					    int initial)
2160 {
2161 	struct megasas_cmd *cmd;
2162 	struct megasas_dcmd_frame *dcmd;
2163 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2164 	dma_addr_t new_affiliation_111_h;
2165 	int ld, retval = 0;
2166 	u8 thisVf;
2167 
2168 	cmd = megasas_get_cmd(instance);
2169 
2170 	if (!cmd) {
2171 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2172 		       "Failed to get cmd for scsi%d\n",
2173 			instance->host->host_no);
2174 		return -ENOMEM;
2175 	}
2176 
2177 	dcmd = &cmd->frame->dcmd;
2178 
2179 	if (!instance->vf_affiliation_111) {
2180 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2181 		       "affiliation for scsi%d\n", instance->host->host_no);
2182 		megasas_return_cmd(instance, cmd);
2183 		return -ENOMEM;
2184 	}
2185 
	if (initial)
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2189 	else {
2190 		new_affiliation_111 =
2191 			pci_alloc_consistent(instance->pdev,
2192 					     sizeof(struct MR_LD_VF_AFFILIATION_111),
2193 					     &new_affiliation_111_h);
2194 		if (!new_affiliation_111) {
2195 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2196 			       "memory for new affiliation for scsi%d\n",
2197 			       instance->host->host_no);
2198 			megasas_return_cmd(instance, cmd);
2199 			return -ENOMEM;
2200 		}
2201 		memset(new_affiliation_111, 0,
2202 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2203 	}
2204 
2205 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2206 
2207 	dcmd->cmd = MFI_CMD_DCMD;
2208 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2209 	dcmd->sge_count = 1;
2210 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2211 	dcmd->timeout = 0;
2212 	dcmd->pad_0 = 0;
2213 	dcmd->data_xfer_len =
2214 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2215 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2216 
2217 	if (initial)
2218 		dcmd->sgl.sge32[0].phys_addr =
2219 			cpu_to_le32(instance->vf_affiliation_111_h);
2220 	else
2221 		dcmd->sgl.sge32[0].phys_addr =
2222 			cpu_to_le32(new_affiliation_111_h);
2223 
2224 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2225 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2226 
2227 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2228 	       "scsi%d\n", instance->host->host_no);
2229 
2230 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2231 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2232 		       " failed with status 0x%x for scsi%d\n",
2233 		       dcmd->cmd_status, instance->host->host_no);
2234 		retval = 1; /* Do a scan if we couldn't get affiliation */
2235 		goto out;
2236 	}
2237 
2238 	if (!initial) {
2239 		thisVf = new_affiliation_111->thisVf;
2240 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2241 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2242 			    new_affiliation_111->map[ld].policy[thisVf]) {
2243 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2244 				       "Got new LD/VF affiliation for scsi%d\n",
2245 				       instance->host->host_no);
2246 				memcpy(instance->vf_affiliation_111,
2247 				       new_affiliation_111,
2248 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2249 				retval = 1;
2250 				goto out;
2251 			}
2252 	}
2253 out:
2254 	if (new_affiliation_111) {
2255 		pci_free_consistent(instance->pdev,
2256 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2257 				    new_affiliation_111,
2258 				    new_affiliation_111_h);
2259 	}
2260 
2261 	megasas_return_cmd(instance, cmd);
2262 
2263 	return retval;
2264 }
2265 
2266 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2267 					    int initial)
2268 {
2269 	struct megasas_cmd *cmd;
2270 	struct megasas_dcmd_frame *dcmd;
2271 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2272 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2273 	dma_addr_t new_affiliation_h;
2274 	int i, j, retval = 0, found = 0, doscan = 0;
2275 	u8 thisVf;
2276 
2277 	cmd = megasas_get_cmd(instance);
2278 
2279 	if (!cmd) {
2280 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2281 		       "Failed to get cmd for scsi%d\n",
2282 		       instance->host->host_no);
2283 		return -ENOMEM;
2284 	}
2285 
2286 	dcmd = &cmd->frame->dcmd;
2287 
2288 	if (!instance->vf_affiliation) {
2289 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2290 		       "affiliation for scsi%d\n", instance->host->host_no);
2291 		megasas_return_cmd(instance, cmd);
2292 		return -ENOMEM;
2293 	}
2294 
2295 	if (initial)
2296 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2297 		       sizeof(struct MR_LD_VF_AFFILIATION));
2298 	else {
2299 		new_affiliation =
2300 			pci_alloc_consistent(instance->pdev,
2301 					     (MAX_LOGICAL_DRIVES + 1) *
2302 					     sizeof(struct MR_LD_VF_AFFILIATION),
2303 					     &new_affiliation_h);
2304 		if (!new_affiliation) {
2305 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2306 			       "memory for new affiliation for scsi%d\n",
2307 			       instance->host->host_no);
2308 			megasas_return_cmd(instance, cmd);
2309 			return -ENOMEM;
2310 		}
2311 		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2312 		       sizeof(struct MR_LD_VF_AFFILIATION));
2313 	}
2314 
2315 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2316 
2317 	dcmd->cmd = MFI_CMD_DCMD;
2318 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2319 	dcmd->sge_count = 1;
2320 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2321 	dcmd->timeout = 0;
2322 	dcmd->pad_0 = 0;
2323 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2324 		sizeof(struct MR_LD_VF_AFFILIATION));
2325 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2326 
2327 	if (initial)
2328 		dcmd->sgl.sge32[0].phys_addr =
2329 			cpu_to_le32(instance->vf_affiliation_h);
2330 	else
2331 		dcmd->sgl.sge32[0].phys_addr =
2332 			cpu_to_le32(new_affiliation_h);
2333 
2334 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2335 		sizeof(struct MR_LD_VF_AFFILIATION));
2336 
2337 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2338 	       "scsi%d\n", instance->host->host_no);
2339 
2340 
2341 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2342 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2343 		       " failed with status 0x%x for scsi%d\n",
2344 		       dcmd->cmd_status, instance->host->host_no);
2345 		retval = 1; /* Do a scan if we couldn't get affiliation */
2346 		goto out;
2347 	}
2348 
2349 	if (!initial) {
2350 		if (!new_affiliation->ldCount) {
2351 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2352 			       "affiliation for passive path for scsi%d\n",
2353 			       instance->host->host_no);
2354 			retval = 1;
2355 			goto out;
2356 		}
2357 		newmap = new_affiliation->map;
2358 		savedmap = instance->vf_affiliation->map;
2359 		thisVf = new_affiliation->thisVf;
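		/*
		 * Pass 1: walk the new map. A rescan is needed if any LD's
		 * policy toward this VF changed, or if an LD that is not
		 * hidden appears in the new map but not in the saved one.
		 */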
2360 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2361 			found = 0;
2362 			for (j = 0; j < instance->vf_affiliation->ldCount;
2363 			     j++) {
2364 				if (newmap->ref.targetId ==
2365 				    savedmap->ref.targetId) {
2366 					found = 1;
2367 					if (newmap->policy[thisVf] !=
2368 					    savedmap->policy[thisVf]) {
2369 						doscan = 1;
2370 						goto out;
2371 					}
2372 				}
2373 				savedmap = (struct MR_LD_VF_MAP *)
2374 					((unsigned char *)savedmap +
2375 					 savedmap->size);
2376 			}
2377 			if (!found && newmap->policy[thisVf] !=
2378 			    MR_LD_ACCESS_HIDDEN) {
2379 				doscan = 1;
2380 				goto out;
2381 			}
2382 			newmap = (struct MR_LD_VF_MAP *)
2383 				((unsigned char *)newmap + newmap->size);
2384 		}
2385 
2386 		newmap = new_affiliation->map;
2387 		savedmap = instance->vf_affiliation->map;
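		/*
		 * Pass 2: walk the saved map to catch LDs that disappeared
		 * from the new map or whose policy changed.
		 */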
2388 
2389 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2390 			found = 0;
2391 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2392 				if (savedmap->ref.targetId ==
2393 				    newmap->ref.targetId) {
2394 					found = 1;
2395 					if (savedmap->policy[thisVf] !=
2396 					    newmap->policy[thisVf]) {
2397 						doscan = 1;
2398 						goto out;
2399 					}
2400 				}
2401 				newmap = (struct MR_LD_VF_MAP *)
2402 					((unsigned char *)newmap +
2403 					 newmap->size);
2404 			}
2405 			if (!found && savedmap->policy[thisVf] !=
2406 			    MR_LD_ACCESS_HIDDEN) {
2407 				doscan = 1;
2408 				goto out;
2409 			}
2410 			savedmap = (struct MR_LD_VF_MAP *)
2411 				((unsigned char *)savedmap +
2412 				 savedmap->size);
2413 		}
2414 	}
2415 out:
2416 	if (doscan) {
2417 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2418 		       "affiliation for scsi%d\n", instance->host->host_no);
2419 		memcpy(instance->vf_affiliation, new_affiliation,
2420 		       new_affiliation->size);
2421 		retval = 1;
2422 	}
2423 
2424 	if (new_affiliation)
2425 		pci_free_consistent(instance->pdev,
2426 				    (MAX_LOGICAL_DRIVES + 1) *
2427 				    sizeof(struct MR_LD_VF_AFFILIATION),
2428 				    new_affiliation, new_affiliation_h);
2429 	megasas_return_cmd(instance, cmd);
2430 
2431 	return retval;
2432 }
2433 
2434 /* This function will get the current SR-IOV LD/VF affiliation */
2435 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2436 	int initial)
2437 {
2438 	int retval;
2439 
2440 	if (instance->PlasmaFW111)
2441 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2442 	else
2443 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2444 	return retval;
2445 }
2446 
2447 /* This function will tell FW to start the SR-IOV heartbeat */
2448 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2449 					 int initial)
2450 {
2451 	struct megasas_cmd *cmd;
2452 	struct megasas_dcmd_frame *dcmd;
2453 	int retval = 0;
2454 
2455 	cmd = megasas_get_cmd(instance);
2456 
2457 	if (!cmd) {
2458 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2459 		       "Failed to get cmd for scsi%d\n",
2460 		       instance->host->host_no);
2461 		return -ENOMEM;
2462 	}
2463 
2464 	dcmd = &cmd->frame->dcmd;
2465 
2466 	if (initial) {
2467 		instance->hb_host_mem =
2468 			pci_zalloc_consistent(instance->pdev,
2469 					      sizeof(struct MR_CTRL_HB_HOST_MEM),
2470 					      &instance->hb_host_mem_h);
2471 		if (!instance->hb_host_mem) {
2472 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2473 			       " memory for heartbeat host memory for scsi%d\n",
2474 			       instance->host->host_no);
2475 			retval = -ENOMEM;
2476 			goto out;
2477 		}
2478 	}
2479 
2480 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2481 
2482 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2483 	dcmd->cmd = MFI_CMD_DCMD;
2484 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2485 	dcmd->sge_count = 1;
2486 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2487 	dcmd->timeout = 0;
2488 	dcmd->pad_0 = 0;
2489 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2490 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2491 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2492 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2493 
2494 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2495 	       instance->host->host_no);
2496 
2497 	if (instance->ctrl_context && !instance->mask_interrupts)
2498 		retval = megasas_issue_blocked_cmd(instance, cmd,
2499 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2500 	else
2501 		retval = megasas_issue_polled(instance, cmd);
2502 
2503 	if (retval) {
2504 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2505 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2506 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2507 			"timed out" : "failed", instance->host->host_no);
2508 		retval = 1;
2509 	}
2510 
2511 out:
2512 	megasas_return_cmd(instance, cmd);
2513 
2514 	return retval;
2515 }
2516 
2517 /* Handler for SR-IOV heartbeat */
2518 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2519 {
2520 	struct megasas_instance *instance =
2521 		(struct megasas_instance *)instance_addr;
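	/*
	 * The FW is expected to increment fwCounter while it is alive; the
	 * driver echoes it back in driverCounter. If the two are still equal
	 * when this timer fires, the FW missed a heartbeat interval and
	 * recovery work is scheduled.
	 */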
2522 
2523 	if (instance->hb_host_mem->HB.fwCounter !=
2524 	    instance->hb_host_mem->HB.driverCounter) {
2525 		instance->hb_host_mem->HB.driverCounter =
2526 			instance->hb_host_mem->HB.fwCounter;
2527 		mod_timer(&instance->sriov_heartbeat_timer,
2528 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2529 	} else {
2530 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2531 		       "completed for scsi%d\n", instance->host->host_no);
2532 		schedule_work(&instance->work_init);
2533 	}
2534 }
2535 
2536 /**
2537  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2538  * @instance:				Adapter soft state
2539  *
2540  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2541  * complete all its outstanding commands. Returns error if one or more IOs
2542  * are pending after this time period. It also marks the controller dead.
2543  */
2544 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2545 {
2546 	int i, sl, outstanding;
2547 	u32 reset_index;
2548 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2549 	unsigned long flags;
2550 	struct list_head clist_local;
2551 	struct megasas_cmd *reset_cmd;
2552 	u32 fw_state;
2553 
2554 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2555 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2556 		__func__, __LINE__);
2557 		return FAILED;
2558 	}
2559 
2560 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2561 
2562 		INIT_LIST_HEAD(&clist_local);
2563 		spin_lock_irqsave(&instance->hba_lock, flags);
2564 		list_splice_init(&instance->internal_reset_pending_q,
2565 				&clist_local);
2566 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2567 
2568 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2569 		for (i = 0; i < wait_time; i++) {
2570 			msleep(1000);
2571 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2572 				break;
2573 		}
2574 
2575 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2576 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2577 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2578 			return FAILED;
2579 		}
2580 
2581 		reset_index = 0;
2582 		while (!list_empty(&clist_local)) {
2583 			reset_cmd = list_entry((&clist_local)->next,
2584 						struct megasas_cmd, list);
2585 			list_del_init(&reset_cmd->list);
2586 			if (reset_cmd->scmd) {
2587 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2588 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2589 					reset_index, reset_cmd,
2590 					reset_cmd->scmd->cmnd[0]);
2591 
2592 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2593 				megasas_return_cmd(instance, reset_cmd);
2594 			} else if (reset_cmd->sync_cmd) {
2595 				dev_notice(&instance->pdev->dev, "%p synch cmds"
2596 						"reset queue\n",
2597 						reset_cmd);
2598 
2599 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2600 				instance->instancet->fire_cmd(instance,
2601 						reset_cmd->frame_phys_addr,
2602 						0, instance->reg_set);
2603 			} else {
2604 				dev_notice(&instance->pdev->dev, "%p unexpected"
2605 					"cmds lst\n",
2606 					reset_cmd);
2607 			}
2608 			reset_index++;
2609 		}
2610 
2611 		return SUCCESS;
2612 	}
2613 
2614 	for (i = 0; i < resetwaittime; i++) {
2615 		outstanding = atomic_read(&instance->fw_outstanding);
2616 
2617 		if (!outstanding)
2618 			break;
2619 
2620 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2621 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2622 			       "commands to complete\n",i,outstanding);
2623 			/*
2624 			 * Call cmd completion routine. Cmd to be
2625 			 * be completed directly without depending on isr.
2626 			 */
2627 			megasas_complete_cmd_dpc((unsigned long)instance);
2628 		}
2629 
2630 		msleep(1000);
2631 	}
2632 
2633 	i = 0;
2634 	outstanding = atomic_read(&instance->fw_outstanding);
2635 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2636 
2637 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2638 		goto no_outstanding;
2639 
2640 	if (instance->disableOnlineCtrlReset)
2641 		goto kill_hba_and_failed;
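	/*
	 * Attempt OCR up to three times. If the FW is still faulted or I/O
	 * is still outstanding on the fourth pass (i == 3), give up and kill
	 * the HBA.
	 */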
2642 	do {
2643 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2644 			dev_info(&instance->pdev->dev,
2645 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2646 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2647 			if (i == 3)
2648 				goto kill_hba_and_failed;
2649 			megasas_do_ocr(instance);
2650 
2651 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2652 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2653 				__func__, __LINE__);
2654 				return FAILED;
2655 			}
2656 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2657 				__func__, __LINE__);
2658 
2659 			for (sl = 0; sl < 10; sl++)
2660 				msleep(500);
2661 
2662 			outstanding = atomic_read(&instance->fw_outstanding);
2663 
2664 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2665 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2666 				goto no_outstanding;
2667 		}
2668 		i++;
2669 	} while (i <= 3);
2670 
2671 no_outstanding:
2672 
2673 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2674 		__func__, __LINE__);
2675 	return SUCCESS;
2676 
2677 kill_hba_and_failed:
2678 
2679 	/* Reset not supported, kill adapter */
2680 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2681 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2682 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2683 		atomic_read(&instance->fw_outstanding));
2684 	megasas_dump_pending_frames(instance);
2685 	megaraid_sas_kill_hba(instance);
2686 
2687 	return FAILED;
2688 }
2689 
2690 /**
2691  * megasas_generic_reset -	Generic reset routine
2692  * @scmd:			Mid-layer SCSI command
2693  *
2694  * This routine implements a generic reset handler for device, bus and host
2695  * reset requests. Device, bus and host specific reset handlers can use this
2696  * function after they do their specific tasks.
2697  */
2698 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2699 {
2700 	int ret_val;
2701 	struct megasas_instance *instance;
2702 
2703 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2704 
2705 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2706 		 scmd->cmnd[0], scmd->retries);
2707 
2708 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2709 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2710 		return FAILED;
2711 	}
2712 
2713 	ret_val = megasas_wait_for_outstanding(instance);
2714 	if (ret_val == SUCCESS)
2715 		dev_notice(&instance->pdev->dev, "reset successful\n");
2716 	else
2717 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2718 
2719 	return ret_val;
2720 }
2721 
2722 /**
2723  * megasas_reset_timer - quiesce the adapter if required
2724  * @scmd:		scsi cmnd
2725  *
2726  * Sets the FW busy flag and reduces the host->can_queue if the
2727  * cmd has not been completed within the timeout period.
2728  */
2729 static enum
2730 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2731 {
2732 	struct megasas_instance *instance;
2733 	unsigned long flags;
2734 
2735 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2736 				(scmd_timeout * 2) * HZ)) {
2737 		return BLK_EH_NOT_HANDLED;
2738 	}
2739 
2740 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* The timeout suggests FW is busy; start throttling IO */
2743 		spin_lock_irqsave(instance->host->host_lock, flags);
2744 
2745 		instance->host->can_queue = instance->throttlequeuedepth;
2746 		instance->last_time = jiffies;
2747 		instance->flag |= MEGASAS_FW_BUSY;
2748 
2749 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2750 	}
2751 	return BLK_EH_RESET_TIMER;
2752 }
2753 
2754 /**
2755  * megasas_dump_frame -	This function will dump MPT/MFI frame
2756  */
2757 static inline void
2758 megasas_dump_frame(void *mpi_request, int sz)
2759 {
2760 	int i;
2761 	__le32 *mfp = (__le32 *)mpi_request;
2762 
2763 	printk(KERN_INFO "IO request frame:\n\t");
2764 	for (i = 0; i < sz / sizeof(__le32); i++) {
2765 		if (i && ((i % 8) == 0))
2766 			printk("\n\t");
2767 		printk("%08x ", le32_to_cpu(mfp[i]));
2768 	}
2769 	printk("\n");
2770 }
2771 
2772 /**
2773  * megasas_reset_bus_host -	Bus & host reset handler entry point
2774  */
2775 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2776 {
2777 	int ret;
2778 	struct megasas_instance *instance;
2779 
2780 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2781 
2782 	scmd_printk(KERN_INFO, scmd,
2783 		"Controller reset is requested due to IO timeout\n"
2784 		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
2785 		" SCSI host busy: %d\t FW outstanding: %d\n",
2786 		scmd, scmd->device->host->shost_state,
2787 		atomic_read((atomic_t *)&scmd->device->host->host_busy),
2788 		atomic_read(&instance->fw_outstanding));
2789 
2790 	/*
2791 	 * First wait for all commands to complete
2792 	 */
2793 	if (instance->ctrl_context) {
2794 		struct megasas_cmd_fusion *cmd;
2795 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2796 		if (cmd)
2797 			megasas_dump_frame(cmd->io_request,
2798 				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2799 		ret = megasas_reset_fusion(scmd->device->host,
2800 				SCSIIO_TIMEOUT_OCR);
2801 	} else
2802 		ret = megasas_generic_reset(scmd);
2803 
2804 	return ret;
2805 }
2806 
2807 /**
2808  * megasas_task_abort - Issues task abort request to firmware
2809  *			(supported only for fusion adapters)
2810  * @scmd:		SCSI command pointer
2811  */
2812 static int megasas_task_abort(struct scsi_cmnd *scmd)
2813 {
2814 	int ret;
2815 	struct megasas_instance *instance;
2816 
2817 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2818 
2819 	if (instance->ctrl_context)
2820 		ret = megasas_task_abort_fusion(scmd);
2821 	else {
2822 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2823 		ret = FAILED;
2824 	}
2825 
2826 	return ret;
2827 }
2828 
2829 /**
2830  * megasas_reset_target:  Issues target reset request to firmware
2831  *                        (supported only for fusion adapters)
2832  * @scmd:                 SCSI command pointer
2833  */
2834 static int megasas_reset_target(struct scsi_cmnd *scmd)
2835 {
2836 	int ret;
2837 	struct megasas_instance *instance;
2838 
2839 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2840 
2841 	if (instance->ctrl_context)
2842 		ret = megasas_reset_target_fusion(scmd);
2843 	else {
2844 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2845 		ret = FAILED;
2846 	}
2847 
2848 	return ret;
2849 }
2850 
2851 /**
2852  * megasas_bios_param - Returns disk geometry for a disk
2853  * @sdev:		device handle
2854  * @bdev:		block device
2855  * @capacity:		drive capacity
2856  * @geom:		geometry parameters
2857  */
2858 static int
2859 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2860 		 sector_t capacity, int geom[])
2861 {
2862 	int heads;
2863 	int sectors;
2864 	sector_t cylinders;
2865 	unsigned long tmp;
2866 
2867 	/* Default heads (64) & sectors (32) */
2868 	heads = 64;
2869 	sectors = 32;
2870 
2871 	tmp = heads * sectors;
2872 	cylinders = capacity;
2873 
2874 	sector_div(cylinders, tmp);
2875 
2876 	/*
2877 	 * Handle extended translation size for logical drives > 1Gb
2878 	 */
2879 
2880 	if (capacity >= 0x200000) {
2881 		heads = 255;
2882 		sectors = 63;
2883 		tmp = heads*sectors;
2884 		cylinders = capacity;
2885 		sector_div(cylinders, tmp);
2886 	}
2887 
2888 	geom[0] = heads;
2889 	geom[1] = sectors;
2890 	geom[2] = cylinders;
2891 
2892 	return 0;
2893 }
2894 
2895 static void megasas_aen_polling(struct work_struct *work);
2896 
2897 /**
2898  * megasas_service_aen -	Processes an event notification
2899  * @instance:			Adapter soft state
2900  * @cmd:			AEN command completed by the ISR
2901  *
 * For AEN, the driver sends a command down to the FW, which holds it until an
 * event occurs. When an event of interest occurs, the FW completes the command
 * that it was previously holding.
 *
 * This routine sends a SIGIO signal to processes that have registered with the
 * driver for AEN.
2908  */
2909 static void
2910 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2911 {
2912 	unsigned long flags;
2913 
2914 	/*
2915 	 * Don't signal app if it is just an aborted previously registered aen
2916 	 */
2917 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
2918 		spin_lock_irqsave(&poll_aen_lock, flags);
2919 		megasas_poll_wait_aen = 1;
2920 		spin_unlock_irqrestore(&poll_aen_lock, flags);
2921 		wake_up(&megasas_poll_wait);
2922 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2923 	}
2924 	else
2925 		cmd->abort_aen = 0;
2926 
2927 	instance->aen_cmd = NULL;
2928 
2929 	megasas_return_cmd(instance, cmd);
2930 
2931 	if ((instance->unload == 0) &&
2932 		((instance->issuepend_done == 1))) {
2933 		struct megasas_aen_event *ev;
2934 
2935 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2936 		if (!ev) {
2937 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2938 		} else {
2939 			ev->instance = instance;
2940 			instance->ev = ev;
2941 			INIT_DELAYED_WORK(&ev->hotplug_work,
2942 					  megasas_aen_polling);
2943 			schedule_delayed_work(&ev->hotplug_work, 0);
2944 		}
2945 	}
2946 }
2947 
2948 static ssize_t
2949 megasas_fw_crash_buffer_store(struct device *cdev,
2950 	struct device_attribute *attr, const char *buf, size_t count)
2951 {
2952 	struct Scsi_Host *shost = class_to_shost(cdev);
2953 	struct megasas_instance *instance =
2954 		(struct megasas_instance *) shost->hostdata;
2955 	int val = 0;
2956 	unsigned long flags;
2957 
2958 	if (kstrtoint(buf, 0, &val) != 0)
2959 		return -EINVAL;
2960 
2961 	spin_lock_irqsave(&instance->crashdump_lock, flags);
2962 	instance->fw_crash_buffer_offset = val;
2963 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2964 	return strlen(buf);
2965 }
2966 
2967 static ssize_t
2968 megasas_fw_crash_buffer_show(struct device *cdev,
2969 	struct device_attribute *attr, char *buf)
2970 {
2971 	struct Scsi_Host *shost = class_to_shost(cdev);
2972 	struct megasas_instance *instance =
2973 		(struct megasas_instance *) shost->hostdata;
2974 	u32 size;
2975 	unsigned long buff_addr;
2976 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2977 	unsigned long src_addr;
2978 	unsigned long flags;
2979 	u32 buff_offset;
2980 
2981 	spin_lock_irqsave(&instance->crashdump_lock, flags);
2982 	buff_offset = instance->fw_crash_buffer_offset;
2983 	if (!instance->crash_dump_buf &&
2984 		!((instance->fw_crash_state == AVAILABLE) ||
2985 		(instance->fw_crash_state == COPYING))) {
2986 		dev_err(&instance->pdev->dev,
2987 			"Firmware crash dump is not available\n");
2988 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2989 		return -EINVAL;
2990 	}
2991 
2992 	buff_addr = (unsigned long) buf;
2993 
2994 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2995 		dev_err(&instance->pdev->dev,
2996 			"Firmware crash dump offset is out of range\n");
2997 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2998 		return 0;
2999 	}
3000 
3001 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3002 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
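	/*
	 * The crash dump is stored as an array of CRASH_DMA_BUF_SIZE chunks;
	 * buff_offset selects the chunk (offset / dmachunk) and the starting
	 * byte within it (offset % dmachunk). At most PAGE_SIZE - 1 bytes
	 * are returned per read.
	 */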
3003 
3004 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3005 		(buff_offset % dmachunk);
3006 	memcpy(buf, (void *)src_addr, size);
3007 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3008 
3009 	return size;
3010 }
3011 
3012 static ssize_t
3013 megasas_fw_crash_buffer_size_show(struct device *cdev,
3014 	struct device_attribute *attr, char *buf)
3015 {
3016 	struct Scsi_Host *shost = class_to_shost(cdev);
3017 	struct megasas_instance *instance =
3018 		(struct megasas_instance *) shost->hostdata;
3019 
3020 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3021 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3022 }
3023 
3024 static ssize_t
3025 megasas_fw_crash_state_store(struct device *cdev,
3026 	struct device_attribute *attr, const char *buf, size_t count)
3027 {
3028 	struct Scsi_Host *shost = class_to_shost(cdev);
3029 	struct megasas_instance *instance =
3030 		(struct megasas_instance *) shost->hostdata;
3031 	int val = 0;
3032 	unsigned long flags;
3033 
3034 	if (kstrtoint(buf, 0, &val) != 0)
3035 		return -EINVAL;
3036 
3037 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3038 		dev_err(&instance->pdev->dev, "application updates invalid "
3039 			"firmware crash state\n");
3040 		return -EINVAL;
3041 	}
3042 
3043 	instance->fw_crash_state = val;
3044 
3045 	if ((val == COPIED) || (val == COPY_ERROR)) {
3046 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3047 		megasas_free_host_crash_buffer(instance);
3048 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3049 		if (val == COPY_ERROR)
3050 			dev_info(&instance->pdev->dev, "application failed to "
3051 				"copy Firmware crash dump\n");
3052 		else
3053 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3054 				"copied successfully\n");
3055 	}
3056 	return strlen(buf);
3057 }
3058 
3059 static ssize_t
3060 megasas_fw_crash_state_show(struct device *cdev,
3061 	struct device_attribute *attr, char *buf)
3062 {
3063 	struct Scsi_Host *shost = class_to_shost(cdev);
3064 	struct megasas_instance *instance =
3065 		(struct megasas_instance *) shost->hostdata;
3066 
3067 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3068 }
3069 
3070 static ssize_t
3071 megasas_page_size_show(struct device *cdev,
3072 	struct device_attribute *attr, char *buf)
3073 {
3074 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3075 }
3076 
3077 static ssize_t
3078 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3079 	char *buf)
3080 {
3081 	struct Scsi_Host *shost = class_to_shost(cdev);
3082 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3083 
3084 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3085 }
3086 
3087 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3088 	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3089 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3090 	megasas_fw_crash_buffer_size_show, NULL);
3091 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3092 	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3093 static DEVICE_ATTR(page_size, S_IRUGO,
3094 	megasas_page_size_show, NULL);
3095 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3096 	megasas_ldio_outstanding_show, NULL);
3097 
3098 struct device_attribute *megaraid_host_attrs[] = {
3099 	&dev_attr_fw_crash_buffer_size,
3100 	&dev_attr_fw_crash_buffer,
3101 	&dev_attr_fw_crash_state,
3102 	&dev_attr_page_size,
3103 	&dev_attr_ldio_outstanding,
3104 	NULL,
3105 };
3106 
3107 /*
3108  * Scsi host template for megaraid_sas driver
3109  */
3110 static struct scsi_host_template megasas_template = {
3111 
3112 	.module = THIS_MODULE,
3113 	.name = "Avago SAS based MegaRAID driver",
3114 	.proc_name = "megaraid_sas",
3115 	.slave_configure = megasas_slave_configure,
3116 	.slave_alloc = megasas_slave_alloc,
3117 	.slave_destroy = megasas_slave_destroy,
3118 	.queuecommand = megasas_queue_command,
3119 	.eh_target_reset_handler = megasas_reset_target,
3120 	.eh_abort_handler = megasas_task_abort,
3121 	.eh_host_reset_handler = megasas_reset_bus_host,
3122 	.eh_timed_out = megasas_reset_timer,
3123 	.shost_attrs = megaraid_host_attrs,
3124 	.bios_param = megasas_bios_param,
3125 	.use_clustering = ENABLE_CLUSTERING,
3126 	.change_queue_depth = scsi_change_queue_depth,
3127 	.no_write_same = 1,
3128 };
3129 
3130 /**
3131  * megasas_complete_int_cmd -	Completes an internal command
3132  * @instance:			Adapter soft state
3133  * @cmd:			Command to be completed
3134  *
3135  * The megasas_issue_blocked_cmd() function waits for a command to complete
3136  * after it issues a command. This function wakes up that waiting routine by
3137  * calling wake_up() on the wait queue.
3138  */
3139 static void
3140 megasas_complete_int_cmd(struct megasas_instance *instance,
3141 			 struct megasas_cmd *cmd)
3142 {
3143 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3144 	wake_up(&instance->int_cmd_wait_q);
3145 }
3146 
3147 /**
3148  * megasas_complete_abort -	Completes aborting a command
3149  * @instance:			Adapter soft state
3150  * @cmd:			Cmd that was issued to abort another cmd
3151  *
3152  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3153  * after it issues an abort on a previously issued command. This function
3154  * wakes up all functions waiting on the same wait queue.
3155  */
3156 static void
3157 megasas_complete_abort(struct megasas_instance *instance,
3158 		       struct megasas_cmd *cmd)
3159 {
3160 	if (cmd->sync_cmd) {
3161 		cmd->sync_cmd = 0;
3162 		cmd->cmd_status_drv = 0;
3163 		wake_up(&instance->abort_cmd_wait_q);
3164 	}
3165 }
3166 
3167 /**
3168  * megasas_complete_cmd -	Completes a command
3169  * @instance:			Adapter soft state
3170  * @cmd:			Command to be completed
3171  * @alt_status:			If non-zero, use this value as status to
3172  *				SCSI mid-layer instead of the value returned
3173  *				by the FW. This should be used if caller wants
3174  *				an alternate status (as in the case of aborted
3175  *				commands)
3176  */
3177 void
3178 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3179 		     u8 alt_status)
3180 {
3181 	int exception = 0;
3182 	struct megasas_header *hdr = &cmd->frame->hdr;
3183 	unsigned long flags;
3184 	struct fusion_context *fusion = instance->ctrl_context;
3185 	u32 opcode, status;
3186 
3187 	/* flag for the retry reset */
3188 	cmd->retry_for_fw_reset = 0;
3189 
3190 	if (cmd->scmd)
3191 		cmd->scmd->SCp.ptr = NULL;
3192 
3193 	switch (hdr->cmd) {
3194 	case MFI_CMD_INVALID:
3195 		/* Some older 1068 controller FW may keep a pended
3196 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3197 		   when booting the kdump kernel.  Ignore this command to
3198 		   prevent a kernel panic on shutdown of the kdump kernel. */
3199 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3200 		       "completed\n");
3201 		dev_warn(&instance->pdev->dev, "If you have a controller "
3202 		       "other than PERC5, please upgrade your firmware\n");
3203 		break;
3204 	case MFI_CMD_PD_SCSI_IO:
3205 	case MFI_CMD_LD_SCSI_IO:
3206 
3207 		/*
3208 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3209 		 * issued either through an IO path or an IOCTL path. If it
3210 		 * was via IOCTL, we will send it to internal completion.
3211 		 */
3212 		if (cmd->sync_cmd) {
3213 			cmd->sync_cmd = 0;
3214 			megasas_complete_int_cmd(instance, cmd);
3215 			break;
3216 		}
3217 
3218 	case MFI_CMD_LD_READ:
3219 	case MFI_CMD_LD_WRITE:
3220 
3221 		if (alt_status) {
3222 			cmd->scmd->result = alt_status << 16;
3223 			exception = 1;
3224 		}
3225 
3226 		if (exception) {
3227 
3228 			atomic_dec(&instance->fw_outstanding);
3229 
3230 			scsi_dma_unmap(cmd->scmd);
3231 			cmd->scmd->scsi_done(cmd->scmd);
3232 			megasas_return_cmd(instance, cmd);
3233 
3234 			break;
3235 		}
3236 
3237 		switch (hdr->cmd_status) {
3238 
3239 		case MFI_STAT_OK:
3240 			cmd->scmd->result = DID_OK << 16;
3241 			break;
3242 
3243 		case MFI_STAT_SCSI_IO_FAILED:
3244 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3245 			cmd->scmd->result =
3246 			    (DID_ERROR << 16) | hdr->scsi_status;
3247 			break;
3248 
3249 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3250 
3251 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3252 
3253 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3254 				memset(cmd->scmd->sense_buffer, 0,
3255 				       SCSI_SENSE_BUFFERSIZE);
3256 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3257 				       hdr->sense_len);
3258 
3259 				cmd->scmd->result |= DRIVER_SENSE << 24;
3260 			}
3261 
3262 			break;
3263 
3264 		case MFI_STAT_LD_OFFLINE:
3265 		case MFI_STAT_DEVICE_NOT_FOUND:
3266 			cmd->scmd->result = DID_BAD_TARGET << 16;
3267 			break;
3268 
3269 		default:
3270 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3271 			       hdr->cmd_status);
3272 			cmd->scmd->result = DID_ERROR << 16;
3273 			break;
3274 		}
3275 
3276 		atomic_dec(&instance->fw_outstanding);
3277 
3278 		scsi_dma_unmap(cmd->scmd);
3279 		cmd->scmd->scsi_done(cmd->scmd);
3280 		megasas_return_cmd(instance, cmd);
3281 
3282 		break;
3283 
3284 	case MFI_CMD_SMP:
3285 	case MFI_CMD_STP:
3286 	case MFI_CMD_DCMD:
3287 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3288 		/* Check for LD map update */
3289 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3290 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3291 			fusion->fast_path_io = 0;
3292 			spin_lock_irqsave(instance->host->host_lock, flags);
3293 			instance->map_update_cmd = NULL;
3294 			if (cmd->frame->hdr.cmd_status != 0) {
3295 				if (cmd->frame->hdr.cmd_status !=
3296 				    MFI_STAT_NOT_FOUND)
3297 					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
3298 					       cmd->frame->hdr.cmd_status);
3299 				else {
3300 					megasas_return_cmd(instance, cmd);
3301 					spin_unlock_irqrestore(
3302 						instance->host->host_lock,
3303 						flags);
3304 					break;
3305 				}
3306 			} else
3307 				instance->map_id++;
3308 			megasas_return_cmd(instance, cmd);
3309 
3310 			/*
3311 			 * Set fast path IO to ZERO.
3312 			 * Validate Map will set proper value.
3313 			 * Meanwhile all IOs will go as LD IO.
3314 			 */
3315 			if (MR_ValidateMapInfo(instance))
3316 				fusion->fast_path_io = 1;
3317 			else
3318 				fusion->fast_path_io = 0;
3319 			megasas_sync_map_info(instance);
3320 			spin_unlock_irqrestore(instance->host->host_lock,
3321 					       flags);
3322 			break;
3323 		}
3324 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3325 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3326 			spin_lock_irqsave(&poll_aen_lock, flags);
3327 			megasas_poll_wait_aen = 0;
3328 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3329 		}
3330 
3331 		/* FW has an updated PD sequence */
3332 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3333 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3334 
3335 			spin_lock_irqsave(instance->host->host_lock, flags);
3336 			status = cmd->frame->hdr.cmd_status;
3337 			instance->jbod_seq_cmd = NULL;
3338 			megasas_return_cmd(instance, cmd);
3339 
3340 			if (status == MFI_STAT_OK) {
3341 				instance->pd_seq_map_id++;
3342 				/* Re-register a pd sync seq num cmd */
3343 				if (megasas_sync_pd_seq_num(instance, true))
3344 					instance->use_seqnum_jbod_fp = false;
3345 			} else
3346 				instance->use_seqnum_jbod_fp = false;
3347 
3348 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3349 			break;
3350 		}
3351 
3352 		/*
3353 		 * See if got an event notification
3354 		 */
3355 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3356 			megasas_service_aen(instance, cmd);
3357 		else
3358 			megasas_complete_int_cmd(instance, cmd);
3359 
3360 		break;
3361 
3362 	case MFI_CMD_ABORT:
3363 		/*
3364 		 * Cmd issued to abort another cmd returned
3365 		 */
3366 		megasas_complete_abort(instance, cmd);
3367 		break;
3368 
3369 	default:
3370 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3371 		       hdr->cmd);
3372 		break;
3373 	}
3374 }
3375 
3376 /**
3377  * megasas_issue_pending_cmds_again -	issue all pending cmds
3378  *					in FW again because of the fw reset
3379  * @instance:				Adapter soft state
3380  */
3381 static inline void
3382 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3383 {
3384 	struct megasas_cmd *cmd;
3385 	struct list_head clist_local;
3386 	union megasas_evt_class_locale class_locale;
3387 	unsigned long flags;
3388 	u32 seq_num;
3389 
3390 	INIT_LIST_HEAD(&clist_local);
3391 	spin_lock_irqsave(&instance->hba_lock, flags);
3392 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3393 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3394 
3395 	while (!list_empty(&clist_local)) {
3396 		cmd = list_entry((&clist_local)->next,
3397 					struct megasas_cmd, list);
3398 		list_del_init(&cmd->list);
3399 
3400 		if (cmd->sync_cmd || cmd->scmd) {
3401 			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3402 				"detected to be pending while HBA reset\n",
3403 					cmd, cmd->scmd, cmd->sync_cmd);
3404 
3405 			cmd->retry_for_fw_reset++;
3406 
3407 			if (cmd->retry_for_fw_reset == 3) {
3408 				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3409 					"was tried multiple times during reset."
3410 					"Shutting down the HBA\n",
3411 					cmd, cmd->scmd, cmd->sync_cmd);
3412 				instance->instancet->disable_intr(instance);
3413 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3414 				megaraid_sas_kill_hba(instance);
3415 				return;
3416 			}
3417 		}
3418 
3419 		if (cmd->sync_cmd == 1) {
3420 			if (cmd->scmd) {
3421 				dev_notice(&instance->pdev->dev, "unexpected"
3422 					"cmd attached to internal command!\n");
3423 			}
3424 			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3425 						"on the internal reset queue,"
3426 						"issue it again.\n", cmd);
3427 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3428 			instance->instancet->fire_cmd(instance,
3429 							cmd->frame_phys_addr,
3430 							0, instance->reg_set);
3431 		} else if (cmd->scmd) {
3432 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3433 			"detected on the internal queue, issue again.\n",
3434 			cmd, cmd->scmd->cmnd[0]);
3435 
3436 			atomic_inc(&instance->fw_outstanding);
3437 			instance->instancet->fire_cmd(instance,
3438 					cmd->frame_phys_addr,
3439 					cmd->frame_count-1, instance->reg_set);
3440 		} else {
3441 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3442 				"internal reset defer list while re-issue!!\n",
3443 				cmd);
3444 		}
3445 	}
3446 
3447 	if (instance->aen_cmd) {
3448 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3449 		megasas_return_cmd(instance, instance->aen_cmd);
3450 
3451 		instance->aen_cmd = NULL;
3452 	}
3453 
3454 	/*
3455 	 * Initiate AEN (Asynchronous Event Notification)
3456 	 */
3457 	seq_num = instance->last_seq_num;
3458 	class_locale.members.reserved = 0;
3459 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3460 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3461 
3462 	megasas_register_aen(instance, seq_num, class_locale.word);
3463 }
3464 
3465 /**
3466  * Move the internal reset pending commands to a deferred queue.
3467  *
3468  * We move the commands pending at internal reset time to a
3469  * pending queue. This queue would be flushed after successful
3470  * completion of the internal reset sequence. if the internal reset
3471  * did not complete in time, the kernel reset handler would flush
3472  * these commands.
3473  **/
3474 static void
3475 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3476 {
3477 	struct megasas_cmd *cmd;
3478 	int i;
3479 	u16 max_cmd = instance->max_fw_cmds;
3480 	u32 defer_index;
3481 	unsigned long flags;
3482 
3483 	defer_index = 0;
3484 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3485 	for (i = 0; i < max_cmd; i++) {
3486 		cmd = instance->cmd_list[i];
3487 		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
					"on the defer queue as internal\n",
				defer_index, cmd, cmd->sync_cmd, cmd->scmd);

			if (!list_empty(&cmd->list)) {
				dev_notice(&instance->pdev->dev, "ERROR while"
					" moving this cmd:%p, %d %p, it was "
					"discovered on some list?\n",
					cmd, cmd->sync_cmd, cmd->scmd);
3497 
3498 				list_del_init(&cmd->list);
3499 			}
3500 			defer_index++;
3501 			list_add_tail(&cmd->list,
3502 				&instance->internal_reset_pending_q);
3503 		}
3504 	}
3505 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3506 }
3507 
3508 
3509 static void
3510 process_fw_state_change_wq(struct work_struct *work)
3511 {
3512 	struct megasas_instance *instance =
3513 		container_of(work, struct megasas_instance, work_init);
3514 	u32 wait;
3515 	unsigned long flags;
3516 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return;
	}

	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
					"state, restarting it...\n");
3526 
3527 		instance->instancet->disable_intr(instance);
3528 		atomic_set(&instance->fw_outstanding, 0);
3529 
3530 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3531 		instance->instancet->adp_reset(instance, instance->reg_set);
3532 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3533 
		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
					"initiating next stage...\n");

		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
					"state 2 starting...\n");
3539 
		/* wait for about 30 seconds before starting the second init */
		for (wait = 0; wait < 30; wait++)
			msleep(1000);
3544 
3545 		if (megasas_transition_to_ready(instance, 1)) {
3546 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3547 
3548 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3549 			megaraid_sas_kill_hba(instance);
			return;
3551 		}
3552 
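		/*
		 * For the 1064R / PERC5 / Verde ZCR controllers the consumer
		 * index was overwritten with the reset-in-progress signature
		 * before the reset; restore it from the producer index. All
		 * other controllers simply get both indices cleared.
		 */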
3553 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3554 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3555 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3556 			) {
3557 			*instance->consumer = *instance->producer;
3558 		} else {
3559 			*instance->consumer = 0;
3560 			*instance->producer = 0;
3561 		}
3562 
3563 		megasas_issue_init_mfi(instance);
3564 
3565 		spin_lock_irqsave(&instance->hba_lock, flags);
3566 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3567 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3568 		instance->instancet->enable_intr(instance);
3569 
3570 		megasas_issue_pending_cmds_again(instance);
3571 		instance->issuepend_done = 1;
3572 	}
3573 }
3574 
3575 /**
3576  * megasas_deplete_reply_queue -	Processes all completed commands
3577  * @instance:				Adapter soft state
3578  * @alt_status:				Alternate status to be returned to
3579  *					SCSI mid-layer instead of the status
3580  *					returned by the FW
3581  * Note: this must be called with hba lock held
3582  */
3583 static int
3584 megasas_deplete_reply_queue(struct megasas_instance *instance,
3585 					u8 alt_status)
3586 {
3587 	u32 mfiStatus;
3588 	u32 fw_state;
3589 
3590 	if ((mfiStatus = instance->instancet->check_reset(instance,
3591 					instance->reg_set)) == 1) {
3592 		return IRQ_HANDLED;
3593 	}
3594 
3595 	if ((mfiStatus = instance->instancet->clear_intr(
3596 						instance->reg_set)
3597 						) == 0) {
3598 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3599 		if (!instance->msix_vectors)
3600 			return IRQ_NONE;
3601 	}
3602 
3603 	instance->mfiStatus = mfiStatus;
3604 
3605 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3606 		fw_state = instance->instancet->read_fw_status_reg(
3607 				instance->reg_set) & MFI_STATE_MASK;
3608 
3609 		if (fw_state != MFI_STATE_FAULT) {
3610 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3611 						fw_state);
3612 		}
3613 
3614 		if ((fw_state == MFI_STATE_FAULT) &&
3615 				(instance->disableOnlineCtrlReset == 0)) {
3616 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3617 
3618 			if ((instance->pdev->device ==
3619 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3620 				(instance->pdev->device ==
3621 					PCI_DEVICE_ID_DELL_PERC5) ||
3622 				(instance->pdev->device ==
3623 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3624 
3625 				*instance->consumer =
3626 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3627 			}
3628 
3629 
3630 			instance->instancet->disable_intr(instance);
3631 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3632 			instance->issuepend_done = 0;
3633 
3634 			atomic_set(&instance->fw_outstanding, 0);
3635 			megasas_internal_reset_defer_cmds(instance);
3636 
3637 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3638 					fw_state, atomic_read(&instance->adprecovery));
3639 
3640 			schedule_work(&instance->work_init);
3641 			return IRQ_HANDLED;
3642 
3643 		} else {
3644 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3645 				fw_state, instance->disableOnlineCtrlReset);
3646 		}
3647 	}
3648 
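	/* Defer completion processing of outstanding commands to the ISR tasklet */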
3649 	tasklet_schedule(&instance->isr_tasklet);
3650 	return IRQ_HANDLED;
3651 }
3652 /**
3653  * megasas_isr - isr entry point
3654  */
3655 static irqreturn_t megasas_isr(int irq, void *devp)
3656 {
3657 	struct megasas_irq_context *irq_context = devp;
3658 	struct megasas_instance *instance = irq_context->instance;
3659 	unsigned long flags;
3660 	irqreturn_t rc;
3661 
3662 	if (atomic_read(&instance->fw_reset_no_pci_access))
3663 		return IRQ_HANDLED;
3664 
3665 	spin_lock_irqsave(&instance->hba_lock, flags);
3666 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3667 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3668 
3669 	return rc;
3670 }
3671 
/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Set when called from the online
 *					controller reset (OCR) path
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it simply waits for the FW to reach the ready state.
 */
3681 int
3682 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3683 {
3684 	int i;
3685 	u8 max_wait;
3686 	u32 fw_state;
3687 	u32 cur_state;
3688 	u32 abs_state, curr_abs_state;
3689 
3690 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3691 	fw_state = abs_state & MFI_STATE_MASK;
3692 
3693 	if (fw_state != MFI_STATE_READY)
3694 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3695 		       " state\n");
3696 
3697 	while (fw_state != MFI_STATE_READY) {
3698 
3699 		switch (fw_state) {
3700 
3701 		case MFI_STATE_FAULT:
3702 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3703 			if (ocr) {
3704 				max_wait = MEGASAS_RESET_WAIT_TIME;
3705 				cur_state = MFI_STATE_FAULT;
3706 				break;
3707 			} else
3708 				return -ENODEV;
3709 
3710 		case MFI_STATE_WAIT_HANDSHAKE:
3711 			/*
3712 			 * Set the CLR bit in inbound doorbell
3713 			 */
3714 			if ((instance->pdev->device ==
3715 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3716 				(instance->pdev->device ==
3717 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3718 				(instance->ctrl_context))
3719 				writel(
3720 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3721 				  &instance->reg_set->doorbell);
3722 			else
3723 				writel(
3724 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3725 					&instance->reg_set->inbound_doorbell);
3726 
3727 			max_wait = MEGASAS_RESET_WAIT_TIME;
3728 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
3729 			break;
3730 
3731 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3732 			if ((instance->pdev->device ==
3733 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3734 				(instance->pdev->device ==
3735 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3736 				(instance->ctrl_context))
3737 				writel(MFI_INIT_HOTPLUG,
3738 				       &instance->reg_set->doorbell);
3739 			else
3740 				writel(MFI_INIT_HOTPLUG,
3741 					&instance->reg_set->inbound_doorbell);
3742 
3743 			max_wait = MEGASAS_RESET_WAIT_TIME;
3744 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3745 			break;
3746 
3747 		case MFI_STATE_OPERATIONAL:
3748 			/*
3749 			 * Bring it to READY state; assuming max wait 10 secs
3750 			 */
3751 			instance->instancet->disable_intr(instance);
3752 			if ((instance->pdev->device ==
3753 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3754 				(instance->pdev->device ==
3755 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3756 				(instance->ctrl_context)) {
3757 				writel(MFI_RESET_FLAGS,
3758 					&instance->reg_set->doorbell);
3759 
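				/*
				 * Fusion controllers: wait for bit 0 of the
				 * doorbell register to clear (up to ~10
				 * seconds) before moving on.
				 */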
				if (instance->ctrl_context) {
					for (i = 0; i < (10 * 1000); i += 20) {
						if (readl(&instance->reg_set->doorbell) & 1)
							msleep(20);
						else
							break;
					}
				}
3771 			} else
3772 				writel(MFI_RESET_FLAGS,
3773 					&instance->reg_set->inbound_doorbell);
3774 
3775 			max_wait = MEGASAS_RESET_WAIT_TIME;
3776 			cur_state = MFI_STATE_OPERATIONAL;
3777 			break;
3778 
3779 		case MFI_STATE_UNDEFINED:
3780 			/*
3781 			 * This state should not last for more than 2 seconds
3782 			 */
3783 			max_wait = MEGASAS_RESET_WAIT_TIME;
3784 			cur_state = MFI_STATE_UNDEFINED;
3785 			break;
3786 
3787 		case MFI_STATE_BB_INIT:
3788 			max_wait = MEGASAS_RESET_WAIT_TIME;
3789 			cur_state = MFI_STATE_BB_INIT;
3790 			break;
3791 
3792 		case MFI_STATE_FW_INIT:
3793 			max_wait = MEGASAS_RESET_WAIT_TIME;
3794 			cur_state = MFI_STATE_FW_INIT;
3795 			break;
3796 
3797 		case MFI_STATE_FW_INIT_2:
3798 			max_wait = MEGASAS_RESET_WAIT_TIME;
3799 			cur_state = MFI_STATE_FW_INIT_2;
3800 			break;
3801 
3802 		case MFI_STATE_DEVICE_SCAN:
3803 			max_wait = MEGASAS_RESET_WAIT_TIME;
3804 			cur_state = MFI_STATE_DEVICE_SCAN;
3805 			break;
3806 
3807 		case MFI_STATE_FLUSH_CACHE:
3808 			max_wait = MEGASAS_RESET_WAIT_TIME;
3809 			cur_state = MFI_STATE_FLUSH_CACHE;
3810 			break;
3811 
3812 		default:
3813 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3814 			       fw_state);
3815 			return -ENODEV;
3816 		}
3817 
3818 		/*
3819 		 * The cur_state should not last for more than max_wait secs
3820 		 */
3821 		for (i = 0; i < (max_wait * 1000); i++) {
3822 			curr_abs_state = instance->instancet->
3823 				read_fw_status_reg(instance->reg_set);
3824 
3825 			if (abs_state == curr_abs_state) {
3826 				msleep(1);
3827 			} else
3828 				break;
3829 		}
3830 
3831 		/*
3832 		 * Return error if fw_state hasn't changed after max_wait
3833 		 */
3834 		if (curr_abs_state == abs_state) {
3835 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3836 			       "in %d secs\n", fw_state, max_wait);
3837 			return -ENODEV;
3838 		}
3839 
3840 		abs_state = curr_abs_state;
3841 		fw_state = curr_abs_state & MFI_STATE_MASK;
3842 	}
3843 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3844 
3845 	return 0;
3846 }
3847 
3848 /**
3849  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
3850  * @instance:				Adapter soft state
3851  */
3852 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3853 {
3854 	int i;
3855 	u16 max_cmd = instance->max_mfi_cmds;
3856 	struct megasas_cmd *cmd;
3857 
3858 	if (!instance->frame_dma_pool)
3859 		return;
3860 
3861 	/*
3862 	 * Return all frames to pool
3863 	 */
3864 	for (i = 0; i < max_cmd; i++) {
3865 
3866 		cmd = instance->cmd_list[i];
3867 
3868 		if (cmd->frame)
3869 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
3870 				      cmd->frame_phys_addr);
3871 
3872 		if (cmd->sense)
3873 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
3874 				      cmd->sense_phys_addr);
3875 	}
3876 
3877 	/*
3878 	 * Now destroy the pool itself
3879 	 */
3880 	dma_pool_destroy(instance->frame_dma_pool);
3881 	dma_pool_destroy(instance->sense_dma_pool);
3882 
3883 	instance->frame_dma_pool = NULL;
3884 	instance->sense_dma_pool = NULL;
3885 }
3886 
/**
 * megasas_create_frame_pool -	Creates DMA pool for cmd frames
 * @instance:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet
 * using the DMA pool facility.
 */
3896 static int megasas_create_frame_pool(struct megasas_instance *instance)
3897 {
3898 	int i;
3899 	u16 max_cmd;
3900 	u32 sge_sz;
3901 	u32 frame_count;
3902 	struct megasas_cmd *cmd;
3903 
3904 	max_cmd = instance->max_mfi_cmds;
3905 
3906 	/*
3907 	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3908 	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3909 	 */
3910 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3911 	    sizeof(struct megasas_sge32);
3912 
3913 	if (instance->flag_ieee)
3914 		sge_sz = sizeof(struct megasas_sge_skinny);
3915 
	/*
	 * For MFI controllers:
	 * max_num_sge = 60
	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
	 * Total 960 bytes (15 MFI frames of 64 bytes)
	 *
	 * Fusion adapters require only 3 extra frames:
	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
	 * Total 192 bytes (3 MFI frames of 64 bytes)
	 */
3927 	frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3928 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
	/*
	 * Use the DMA pool facility for the command frames
	 */
3932 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
3933 					&instance->pdev->dev,
3934 					instance->mfi_frame_size, 256, 0);
3935 
3936 	if (!instance->frame_dma_pool) {
3937 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3938 		return -ENOMEM;
3939 	}
3940 
3941 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
3942 						   &instance->pdev->dev, 128,
3943 						   4, 0);
3944 
3945 	if (!instance->sense_dma_pool) {
3946 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3947 
3948 		dma_pool_destroy(instance->frame_dma_pool);
3949 		instance->frame_dma_pool = NULL;
3950 
3951 		return -ENOMEM;
3952 	}
3953 
	/*
	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By using cmd->index as the context instead of &cmd, we can always
	 * use a 32-bit context regardless of the architecture
	 */
3959 	for (i = 0; i < max_cmd; i++) {
3960 
3961 		cmd = instance->cmd_list[i];
3962 
3963 		cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
3964 					    GFP_KERNEL, &cmd->frame_phys_addr);
3965 
3966 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
3967 					    GFP_KERNEL, &cmd->sense_phys_addr);
3968 
3969 		/*
3970 		 * megasas_teardown_frame_pool() takes care of freeing
3971 		 * whatever has been allocated
3972 		 */
3973 		if (!cmd->frame || !cmd->sense) {
3974 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
3975 			megasas_teardown_frame_pool(instance);
3976 			return -ENOMEM;
3977 		}
3978 
3979 		memset(cmd->frame, 0, instance->mfi_frame_size);
3980 		cmd->frame->io.context = cpu_to_le32(cmd->index);
3981 		cmd->frame->io.pad_0 = 0;
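		/*
		 * In a kdump kernel (reset_devices) on MFI controllers, mark
		 * every frame invalid up front, presumably so that stale
		 * frame contents are not treated as a valid command.
		 */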
3982 		if (!instance->ctrl_context && reset_devices)
3983 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3984 	}
3985 
3986 	return 0;
3987 }
3988 
3989 /**
3990  * megasas_free_cmds -	Free all the cmds in the free cmd pool
3991  * @instance:		Adapter soft state
3992  */
3993 void megasas_free_cmds(struct megasas_instance *instance)
3994 {
3995 	int i;
3996 
3997 	/* First free the MFI frame pool */
3998 	megasas_teardown_frame_pool(instance);
3999 
4000 	/* Free all the commands in the cmd_list */
4001 	for (i = 0; i < instance->max_mfi_cmds; i++)
4002 
4003 		kfree(instance->cmd_list[i]);
4004 
4005 	/* Free the cmd_list buffer itself */
4006 	kfree(instance->cmd_list);
4007 	instance->cmd_list = NULL;
4008 
4009 	INIT_LIST_HEAD(&instance->cmd_pool);
4010 }
4011 
4012 /**
4013  * megasas_alloc_cmds -	Allocates the command packets
4014  * @instance:		Adapter soft state
4015  *
 * Each command that is issued to the FW, whether an IO command from the OS
 * or an internal command like an IOCTL, is wrapped in a local data structure
 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
 * actually issued to the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum. We always use 32 bit integers for the context. In
 * this driver, the 32 bit values are the indices into the array cmd_list.
 * This array is used only to look up the megasas_cmd given the context. The
 * free commands themselves are maintained in a linked list called cmd_pool.
4029  */
4030 int megasas_alloc_cmds(struct megasas_instance *instance)
4031 {
4032 	int i;
4033 	int j;
4034 	u16 max_cmd;
4035 	struct megasas_cmd *cmd;
4036 	struct fusion_context *fusion;
4037 
4038 	fusion = instance->ctrl_context;
4039 	max_cmd = instance->max_mfi_cmds;
4040 
4041 	/*
4042 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4043 	 * Allocate the dynamic array first and then allocate individual
4044 	 * commands.
4045 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);
4047 
4048 	if (!instance->cmd_list) {
4049 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4050 		return -ENOMEM;
4051 	}
4052 
4053 	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4054 
4055 	for (i = 0; i < max_cmd; i++) {
4056 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4057 						GFP_KERNEL);
4058 
4059 		if (!instance->cmd_list[i]) {
4060 
4061 			for (j = 0; j < i; j++)
4062 				kfree(instance->cmd_list[j]);
4063 
4064 			kfree(instance->cmd_list);
4065 			instance->cmd_list = NULL;
4066 
4067 			return -ENOMEM;
4068 		}
4069 	}
4070 
4071 	for (i = 0; i < max_cmd; i++) {
4072 		cmd = instance->cmd_list[i];
4073 		memset(cmd, 0, sizeof(struct megasas_cmd));
4074 		cmd->index = i;
4075 		cmd->scmd = NULL;
4076 		cmd->instance = instance;
4077 
4078 		list_add_tail(&cmd->list, &instance->cmd_pool);
4079 	}
4080 
4081 	/*
4082 	 * Create a frame pool and assign one frame to each cmd
4083 	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}
4088 
4089 	return 0;
4090 }
4091 
/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:				Adapter soft state
 *
 * For Fusion adapters, return INITIATE_OCR unless the driver is being
 * unloaded or an OCR is already in progress, in which case the timeout is
 * ignored (IGNORE_TIMEOUT). MFI adapters cannot recover via OCR on a DCMD
 * timeout, so return KILL_ADAPTER for them.
 */
4099 inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4102 	if (!instance->ctrl_context)
4103 		return KILL_ADAPTER;
4104 	else if (instance->unload ||
4105 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4106 		return IGNORE_TIMEOUT;
4107 	else
4108 		return INITIATE_OCR;
4109 }
4110 
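/*
 * megasas_get_pd_info -	Fetch physical drive info for a SCSI device
 * @instance:			Adapter soft state
 * @sdev:			SCSI device whose PD info is requested
 *
 * Issues MR_DCMD_PD_GET_INFO for the device and caches the drive's
 * interface type in the per-device private data.
 */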
4111 static void
4112 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4113 {
4114 	int ret;
4115 	struct megasas_cmd *cmd;
4116 	struct megasas_dcmd_frame *dcmd;
4117 
4118 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4119 	u16 device_id = 0;
4120 
4121 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4122 	cmd = megasas_get_cmd(instance);
4123 
4124 	if (!cmd) {
4125 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4126 		return;
4127 	}
4128 
4129 	dcmd = &cmd->frame->dcmd;
4130 
4131 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4132 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4133 
4134 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4135 	dcmd->cmd = MFI_CMD_DCMD;
4136 	dcmd->cmd_status = 0xFF;
4137 	dcmd->sge_count = 1;
4138 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4139 	dcmd->timeout = 0;
4140 	dcmd->pad_0 = 0;
4141 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4142 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4143 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4144 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
4145 
4146 	if (instance->ctrl_context && !instance->mask_interrupts)
4147 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4148 	else
4149 		ret = megasas_issue_polled(instance, cmd);
4150 
4151 	switch (ret) {
4152 	case DCMD_SUCCESS:
4153 		mr_device_priv_data = sdev->hostdata;
4154 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4155 		mr_device_priv_data->interface_type =
4156 				instance->pd_info->state.ddf.pdType.intf;
4157 		break;
4158 
4159 	case DCMD_TIMEOUT:
4160 
4161 		switch (dcmd_timeout_ocr_possible(instance)) {
4162 		case INITIATE_OCR:
4163 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4164 			megasas_reset_fusion(instance->host,
4165 				MFI_IO_TIMEOUT_OCR);
4166 			break;
4167 		case KILL_ADAPTER:
4168 			megaraid_sas_kill_hba(instance);
4169 			break;
4170 		case IGNORE_TIMEOUT:
4171 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4172 				__func__, __LINE__);
4173 			break;
4174 		}
4175 
4176 		break;
4177 	}
4178 
4179 	if (ret != DCMD_TIMEOUT)
4180 		megasas_return_cmd(instance, cmd);
4181 
4182 	return;
4183 }
/*
 * megasas_get_pd_list -	Returns FW's pd_list structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure. This information is mainly used to find out which physical
 * drives are exposed to the host as system PDs.
 */
4193 static int
4194 megasas_get_pd_list(struct megasas_instance *instance)
4195 {
4196 	int ret = 0, pd_index = 0;
4197 	struct megasas_cmd *cmd;
4198 	struct megasas_dcmd_frame *dcmd;
4199 	struct MR_PD_LIST *ci;
4200 	struct MR_PD_ADDRESS *pd_addr;
4201 	dma_addr_t ci_h = 0;
4202 
4203 	if (instance->pd_list_not_supported) {
4204 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4205 		"not supported by firmware\n");
4206 		return ret;
4207 	}
4208 
4209 	cmd = megasas_get_cmd(instance);
4210 
4211 	if (!cmd) {
4212 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4213 		return -ENOMEM;
4214 	}
4215 
4216 	dcmd = &cmd->frame->dcmd;
4217 
4218 	ci = pci_alloc_consistent(instance->pdev,
4219 		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4220 
4221 	if (!ci) {
4222 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4223 		megasas_return_cmd(instance, cmd);
4224 		return -ENOMEM;
4225 	}
4226 
4227 	memset(ci, 0, sizeof(*ci));
4228 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4229 
4230 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4231 	dcmd->mbox.b[1] = 0;
4232 	dcmd->cmd = MFI_CMD_DCMD;
4233 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4234 	dcmd->sge_count = 1;
4235 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4236 	dcmd->timeout = 0;
4237 	dcmd->pad_0 = 0;
4238 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4239 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4240 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4241 	dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4242 
4243 	if (instance->ctrl_context && !instance->mask_interrupts)
4244 		ret = megasas_issue_blocked_cmd(instance, cmd,
4245 			MFI_IO_TIMEOUT_SECS);
4246 	else
4247 		ret = megasas_issue_polled(instance, cmd);
4248 
4249 	switch (ret) {
4250 	case DCMD_FAILED:
4251 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4252 			"failed/not supported by firmware\n");
4253 
4254 		if (instance->ctrl_context)
4255 			megaraid_sas_kill_hba(instance);
4256 		else
4257 			instance->pd_list_not_supported = 1;
4258 		break;
4259 	case DCMD_TIMEOUT:
4260 
4261 		switch (dcmd_timeout_ocr_possible(instance)) {
4262 		case INITIATE_OCR:
4263 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4264 			/*
4265 			 * DCMD failed from AEN path.
4266 			 * AEN path already hold reset_mutex to avoid PCI access
4267 			 * while OCR is in progress.
4268 			 */
4269 			mutex_unlock(&instance->reset_mutex);
4270 			megasas_reset_fusion(instance->host,
4271 						MFI_IO_TIMEOUT_OCR);
4272 			mutex_lock(&instance->reset_mutex);
4273 			break;
4274 		case KILL_ADAPTER:
4275 			megaraid_sas_kill_hba(instance);
4276 			break;
4277 		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4279 				__func__, __LINE__);
4280 			break;
4281 		}
4282 
4283 		break;
4284 
4285 	case DCMD_SUCCESS:
4286 		pd_addr = ci->addr;
4287 
4288 		if ((le32_to_cpu(ci->count) >
4289 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4290 			break;
4291 
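		/*
		 * Rebuild the local PD list: record each PD reported by FW
		 * under its target ID and mark it as a system PD.
		 */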
4292 		memset(instance->local_pd_list, 0,
4293 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4294 
4295 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4296 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4297 					le16_to_cpu(pd_addr->deviceId);
4298 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4299 					pd_addr->scsiDevType;
4300 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4301 					MR_PD_STATE_SYSTEM;
4302 			pd_addr++;
4303 		}
4304 
4305 		memcpy(instance->pd_list, instance->local_pd_list,
4306 			sizeof(instance->pd_list));
4307 		break;
4308 
4309 	}
4310 
4311 	pci_free_consistent(instance->pdev,
4312 				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4313 				ci, ci_h);
4314 
4315 	if (ret != DCMD_TIMEOUT)
4316 		megasas_return_cmd(instance, cmd);
4317 
4318 	return ret;
4319 }
4320 
/*
 * megasas_get_ld_list -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD list
 * structure. This information is mainly used to populate the driver's
 * table of valid logical drive target IDs.
 */
4330 static int
4331 megasas_get_ld_list(struct megasas_instance *instance)
4332 {
4333 	int ret = 0, ld_index = 0, ids = 0;
4334 	struct megasas_cmd *cmd;
4335 	struct megasas_dcmd_frame *dcmd;
4336 	struct MR_LD_LIST *ci;
4337 	dma_addr_t ci_h = 0;
4338 	u32 ld_count;
4339 
4340 	cmd = megasas_get_cmd(instance);
4341 
4342 	if (!cmd) {
4343 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4344 		return -ENOMEM;
4345 	}
4346 
4347 	dcmd = &cmd->frame->dcmd;
4348 
4349 	ci = pci_alloc_consistent(instance->pdev,
4350 				sizeof(struct MR_LD_LIST),
4351 				&ci_h);
4352 
4353 	if (!ci) {
4354 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4355 		megasas_return_cmd(instance, cmd);
4356 		return -ENOMEM;
4357 	}
4358 
4359 	memset(ci, 0, sizeof(*ci));
4360 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4361 
4362 	if (instance->supportmax256vd)
4363 		dcmd->mbox.b[0] = 1;
4364 	dcmd->cmd = MFI_CMD_DCMD;
4365 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4366 	dcmd->sge_count = 1;
4367 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4368 	dcmd->timeout = 0;
4369 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4370 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4371 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4372 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4373 	dcmd->pad_0  = 0;
4374 
4375 	if (instance->ctrl_context && !instance->mask_interrupts)
4376 		ret = megasas_issue_blocked_cmd(instance, cmd,
4377 			MFI_IO_TIMEOUT_SECS);
4378 	else
4379 		ret = megasas_issue_polled(instance, cmd);
4380 
4381 	ld_count = le32_to_cpu(ci->ldCount);
4382 
4383 	switch (ret) {
4384 	case DCMD_FAILED:
4385 		megaraid_sas_kill_hba(instance);
4386 		break;
4387 	case DCMD_TIMEOUT:
4388 
4389 		switch (dcmd_timeout_ocr_possible(instance)) {
4390 		case INITIATE_OCR:
4391 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4392 			/*
4393 			 * DCMD failed from AEN path.
4394 			 * AEN path already hold reset_mutex to avoid PCI access
4395 			 * while OCR is in progress.
4396 			 */
4397 			mutex_unlock(&instance->reset_mutex);
4398 			megasas_reset_fusion(instance->host,
4399 						MFI_IO_TIMEOUT_OCR);
4400 			mutex_lock(&instance->reset_mutex);
4401 			break;
4402 		case KILL_ADAPTER:
4403 			megaraid_sas_kill_hba(instance);
4404 			break;
4405 		case IGNORE_TIMEOUT:
4406 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4407 				__func__, __LINE__);
4408 			break;
4409 		}
4410 
4411 		break;
4412 
4413 	case DCMD_SUCCESS:
4414 		if (ld_count > instance->fw_supported_vd_count)
4415 			break;
4416 
4417 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4418 
4419 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4420 			if (ci->ldList[ld_index].state != 0) {
4421 				ids = ci->ldList[ld_index].ref.targetId;
4422 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4423 			}
4424 		}
4425 
4426 		break;
4427 	}
4428 
4429 	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4430 
4431 	if (ret != DCMD_TIMEOUT)
4432 		megasas_return_cmd(instance, cmd);
4433 
4434 	return ret;
4435 }
4436 
/**
 * megasas_ld_list_query -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 * @query_type:				LD query type (MR_LD_QUERY_TYPE_*)
 *
 * Issues an internal command (DCMD) to query the FW's LD target ID list.
 * Falls back to megasas_get_ld_list() if the query DCMD is not supported
 * by the firmware.
 */
4446 static int
4447 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4448 {
4449 	int ret = 0, ld_index = 0, ids = 0;
4450 	struct megasas_cmd *cmd;
4451 	struct megasas_dcmd_frame *dcmd;
4452 	struct MR_LD_TARGETID_LIST *ci;
4453 	dma_addr_t ci_h = 0;
4454 	u32 tgtid_count;
4455 
4456 	cmd = megasas_get_cmd(instance);
4457 
4458 	if (!cmd) {
4459 		dev_warn(&instance->pdev->dev,
4460 		         "megasas_ld_list_query: Failed to get cmd\n");
4461 		return -ENOMEM;
4462 	}
4463 
4464 	dcmd = &cmd->frame->dcmd;
4465 
4466 	ci = pci_alloc_consistent(instance->pdev,
4467 				  sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4468 
4469 	if (!ci) {
4470 		dev_warn(&instance->pdev->dev,
4471 		         "Failed to alloc mem for ld_list_query\n");
4472 		megasas_return_cmd(instance, cmd);
4473 		return -ENOMEM;
4474 	}
4475 
4476 	memset(ci, 0, sizeof(*ci));
4477 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4478 
4479 	dcmd->mbox.b[0] = query_type;
4480 	if (instance->supportmax256vd)
4481 		dcmd->mbox.b[2] = 1;
4482 
4483 	dcmd->cmd = MFI_CMD_DCMD;
4484 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4485 	dcmd->sge_count = 1;
4486 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4487 	dcmd->timeout = 0;
4488 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4489 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4490 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4491 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4492 	dcmd->pad_0  = 0;
4493 
4494 	if (instance->ctrl_context && !instance->mask_interrupts)
4495 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4496 	else
4497 		ret = megasas_issue_polled(instance, cmd);
4498 
4499 	switch (ret) {
4500 	case DCMD_FAILED:
4501 		dev_info(&instance->pdev->dev,
4502 			"DCMD not supported by firmware - %s %d\n",
4503 				__func__, __LINE__);
4504 		ret = megasas_get_ld_list(instance);
4505 		break;
4506 	case DCMD_TIMEOUT:
4507 		switch (dcmd_timeout_ocr_possible(instance)) {
4508 		case INITIATE_OCR:
4509 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4510 			/*
4511 			 * DCMD failed from AEN path.
4512 			 * AEN path already hold reset_mutex to avoid PCI access
4513 			 * while OCR is in progress.
4514 			 */
4515 			mutex_unlock(&instance->reset_mutex);
4516 			megasas_reset_fusion(instance->host,
4517 						MFI_IO_TIMEOUT_OCR);
4518 			mutex_lock(&instance->reset_mutex);
4519 			break;
4520 		case KILL_ADAPTER:
4521 			megaraid_sas_kill_hba(instance);
4522 			break;
4523 		case IGNORE_TIMEOUT:
4524 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4525 				__func__, __LINE__);
4526 			break;
4527 		}
4528 
4529 		break;
4530 	case DCMD_SUCCESS:
4531 		tgtid_count = le32_to_cpu(ci->count);
4532 
4533 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4534 			break;
4535 
4536 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4537 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4538 			ids = ci->targetId[ld_index];
4539 			instance->ld_ids[ids] = ci->targetId[ld_index];
4540 		}
4541 
4542 		break;
4543 	}
4544 
4545 	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4546 		    ci, ci_h);
4547 
4548 	if (ret != DCMD_TIMEOUT)
4549 		megasas_return_cmd(instance, cmd);
4550 
4551 	return ret;
4552 }
4553 
/*
 * megasas_update_ext_vd_details -	Update details w.r.t extended VD
 * @instance:				Adapter soft state
 */
4558 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4559 {
4560 	struct fusion_context *fusion;
4561 	u32 ventura_map_sz = 0;
4562 
4563 	fusion = instance->ctrl_context;
	/* Nothing to update for MFI based controllers */
4565 	if (!fusion)
4566 		return;
4567 
4568 	instance->supportmax256vd =
4569 		instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4570 	/* Below is additional check to address future FW enhancement */
4571 	if (instance->ctrl_info->max_lds > 64)
4572 		instance->supportmax256vd = 1;
4573 
4574 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4575 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4576 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4577 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4578 	if (instance->supportmax256vd) {
4579 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4580 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4581 	} else {
4582 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4583 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4584 	}
4585 
4586 	dev_info(&instance->pdev->dev,
4587 		"firmware type\t: %s\n",
4588 		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4589 		"Legacy(64 VD) firmware");
4590 
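	/*
	 * Ventura controllers report max_raid_mapsize (in 64k units) via a
	 * scratch pad register; older Fusion controllers size the RAID maps
	 * from the supported VD count instead.
	 */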
4591 	if (instance->max_raid_mapsize) {
4592 		ventura_map_sz = instance->max_raid_mapsize *
4593 						MR_MIN_MAP_SIZE; /* 64k */
4594 		fusion->current_map_sz = ventura_map_sz;
4595 		fusion->max_map_sz = ventura_map_sz;
4596 	} else {
4597 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4598 					(sizeof(struct MR_LD_SPAN_MAP) *
4599 					(instance->fw_supported_vd_count - 1));
4600 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4601 
4602 		fusion->max_map_sz =
4603 			max(fusion->old_map_sz, fusion->new_map_sz);
4604 
4605 		if (instance->supportmax256vd)
4606 			fusion->current_map_sz = fusion->new_map_sz;
4607 		else
4608 			fusion->current_map_sz = fusion->old_map_sz;
4609 	}
4610 	/* irrespective of FW raid maps, driver raid map is constant */
4611 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4612 }
4613 
/**
 * megasas_get_ctrl_info -	Returns FW's controller structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller structure.
 * This information is mainly used to find out the maximum IO transfer per
 * command supported by the FW.
 */
4622 int
4623 megasas_get_ctrl_info(struct megasas_instance *instance)
4624 {
4625 	int ret = 0;
4626 	struct megasas_cmd *cmd;
4627 	struct megasas_dcmd_frame *dcmd;
4628 	struct megasas_ctrl_info *ci;
4629 	struct megasas_ctrl_info *ctrl_info;
4630 	dma_addr_t ci_h = 0;
4631 
4632 	ctrl_info = instance->ctrl_info;
4633 
4634 	cmd = megasas_get_cmd(instance);
4635 
4636 	if (!cmd) {
4637 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4638 		return -ENOMEM;
4639 	}
4640 
4641 	dcmd = &cmd->frame->dcmd;
4642 
4643 	ci = pci_alloc_consistent(instance->pdev,
4644 				  sizeof(struct megasas_ctrl_info), &ci_h);
4645 
4646 	if (!ci) {
4647 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4648 		megasas_return_cmd(instance, cmd);
4649 		return -ENOMEM;
4650 	}
4651 
4652 	memset(ci, 0, sizeof(*ci));
4653 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4654 
4655 	dcmd->cmd = MFI_CMD_DCMD;
4656 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4657 	dcmd->sge_count = 1;
4658 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4659 	dcmd->timeout = 0;
4660 	dcmd->pad_0 = 0;
4661 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4662 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4663 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4664 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4665 	dcmd->mbox.b[0] = 1;
4666 
4667 	if (instance->ctrl_context && !instance->mask_interrupts)
4668 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4669 	else
4670 		ret = megasas_issue_polled(instance, cmd);
4671 
4672 	switch (ret) {
4673 	case DCMD_SUCCESS:
4674 		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4675 		/* Save required controller information in
4676 		 * CPU endianness format.
4677 		 */
4678 		le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4679 		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4680 		le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4681 		le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
4682 
4683 		/* Update the latest Ext VD info.
4684 		 * From Init path, store current firmware details.
4685 		 * From OCR path, detect any firmware properties changes.
4686 		 * in case of Firmware upgrade without system reboot.
4687 		 */
4688 		megasas_update_ext_vd_details(instance);
4689 		instance->use_seqnum_jbod_fp =
4690 			ctrl_info->adapterOperations3.useSeqNumJbodFP;
4691 		instance->support_morethan256jbod =
4692 			ctrl_info->adapter_operations4.support_pd_map_target_id;
4693 
		/* Check whether controller is iMR or MR */
4695 		instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4696 		dev_info(&instance->pdev->dev,
4697 			"controller type\t: %s(%dMB)\n",
4698 			instance->is_imr ? "iMR" : "MR",
4699 			le16_to_cpu(ctrl_info->memory_size));
4700 
4701 		instance->disableOnlineCtrlReset =
4702 			ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4703 		instance->secure_jbod_support =
4704 			ctrl_info->adapterOperations3.supportSecurityonJBOD;
4705 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4706 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4707 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4708 			instance->secure_jbod_support ? "Yes" : "No");
4709 		break;
4710 
4711 	case DCMD_TIMEOUT:
4712 		switch (dcmd_timeout_ocr_possible(instance)) {
4713 		case INITIATE_OCR:
4714 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4715 			megasas_reset_fusion(instance->host,
4716 				MFI_IO_TIMEOUT_OCR);
4717 			break;
4718 		case KILL_ADAPTER:
4719 			megaraid_sas_kill_hba(instance);
4720 			break;
4721 		case IGNORE_TIMEOUT:
4722 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4723 				__func__, __LINE__);
4724 			break;
		}
		break;
	case DCMD_FAILED:
4727 		megaraid_sas_kill_hba(instance);
4728 		break;
4729 
4730 	}
4731 
4732 	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4733 			    ci, ci_h);
4734 
4735 	megasas_return_cmd(instance, cmd);
4736 
4737 
4738 	return ret;
4739 }
4740 
/*
 * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
 *					to firmware
 * @instance:				Adapter soft state
 * @crash_buf_state:			Tell FW to turn the crash dump feature
 *					on or off:
 *					MR_CRASH_BUF_TURN_OFF = 0
 *					MR_CRASH_BUF_TURN_ON = 1
 *
 * Issues an internal command (DCMD) to set parameters for the crash dump
 * feature. The driver sends the address of the crash dump DMA buffer and
 * sets a mailbox flag to tell FW that the driver supports crash dump. This
 * DCMD is sent only if the crash dump feature is supported by the FW.
 *
 * Returns 0 on success, non-zero on failure.
 */
4756 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4757 	u8 crash_buf_state)
4758 {
4759 	int ret = 0;
4760 	struct megasas_cmd *cmd;
4761 	struct megasas_dcmd_frame *dcmd;
4762 
4763 	cmd = megasas_get_cmd(instance);
4764 
4765 	if (!cmd) {
4766 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4767 		return -ENOMEM;
4768 	}
4769 
4770 
4771 	dcmd = &cmd->frame->dcmd;
4772 
4773 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4774 	dcmd->mbox.b[0] = crash_buf_state;
4775 	dcmd->cmd = MFI_CMD_DCMD;
4776 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4777 	dcmd->sge_count = 1;
4778 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4779 	dcmd->timeout = 0;
4780 	dcmd->pad_0 = 0;
4781 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4782 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4783 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4784 	dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4785 
4786 	if (instance->ctrl_context && !instance->mask_interrupts)
4787 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4788 	else
4789 		ret = megasas_issue_polled(instance, cmd);
4790 
4791 	if (ret == DCMD_TIMEOUT) {
4792 		switch (dcmd_timeout_ocr_possible(instance)) {
4793 		case INITIATE_OCR:
4794 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4795 			megasas_reset_fusion(instance->host,
4796 					MFI_IO_TIMEOUT_OCR);
4797 			break;
4798 		case KILL_ADAPTER:
4799 			megaraid_sas_kill_hba(instance);
4800 			break;
4801 		case IGNORE_TIMEOUT:
4802 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4803 				__func__, __LINE__);
4804 			break;
4805 		}
4806 	} else
4807 		megasas_return_cmd(instance, cmd);
4808 
4809 	return ret;
4810 }
4811 
4812 /**
4813  * megasas_issue_init_mfi -	Initializes the FW
4814  * @instance:		Adapter soft state
4815  *
4816  * Issues the INIT MFI cmd
4817  */
4818 static int
4819 megasas_issue_init_mfi(struct megasas_instance *instance)
4820 {
4821 	__le32 context;
4822 	struct megasas_cmd *cmd;
4823 	struct megasas_init_frame *init_frame;
4824 	struct megasas_init_queue_info *initq_info;
4825 	dma_addr_t init_frame_h;
4826 	dma_addr_t initq_info_h;
4827 
4828 	/*
	 * Prepare an init frame. Note the init frame points to queue info
4830 	 * structure. Each frame has SGL allocated after first 64 bytes. For
4831 	 * this frame - since we don't need any SGL - we use SGL's space as
4832 	 * queue info structure
4833 	 *
4834 	 * We will not get a NULL command below. We just created the pool.
4835 	 */
4836 	cmd = megasas_get_cmd(instance);
4837 
4838 	init_frame = (struct megasas_init_frame *)cmd->frame;
4839 	initq_info = (struct megasas_init_queue_info *)
4840 		((unsigned long)init_frame + 64);
4841 
4842 	init_frame_h = cmd->frame_phys_addr;
4843 	initq_info_h = init_frame_h + 64;
4844 
4845 	context = init_frame->context;
4846 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4847 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4848 	init_frame->context = context;
4849 
4850 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4851 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4852 
4853 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4854 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4855 
4856 	init_frame->cmd = MFI_CMD_INIT;
4857 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4858 	init_frame->queue_info_new_phys_addr_lo =
4859 		cpu_to_le32(lower_32_bits(initq_info_h));
4860 	init_frame->queue_info_new_phys_addr_hi =
4861 		cpu_to_le32(upper_32_bits(initq_info_h));
4862 
4863 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4864 
4865 	/*
4866 	 * disable the intr before firing the init frame to FW
4867 	 */
4868 	instance->instancet->disable_intr(instance);
4869 
4870 	/*
4871 	 * Issue the init frame in polled mode
4872 	 */
4873 
4874 	if (megasas_issue_polled(instance, cmd)) {
4875 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4876 		megasas_return_cmd(instance, cmd);
4877 		goto fail_fw_init;
4878 	}
4879 
4880 	megasas_return_cmd(instance, cmd);
4881 
4882 	return 0;
4883 
4884 fail_fw_init:
4885 	return -EINVAL;
4886 }
4887 
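/*
 * megasas_init_adapter_mfi -	Initialize an MFI (non-Fusion) adapter
 * @instance:			Adapter soft state
 *
 * Reads the operational limits from the FW status register, allocates the
 * MFI command pool and the reply queue, and issues the INIT MFI frame.
 *
 * Returns 0 on success, 1 on failure.
 */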
4888 static u32
4889 megasas_init_adapter_mfi(struct megasas_instance *instance)
4890 {
4891 	struct megasas_register_set __iomem *reg_set;
4892 	u32 context_sz;
4893 	u32 reply_q_sz;
4894 
4895 	reg_set = instance->reg_set;
4896 
4897 	/*
4898 	 * Get various operational parameters from status register
4899 	 */
4900 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4901 	/*
4902 	 * Reduce the max supported cmds by 1. This is to ensure that the
4903 	 * reply_q_sz (1 more than the max cmd that driver may send)
4904 	 * does not exceed max cmds that the FW can support
4905 	 */
4906 	instance->max_fw_cmds = instance->max_fw_cmds-1;
4907 	instance->max_mfi_cmds = instance->max_fw_cmds;
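	/* Max SGE count is reported in bits 16-23 of the status register */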
4908 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4909 					0x10;
4910 	/*
4911 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4912 	 * are reserved for IOCTL + driver's internal DCMDs.
4913 	 */
4914 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4915 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4916 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4917 			MEGASAS_SKINNY_INT_CMDS);
4918 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4919 	} else {
4920 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4921 			MEGASAS_INT_CMDS);
4922 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4923 	}
4924 
4925 	instance->cur_can_queue = instance->max_scsi_cmds;
4926 	/*
4927 	 * Create a pool of commands
4928 	 */
4929 	if (megasas_alloc_cmds(instance))
4930 		goto fail_alloc_cmds;
4931 
4932 	/*
4933 	 * Allocate memory for reply queue. Length of reply queue should
4934 	 * be _one_ more than the maximum commands handled by the firmware.
4935 	 *
	 * Note: When FW completes commands, it places corresponding context
4937 	 * values in this circular reply queue. This circular queue is a fairly
4938 	 * typical producer-consumer queue. FW is the producer (of completed
4939 	 * commands) and the driver is the consumer.
4940 	 */
4941 	context_sz = sizeof(u32);
4942 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4943 
4944 	instance->reply_queue = pci_alloc_consistent(instance->pdev,
4945 						     reply_q_sz,
4946 						     &instance->reply_queue_h);
4947 
4948 	if (!instance->reply_queue) {
4949 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4950 		goto fail_reply_queue;
4951 	}
4952 
4953 	if (megasas_issue_init_mfi(instance))
4954 		goto fail_fw_init;
4955 
4956 	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
			"Fail from %s %d\n", instance->unique_id,
4959 			__func__, __LINE__);
4960 		goto fail_fw_init;
4961 	}
4962 
	instance->fw_support_ieee =
		(instance->instancet->read_fw_status_reg(reg_set) &
		0x04000000);
4967 
	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
			instance->fw_support_ieee);
4970 
4971 	if (instance->fw_support_ieee)
4972 		instance->flag_ieee = 1;
4973 
4974 	return 0;
4975 
4976 fail_fw_init:
4977 
4978 	pci_free_consistent(instance->pdev, reply_q_sz,
4979 			    instance->reply_queue, instance->reply_queue_h);
4980 fail_reply_queue:
4981 	megasas_free_cmds(instance);
4982 
4983 fail_alloc_cmds:
4984 	return 1;
4985 }
4986 
4987 /*
4988  * megasas_setup_irqs_ioapic -		register legacy interrupts.
4989  * @instance:				Adapter soft state
4990  *
4991  * Do not enable interrupt, only setup ISRs.
4992  *
4993  * Return 0 on success.
4994  */
4995 static int
4996 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4997 {
4998 	struct pci_dev *pdev;
4999 
5000 	pdev = instance->pdev;
5001 	instance->irq_context[0].instance = instance;
5002 	instance->irq_context[0].MSIxIndex = 0;
5003 	if (request_irq(pci_irq_vector(pdev, 0),
5004 			instance->instancet->service_isr, IRQF_SHARED,
5005 			"megasas", &instance->irq_context[0])) {
5006 		dev_err(&instance->pdev->dev,
5007 				"Failed to register IRQ from %s %d\n",
5008 				__func__, __LINE__);
5009 		return -1;
5010 	}
5011 	return 0;
5012 }
5013 
5014 /**
5015  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5016  * @instance:				Adapter soft state
5017  * @is_probe:				Driver probe check
5018  *
5019  * Do not enable interrupt, only setup ISRs.
5020  *
5021  * Return 0 on success.
5022  */
5023 static int
5024 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5025 {
5026 	int i, j;
5027 	struct pci_dev *pdev;
5028 
5029 	pdev = instance->pdev;
5030 
5031 	/* Try MSI-x */
5032 	for (i = 0; i < instance->msix_vectors; i++) {
5033 		instance->irq_context[i].instance = instance;
5034 		instance->irq_context[i].MSIxIndex = i;
5035 		if (request_irq(pci_irq_vector(pdev, i),
5036 			instance->instancet->service_isr, 0, "megasas",
5037 			&instance->irq_context[i])) {
5038 			dev_err(&instance->pdev->dev,
5039 				"Failed to register IRQ for vector %d.\n", i);
5040 			for (j = 0; j < i; j++)
5041 				free_irq(pci_irq_vector(pdev, j),
5042 					 &instance->irq_context[j]);
5043 			/* Retry irq register for IO_APIC*/
5044 			instance->msix_vectors = 0;
5045 			if (is_probe) {
5046 				pci_free_irq_vectors(instance->pdev);
5047 				return megasas_setup_irqs_ioapic(instance);
5048 			} else {
5049 				return -1;
5050 			}
5051 		}
5052 	}
5053 	return 0;
5054 }
5055 
5056 /*
5057  * megasas_destroy_irqs-		unregister interrupts.
5058  * @instance:				Adapter soft state
5059  * return:				void
5060  */
5061 static void
megasas_destroy_irqs(struct megasas_instance *instance)
{
	int i;
5065 
5066 	if (instance->msix_vectors)
5067 		for (i = 0; i < instance->msix_vectors; i++) {
5068 			free_irq(pci_irq_vector(instance->pdev, i),
5069 				 &instance->irq_context[i]);
5070 		}
5071 	else
5072 		free_irq(pci_irq_vector(instance->pdev, 0),
5073 			 &instance->irq_context[0]);
5074 }
5075 
/**
 * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
 * @instance:				Adapter soft state
 *
 * Allocates the PD sequence-number sync buffers (if not already allocated)
 * and syncs the JBOD map with FW. Disables the seq-number JBOD fast path
 * if the map is not supported or the sync fails.
 */
5083 void
5084 megasas_setup_jbod_map(struct megasas_instance *instance)
5085 {
5086 	int i;
5087 	struct fusion_context *fusion = instance->ctrl_context;
5088 	u32 pd_seq_map_sz;
5089 
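	/*
	 * MR_PD_CFG_SEQ_NUM_SYNC already contains one MR_PD_CFG_SEQ entry,
	 * so add room for the remaining MAX_PHYSICAL_DEVICES - 1 entries.
	 */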
5090 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5091 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5092 
5093 	if (reset_devices || !fusion ||
5094 		!instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
5095 		dev_info(&instance->pdev->dev,
5096 			"Jbod map is not supported %s %d\n",
5097 			__func__, __LINE__);
5098 		instance->use_seqnum_jbod_fp = false;
5099 		return;
5100 	}
5101 
5102 	if (fusion->pd_seq_sync[0])
5103 		goto skip_alloc;
5104 
5105 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5106 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5107 			(&instance->pdev->dev, pd_seq_map_sz,
5108 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5109 		if (!fusion->pd_seq_sync[i]) {
5110 			dev_err(&instance->pdev->dev,
5111 				"Failed to allocate memory from %s %d\n",
5112 				__func__, __LINE__);
5113 			if (i == 1) {
5114 				dma_free_coherent(&instance->pdev->dev,
5115 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5116 					fusion->pd_seq_phys[0]);
5117 				fusion->pd_seq_sync[0] = NULL;
5118 			}
5119 			instance->use_seqnum_jbod_fp = false;
5120 			return;
5121 		}
5122 	}
5123 
5124 skip_alloc:
5125 	if (!megasas_sync_pd_seq_num(instance, false) &&
5126 		!megasas_sync_pd_seq_num(instance, true))
5127 		instance->use_seqnum_jbod_fp = true;
5128 	else
5129 		instance->use_seqnum_jbod_fp = false;
5130 }
5131 
5132 /**
5133  * megasas_init_fw -	Initializes the FW
5134  * @instance:		Adapter soft state
5135  *
5136  * This is the main function for initializing firmware
5137  */
5138 
5139 static int megasas_init_fw(struct megasas_instance *instance)
5140 {
5141 	u32 max_sectors_1;
5142 	u32 max_sectors_2, tmp_sectors, msix_enable;
5143 	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5144 	resource_size_t base_addr;
5145 	struct megasas_register_set __iomem *reg_set;
5146 	struct megasas_ctrl_info *ctrl_info = NULL;
5147 	unsigned long bar_list;
5148 	int i, j, loop, fw_msix_count = 0;
5149 	struct IOV_111 *iovPtr;
5150 	struct fusion_context *fusion;
5151 
5152 	fusion = instance->ctrl_context;
5153 
5154 	/* Find first memory bar */
5155 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5156 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5157 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5158 					 "megasas: LSI")) {
5159 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5160 		return -EBUSY;
5161 	}
5162 
5163 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5164 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5165 
5166 	if (!instance->reg_set) {
5167 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5168 		goto fail_ioremap;
5169 	}
5170 
5171 	reg_set = instance->reg_set;
5172 
5173 	if (fusion)
5174 		instance->instancet = &megasas_instance_template_fusion;
5175 	else {
5176 		switch (instance->pdev->device) {
5177 		case PCI_DEVICE_ID_LSI_SAS1078R:
5178 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5179 			instance->instancet = &megasas_instance_template_ppc;
5180 			break;
5181 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5182 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5183 			instance->instancet = &megasas_instance_template_gen2;
5184 			break;
5185 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5186 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5187 			instance->instancet = &megasas_instance_template_skinny;
5188 			break;
5189 		case PCI_DEVICE_ID_LSI_SAS1064R:
5190 		case PCI_DEVICE_ID_DELL_PERC5:
5191 		default:
5192 			instance->instancet = &megasas_instance_template_xscale;
5193 			instance->pd_list_not_supported = 1;
5194 			break;
5195 		}
5196 	}
5197 
5198 	if (megasas_transition_to_ready(instance, 0)) {
5199 		atomic_set(&instance->fw_reset_no_pci_access, 1);
5200 		instance->instancet->adp_reset
5201 			(instance, instance->reg_set);
5202 		atomic_set(&instance->fw_reset_no_pci_access, 0);
5203 		dev_info(&instance->pdev->dev,
5204 			"FW restarted successfully from %s!\n",
5205 			__func__);
5206 
		/* wait for about 30 seconds before retrying */
5208 		ssleep(30);
5209 
5210 		if (megasas_transition_to_ready(instance, 0))
5211 			goto fail_ready_state;
5212 	}
5213 
5214 	if (instance->is_ventura) {
5215 		scratch_pad_3 =
5216 			readl(&instance->reg_set->outbound_scratch_pad_3);
5217 		instance->max_raid_mapsize = ((scratch_pad_3 >>
5218 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5219 			MR_MAX_RAID_MAP_SIZE_MASK);
5220 	}
5221 
5222 	/* Check if MSI-X is supported while in ready state */
5223 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5224 		       0x4000000) >> 0x1a;
5225 	if (msix_enable && !msix_disable) {
5226 		int irq_flags = PCI_IRQ_MSIX;
5227 
5228 		scratch_pad_2 = readl
5229 			(&instance->reg_set->outbound_scratch_pad_2);
5230 		/* Check max MSI-X vectors */
5231 		if (fusion) {
5232 			if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
5233 				instance->msix_vectors = (scratch_pad_2
5234 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5235 				fw_msix_count = instance->msix_vectors;
5236 			} else { /* Invader series supports more than 8 MSI-x vectors*/
5237 				instance->msix_vectors = ((scratch_pad_2
5238 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5239 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5240 				if (instance->msix_vectors > 16)
5241 					instance->msix_combined = true;
5242 
5243 				if (rdpq_enable)
5244 					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5245 								1 : 0;
5246 				fw_msix_count = instance->msix_vectors;
				/* Save reply post host index addresses 1-15 to local
				 * memory. Index 0 is already saved from register offset
				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
				 */
5251 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5252 					instance->reply_post_host_index_addr[loop] =
5253 						(u32 __iomem *)
5254 						((u8 __iomem *)instance->reg_set +
5255 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5256 						+ (loop * 0x10));
5257 				}
5258 			}
5259 			if (msix_vectors)
5260 				instance->msix_vectors = min(msix_vectors,
5261 					instance->msix_vectors);
5262 		} else /* MFI adapters */
5263 			instance->msix_vectors = 1;
5264 		/* Don't bother allocating more MSI-X vectors than cpus */
5265 		instance->msix_vectors = min(instance->msix_vectors,
5266 					     (unsigned int)num_online_cpus());
5267 		if (smp_affinity_enable)
5268 			irq_flags |= PCI_IRQ_AFFINITY;
5269 		i = pci_alloc_irq_vectors(instance->pdev, 1,
5270 					  instance->msix_vectors, irq_flags);
5271 		if (i > 0)
5272 			instance->msix_vectors = i;
5273 		else
5274 			instance->msix_vectors = 0;
5275 	}
5276 	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT based adapters.
5279 	 */
5280 	if (instance->msix_combined) {
5281 		instance->reply_post_host_index_addr[0] =
5282 				(u32 *)((u8 *)instance->reg_set +
5283 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5284 	} else {
5285 		instance->reply_post_host_index_addr[0] =
5286 			(u32 *)((u8 *)instance->reg_set +
5287 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5288 	}
5289 
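	/*
	 * Fall back to a single legacy interrupt if MSI-X is disabled or
	 * could not be allocated.
	 */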
5290 	if (!instance->msix_vectors) {
5291 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5292 		if (i < 0)
5293 			goto fail_setup_irqs;
5294 	}
5295 
5296 	dev_info(&instance->pdev->dev,
		"firmware supports msix\t: (%d)\n", fw_msix_count);
5298 	dev_info(&instance->pdev->dev,
5299 		"current msix/online cpus\t: (%d/%d)\n",
5300 		instance->msix_vectors, (unsigned int)num_online_cpus());
5301 	dev_info(&instance->pdev->dev,
5302 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5303 
5304 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5305 		(unsigned long)instance);
5306 
5307 	instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5308 				GFP_KERNEL);
5309 	if (instance->ctrl_info == NULL)
5310 		goto fail_init_adapter;
5311 
5312 	/*
	 * Below are the default values for legacy firmware
	 * (non-fusion based controllers).
5315 	 */
5316 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5317 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5318 	/* Get operational params, sge flags, send init cmd to controller */
5319 	if (instance->instancet->init_adapter(instance))
5320 		goto fail_init_adapter;
5321 
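	/*
	 * Ventura series: firmware advertises the NVMe page size shift via
	 * outbound scratch pad 4.
	 */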
5322 	if (instance->is_ventura) {
5323 		scratch_pad_4 =
5324 			readl(&instance->reg_set->outbound_scratch_pad_4);
5325 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5326 			MR_DEFAULT_NVME_PAGE_SHIFT)
5327 			instance->nvme_page_size =
5328 				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5329 
5330 		dev_info(&instance->pdev->dev,
5331 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5332 	}
5333 
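	/*
	 * Register interrupt handlers: MSI-X if vectors were allocated,
	 * legacy IO-APIC otherwise.
	 */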
5334 	if (instance->msix_vectors ?
5335 		megasas_setup_irqs_msix(instance, 1) :
5336 		megasas_setup_irqs_ioapic(instance))
5337 		goto fail_init_adapter;
5338 
5339 	instance->instancet->enable_intr(instance);
5340 
5341 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
5342 
5343 	megasas_setup_jbod_map(instance);
5344 
	/*
	 * For passthrough, the following function fetches the PD list.
	 */
5348 	memset(instance->pd_list, 0,
5349 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5350 	if (megasas_get_pd_list(instance) < 0) {
5351 		dev_err(&instance->pdev->dev, "failed to get PD list\n");
5352 		goto fail_get_ld_pd_list;
5353 	}
5354 
5355 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5356 
5357 	/* stream detection initialization */
5358 	if (instance->is_ventura && fusion) {
5359 		fusion->stream_detect_by_ld =
5360 			kzalloc(sizeof(struct LD_STREAM_DETECT *)
5361 			* MAX_LOGICAL_DRIVES_EXT,
5362 			GFP_KERNEL);
5363 		if (!fusion->stream_detect_by_ld) {
5364 			dev_err(&instance->pdev->dev,
5365 				"unable to allocate stream detection for pool of LDs\n");
5366 			goto fail_get_ld_pd_list;
5367 		}
5368 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5369 			fusion->stream_detect_by_ld[i] =
5370 				kmalloc(sizeof(struct LD_STREAM_DETECT),
5371 				GFP_KERNEL);
5372 			if (!fusion->stream_detect_by_ld[i]) {
5373 				dev_err(&instance->pdev->dev,
5374 					"unable to allocate stream detect by LD\n ");
5375 				for (j = 0; j < i; ++j)
5376 					kfree(fusion->stream_detect_by_ld[j]);
5377 				kfree(fusion->stream_detect_by_ld);
5378 				fusion->stream_detect_by_ld = NULL;
5379 				goto fail_get_ld_pd_list;
5380 			}
5381 			fusion->stream_detect_by_ld[i]->mru_bit_map
5382 				= MR_STREAM_BITMAP;
5383 		}
5384 	}
5385 
5386 	if (megasas_ld_list_query(instance,
5387 				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5388 		goto fail_get_ld_pd_list;
5389 
5390 	/*
	 * Compute the max allowed sectors per IO: the controller info has two
	 * limits on max sectors. The driver should use the minimum of the two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so it always ended up as zero.
5398 	 */
5399 	tmp_sectors = 0;
5400 	ctrl_info = instance->ctrl_info;
5401 
5402 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5403 		le16_to_cpu(ctrl_info->max_strips_per_io);
5404 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5405 
5406 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5407 
5408 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5409 	instance->passive = ctrl_info->cluster.passive;
5410 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5411 	instance->UnevenSpanSupport =
5412 		ctrl_info->adapterOperations2.supportUnevenSpans;
5413 	if (instance->UnevenSpanSupport) {
5414 		struct fusion_context *fusion = instance->ctrl_context;
5415 		if (MR_ValidateMapInfo(instance))
5416 			fusion->fast_path_io = 1;
5417 		else
5418 			fusion->fast_path_io = 0;
5419 
5420 	}
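	/*
	 * For SR-IOV capable controllers, record the requestor ID; Plasma
	 * FW 1.11 reports it at a different offset in the controller info.
	 */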
5421 	if (ctrl_info->host_interface.SRIOV) {
5422 		instance->requestorId = ctrl_info->iov.requestorId;
5423 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5424 			if (!ctrl_info->adapterOperations2.activePassive)
5425 			    instance->PlasmaFW111 = 1;
5426 
5427 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5428 			    instance->PlasmaFW111 ? "1.11" : "new");
5429 
5430 			if (instance->PlasmaFW111) {
5431 			    iovPtr = (struct IOV_111 *)
5432 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
5433 			    instance->requestorId = iovPtr->requestorId;
5434 			}
5435 		}
5436 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5437 			instance->requestorId);
5438 	}
5439 
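	/*
	 * Driver-side crash dump support needs both firmware support and a
	 * previously allocated DMA buffer; otherwise release the buffer.
	 */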
5440 	instance->crash_dump_fw_support =
5441 		ctrl_info->adapterOperations3.supportCrashDump;
5442 	instance->crash_dump_drv_support =
5443 		(instance->crash_dump_fw_support &&
5444 		instance->crash_dump_buf);
5445 	if (instance->crash_dump_drv_support)
5446 		megasas_set_crash_dump_params(instance,
5447 			MR_CRASH_BUF_TURN_OFF);
5448 
5449 	else {
5450 		if (instance->crash_dump_buf)
5451 			pci_free_consistent(instance->pdev,
5452 				CRASH_DMA_BUF_SIZE,
5453 				instance->crash_dump_buf,
5454 				instance->crash_dump_h);
5455 		instance->crash_dump_buf = NULL;
5456 	}
5457 
5458 
5459 	dev_info(&instance->pdev->dev,
5460 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5461 		le16_to_cpu(ctrl_info->pci.vendor_id),
5462 		le16_to_cpu(ctrl_info->pci.device_id),
5463 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5464 		le16_to_cpu(ctrl_info->pci.sub_device_id));
5465 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
5466 		instance->UnevenSpanSupport ? "yes" : "no");
5467 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
5468 		instance->crash_dump_drv_support ? "yes" : "no");
5469 	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
5470 		instance->use_seqnum_jbod_fp ? "yes" : "no");
5471 
5472 
5473 	instance->max_sectors_per_req = instance->max_num_sge *
5474 						SGE_BUFFER_SIZE / 512;
5475 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5476 		instance->max_sectors_per_req = tmp_sectors;
5477 
5478 	/* Check for valid throttlequeuedepth module parameter */
5479 	if (throttlequeuedepth &&
5480 			throttlequeuedepth <= instance->max_scsi_cmds)
5481 		instance->throttlequeuedepth = throttlequeuedepth;
5482 	else
5483 		instance->throttlequeuedepth =
5484 				MEGASAS_THROTTLE_QUEUE_DEPTH;
5485 
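	/*
	 * Clamp the resetwaittime and scmd_timeout module parameters to
	 * sane ranges.
	 */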
5486 	if ((resetwaittime < 1) ||
5487 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5488 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
5489 
5490 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5491 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5492 
5493 	/* Launch SR-IOV heartbeat timer */
5494 	if (instance->requestorId) {
5495 		if (!megasas_sriov_start_heartbeat(instance, 1))
5496 			megasas_start_timer(instance,
5497 					    &instance->sriov_heartbeat_timer,
5498 					    megasas_sriov_heartbeat_handler,
5499 					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5500 		else
5501 			instance->skip_heartbeat_timer_del = 1;
5502 	}
5503 
5504 	return 0;
5505 
5506 fail_get_ld_pd_list:
5507 	instance->instancet->disable_intr(instance);
5508 fail_init_adapter:
5509 	megasas_destroy_irqs(instance);
5510 fail_setup_irqs:
5511 	if (instance->msix_vectors)
5512 		pci_free_irq_vectors(instance->pdev);
5513 	instance->msix_vectors = 0;
5514 fail_ready_state:
5515 	kfree(instance->ctrl_info);
5516 	instance->ctrl_info = NULL;
5517 	iounmap(instance->reg_set);
5518 
5519 fail_ioremap:
5520 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5521 
5522 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5523 		__func__, __LINE__);
5524 	return -EINVAL;
5525 }
5526 
5527 /**
5528  * megasas_release_mfi -	Reverses the FW initialization
5529  * @instance:			Adapter soft state
5530  */
5531 static void megasas_release_mfi(struct megasas_instance *instance)
5532 {
	u32 reply_q_sz = sizeof(u32) * (instance->max_mfi_cmds + 1);
5534 
5535 	if (instance->reply_queue)
5536 		pci_free_consistent(instance->pdev, reply_q_sz,
5537 			    instance->reply_queue, instance->reply_queue_h);
5538 
5539 	megasas_free_cmds(instance);
5540 
5541 	iounmap(instance->reg_set);
5542 
5543 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5544 }
5545 
5546 /**
5547  * megasas_get_seq_num -	Gets latest event sequence numbers
5548  * @instance:			Adapter soft state
5549  * @eli:			FW event log sequence numbers information
5550  *
 * FW maintains a log of all events in a non-volatile area. Upper layers usually
 * find out the latest sequence number of the events, the sequence number at
 * boot, etc. They "read" all events below the latest sequence number by issuing
 * a direct FW command (DCMD). For future events (beyond the latest sequence
 * number), they subscribe to AEN (asynchronous event notification) and wait
 * for the events to happen.
5557  */
5558 static int
5559 megasas_get_seq_num(struct megasas_instance *instance,
5560 		    struct megasas_evt_log_info *eli)
5561 {
5562 	struct megasas_cmd *cmd;
5563 	struct megasas_dcmd_frame *dcmd;
5564 	struct megasas_evt_log_info *el_info;
5565 	dma_addr_t el_info_h = 0;
5566 
5567 	cmd = megasas_get_cmd(instance);
5568 
5569 	if (!cmd) {
5570 		return -ENOMEM;
5571 	}
5572 
5573 	dcmd = &cmd->frame->dcmd;
5574 	el_info = pci_alloc_consistent(instance->pdev,
5575 				       sizeof(struct megasas_evt_log_info),
5576 				       &el_info_h);
5577 
5578 	if (!el_info) {
5579 		megasas_return_cmd(instance, cmd);
5580 		return -ENOMEM;
5581 	}
5582 
5583 	memset(el_info, 0, sizeof(*el_info));
5584 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5585 
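	/*
	 * Prepare a read DCMD that fetches the event log info structure
	 * from FW.
	 */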
5586 	dcmd->cmd = MFI_CMD_DCMD;
5587 	dcmd->cmd_status = 0x0;
5588 	dcmd->sge_count = 1;
5589 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5590 	dcmd->timeout = 0;
5591 	dcmd->pad_0 = 0;
5592 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5593 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5594 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5595 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5596 
5597 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5598 		DCMD_SUCCESS) {
5599 		/*
		 * Copy the data back into the caller's buffer
5601 		 */
5602 		eli->newest_seq_num = el_info->newest_seq_num;
5603 		eli->oldest_seq_num = el_info->oldest_seq_num;
5604 		eli->clear_seq_num = el_info->clear_seq_num;
5605 		eli->shutdown_seq_num = el_info->shutdown_seq_num;
5606 		eli->boot_seq_num = el_info->boot_seq_num;
5607 	} else
5608 		dev_err(&instance->pdev->dev, "DCMD failed "
5609 			"from %s\n", __func__);
5610 
5611 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5612 			    el_info, el_info_h);
5613 
5614 	megasas_return_cmd(instance, cmd);
5615 
5616 	return 0;
5617 }
5618 
5619 /**
5620  * megasas_register_aen -	Registers for asynchronous event notification
5621  * @instance:			Adapter soft state
5622  * @seq_num:			The starting sequence number
5623  * @class_locale:		Class of the event
5624  *
5625  * This function subscribes for AEN for events beyond the @seq_num. It requests
5626  * to be notified if and only if the event is of type @class_locale
5627  */
5628 static int
5629 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5630 		     u32 class_locale_word)
5631 {
5632 	int ret_val;
5633 	struct megasas_cmd *cmd;
5634 	struct megasas_dcmd_frame *dcmd;
5635 	union megasas_evt_class_locale curr_aen;
5636 	union megasas_evt_class_locale prev_aen;
5637 
	/*
	 * If there is an AEN pending already (aen_cmd), check whether the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, we don't have to do
	 * anything. In other words, whichever events the current AEN
	 * request is subscribing to have already been subscribed to.
	 *
	 * If the old command is _not_ inclusive, we have to abort it,
	 * form a class_locale that is a superset of both the old and the
	 * current one, and re-issue it to the FW.
	 */
5650 
5651 	curr_aen.word = class_locale_word;
5652 
5653 	if (instance->aen_cmd) {
5654 
5655 		prev_aen.word =
5656 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5657 
5658 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
5659 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
5660 			dev_info(&instance->pdev->dev,
				 "%s %d out of range class %d sent by application\n",
5662 				 __func__, __LINE__, curr_aen.members.class);
5663 			return 0;
5664 		}
5665 
5666 		/*
5667 		 * A class whose enum value is smaller is inclusive of all
5668 		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
5670 		 * classes need not be sent to FW. They are automatically
5671 		 * included.
5672 		 *
		 * Locale numbers don't have such a hierarchy. They are
		 * bitmap values.
5675 		 */
5676 		if ((prev_aen.members.class <= curr_aen.members.class) &&
5677 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
5678 		      curr_aen.members.locale)) {
5679 			/*
5680 			 * Previously issued event registration includes
5681 			 * current request. Nothing to do.
5682 			 */
5683 			return 0;
5684 		} else {
5685 			curr_aen.members.locale |= prev_aen.members.locale;
5686 
5687 			if (prev_aen.members.class < curr_aen.members.class)
5688 				curr_aen.members.class = prev_aen.members.class;
5689 
5690 			instance->aen_cmd->abort_aen = 1;
5691 			ret_val = megasas_issue_blocked_abort_cmd(instance,
5692 								  instance->
5693 								  aen_cmd, 30);
5694 
5695 			if (ret_val) {
5696 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5697 				       "previous AEN command\n");
5698 				return ret_val;
5699 			}
5700 		}
5701 	}
5702 
5703 	cmd = megasas_get_cmd(instance);
5704 
5705 	if (!cmd)
5706 		return -ENOMEM;
5707 
5708 	dcmd = &cmd->frame->dcmd;
5709 
5710 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5711 
5712 	/*
5713 	 * Prepare DCMD for aen registration
5714 	 */
5715 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5716 
5717 	dcmd->cmd = MFI_CMD_DCMD;
5718 	dcmd->cmd_status = 0x0;
5719 	dcmd->sge_count = 1;
5720 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5721 	dcmd->timeout = 0;
5722 	dcmd->pad_0 = 0;
5723 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5724 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5725 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5726 	instance->last_seq_num = seq_num;
5727 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5728 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5729 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5730 
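	/*
	 * If an AEN command is already registered at this point, release
	 * this frame and rely on the pending registration.
	 */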
5731 	if (instance->aen_cmd != NULL) {
5732 		megasas_return_cmd(instance, cmd);
5733 		return 0;
5734 	}
5735 
5736 	/*
5737 	 * Store reference to the cmd used to register for AEN. When an
5738 	 * application wants us to register for AEN, we have to abort this
5739 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
5740 	 */
5741 	instance->aen_cmd = cmd;
5742 
5743 	/*
5744 	 * Issue the aen registration frame
5745 	 */
5746 	instance->instancet->issue_dcmd(instance, cmd);
5747 
5748 	return 0;
5749 }
5750 
5751 /* megasas_get_target_prop - Send DCMD with below details to firmware.
5752  *
 * This DCMD fetches a few properties of the LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
5757  *
5758  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
5759  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
5760  *                       0 = system PD, 1 = LD.
5761  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
5762  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
5763  *
5764  * @instance:		Adapter soft state
5765  * @sdev:		OS provided scsi device
5766  *
 * Returns 0 on success, non-zero on failure.
5768  */
5769 static int
5770 megasas_get_target_prop(struct megasas_instance *instance,
5771 			struct scsi_device *sdev)
5772 {
5773 	int ret;
5774 	struct megasas_cmd *cmd;
5775 	struct megasas_dcmd_frame *dcmd;
5776 	u16 targetId = (sdev->channel % 2) + sdev->id;
5777 
5778 	cmd = megasas_get_cmd(instance);
5779 
5780 	if (!cmd) {
5781 		dev_err(&instance->pdev->dev,
5782 			"Failed to get cmd %s\n", __func__);
5783 		return -ENOMEM;
5784 	}
5785 
5786 	dcmd = &cmd->frame->dcmd;
5787 
5788 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5789 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5790 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5791 
5792 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
5793 	dcmd->cmd = MFI_CMD_DCMD;
5794 	dcmd->cmd_status = 0xFF;
5795 	dcmd->sge_count = 1;
5796 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5797 	dcmd->timeout = 0;
5798 	dcmd->pad_0 = 0;
5799 	dcmd->data_xfer_len =
5800 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5801 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5802 	dcmd->sgl.sge32[0].phys_addr =
5803 		cpu_to_le32(instance->tgt_prop_h);
5804 	dcmd->sgl.sge32[0].length =
5805 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5806 
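	/*
	 * On fusion adapters with interrupts enabled, wait for completion;
	 * otherwise issue the DCMD in polled mode.
	 */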
5807 	if (instance->ctrl_context && !instance->mask_interrupts)
5808 		ret = megasas_issue_blocked_cmd(instance,
5809 						cmd, MFI_IO_TIMEOUT_SECS);
5810 	else
5811 		ret = megasas_issue_polled(instance, cmd);
5812 
5813 	switch (ret) {
5814 	case DCMD_TIMEOUT:
5815 		switch (dcmd_timeout_ocr_possible(instance)) {
5816 		case INITIATE_OCR:
5817 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5818 			megasas_reset_fusion(instance->host,
5819 					     MFI_IO_TIMEOUT_OCR);
5820 			break;
5821 		case KILL_ADAPTER:
5822 			megaraid_sas_kill_hba(instance);
5823 			break;
5824 		case IGNORE_TIMEOUT:
5825 			dev_info(&instance->pdev->dev,
5826 				 "Ignore DCMD timeout: %s %d\n",
5827 				 __func__, __LINE__);
5828 			break;
5829 		}
5830 		break;
5831 
5832 	default:
5833 		megasas_return_cmd(instance, cmd);
5834 	}
5835 	if (ret != DCMD_SUCCESS)
5836 		dev_err(&instance->pdev->dev,
5837 			"return from %s %d return value %d\n",
5838 			__func__, __LINE__, ret);
5839 
5840 	return ret;
5841 }
5842 
5843 /**
5844  * megasas_start_aen -	Subscribes to AEN during driver load time
5845  * @instance:		Adapter soft state
5846  */
5847 static int megasas_start_aen(struct megasas_instance *instance)
5848 {
5849 	struct megasas_evt_log_info eli;
5850 	union megasas_evt_class_locale class_locale;
5851 
5852 	/*
5853 	 * Get the latest sequence number from FW
5854 	 */
5855 	memset(&eli, 0, sizeof(eli));
5856 
5857 	if (megasas_get_seq_num(instance, &eli))
5858 		return -1;
5859 
5860 	/*
5861 	 * Register AEN with FW for latest sequence number plus 1
5862 	 */
5863 	class_locale.members.reserved = 0;
5864 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
5865 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
5866 
5867 	return megasas_register_aen(instance,
5868 			le32_to_cpu(eli.newest_seq_num) + 1,
5869 			class_locale.word);
5870 }
5871 
5872 /**
5873  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
5874  * @instance:		Adapter soft state
5875  */
5876 static int megasas_io_attach(struct megasas_instance *instance)
5877 {
5878 	struct Scsi_Host *host = instance->host;
5879 
5880 	/*
5881 	 * Export parameters required by SCSI mid-layer
5882 	 */
5883 	host->unique_id = instance->unique_id;
5884 	host->can_queue = instance->max_scsi_cmds;
5885 	host->this_id = instance->init_id;
5886 	host->sg_tablesize = instance->max_num_sge;
5887 
5888 	if (instance->fw_support_ieee)
5889 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5890 
5891 	/*
5892 	 * Check if the module parameter value for max_sectors can be used
5893 	 */
5894 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
5895 		instance->max_sectors_per_req = max_sectors;
5896 	else {
5897 		if (max_sectors) {
5898 			if (((instance->pdev->device ==
5899 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5900 				(instance->pdev->device ==
5901 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5902 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
5903 				instance->max_sectors_per_req = max_sectors;
5904 			} else {
				dev_info(&instance->pdev->dev,
					"max_sectors should be > 0 and <= %d (or < 1MB for GEN2 controller)\n",
					instance->max_sectors_per_req);
5908 			}
5909 		}
5910 	}
5911 
5912 	host->max_sectors = instance->max_sectors_per_req;
5913 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5914 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5915 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5916 	host->max_lun = MEGASAS_MAX_LUN;
5917 	host->max_cmd_len = 16;
5918 
5919 	/*
5920 	 * Notify the mid-layer about the new controller
5921 	 */
5922 	if (scsi_add_host(host, &instance->pdev->dev)) {
5923 		dev_err(&instance->pdev->dev,
5924 			"Failed to add host from %s %d\n",
5925 			__func__, __LINE__);
5926 		return -ENODEV;
5927 	}
5928 
5929 	return 0;
5930 }
5931 
5932 static int
5933 megasas_set_dma_mask(struct pci_dev *pdev)
5934 {
5935 	/*
5936 	 * All our controllers are capable of performing 64-bit DMA
5937 	 */
5938 	if (IS_DMA64) {
5939 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5940 
5941 			if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5942 				goto fail_set_dma_mask;
5943 		}
5944 	} else {
5945 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5946 			goto fail_set_dma_mask;
5947 	}
5948 	/*
5949 	 * Ensure that all data structures are allocated in 32-bit
5950 	 * memory.
5951 	 */
5952 	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		/* Try 32-bit DMA mask and 32-bit consistent DMA mask */
		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
			&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			dev_info(&pdev->dev, "set 32bit DMA mask "
				"and 32 bit consistent mask\n");
5958 		else
5959 			goto fail_set_dma_mask;
5960 	}
5961 
5962 	return 0;
5963 
5964 fail_set_dma_mask:
5965 	return 1;
5966 }
5967 
5968 /**
5969  * megasas_probe_one -	PCI hotplug entry point
5970  * @pdev:		PCI device structure
5971  * @id:			PCI ids of supported hotplugged adapter
5972  */
5973 static int megasas_probe_one(struct pci_dev *pdev,
5974 			     const struct pci_device_id *id)
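	/*
	 * Allocate a DMA-able sense buffer and store its address at the
	 * sense offset inside the frame.
	 */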
5975 {
5976 	int rval, pos;
5977 	struct Scsi_Host *host;
5978 	struct megasas_instance *instance;
5979 	u16 control = 0;
5980 	struct fusion_context *fusion = NULL;
5981 
5982 	/* Reset MSI-X in the kdump kernel */
5983 	if (reset_devices) {
5984 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
5985 		if (pos) {
5986 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
5987 					     &control);
5988 			if (control & PCI_MSIX_FLAGS_ENABLE) {
5989 				dev_info(&pdev->dev, "resetting MSI-X\n");
5990 				pci_write_config_word(pdev,
5991 						      pos + PCI_MSIX_FLAGS,
5992 						      control &
5993 						      ~PCI_MSIX_FLAGS_ENABLE);
5994 			}
5995 		}
5996 	}
5997 
5998 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
6000 	 */
6001 	rval = pci_enable_device_mem(pdev);
6002 
6003 	if (rval) {
6004 		return rval;
6005 	}
6006 
6007 	pci_set_master(pdev);
6008 
6009 	if (megasas_set_dma_mask(pdev))
6010 		goto fail_set_dma_mask;
6011 
6012 	host = scsi_host_alloc(&megasas_template,
6013 			       sizeof(struct megasas_instance));
6014 
6015 	if (!host) {
6016 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6017 		goto fail_alloc_instance;
6018 	}
6019 
6020 	instance = (struct megasas_instance *)host->hostdata;
6021 	memset(instance, 0, sizeof(*instance));
6022 	atomic_set(&instance->fw_reset_no_pci_access, 0);
6023 	instance->pdev = pdev;
6024 
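	/*
	 * Select the adapter family: fusion-class controllers need a fusion
	 * context, while legacy MFI controllers use producer/consumer queues.
	 */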
6025 	switch (instance->pdev->device) {
6026 	case PCI_DEVICE_ID_LSI_VENTURA:
6027 	case PCI_DEVICE_ID_LSI_HARPOON:
6028 	case PCI_DEVICE_ID_LSI_TOMCAT:
6029 	case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6030 	case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6031 	     instance->is_ventura = true;
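		/* fall through - Ventura controllers also use the fusion setup path */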
6032 	case PCI_DEVICE_ID_LSI_FUSION:
6033 	case PCI_DEVICE_ID_LSI_PLASMA:
6034 	case PCI_DEVICE_ID_LSI_INVADER:
6035 	case PCI_DEVICE_ID_LSI_FURY:
6036 	case PCI_DEVICE_ID_LSI_INTRUDER:
6037 	case PCI_DEVICE_ID_LSI_INTRUDER_24:
6038 	case PCI_DEVICE_ID_LSI_CUTLASS_52:
6039 	case PCI_DEVICE_ID_LSI_CUTLASS_53:
6040 	{
6041 		if (megasas_alloc_fusion_context(instance)) {
6042 			megasas_free_fusion_context(instance);
6043 			goto fail_alloc_dma_buf;
6044 		}
6045 		fusion = instance->ctrl_context;
6046 
6047 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
6048 			(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
6049 			fusion->adapter_type = THUNDERBOLT_SERIES;
6050 		else if (instance->is_ventura)
6051 			fusion->adapter_type = VENTURA_SERIES;
6052 		else
6053 			fusion->adapter_type = INVADER_SERIES;
6054 	}
6055 	break;
6056 	default: /* For all other supported controllers */
6057 
6058 		instance->producer =
6059 			pci_alloc_consistent(pdev, sizeof(u32),
6060 					     &instance->producer_h);
6061 		instance->consumer =
6062 			pci_alloc_consistent(pdev, sizeof(u32),
6063 					     &instance->consumer_h);
6064 
6065 		if (!instance->producer || !instance->consumer) {
6066 			dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
6067 			       "memory for producer, consumer\n");
6068 			goto fail_alloc_dma_buf;
6069 		}
6070 
6071 		*instance->producer = 0;
6072 		*instance->consumer = 0;
6073 		break;
6074 	}
6075 
	/* Crash dump feature related initialization */
6077 	instance->drv_buf_index = 0;
6078 	instance->drv_buf_alloc = 0;
6079 	instance->crash_dump_fw_support = 0;
6080 	instance->crash_dump_app_support = 0;
6081 	instance->fw_crash_state = UNAVAILABLE;
6082 	spin_lock_init(&instance->crashdump_lock);
6083 	instance->crash_dump_buf = NULL;
6084 
6085 	megasas_poll_wait_aen = 0;
6086 	instance->flag_ieee = 0;
6087 	instance->ev = NULL;
6088 	instance->issuepend_done = 1;
6089 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6090 	instance->is_imr = 0;
6091 
6092 	instance->evt_detail = pci_alloc_consistent(pdev,
6093 						    sizeof(struct
6094 							   megasas_evt_detail),
6095 						    &instance->evt_detail_h);
6096 
6097 	if (!instance->evt_detail) {
6098 		dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
6099 		       "event detail structure\n");
6100 		goto fail_alloc_dma_buf;
6101 	}
6102 
6103 	if (!reset_devices) {
6104 		instance->system_info_buf = pci_zalloc_consistent(pdev,
6105 					sizeof(struct MR_DRV_SYSTEM_INFO),
6106 					&instance->system_info_h);
6107 		if (!instance->system_info_buf)
6108 			dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
6109 
6110 		instance->pd_info = pci_alloc_consistent(pdev,
6111 			sizeof(struct MR_PD_INFO), &instance->pd_info_h);
6112 
6113 		if (!instance->pd_info)
6114 			dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
6115 
6116 		instance->tgt_prop = pci_alloc_consistent(pdev,
6117 			sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
6118 
6119 		if (!instance->tgt_prop)
6120 			dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
6121 
6122 		instance->crash_dump_buf = pci_alloc_consistent(pdev,
6123 						CRASH_DMA_BUF_SIZE,
6124 						&instance->crash_dump_h);
6125 		if (!instance->crash_dump_buf)
6126 			dev_err(&pdev->dev, "Can't allocate Firmware "
6127 				"crash dump DMA buffer\n");
6128 	}
6129 
6130 	/*
6131 	 * Initialize locks and queues
6132 	 */
6133 	INIT_LIST_HEAD(&instance->cmd_pool);
6134 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6135 
	atomic_set(&instance->fw_outstanding, 0);
6137 
6138 	init_waitqueue_head(&instance->int_cmd_wait_q);
6139 	init_waitqueue_head(&instance->abort_cmd_wait_q);
6140 
6141 	spin_lock_init(&instance->mfi_pool_lock);
6142 	spin_lock_init(&instance->hba_lock);
6143 	spin_lock_init(&instance->stream_lock);
6144 	spin_lock_init(&instance->completion_lock);
6145 
6146 	mutex_init(&instance->reset_mutex);
6147 	mutex_init(&instance->hba_mutex);
6148 
6149 	/*
6150 	 * Initialize PCI related and misc parameters
6151 	 */
6152 	instance->host = host;
6153 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6154 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6155 	instance->ctrl_info = NULL;
6156 
6157 
6158 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6159 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6160 		instance->flag_ieee = 1;
6161 
6162 	megasas_dbg_lvl = 0;
6163 	instance->flag = 0;
6164 	instance->unload = 1;
6165 	instance->last_time = 0;
6166 	instance->disableOnlineCtrlReset = 1;
6167 	instance->UnevenSpanSupport = 0;
6168 
6169 	if (instance->ctrl_context) {
6170 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6171 		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6172 	} else
6173 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6174 
6175 	/*
6176 	 * Initialize MFI Firmware
6177 	 */
6178 	if (megasas_init_fw(instance))
6179 		goto fail_init_mfi;
6180 
6181 	if (instance->requestorId) {
6182 		if (instance->PlasmaFW111) {
6183 			instance->vf_affiliation_111 =
6184 				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6185 						     &instance->vf_affiliation_111_h);
6186 			if (!instance->vf_affiliation_111)
6187 				dev_warn(&pdev->dev, "Can't allocate "
6188 				       "memory for VF affiliation buffer\n");
6189 		} else {
6190 			instance->vf_affiliation =
6191 				pci_alloc_consistent(pdev,
6192 						     (MAX_LOGICAL_DRIVES + 1) *
6193 						     sizeof(struct MR_LD_VF_AFFILIATION),
6194 						     &instance->vf_affiliation_h);
6195 			if (!instance->vf_affiliation)
6196 				dev_warn(&pdev->dev, "Can't allocate "
6197 				       "memory for VF affiliation buffer\n");
6198 		}
6199 	}
6200 
6201 	/*
6202 	 * Store instance in PCI softstate
6203 	 */
6204 	pci_set_drvdata(pdev, instance);
6205 
6206 	/*
6207 	 * Add this controller to megasas_mgmt_info structure so that it
6208 	 * can be exported to management applications
6209 	 */
6210 	megasas_mgmt_info.count++;
6211 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6212 	megasas_mgmt_info.max_index++;
6213 
6214 	/*
6215 	 * Register with SCSI mid-layer
6216 	 */
6217 	if (megasas_io_attach(instance))
6218 		goto fail_io_attach;
6219 
6220 	instance->unload = 0;
6221 	/*
6222 	 * Trigger SCSI to scan our drives
6223 	 */
6224 	scsi_scan_host(host);
6225 
6226 	/*
6227 	 * Initiate AEN (Asynchronous Event Notification)
6228 	 */
6229 	if (megasas_start_aen(instance)) {
6230 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6231 		goto fail_start_aen;
6232 	}
6233 
6234 	/* Get current SR-IOV LD/VF affiliation */
6235 	if (instance->requestorId)
6236 		megasas_get_ld_vf_affiliation(instance, 1);
6237 
6238 	return 0;
6239 
6240 fail_start_aen:
6241 fail_io_attach:
6242 	megasas_mgmt_info.count--;
6243 	megasas_mgmt_info.max_index--;
6244 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6245 
6246 	instance->instancet->disable_intr(instance);
6247 	megasas_destroy_irqs(instance);
6248 
6249 	if (instance->ctrl_context)
6250 		megasas_release_fusion(instance);
6251 	else
6252 		megasas_release_mfi(instance);
6253 	if (instance->msix_vectors)
6254 		pci_free_irq_vectors(instance->pdev);
6255 fail_init_mfi:
6256 fail_alloc_dma_buf:
6257 	if (instance->evt_detail)
6258 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6259 				    instance->evt_detail,
6260 				    instance->evt_detail_h);
6261 
6262 	if (instance->pd_info)
6263 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6264 					instance->pd_info,
6265 					instance->pd_info_h);
6266 	if (instance->tgt_prop)
6267 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6268 					instance->tgt_prop,
6269 					instance->tgt_prop_h);
6270 	if (instance->producer)
6271 		pci_free_consistent(pdev, sizeof(u32), instance->producer,
6272 				    instance->producer_h);
6273 	if (instance->consumer)
6274 		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6275 				    instance->consumer_h);
6276 	scsi_host_put(host);
6277 
6278 fail_alloc_instance:
6279 fail_set_dma_mask:
6280 	pci_disable_device(pdev);
6281 
6282 	return -ENODEV;
6283 }
6284 
6285 /**
6286  * megasas_flush_cache -	Requests FW to flush all its caches
6287  * @instance:			Adapter soft state
6288  */
6289 static void megasas_flush_cache(struct megasas_instance *instance)
6290 {
6291 	struct megasas_cmd *cmd;
6292 	struct megasas_dcmd_frame *dcmd;
6293 
6294 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6295 		return;
6296 
6297 	cmd = megasas_get_cmd(instance);
6298 
6299 	if (!cmd)
6300 		return;
6301 
6302 	dcmd = &cmd->frame->dcmd;
6303 
6304 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6305 
6306 	dcmd->cmd = MFI_CMD_DCMD;
6307 	dcmd->cmd_status = 0x0;
6308 	dcmd->sge_count = 0;
6309 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6310 	dcmd->timeout = 0;
6311 	dcmd->pad_0 = 0;
6312 	dcmd->data_xfer_len = 0;
6313 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6314 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6315 
6316 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6317 			!= DCMD_SUCCESS) {
6318 		dev_err(&instance->pdev->dev,
6319 			"return from %s %d\n", __func__, __LINE__);
6320 		return;
6321 	}
6322 
6323 	megasas_return_cmd(instance, cmd);
6324 }
6325 
6326 /**
6327  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
6328  * @instance:				Adapter soft state
6329  * @opcode:				Shutdown/Hibernate
6330  */
6331 static void megasas_shutdown_controller(struct megasas_instance *instance,
6332 					u32 opcode)
6333 {
6334 	struct megasas_cmd *cmd;
6335 	struct megasas_dcmd_frame *dcmd;
6336 
6337 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6338 		return;
6339 
6340 	cmd = megasas_get_cmd(instance);
6341 
6342 	if (!cmd)
6343 		return;
6344 
6345 	if (instance->aen_cmd)
6346 		megasas_issue_blocked_abort_cmd(instance,
6347 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6348 	if (instance->map_update_cmd)
6349 		megasas_issue_blocked_abort_cmd(instance,
6350 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6351 	if (instance->jbod_seq_cmd)
6352 		megasas_issue_blocked_abort_cmd(instance,
6353 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6354 
6355 	dcmd = &cmd->frame->dcmd;
6356 
6357 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6358 
6359 	dcmd->cmd = MFI_CMD_DCMD;
6360 	dcmd->cmd_status = 0x0;
6361 	dcmd->sge_count = 0;
6362 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6363 	dcmd->timeout = 0;
6364 	dcmd->pad_0 = 0;
6365 	dcmd->data_xfer_len = 0;
6366 	dcmd->opcode = cpu_to_le32(opcode);
6367 
6368 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6369 			!= DCMD_SUCCESS) {
6370 		dev_err(&instance->pdev->dev,
6371 			"return from %s %d\n", __func__, __LINE__);
6372 		return;
6373 	}
6374 
6375 	megasas_return_cmd(instance, cmd);
6376 }
6377 
6378 #ifdef CONFIG_PM
6379 /**
6380  * megasas_suspend -	driver suspend entry point
6381  * @pdev:		PCI device structure
6382  * @state:		PCI power state to suspend routine
6383  */
6384 static int
6385 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6386 {
6387 	struct Scsi_Host *host;
6388 	struct megasas_instance *instance;
6389 
6390 	instance = pci_get_drvdata(pdev);
6391 	host = instance->host;
6392 	instance->unload = 1;
6393 
6394 	/* Shutdown SR-IOV heartbeat timer */
6395 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6396 		del_timer_sync(&instance->sriov_heartbeat_timer);
6397 
6398 	megasas_flush_cache(instance);
6399 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6400 
	/* cancel the delayed work if it is still queued */
6402 	if (instance->ev != NULL) {
6403 		struct megasas_aen_event *ev = instance->ev;
6404 		cancel_delayed_work_sync(&ev->hotplug_work);
6405 		instance->ev = NULL;
6406 	}
6407 
6408 	tasklet_kill(&instance->isr_tasklet);
6409 
6410 	pci_set_drvdata(instance->pdev, instance);
6411 	instance->instancet->disable_intr(instance);
6412 
6413 	megasas_destroy_irqs(instance);
6414 
6415 	if (instance->msix_vectors)
6416 		pci_free_irq_vectors(instance->pdev);
6417 
6418 	pci_save_state(pdev);
6419 	pci_disable_device(pdev);
6420 
6421 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
6422 
6423 	return 0;
6424 }
6425 
6426 /**
6427  * megasas_resume-      driver resume entry point
6428  * @pdev:               PCI device structure
6429  */
6430 static int
6431 megasas_resume(struct pci_dev *pdev)
6432 {
6433 	int rval;
6434 	struct Scsi_Host *host;
6435 	struct megasas_instance *instance;
6436 	int irq_flags = PCI_IRQ_LEGACY;
6437 
6438 	instance = pci_get_drvdata(pdev);
6439 	host = instance->host;
6440 	pci_set_power_state(pdev, PCI_D0);
6441 	pci_enable_wake(pdev, PCI_D0, 0);
6442 	pci_restore_state(pdev);
6443 
6444 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
6446 	 */
6447 	rval = pci_enable_device_mem(pdev);
6448 
6449 	if (rval) {
6450 		dev_err(&pdev->dev, "Enable device failed\n");
6451 		return rval;
6452 	}
6453 
6454 	pci_set_master(pdev);
6455 
6456 	if (megasas_set_dma_mask(pdev))
6457 		goto fail_set_dma_mask;
6458 
6459 	/*
6460 	 * Initialize MFI Firmware
6461 	 */
6462 
6463 	atomic_set(&instance->fw_outstanding, 0);
6464 
6465 	/*
6466 	 * We expect the FW state to be READY
6467 	 */
6468 	if (megasas_transition_to_ready(instance, 0))
6469 		goto fail_ready_state;
6470 
6471 	/* Now re-enable MSI-X */
6472 	if (instance->msix_vectors) {
6473 		irq_flags = PCI_IRQ_MSIX;
6474 		if (smp_affinity_enable)
6475 			irq_flags |= PCI_IRQ_AFFINITY;
6476 	}
6477 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
6478 				     instance->msix_vectors ?
6479 				     instance->msix_vectors : 1, irq_flags);
6480 	if (rval < 0)
6481 		goto fail_reenable_msix;
6482 
6483 	if (instance->ctrl_context) {
6484 		megasas_reset_reply_desc(instance);
6485 		if (megasas_ioc_init_fusion(instance)) {
6486 			megasas_free_cmds(instance);
6487 			megasas_free_cmds_fusion(instance);
6488 			goto fail_init_mfi;
6489 		}
6490 		if (!megasas_get_map_info(instance))
6491 			megasas_sync_map_info(instance);
6492 	} else {
6493 		*instance->producer = 0;
6494 		*instance->consumer = 0;
6495 		if (megasas_issue_init_mfi(instance))
6496 			goto fail_init_mfi;
6497 	}
6498 
6499 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6500 		     (unsigned long)instance);
6501 
6502 	if (instance->msix_vectors ?
6503 			megasas_setup_irqs_msix(instance, 0) :
6504 			megasas_setup_irqs_ioapic(instance))
6505 		goto fail_init_mfi;
6506 
6507 	/* Re-launch SR-IOV heartbeat timer */
6508 	if (instance->requestorId) {
6509 		if (!megasas_sriov_start_heartbeat(instance, 0))
6510 			megasas_start_timer(instance,
6511 					    &instance->sriov_heartbeat_timer,
6512 					    megasas_sriov_heartbeat_handler,
6513 					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6514 		else {
6515 			instance->skip_heartbeat_timer_del = 1;
6516 			goto fail_init_mfi;
6517 		}
6518 	}
6519 
6520 	instance->instancet->enable_intr(instance);
6521 	megasas_setup_jbod_map(instance);
6522 	instance->unload = 0;
6523 
6524 	/*
6525 	 * Initiate AEN (Asynchronous Event Notification)
6526 	 */
6527 	if (megasas_start_aen(instance))
6528 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
6529 
6530 	return 0;
6531 
6532 fail_init_mfi:
6533 	if (instance->evt_detail)
6534 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6535 				instance->evt_detail,
6536 				instance->evt_detail_h);
6537 
6538 	if (instance->pd_info)
6539 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6540 					instance->pd_info,
6541 					instance->pd_info_h);
6542 	if (instance->tgt_prop)
6543 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6544 					instance->tgt_prop,
6545 					instance->tgt_prop_h);
6546 	if (instance->producer)
6547 		pci_free_consistent(pdev, sizeof(u32), instance->producer,
6548 				instance->producer_h);
6549 	if (instance->consumer)
6550 		pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6551 				instance->consumer_h);
6552 	scsi_host_put(host);
6553 
6554 fail_set_dma_mask:
6555 fail_ready_state:
6556 fail_reenable_msix:
6557 
6558 	pci_disable_device(pdev);
6559 
6560 	return -ENODEV;
6561 }
6562 #else
6563 #define megasas_suspend	NULL
6564 #define megasas_resume	NULL
6565 #endif
6566 
6567 static inline int
6568 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6569 {
6570 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6571 	int i;
6572 
6573 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6574 		return 1;
6575 
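	/*
	 * Poll the recovery state once a second, for up to wait_time
	 * seconds.
	 */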
6576 	for (i = 0; i < wait_time; i++) {
6577 		if (atomic_read(&instance->adprecovery)	== MEGASAS_HBA_OPERATIONAL)
6578 			break;
6579 
6580 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6581 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6582 
6583 		msleep(1000);
6584 	}
6585 
6586 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6587 		dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6588 			__func__);
6589 		return 1;
6590 	}
6591 
6592 	return 0;
6593 }
6594 
6595 /**
6596  * megasas_detach_one -	PCI hot"un"plug entry point
6597  * @pdev:		PCI device structure
6598  */
6599 static void megasas_detach_one(struct pci_dev *pdev)
6600 {
6601 	int i;
6602 	struct Scsi_Host *host;
6603 	struct megasas_instance *instance;
6604 	struct fusion_context *fusion;
6605 	u32 pd_seq_map_sz;
6606 
6607 	instance = pci_get_drvdata(pdev);
6608 	instance->unload = 1;
6609 	host = instance->host;
6610 	fusion = instance->ctrl_context;
6611 
6612 	/* Shutdown SR-IOV heartbeat timer */
6613 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6614 		del_timer_sync(&instance->sriov_heartbeat_timer);
6615 
6616 	if (instance->fw_crash_state != UNAVAILABLE)
6617 		megasas_free_host_crash_buffer(instance);
6618 	scsi_remove_host(instance->host);
6619 
6620 	if (megasas_wait_for_adapter_operational(instance))
6621 		goto skip_firing_dcmds;
6622 
6623 	megasas_flush_cache(instance);
6624 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6625 
6626 skip_firing_dcmds:
	/* cancel the delayed work if it is still queued */
6628 	if (instance->ev != NULL) {
6629 		struct megasas_aen_event *ev = instance->ev;
6630 		cancel_delayed_work_sync(&ev->hotplug_work);
6631 		instance->ev = NULL;
6632 	}
6633 
6634 	/* cancel all wait events */
6635 	wake_up_all(&instance->int_cmd_wait_q);
6636 
6637 	tasklet_kill(&instance->isr_tasklet);
6638 
6639 	/*
6640 	 * Take the instance off the instance array. Note that we will not
	 * Take the instance off the instance array. Note that we will not
	 * decrement max_index; we let this array be a sparse array.
6643 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6644 		if (megasas_mgmt_info.instance[i] == instance) {
6645 			megasas_mgmt_info.count--;
6646 			megasas_mgmt_info.instance[i] = NULL;
6647 
6648 			break;
6649 		}
6650 	}
6651 
6652 	instance->instancet->disable_intr(instance);
6653 
6654 	megasas_destroy_irqs(instance);
6655 
6656 	if (instance->msix_vectors)
6657 		pci_free_irq_vectors(instance->pdev);
6658 
6659 	if (instance->is_ventura) {
6660 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6661 			kfree(fusion->stream_detect_by_ld[i]);
6662 		kfree(fusion->stream_detect_by_ld);
6663 		fusion->stream_detect_by_ld = NULL;
6664 	}
6665 
6666 
6667 	if (instance->ctrl_context) {
6668 		megasas_release_fusion(instance);
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
				(MAX_PHYSICAL_DEVICES - 1));
6672 		for (i = 0; i < 2 ; i++) {
6673 			if (fusion->ld_map[i])
6674 				dma_free_coherent(&instance->pdev->dev,
6675 						  fusion->max_map_sz,
6676 						  fusion->ld_map[i],
6677 						  fusion->ld_map_phys[i]);
6678 			if (fusion->ld_drv_map[i]) {
6679 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
6680 					vfree(fusion->ld_drv_map[i]);
6681 				else
6682 					free_pages((ulong)fusion->ld_drv_map[i],
6683 						   fusion->drv_map_pages);
6684 			}
6685 
6686 			if (fusion->pd_seq_sync[i])
6687 				dma_free_coherent(&instance->pdev->dev,
6688 					pd_seq_map_sz,
6689 					fusion->pd_seq_sync[i],
6690 					fusion->pd_seq_phys[i]);
6691 		}
6692 		megasas_free_fusion_context(instance);
6693 	} else {
6694 		megasas_release_mfi(instance);
6695 		pci_free_consistent(pdev, sizeof(u32),
6696 				    instance->producer,
6697 				    instance->producer_h);
6698 		pci_free_consistent(pdev, sizeof(u32),
6699 				    instance->consumer,
6700 				    instance->consumer_h);
6701 	}
6702 
6703 	kfree(instance->ctrl_info);
6704 
6705 	if (instance->evt_detail)
6706 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6707 				instance->evt_detail, instance->evt_detail_h);
6708 	if (instance->pd_info)
6709 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6710 					instance->pd_info,
6711 					instance->pd_info_h);
6712 	if (instance->tgt_prop)
6713 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6714 					instance->tgt_prop,
6715 					instance->tgt_prop_h);
6716 	if (instance->vf_affiliation)
6717 		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6718 				    sizeof(struct MR_LD_VF_AFFILIATION),
6719 				    instance->vf_affiliation,
6720 				    instance->vf_affiliation_h);
6721 
6722 	if (instance->vf_affiliation_111)
6723 		pci_free_consistent(pdev,
6724 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
6725 				    instance->vf_affiliation_111,
6726 				    instance->vf_affiliation_111_h);
6727 
6728 	if (instance->hb_host_mem)
6729 		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6730 				    instance->hb_host_mem,
6731 				    instance->hb_host_mem_h);
6732 
6733 	if (instance->crash_dump_buf)
6734 		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6735 			    instance->crash_dump_buf, instance->crash_dump_h);
6736 
6737 	if (instance->system_info_buf)
6738 		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6739 				    instance->system_info_buf, instance->system_info_h);
6740 
6741 	scsi_host_put(host);
6742 
6743 	pci_disable_device(pdev);
6744 }
6745 
6746 /**
6747  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
6749  */
6750 static void megasas_shutdown(struct pci_dev *pdev)
6751 {
6752 	struct megasas_instance *instance = pci_get_drvdata(pdev);
6753 
6754 	instance->unload = 1;
6755 
6756 	if (megasas_wait_for_adapter_operational(instance))
6757 		goto skip_firing_dcmds;
6758 
6759 	megasas_flush_cache(instance);
6760 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6761 
6762 skip_firing_dcmds:
6763 	instance->instancet->disable_intr(instance);
6764 	megasas_destroy_irqs(instance);
6765 
6766 	if (instance->msix_vectors)
6767 		pci_free_irq_vectors(instance->pdev);
6768 }
6769 
6770 /**
6771  * megasas_mgmt_open -	char node "open" entry point
6772  */
6773 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6774 {
6775 	/*
6776 	 * Allow only those users with admin rights
6777 	 */
6778 	if (!capable(CAP_SYS_ADMIN))
6779 		return -EACCES;
6780 
6781 	return 0;
6782 }
6783 
6784 /**
6785  * megasas_mgmt_fasync -	Async notifier registration from applications
6786  *
6787  * This function adds the calling process to a driver global queue. When an
6788  * event occurs, SIGIO will be sent to all processes in this queue.
6789  */
6790 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6791 {
6792 	int rc;
6793 
6794 	mutex_lock(&megasas_async_queue_mutex);
6795 
6796 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6797 
6798 	mutex_unlock(&megasas_async_queue_mutex);
6799 
6800 	if (rc >= 0) {
6801 		/* For sanity check when we get ioctl */
6802 		filep->private_data = filep;
6803 		return 0;
6804 	}
6805 
6806 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6807 
6808 	return rc;
6809 }
6810 
6811 /**
6812  * megasas_mgmt_poll -  char node "poll" entry point
 */
6814 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6815 {
6816 	unsigned int mask;
6817 	unsigned long flags;
6818 
6819 	poll_wait(file, &megasas_poll_wait, wait);
6820 	spin_lock_irqsave(&poll_aen_lock, flags);
6821 	if (megasas_poll_wait_aen)
6822 		mask = (POLLIN | POLLRDNORM);
6823 	else
6824 		mask = 0;
6825 	megasas_poll_wait_aen = 0;
6826 	spin_unlock_irqrestore(&poll_aen_lock, flags);
6827 	return mask;
6828 }
6829 
6830 /*
6831  * megasas_set_crash_dump_params_ioctl:
6832  *		Send CRASH_DUMP_MODE DCMD to all controllers
6833  * @cmd:	MFI command frame
6834  */
6835 
6836 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6837 {
6838 	struct megasas_instance *local_instance;
6839 	int i, error = 0;
6840 	int crash_support;
6841 
6842 	crash_support = cmd->frame->dcmd.mbox.w[0];
6843 
6844 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6845 		local_instance = megasas_mgmt_info.instance[i];
6846 		if (local_instance && local_instance->crash_dump_drv_support) {
6847 			if ((atomic_read(&local_instance->adprecovery) ==
6848 				MEGASAS_HBA_OPERATIONAL) &&
6849 				!megasas_set_crash_dump_params(local_instance,
6850 					crash_support)) {
6851 				local_instance->crash_dump_app_support =
6852 					crash_support;
6853 				dev_info(&local_instance->pdev->dev,
6854 					"Application firmware crash "
6855 					"dump mode set success\n");
6856 				error = 0;
6857 			} else {
6858 				dev_info(&local_instance->pdev->dev,
6859 					"Application firmware crash "
6860 					"dump mode set failed\n");
6861 				error = -1;
6862 			}
6863 		}
6864 	}
6865 	return error;
6866 }
6867 
6868 /**
6869  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
6870  * @instance:			Adapter soft state
6871  * @argp:			User's ioctl packet
6872  */
6873 static int
6874 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6875 		      struct megasas_iocpacket __user * user_ioc,
6876 		      struct megasas_iocpacket *ioc)
6877 {
6878 	struct megasas_sge32 *kern_sge32;
6879 	struct megasas_cmd *cmd;
6880 	void *kbuff_arr[MAX_IOCTL_SGE];
6881 	dma_addr_t buf_handle = 0;
6882 	int error = 0, i;
6883 	void *sense = NULL;
6884 	dma_addr_t sense_handle;
6885 	unsigned long *sense_ptr;
6886 	u32 opcode;
6887 
6888 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
6889 
6890 	if (ioc->sge_count > MAX_IOCTL_SGE) {
6891 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
6892 		       ioc->sge_count, MAX_IOCTL_SGE);
6893 		return -EINVAL;
6894 	}
6895 
6896 	cmd = megasas_get_cmd(instance);
6897 	if (!cmd) {
6898 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6899 		return -ENOMEM;
6900 	}
6901 
6902 	/*
	 * The user's IOCTL packet has 2 frames (maximum). Copy those two
	 * frames into our cmd's frames. cmd->frame's context will get
	 * overwritten when we copy from the user's frames, so set that
	 * value separately afterwards.
6907 	 */
6908 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6909 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6910 	cmd->frame->hdr.pad_0 = 0;
6911 	cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
6912 					       MFI_FRAME_SGL64 |
6913 					       MFI_FRAME_SENSE64));
6914 	opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
6915 
6916 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
6917 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
6918 			megasas_return_cmd(instance, cmd);
6919 			return -1;
6920 		}
6921 	}
6922 
6923 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6924 		error = megasas_set_crash_dump_params_ioctl(cmd);
6925 		megasas_return_cmd(instance, cmd);
6926 		return error;
6927 	}
6928 
6929 	/*
	 * The management interface between applications and the FW uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes,
	 * etc. are accomplished through different kinds of MFI frames. The
	 * driver only needs to care about substituting user buffers with
	 * kernel buffers in the SGLs. The location of the SGL is embedded
	 * in the struct iocpacket itself.
6936 	 */
6937 	kern_sge32 = (struct megasas_sge32 *)
6938 	    ((unsigned long)cmd->frame + ioc->sgl_off);
6939 
6940 	/*
6941 	 * For each user buffer, create a mirror buffer and copy in
6942 	 */
6943 	for (i = 0; i < ioc->sge_count; i++) {
6944 		if (!ioc->sgl[i].iov_len)
6945 			continue;
6946 
6947 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
6948 						    ioc->sgl[i].iov_len,
6949 						    &buf_handle, GFP_KERNEL);
6950 		if (!kbuff_arr[i]) {
6951 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6952 			       "kernel SGL buffer for IOCTL\n");
6953 			error = -ENOMEM;
6954 			goto out;
6955 		}
6956 
		/*
		 * We don't change the dma coherent mask, so
		 * dma_alloc_coherent only returns 32 bit addresses
		 */
6961 		kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
6962 		kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
6963 
6964 		/*
6965 		 * We created a kernel buffer corresponding to the
6966 		 * user buffer. Now copy in from the user buffer
6967 		 */
6968 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
6969 				   (u32) (ioc->sgl[i].iov_len))) {
6970 			error = -EFAULT;
6971 			goto out;
6972 		}
6973 	}
6974 
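	/*
	 * If the application expects sense data, allocate a DMA-able sense
	 * buffer and patch its bus address into the frame at the sense
	 * offset supplied by the application.
	 */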
6975 	if (ioc->sense_len) {
6976 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
6977 					     &sense_handle, GFP_KERNEL);
6978 		if (!sense) {
6979 			error = -ENOMEM;
6980 			goto out;
6981 		}
6982 
6983 		sense_ptr =
6984 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6985 		*sense_ptr = cpu_to_le32(sense_handle);
6986 	}
6987 
6988 	/*
6989 	 * Set the sync_cmd flag so that the ISR knows not to complete this
6990 	 * cmd to the SCSI mid-layer
6991 	 */
6992 	cmd->sync_cmd = 1;
	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
		cmd->sync_cmd = 0;
		dev_err(&instance->pdev->dev,
			"return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
			__func__, __LINE__, opcode, cmd->cmd_status_drv);
		/* Bail out via the common cleanup path so the DMA buffers and cmd are freed */
		error = -EBUSY;
		goto out;
6999 	}
7000 
7001 	cmd->sync_cmd = 0;
7002 
7003 	if (instance->unload == 1) {
		dev_info(&instance->pdev->dev, "Driver unload is in progress, "
			"don't submit data to application\n");
7006 		goto out;
7007 	}
7008 	/*
7009 	 * copy out the kernel buffers to user buffers
7010 	 */
7011 	for (i = 0; i < ioc->sge_count; i++) {
7012 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7013 				 ioc->sgl[i].iov_len)) {
7014 			error = -EFAULT;
7015 			goto out;
7016 		}
7017 	}
7018 
7019 	/*
7020 	 * copy out the sense
7021 	 */
7022 	if (ioc->sense_len) {
7023 		/*
7024 		 * sense_ptr points to the location that has the user
7025 		 * sense buffer address
7026 		 */
7027 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7028 				ioc->sense_off);
7029 
7030 		if (copy_to_user((void __user *)((unsigned long)
7031 				 get_unaligned((unsigned long *)sense_ptr)),
7032 				 sense, ioc->sense_len)) {
			dev_err(&instance->pdev->dev,
				"Failed to copy sense data out to user\n");
7035 			error = -EFAULT;
7036 			goto out;
7037 		}
7038 	}
7039 
7040 	/*
7041 	 * copy the status codes returned by the fw
7042 	 */
7043 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7044 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7045 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7046 		error = -EFAULT;
7047 	}
7048 
7049 out:
7050 	if (sense) {
7051 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7052 				    sense, sense_handle);
7053 	}
7054 
7055 	for (i = 0; i < ioc->sge_count; i++) {
7056 		if (kbuff_arr[i]) {
7057 			dma_free_coherent(&instance->pdev->dev,
7058 					  le32_to_cpu(kern_sge32[i].length),
7059 					  kbuff_arr[i],
7060 					  le32_to_cpu(kern_sge32[i].phys_addr));
7061 			kbuff_arr[i] = NULL;
7062 		}
7063 	}
7064 
7065 	megasas_return_cmd(instance, cmd);
7066 	return error;
7067 }
7068 
7069 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7070 {
7071 	struct megasas_iocpacket __user *user_ioc =
7072 	    (struct megasas_iocpacket __user *)arg;
7073 	struct megasas_iocpacket *ioc;
7074 	struct megasas_instance *instance;
7075 	int error;
7076 	int i;
7077 	unsigned long flags;
7078 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7079 
7080 	ioc = memdup_user(user_ioc, sizeof(*ioc));
7081 	if (IS_ERR(ioc))
7082 		return PTR_ERR(ioc);
7083 
7084 	instance = megasas_lookup_instance(ioc->host_no);
7085 	if (!instance) {
7086 		error = -ENODEV;
7087 		goto out_kfree_ioc;
7088 	}
7089 
7090 	/* Adjust ioctl wait time for VF mode */
7091 	if (instance->requestorId)
7092 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7093 
7094 	/* Block ioctls in VF mode */
7095 	if (instance->requestorId && !allow_vf_ioctls) {
7096 		error = -ENODEV;
7097 		goto out_kfree_ioc;
7098 	}
7099 
7100 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7101 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
7102 		error = -ENODEV;
7103 		goto out_kfree_ioc;
7104 	}
7105 
7106 	if (instance->unload == 1) {
7107 		error = -ENODEV;
7108 		goto out_kfree_ioc;
7109 	}
7110 
7111 	if (down_interruptible(&instance->ioctl_sem)) {
7112 		error = -ERESTARTSYS;
7113 		goto out_kfree_ioc;
7114 	}
7115 
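	/*
	 * If an adapter reset (OCR) is in flight, poll for up to wait_time
	 * seconds for the HBA to become operational before issuing the
	 * management command.
	 */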
7116 	for (i = 0; i < wait_time; i++) {
7117 
7118 		spin_lock_irqsave(&instance->hba_lock, flags);
7119 		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7120 			spin_unlock_irqrestore(&instance->hba_lock, flags);
7121 			break;
7122 		}
7123 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7124 
7125 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev,
				"waiting for controller reset to finish\n");
7128 		}
7129 
7130 		msleep(1000);
7131 	}
7132 
7133 	spin_lock_irqsave(&instance->hba_lock, flags);
7134 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7135 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7136 
7137 		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7138 		error = -ENODEV;
7139 		goto out_up;
7140 	}
7141 	spin_unlock_irqrestore(&instance->hba_lock, flags);
7142 
7143 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7144 out_up:
7145 	up(&instance->ioctl_sem);
7146 
7147 out_kfree_ioc:
7148 	kfree(ioc);
7149 	return error;
7150 }
7151 
7152 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7153 {
7154 	struct megasas_instance *instance;
7155 	struct megasas_aen aen;
7156 	int error;
7157 	int i;
7158 	unsigned long flags;
7159 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7160 
7161 	if (file->private_data != file) {
7162 		printk(KERN_DEBUG "megasas: fasync_helper was not "
7163 		       "called first\n");
7164 		return -EINVAL;
7165 	}
7166 
7167 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7168 		return -EFAULT;
7169 
7170 	instance = megasas_lookup_instance(aen.host_no);
7171 
7172 	if (!instance)
7173 		return -ENODEV;
7174 
7175 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7176 		return -ENODEV;
7177 	}
7178 
7179 	if (instance->unload == 1) {
7180 		return -ENODEV;
7181 	}
7182 
7183 	for (i = 0; i < wait_time; i++) {
7184 
7185 		spin_lock_irqsave(&instance->hba_lock, flags);
7186 		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7187 			spin_unlock_irqrestore(&instance->hba_lock,
7188 						flags);
7189 			break;
7190 		}
7191 
7192 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7193 
7194 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev,
				"waiting for controller reset to finish\n");
7197 		}
7198 
7199 		msleep(1000);
7200 	}
7201 
7202 	spin_lock_irqsave(&instance->hba_lock, flags);
7203 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7204 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7205 		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7206 		return -ENODEV;
7207 	}
7208 	spin_unlock_irqrestore(&instance->hba_lock, flags);
7209 
7210 	mutex_lock(&instance->reset_mutex);
7211 	error = megasas_register_aen(instance, aen.seq_num,
7212 				     aen.class_locale_word);
7213 	mutex_unlock(&instance->reset_mutex);
7214 	return error;
7215 }
7216 
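/*
 * Illustrative sketch only: a management application typically drives this
 * interface roughly as follows (the device node path and the packet setup
 * are assumptions of this example; the node is created by the application
 * or udev from the dynamically assigned char major, not by this driver):
 *
 *	fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *	memset(&ioc, 0, sizeof(ioc));
 *	ioc.host_no = 0;		   target SCSI host number
 *	... fill in frame, sgl_off, sense_off, sge_count and sgl[] ...
 *	ret = ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
 *
 * On success, ioc.frame.hdr.cmd_status carries the FW completion status.
 */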
/**
 * megasas_mgmt_ioctl -	char node ioctl entry point
 * @file:	file handle for the management node
 * @cmd:	ioctl command code
 * @arg:	user pointer to the ioctl payload
 */
7220 static long
7221 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7222 {
7223 	switch (cmd) {
7224 	case MEGASAS_IOC_FIRMWARE:
7225 		return megasas_mgmt_ioctl_fw(file, arg);
7226 
7227 	case MEGASAS_IOC_GET_AEN:
7228 		return megasas_mgmt_ioctl_aen(file, arg);
7229 	}
7230 
7231 	return -ENOTTY;
7232 }
7233 
7234 #ifdef CONFIG_COMPAT
7235 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7236 {
7237 	struct compat_megasas_iocpacket __user *cioc =
7238 	    (struct compat_megasas_iocpacket __user *)arg;
7239 	struct megasas_iocpacket __user *ioc =
7240 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7241 	int i;
7242 	int error = 0;
7243 	compat_uptr_t ptr;
7244 	u32 local_sense_off;
7245 	u32 local_sense_len;
7246 	u32 user_sense_off;
7247 
7248 	if (clear_user(ioc, sizeof(*ioc)))
7249 		return -EFAULT;
7250 
7251 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7252 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7253 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7254 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7255 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7256 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7257 		return -EFAULT;
7258 
7259 	/*
7260 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7261 	 * sense_len is not null, so prepare the 64bit value under
7262 	 * the same condition.
7263 	 */
7264 	if (get_user(local_sense_off, &ioc->sense_off) ||
7265 		get_user(local_sense_len, &ioc->sense_len) ||
7266 		get_user(user_sense_off, &cioc->sense_off))
7267 		return -EFAULT;
7268 
7269 	if (local_sense_len) {
7270 		void __user **sense_ioc_ptr =
7271 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7272 		compat_uptr_t *sense_cioc_ptr =
7273 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7274 		if (get_user(ptr, sense_cioc_ptr) ||
7275 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
7276 			return -EFAULT;
7277 	}
7278 
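	/*
	 * Widen each 32-bit iovec from the compat packet into the native
	 * iovec layout expected by megasas_mgmt_ioctl_fw.
	 */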
7279 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
7280 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7281 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7282 		    copy_in_user(&ioc->sgl[i].iov_len,
7283 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7284 			return -EFAULT;
7285 	}
7286 
7287 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7288 
7289 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
7290 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7291 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7292 		return -EFAULT;
7293 	}
7294 	return error;
7295 }
7296 
7297 static long
7298 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7299 			  unsigned long arg)
7300 {
7301 	switch (cmd) {
7302 	case MEGASAS_IOC_FIRMWARE32:
7303 		return megasas_mgmt_compat_ioctl_fw(file, arg);
7304 	case MEGASAS_IOC_GET_AEN:
7305 		return megasas_mgmt_ioctl_aen(file, arg);
7306 	}
7307 
7308 	return -ENOTTY;
7309 }
7310 #endif
7311 
7312 /*
7313  * File operations structure for management interface
7314  */
7315 static const struct file_operations megasas_mgmt_fops = {
7316 	.owner = THIS_MODULE,
7317 	.open = megasas_mgmt_open,
7318 	.fasync = megasas_mgmt_fasync,
7319 	.unlocked_ioctl = megasas_mgmt_ioctl,
7320 	.poll = megasas_mgmt_poll,
7321 #ifdef CONFIG_COMPAT
7322 	.compat_ioctl = megasas_mgmt_compat_ioctl,
7323 #endif
7324 	.llseek = noop_llseek,
7325 };
7326 
7327 /*
7328  * PCI hotplug support registration structure
7329  */
7330 static struct pci_driver megasas_pci_driver = {
7331 
7332 	.name = "megaraid_sas",
7333 	.id_table = megasas_pci_table,
7334 	.probe = megasas_probe_one,
7335 	.remove = megasas_detach_one,
7336 	.suspend = megasas_suspend,
7337 	.resume = megasas_resume,
7338 	.shutdown = megasas_shutdown,
7339 };
7340 
7341 /*
7342  * Sysfs driver attributes
7343  */
7344 static ssize_t version_show(struct device_driver *dd, char *buf)
7345 {
7346 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7347 			MEGASAS_VERSION);
7348 }
7349 static DRIVER_ATTR_RO(version);
7350 
7351 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7352 {
7353 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7354 		MEGASAS_RELDATE);
7355 }
7356 static DRIVER_ATTR_RO(release_date);
7357 
7358 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7359 {
7360 	return sprintf(buf, "%u\n", support_poll_for_event);
7361 }
7362 static DRIVER_ATTR_RO(support_poll_for_event);
7363 
7364 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7365 {
7366 	return sprintf(buf, "%u\n", support_device_change);
7367 }
7368 static DRIVER_ATTR_RO(support_device_change);
7369 
7370 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7371 {
7372 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
7373 }
7374 
7375 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7376 			     size_t count)
7377 {
7378 	int retval = count;
7379 
7380 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7381 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7382 		retval = -EINVAL;
7383 	}
7384 	return retval;
7385 }
7386 static DRIVER_ATTR_RW(dbg_lvl);
7387 
7388 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7389 {
7390 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7391 	scsi_remove_device(sdev);
7392 	scsi_device_put(sdev);
7393 }
7394 
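/*
 * megasas_aen_polling - deferred AEN handler
 *
 * Runs from a workqueue after an asynchronous event notification completes:
 * decodes the event, refreshes the PD/LD lists where needed, adds or removes
 * the affected SCSI devices, and re-arms the AEN with the next sequence
 * number.
 */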
7395 static void
7396 megasas_aen_polling(struct work_struct *work)
7397 {
7398 	struct megasas_aen_event *ev =
7399 		container_of(work, struct megasas_aen_event, hotplug_work.work);
7400 	struct megasas_instance *instance = ev->instance;
7401 	union megasas_evt_class_locale class_locale;
7402 	struct  Scsi_Host *host;
7403 	struct  scsi_device *sdev1;
7404 	u16     pd_index = 0;
7405 	u16	ld_index = 0;
7406 	int     i, j, doscan = 0;
7407 	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7408 	int error;
7409 	u8  dcmd_ret = DCMD_SUCCESS;
7410 
7411 	if (!instance) {
7412 		printk(KERN_ERR "invalid instance!\n");
7413 		kfree(ev);
7414 		return;
7415 	}
7416 
7417 	/* Adjust event workqueue thread wait time for VF mode */
7418 	if (instance->requestorId)
7419 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7420 
7421 	/* Don't run the event workqueue thread if OCR is running */
7422 	mutex_lock(&instance->reset_mutex);
7423 
7424 	instance->ev = NULL;
7425 	host = instance->host;
7426 	if (instance->evt_detail) {
7427 		megasas_decode_evt(instance);
7428 
7429 		switch (le32_to_cpu(instance->evt_detail->code)) {
7430 
7431 		case MR_EVT_PD_INSERTED:
7432 		case MR_EVT_PD_REMOVED:
7433 			dcmd_ret = megasas_get_pd_list(instance);
7434 			if (dcmd_ret == DCMD_SUCCESS)
7435 				doscan = SCAN_PD_CHANNEL;
7436 			break;
7437 
7438 		case MR_EVT_LD_OFFLINE:
7439 		case MR_EVT_CFG_CLEARED:
7440 		case MR_EVT_LD_DELETED:
7441 		case MR_EVT_LD_CREATED:
			if (!instance->requestorId ||
			    megasas_get_ld_vf_affiliation(instance, 0))
				dcmd_ret = megasas_ld_list_query(instance,
						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7445 
7446 			if (dcmd_ret == DCMD_SUCCESS)
7447 				doscan = SCAN_VD_CHANNEL;
7448 
7449 			break;
7450 
7451 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7452 		case MR_EVT_FOREIGN_CFG_IMPORTED:
7453 		case MR_EVT_LD_STATE_CHANGE:
7454 			dcmd_ret = megasas_get_pd_list(instance);
7455 
7456 			if (dcmd_ret != DCMD_SUCCESS)
7457 				break;
7458 
			if (!instance->requestorId ||
			    megasas_get_ld_vf_affiliation(instance, 0))
				dcmd_ret = megasas_ld_list_query(instance,
						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7462 
7463 			if (dcmd_ret != DCMD_SUCCESS)
7464 				break;
7465 
7466 			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7467 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7468 				instance->host->host_no);
7469 			break;
7470 
		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			break;
7474 		default:
7475 			doscan = 0;
7476 			break;
7477 		}
7478 	} else {
7479 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7480 		mutex_unlock(&instance->reset_mutex);
7481 		kfree(ev);
7482 		return;
7483 	}
7484 
7485 	mutex_unlock(&instance->reset_mutex);
7486 
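	/*
	 * Sync the SCSI midlayer with the refreshed PD list: add system
	 * drives that have appeared and remove devices that are gone.
	 */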
7487 	if (doscan & SCAN_PD_CHANNEL) {
7488 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7489 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7490 				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7491 				sdev1 = scsi_device_lookup(host, i, j, 0);
7492 				if (instance->pd_list[pd_index].driveState ==
7493 							MR_PD_STATE_SYSTEM) {
7494 					if (!sdev1)
7495 						scsi_add_device(host, i, j, 0);
7496 					else
7497 						scsi_device_put(sdev1);
7498 				} else {
7499 					if (sdev1)
7500 						megasas_remove_scsi_device(sdev1);
7501 				}
7502 			}
7503 		}
7504 	}
7505 
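	/*
	 * Likewise for logical drives: a slot with ld_ids[] == 0xff has no
	 * LD behind it.
	 */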
7506 	if (doscan & SCAN_VD_CHANNEL) {
7507 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7508 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7509 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7510 				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7511 				if (instance->ld_ids[ld_index] != 0xff) {
7512 					if (!sdev1)
7513 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7514 					else
7515 						scsi_device_put(sdev1);
7516 				} else {
7517 					if (sdev1)
7518 						megasas_remove_scsi_device(sdev1);
7519 				}
7520 			}
7521 		}
7522 	}
7523 
7524 	if (dcmd_ret == DCMD_SUCCESS)
7525 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7526 	else
7527 		seq_num = instance->last_seq_num;
7528 
7529 	/* Register AEN with FW for latest sequence number plus 1 */
7530 	class_locale.members.reserved = 0;
7531 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
7532 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
7533 
7534 	if (instance->aen_cmd != NULL) {
7535 		kfree(ev);
7536 		return;
7537 	}
7538 
7539 	mutex_lock(&instance->reset_mutex);
7540 	error = megasas_register_aen(instance, seq_num,
7541 					class_locale.word);
7542 	if (error)
7543 		dev_err(&instance->pdev->dev,
7544 			"register aen failed error %x\n", error);
7545 
7546 	mutex_unlock(&instance->reset_mutex);
7547 	kfree(ev);
7548 }
7549 
7550 /**
7551  * megasas_init - Driver load entry point
7552  */
7553 static int __init megasas_init(void)
7554 {
7555 	int rval;
7556 
7557 	/*
	 * Booted in a kdump kernel; minimize the memory footprint by
	 * disabling a few features
7560 	 */
7561 	if (reset_devices) {
7562 		msix_vectors = 1;
7563 		rdpq_enable = 0;
7564 		dual_qdepth_disable = 1;
7565 	}
7566 
7567 	/*
7568 	 * Announce driver version and other information
7569 	 */
7570 	pr_info("megasas: %s\n", MEGASAS_VERSION);
7571 
7572 	spin_lock_init(&poll_aen_lock);
7573 
7574 	support_poll_for_event = 2;
7575 	support_device_change = 1;
7576 
7577 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7578 
7579 	/*
7580 	 * Register character device node
7581 	 */
7582 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7583 
7584 	if (rval < 0) {
7585 		printk(KERN_DEBUG "megasas: failed to open device node\n");
7586 		return rval;
7587 	}
7588 
7589 	megasas_mgmt_majorno = rval;
7590 
7591 	/*
7592 	 * Register ourselves as PCI hotplug module
7593 	 */
7594 	rval = pci_register_driver(&megasas_pci_driver);
7595 
7596 	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7598 		goto err_pcidrv;
7599 	}
7600 
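	/*
	 * Expose driver-wide sysfs attributes (version, release date,
	 * feature flags, debug level) under the PCI driver's sysfs
	 * directory.
	 */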
7601 	rval = driver_create_file(&megasas_pci_driver.driver,
7602 				  &driver_attr_version);
7603 	if (rval)
7604 		goto err_dcf_attr_ver;
7605 
7606 	rval = driver_create_file(&megasas_pci_driver.driver,
7607 				  &driver_attr_release_date);
7608 	if (rval)
7609 		goto err_dcf_rel_date;
7610 
7611 	rval = driver_create_file(&megasas_pci_driver.driver,
7612 				&driver_attr_support_poll_for_event);
7613 	if (rval)
7614 		goto err_dcf_support_poll_for_event;
7615 
7616 	rval = driver_create_file(&megasas_pci_driver.driver,
7617 				  &driver_attr_dbg_lvl);
7618 	if (rval)
7619 		goto err_dcf_dbg_lvl;
7620 	rval = driver_create_file(&megasas_pci_driver.driver,
7621 				&driver_attr_support_device_change);
7622 	if (rval)
7623 		goto err_dcf_support_device_change;
7624 
7625 	return rval;
7626 
7627 err_dcf_support_device_change:
7628 	driver_remove_file(&megasas_pci_driver.driver,
7629 			   &driver_attr_dbg_lvl);
7630 err_dcf_dbg_lvl:
7631 	driver_remove_file(&megasas_pci_driver.driver,
7632 			&driver_attr_support_poll_for_event);
7633 err_dcf_support_poll_for_event:
7634 	driver_remove_file(&megasas_pci_driver.driver,
7635 			   &driver_attr_release_date);
7636 err_dcf_rel_date:
7637 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7638 err_dcf_attr_ver:
7639 	pci_unregister_driver(&megasas_pci_driver);
7640 err_pcidrv:
7641 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7642 	return rval;
7643 }
7644 
7645 /**
7646  * megasas_exit - Driver unload entry point
7647  */
7648 static void __exit megasas_exit(void)
7649 {
7650 	driver_remove_file(&megasas_pci_driver.driver,
7651 			   &driver_attr_dbg_lvl);
7652 	driver_remove_file(&megasas_pci_driver.driver,
7653 			&driver_attr_support_poll_for_event);
7654 	driver_remove_file(&megasas_pci_driver.driver,
7655 			&driver_attr_support_device_change);
7656 	driver_remove_file(&megasas_pci_driver.driver,
7657 			   &driver_attr_release_date);
7658 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7659 
7660 	pci_unregister_driver(&megasas_pci_driver);
7661 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7662 }
7663 
7664 module_init(megasas_init);
7665 module_exit(megasas_exit);
7666