1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
49 
50 /*
51  * Number of sectors per IO command
52  * Will be set in megasas_init_mfi if user does not provide
53  */
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 	"Maximum number of sectors per IO command");
58 
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
62 
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
66 
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
70 
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
75 
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
78 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180s");
79 
80 int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
82 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
83 
84 int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
86 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
87 
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
91 
92 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
94 MODULE_PARM_DESC(scmd_timeout, "SCSI command timeout (10-90s), default 90s. See megasas_reset_timer.");
95 
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
101 		"1 - iops: High iops queues are not allocated &\n\t\t"
102 		"interrupt coalescing is enabled on all queues\n\t\t"
103 		"2 - latency: High iops queues are not allocated &\n\t\t"
104 		"interrupt coalescing is disabled on all queues\n\t\t"
105 		"default mode is 'balanced'"
106 		);
107 
108 int event_log_level = MFI_EVT_CLASS_CRITICAL;
109 module_param(event_log_level, int, 0644);
110 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD). Default: 2(CLASS_CRITICAL)");
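
/*
 * Illustrative usage of the module parameters above (a hedged example, not
 * driver code):
 *
 *	# at load time
 *	modprobe megaraid_sas max_sectors=256 perf_mode=0
 *
 *	# at runtime, for parameters exported with 0644 permissions
 *	echo 4 > /sys/module/megaraid_sas/parameters/event_log_level
 */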
111 
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(MEGASAS_VERSION);
114 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
115 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
116 
117 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
118 static int megasas_get_pd_list(struct megasas_instance *instance);
119 static int megasas_ld_list_query(struct megasas_instance *instance,
120 				 u8 query_type);
121 static int megasas_issue_init_mfi(struct megasas_instance *instance);
122 static int megasas_register_aen(struct megasas_instance *instance,
123 				u32 seq_num, u32 class_locale_word);
124 static void megasas_get_pd_info(struct megasas_instance *instance,
125 				struct scsi_device *sdev);
126 
127 /*
128  * PCI ID table for all supported controllers
129  */
130 static struct pci_device_id megasas_pci_table[] = {
131 
132 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
133 	/* xscale IOP */
134 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
135 	/* ppc IOP */
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
137 	/* ppc IOP */
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
139 	/* gen2*/
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
141 	/* gen2*/
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
143 	/* skinny*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
145 	/* skinny*/
146 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
147 	/* xscale IOP, vega */
148 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
149 	/* xscale IOP */
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
151 	/* Fusion */
152 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
153 	/* Plasma */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
155 	/* Invader */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
157 	/* Fury */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
159 	/* Intruder */
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
161 	/* Intruder 24 port*/
162 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
164 	/* VENTURA */
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
168 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
170 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
172 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
173 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
175 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
176 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
177 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
178 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
179 	{}
180 };
181 
182 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
183 
184 static int megasas_mgmt_majorno;
185 struct megasas_mgmt_info megasas_mgmt_info;
186 static struct fasync_struct *megasas_async_queue;
187 static DEFINE_MUTEX(megasas_async_queue_mutex);
188 
189 static int megasas_poll_wait_aen;
190 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
191 static u32 support_poll_for_event;
192 u32 megasas_dbg_lvl;
193 static u32 support_device_change;
194 static bool support_nvme_encapsulation;
195 static bool support_pci_lane_margining;
196 
197 /* define lock for aen poll */
198 spinlock_t poll_aen_lock;
199 
200 extern struct dentry *megasas_debugfs_root;
201 extern void megasas_init_debugfs(void);
202 extern void megasas_exit_debugfs(void);
203 extern void megasas_setup_debugfs(struct megasas_instance *instance);
204 extern void megasas_destroy_debugfs(struct megasas_instance *instance);
205 
206 void
207 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
208 		     u8 alt_status);
209 static u32
210 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
211 static int
212 megasas_adp_reset_gen2(struct megasas_instance *instance,
213 		       struct megasas_register_set __iomem *reg_set);
214 static irqreturn_t megasas_isr(int irq, void *devp);
215 static u32
216 megasas_init_adapter_mfi(struct megasas_instance *instance);
217 u32
218 megasas_build_and_issue_cmd(struct megasas_instance *instance,
219 			    struct scsi_cmnd *scmd);
220 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
221 int
222 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
223 	int seconds);
224 void megasas_fusion_ocr_wq(struct work_struct *work);
225 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
226 					 int initial);
227 static int
228 megasas_set_dma_mask(struct megasas_instance *instance);
229 static int
230 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
231 static inline void
232 megasas_free_ctrl_mem(struct megasas_instance *instance);
233 static inline int
234 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
235 static inline void
236 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
237 static inline void
238 megasas_init_ctrl_params(struct megasas_instance *instance);
239 
240 u32 megasas_readl(struct megasas_instance *instance,
241 		  const volatile void __iomem *addr)
242 {
243 	u32 i = 0, ret_val;
244 	/*
245 	 * Due to a HW errata in Aero controllers, reads to certain
246 	 * Fusion registers could intermittently return all zeroes.
247 	 * This behavior is transient in nature and subsequent reads will
248 	 * return a valid value. As a workaround, the driver retries the readl
249 	 * up to three times until a non-zero value is read.
250 	 */
251 	if (instance->adapter_type == AERO_SERIES) {
252 		do {
253 			ret_val = readl(addr);
254 			i++;
255 		} while (ret_val == 0 && i < 3);
256 		return ret_val;
257 	} else {
258 		return readl(addr);
259 	}
260 }
261 
262 /**
263  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
264  * @instance:			Adapter soft state
265  * @dcmd:			DCMD frame inside MFI command
266  * @dma_addr:			DMA address of buffer to be passed to FW
267  * @dma_len:			Length of DMA buffer to be passed to FW
268  * @return:			void
269  */
270 void megasas_set_dma_settings(struct megasas_instance *instance,
271 			      struct megasas_dcmd_frame *dcmd,
272 			      dma_addr_t dma_addr, u32 dma_len)
273 {
274 	if (instance->consistent_mask_64bit) {
275 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
276 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
277 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
278 
279 	} else {
280 		dcmd->sgl.sge32[0].phys_addr =
281 				cpu_to_le32(lower_32_bits(dma_addr));
282 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
283 		dcmd->flags = cpu_to_le16(dcmd->flags);
284 	}
285 }
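
/*
 * Hedged usage sketch for megasas_set_dma_settings() (illustrative only; the
 * opcode, buffer handle and size below are assumptions, not taken from this
 * file):
 *
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
 *	megasas_set_dma_settings(instance, dcmd, ci_dma_handle,
 *				 sizeof(struct megasas_ctrl_info));
 *
 * Depending on instance->consistent_mask_64bit the helper fills either the
 * 64-bit or the 32-bit SGE and sets MFI_FRAME_SGL64 in dcmd->flags.
 */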
286 
287 static void
288 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
289 {
290 	instance->instancet->fire_cmd(instance,
291 		cmd->frame_phys_addr, 0, instance->reg_set);
292 	return;
293 }
294 
295 /**
296  * megasas_get_cmd -	Get a command from the free pool
297  * @instance:		Adapter soft state
298  *
299  * Returns a free command from the pool
300  */
301 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
302 						  *instance)
303 {
304 	unsigned long flags;
305 	struct megasas_cmd *cmd = NULL;
306 
307 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
308 
309 	if (!list_empty(&instance->cmd_pool)) {
310 		cmd = list_entry((&instance->cmd_pool)->next,
311 				 struct megasas_cmd, list);
312 		list_del_init(&cmd->list);
313 	} else {
314 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
315 	}
316 
317 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
318 	return cmd;
319 }
320 
321 /**
322  * megasas_return_cmd -	Return a cmd to free command pool
323  * @instance:		Adapter soft state
324  * @cmd:		Command packet to be returned to free command pool
325  */
326 void
327 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
328 {
329 	unsigned long flags;
330 	u32 blk_tags;
331 	struct megasas_cmd_fusion *cmd_fusion;
332 	struct fusion_context *fusion = instance->ctrl_context;
333 
334 	/* This flag is used only for fusion adapters.
335 	 * Wait for interrupt for polled mode DCMD.
336 	 */
337 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
338 		return;
339 
340 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
341 
342 	if (fusion) {
343 		blk_tags = instance->max_scsi_cmds + cmd->index;
344 		cmd_fusion = fusion->cmd_list[blk_tags];
345 		megasas_return_cmd_fusion(instance, cmd_fusion);
346 	}
347 	cmd->scmd = NULL;
348 	cmd->frame_count = 0;
349 	cmd->flags = 0;
350 	memset(cmd->frame, 0, instance->mfi_frame_size);
351 	cmd->frame->io.context = cpu_to_le32(cmd->index);
352 	if (!fusion && reset_devices)
353 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
354 	list_add(&cmd->list, (&instance->cmd_pool)->next);
355 
356 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
357 
358 }
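
/*
 * Minimal sketch (assumption-laden, for illustration) of the get/return
 * pairing used by internal command paths; real callers also build and issue
 * an MFI frame in between:
 *
 *	struct megasas_cmd *cmd = megasas_get_cmd(instance);
 *
 *	if (!cmd)
 *		return -ENOMEM;
 *	...build the frame and issue it...
 *	megasas_return_cmd(instance, cmd);
 */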
359 
360 static const char *
361 format_timestamp(uint32_t timestamp)
362 {
363 	static char buffer[32];
364 
365 	if ((timestamp & 0xff000000) == 0xff000000)
366 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
367 		0x00ffffff);
368 	else
369 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
370 	return buffer;
371 }
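
/*
 * Example outputs (illustrative): a timestamp of 0xff00003c has the top byte
 * set, so it is treated as relative to boot and formatted as "boot + 60s";
 * an absolute timestamp such as 1000 is formatted as "1000s".
 */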
372 
373 static const char *
374 format_class(int8_t class)
375 {
376 	static char buffer[6];
377 
378 	switch (class) {
379 	case MFI_EVT_CLASS_DEBUG:
380 		return "debug";
381 	case MFI_EVT_CLASS_PROGRESS:
382 		return "progress";
383 	case MFI_EVT_CLASS_INFO:
384 		return "info";
385 	case MFI_EVT_CLASS_WARNING:
386 		return "WARN";
387 	case MFI_EVT_CLASS_CRITICAL:
388 		return "CRIT";
389 	case MFI_EVT_CLASS_FATAL:
390 		return "FATAL";
391 	case MFI_EVT_CLASS_DEAD:
392 		return "DEAD";
393 	default:
394 		snprintf(buffer, sizeof(buffer), "%d", class);
395 		return buffer;
396 	}
397 }
398 
399 /**
400  * megasas_decode_evt - Decode FW AEN event and print critical event
401  *		       for information.
402  * @instance:			Adapter soft state
403  */
404 static void
405 megasas_decode_evt(struct megasas_instance *instance)
406 {
407 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
408 	union megasas_evt_class_locale class_locale;
409 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
410 
411 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
412 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
413 		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
414 		event_log_level = MFI_EVT_CLASS_CRITICAL;
415 	}
416 
417 	if (class_locale.members.class >= event_log_level)
418 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
419 			le32_to_cpu(evt_detail->seq_num),
420 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
421 			(class_locale.members.locale),
422 			format_class(class_locale.members.class),
423 			evt_detail->description);
424 }
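
/*
 * A decoded event line (hypothetical values, illustrative prefix) looks like:
 *
 *	megaraid_sas 0000:02:00.0: 120 (boot + 60s/0x0020/CRIT) - <description>
 *
 * i.e. sequence number, timestamp, locale, class and the FW-provided
 * description string.
 */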
425 
426 /**
427 *	The following functions are defined for xscale
428 *	(deviceid : 1064R, PERC5) controllers
429 */
430 
431 /**
432  * megasas_enable_intr_xscale -	Enables interrupts
433  * @instance:			Adapter soft state
434  */
435 static inline void
436 megasas_enable_intr_xscale(struct megasas_instance *instance)
437 {
438 	struct megasas_register_set __iomem *regs;
439 
440 	regs = instance->reg_set;
441 	writel(0, &(regs)->outbound_intr_mask);
442 
443 	/* Dummy readl to force pci flush */
444 	readl(&regs->outbound_intr_mask);
445 }
446 
447 /**
448  * megasas_disable_intr_xscale - Disables interrupt
449  * @instance:			Adapter soft state
450  */
451 static inline void
452 megasas_disable_intr_xscale(struct megasas_instance *instance)
453 {
454 	struct megasas_register_set __iomem *regs;
455 	u32 mask = 0x1f;
456 
457 	regs = instance->reg_set;
458 	writel(mask, &regs->outbound_intr_mask);
459 	/* Dummy readl to force pci flush */
460 	readl(&regs->outbound_intr_mask);
461 }
462 
463 /**
464  * megasas_read_fw_status_reg_xscale - returns the current FW status value
465  * @instance:			Adapter soft state
466  */
467 static u32
468 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
469 {
470 	return readl(&instance->reg_set->outbound_msg_0);
471 }
472 /**
473  * megasas_clear_intr_xscale -	Check & clear interrupt
474  * @instance:				Adapter soft state
475  */
476 static int
477 megasas_clear_intr_xscale(struct megasas_instance *instance)
478 {
479 	u32 status;
480 	u32 mfiStatus = 0;
481 	struct megasas_register_set __iomem *regs;
482 	regs = instance->reg_set;
483 
484 	/*
485 	 * Check if it is our interrupt
486 	 */
487 	status = readl(&regs->outbound_intr_status);
488 
489 	if (status & MFI_OB_INTR_STATUS_MASK)
490 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
491 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
492 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
493 
494 	/*
495 	 * Clear the interrupt by writing back the same value
496 	 */
497 	if (mfiStatus)
498 		writel(status, &regs->outbound_intr_status);
499 
500 	/* Dummy readl to force pci flush */
501 	readl(&regs->outbound_intr_status);
502 
503 	return mfiStatus;
504 }
505 
506 /**
507  * megasas_fire_cmd_xscale -	Sends command to the FW
508  * @frame_phys_addr :		Physical address of cmd
509  * @frame_count :		Number of frames for the command
510  * @regs :			MFI register set
511  */
512 static inline void
513 megasas_fire_cmd_xscale(struct megasas_instance *instance,
514 		dma_addr_t frame_phys_addr,
515 		u32 frame_count,
516 		struct megasas_register_set __iomem *regs)
517 {
518 	unsigned long flags;
519 
520 	spin_lock_irqsave(&instance->hba_lock, flags);
521 	writel((frame_phys_addr >> 3)|(frame_count),
522 	       &(regs)->inbound_queue_port);
523 	spin_unlock_irqrestore(&instance->hba_lock, flags);
524 }
525 
526 /**
527  * megasas_adp_reset_xscale -  For controller reset
528  * @regs:                              MFI register set
529  */
530 static int
531 megasas_adp_reset_xscale(struct megasas_instance *instance,
532 	struct megasas_register_set __iomem *regs)
533 {
534 	u32 i;
535 	u32 pcidata;
536 
537 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
538 
539 	for (i = 0; i < 3; i++)
540 		msleep(1000); /* sleep for 3 secs */
541 	pcidata  = 0;
542 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
543 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
544 	if (pcidata & 0x2) {
545 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
546 		pcidata &= ~0x2;
547 		pci_write_config_dword(instance->pdev,
548 				MFI_1068_PCSR_OFFSET, pcidata);
549 
550 		for (i = 0; i < 2; i++)
551 			msleep(1000); /* need to wait 2 secs again */
552 
553 		pcidata  = 0;
554 		pci_read_config_dword(instance->pdev,
555 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
556 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
557 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
558 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
559 			pcidata = 0;
560 			pci_write_config_dword(instance->pdev,
561 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
562 		}
563 	}
564 	return 0;
565 }
566 
567 /**
568  * megasas_check_reset_xscale -	For controller reset check
569  * @regs:				MFI register set
570  */
571 static int
572 megasas_check_reset_xscale(struct megasas_instance *instance,
573 		struct megasas_register_set __iomem *regs)
574 {
575 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
576 	    (le32_to_cpu(*instance->consumer) ==
577 		MEGASAS_ADPRESET_INPROG_SIGN))
578 		return 1;
579 	return 0;
580 }
581 
582 static struct megasas_instance_template megasas_instance_template_xscale = {
583 
584 	.fire_cmd = megasas_fire_cmd_xscale,
585 	.enable_intr = megasas_enable_intr_xscale,
586 	.disable_intr = megasas_disable_intr_xscale,
587 	.clear_intr = megasas_clear_intr_xscale,
588 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
589 	.adp_reset = megasas_adp_reset_xscale,
590 	.check_reset = megasas_check_reset_xscale,
591 	.service_isr = megasas_isr,
592 	.tasklet = megasas_complete_cmd_dpc,
593 	.init_adapter = megasas_init_adapter_mfi,
594 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
595 	.issue_dcmd = megasas_issue_dcmd,
596 };
597 
598 /**
599 *	This is the end of set of functions & definitions specific
600 *	to xscale (deviceid : 1064R, PERC5) controllers
601 */
602 
603 /**
604 *	The following functions are defined for ppc (deviceid : 0x60)
605 *	controllers
606 */
607 
608 /**
609  * megasas_enable_intr_ppc -	Enables interrupts
610  * @instance:			Adapter soft state
611  */
612 static inline void
613 megasas_enable_intr_ppc(struct megasas_instance *instance)
614 {
615 	struct megasas_register_set __iomem *regs;
616 
617 	regs = instance->reg_set;
618 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
619 
620 	writel(~0x80000000, &(regs)->outbound_intr_mask);
621 
622 	/* Dummy readl to force pci flush */
623 	readl(&regs->outbound_intr_mask);
624 }
625 
626 /**
627  * megasas_disable_intr_ppc -	Disable interrupt
628  * @instance:			Adapter soft state
629  */
630 static inline void
631 megasas_disable_intr_ppc(struct megasas_instance *instance)
632 {
633 	struct megasas_register_set __iomem *regs;
634 	u32 mask = 0xFFFFFFFF;
635 
636 	regs = instance->reg_set;
637 	writel(mask, &regs->outbound_intr_mask);
638 	/* Dummy readl to force pci flush */
639 	readl(&regs->outbound_intr_mask);
640 }
641 
642 /**
643  * megasas_read_fw_status_reg_ppc - returns the current FW status value
644  * @instance:			Adapter soft state
645  */
646 static u32
647 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
648 {
649 	return readl(&instance->reg_set->outbound_scratch_pad_0);
650 }
651 
652 /**
653  * megasas_clear_intr_ppc -	Check & clear interrupt
654  * @instance:				Adapter soft state
655  */
656 static int
657 megasas_clear_intr_ppc(struct megasas_instance *instance)
658 {
659 	u32 status, mfiStatus = 0;
660 	struct megasas_register_set __iomem *regs;
661 	regs = instance->reg_set;
662 
663 	/*
664 	 * Check if it is our interrupt
665 	 */
666 	status = readl(&regs->outbound_intr_status);
667 
668 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
669 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
670 
671 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
672 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
673 
674 	/*
675 	 * Clear the interrupt by writing back the same value
676 	 */
677 	writel(status, &regs->outbound_doorbell_clear);
678 
679 	/* Dummy readl to force pci flush */
680 	readl(&regs->outbound_doorbell_clear);
681 
682 	return mfiStatus;
683 }
684 
685 /**
686  * megasas_fire_cmd_ppc -	Sends command to the FW
687  * @frame_phys_addr :		Physical address of cmd
688  * @frame_count :		Number of frames for the command
689  * @regs :			MFI register set
690  */
691 static inline void
692 megasas_fire_cmd_ppc(struct megasas_instance *instance,
693 		dma_addr_t frame_phys_addr,
694 		u32 frame_count,
695 		struct megasas_register_set __iomem *regs)
696 {
697 	unsigned long flags;
698 
699 	spin_lock_irqsave(&instance->hba_lock, flags);
700 	writel((frame_phys_addr | (frame_count<<1))|1,
701 			&(regs)->inbound_queue_port);
702 	spin_unlock_irqrestore(&instance->hba_lock, flags);
703 }
704 
705 /**
706  * megasas_check_reset_ppc -	For controller reset check
707  * @regs:				MFI register set
708  */
709 static int
710 megasas_check_reset_ppc(struct megasas_instance *instance,
711 			struct megasas_register_set __iomem *regs)
712 {
713 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
714 		return 1;
715 
716 	return 0;
717 }
718 
719 static struct megasas_instance_template megasas_instance_template_ppc = {
720 
721 	.fire_cmd = megasas_fire_cmd_ppc,
722 	.enable_intr = megasas_enable_intr_ppc,
723 	.disable_intr = megasas_disable_intr_ppc,
724 	.clear_intr = megasas_clear_intr_ppc,
725 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
726 	.adp_reset = megasas_adp_reset_xscale,
727 	.check_reset = megasas_check_reset_ppc,
728 	.service_isr = megasas_isr,
729 	.tasklet = megasas_complete_cmd_dpc,
730 	.init_adapter = megasas_init_adapter_mfi,
731 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
732 	.issue_dcmd = megasas_issue_dcmd,
733 };
734 
735 /**
736  * megasas_enable_intr_skinny -	Enables interrupts
737  * @instance:			Adapter soft state
738  */
739 static inline void
740 megasas_enable_intr_skinny(struct megasas_instance *instance)
741 {
742 	struct megasas_register_set __iomem *regs;
743 
744 	regs = instance->reg_set;
745 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
746 
747 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
748 
749 	/* Dummy readl to force pci flush */
750 	readl(&regs->outbound_intr_mask);
751 }
752 
753 /**
754  * megasas_disable_intr_skinny -	Disables interrupt
755  * @instance:			Adapter soft state
756  */
757 static inline void
758 megasas_disable_intr_skinny(struct megasas_instance *instance)
759 {
760 	struct megasas_register_set __iomem *regs;
761 	u32 mask = 0xFFFFFFFF;
762 
763 	regs = instance->reg_set;
764 	writel(mask, &regs->outbound_intr_mask);
765 	/* Dummy readl to force pci flush */
766 	readl(&regs->outbound_intr_mask);
767 }
768 
769 /**
770  * megasas_read_fw_status_reg_skinny - returns the current FW status value
771  * @instance:			Adapter soft state
772  */
773 static u32
774 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
775 {
776 	return readl(&instance->reg_set->outbound_scratch_pad_0);
777 }
778 
779 /**
780  * megasas_clear_intr_skinny -	Check & clear interrupt
781  * @instance:				Adapter soft state
782  */
783 static int
784 megasas_clear_intr_skinny(struct megasas_instance *instance)
785 {
786 	u32 status;
787 	u32 mfiStatus = 0;
788 	struct megasas_register_set __iomem *regs;
789 	regs = instance->reg_set;
790 
791 	/*
792 	 * Check if it is our interrupt
793 	 */
794 	status = readl(&regs->outbound_intr_status);
795 
796 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
797 		return 0;
798 	}
799 
800 	/*
801 	 * Check if it is our interrupt
802 	 */
803 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
804 	    MFI_STATE_FAULT) {
805 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
806 	} else
807 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
808 
809 	/*
810 	 * Clear the interrupt by writing back the same value
811 	 */
812 	writel(status, &regs->outbound_intr_status);
813 
814 	/*
815 	 * dummy read to flush PCI
816 	 */
817 	readl(&regs->outbound_intr_status);
818 
819 	return mfiStatus;
820 }
821 
822 /**
823  * megasas_fire_cmd_skinny -	Sends command to the FW
824  * @frame_phys_addr :		Physical address of cmd
825  * @frame_count :		Number of frames for the command
826  * @regs :			MFI register set
827  */
828 static inline void
829 megasas_fire_cmd_skinny(struct megasas_instance *instance,
830 			dma_addr_t frame_phys_addr,
831 			u32 frame_count,
832 			struct megasas_register_set __iomem *regs)
833 {
834 	unsigned long flags;
835 
836 	spin_lock_irqsave(&instance->hba_lock, flags);
837 	writel(upper_32_bits(frame_phys_addr),
838 	       &(regs)->inbound_high_queue_port);
839 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
840 	       &(regs)->inbound_low_queue_port);
841 	spin_unlock_irqrestore(&instance->hba_lock, flags);
842 }
843 
844 /**
845  * megasas_check_reset_skinny -	For controller reset check
846  * @regs:				MFI register set
847  */
848 static int
849 megasas_check_reset_skinny(struct megasas_instance *instance,
850 				struct megasas_register_set __iomem *regs)
851 {
852 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
853 		return 1;
854 
855 	return 0;
856 }
857 
858 static struct megasas_instance_template megasas_instance_template_skinny = {
859 
860 	.fire_cmd = megasas_fire_cmd_skinny,
861 	.enable_intr = megasas_enable_intr_skinny,
862 	.disable_intr = megasas_disable_intr_skinny,
863 	.clear_intr = megasas_clear_intr_skinny,
864 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
865 	.adp_reset = megasas_adp_reset_gen2,
866 	.check_reset = megasas_check_reset_skinny,
867 	.service_isr = megasas_isr,
868 	.tasklet = megasas_complete_cmd_dpc,
869 	.init_adapter = megasas_init_adapter_mfi,
870 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
871 	.issue_dcmd = megasas_issue_dcmd,
872 };
873 
874 
875 /**
876 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
877 *	controllers
878 */
879 
880 /**
881  * megasas_enable_intr_gen2 -  Enables interrupts
882  * @instance:                  Adapter soft state
883  */
884 static inline void
885 megasas_enable_intr_gen2(struct megasas_instance *instance)
886 {
887 	struct megasas_register_set __iomem *regs;
888 
889 	regs = instance->reg_set;
890 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
891 
892 	/* write ~0x00000005 (4 & 1) to the intr mask*/
893 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
894 
895 	/* Dummy readl to force pci flush */
896 	readl(&regs->outbound_intr_mask);
897 }
898 
899 /**
900  * megasas_disable_intr_gen2 - Disables interrupt
901  * @instance:                  Adapter soft state
902  */
903 static inline void
904 megasas_disable_intr_gen2(struct megasas_instance *instance)
905 {
906 	struct megasas_register_set __iomem *regs;
907 	u32 mask = 0xFFFFFFFF;
908 
909 	regs = instance->reg_set;
910 	writel(mask, &regs->outbound_intr_mask);
911 	/* Dummy readl to force pci flush */
912 	readl(&regs->outbound_intr_mask);
913 }
914 
915 /**
916  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
917  * @instance:                  Adapter soft state
918  */
919 static u32
920 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
921 {
922 	return readl(&instance->reg_set->outbound_scratch_pad_0);
923 }
924 
925 /**
926  * megasas_clear_intr_gen2 -           Check & clear interrupt
927  * @instance:                          Adapter soft state
928  */
929 static int
930 megasas_clear_intr_gen2(struct megasas_instance *instance)
931 {
932 	u32 status;
933 	u32 mfiStatus = 0;
934 	struct megasas_register_set __iomem *regs;
935 	regs = instance->reg_set;
936 
937 	/*
938 	 * Check if it is our interrupt
939 	 */
940 	status = readl(&regs->outbound_intr_status);
941 
942 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
943 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
944 	}
945 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
946 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
947 	}
948 
949 	/*
950 	 * Clear the interrupt by writing back the same value
951 	 */
952 	if (mfiStatus)
953 		writel(status, &regs->outbound_doorbell_clear);
954 
955 	/* Dummy readl to force pci flush */
956 	readl(&regs->outbound_intr_status);
957 
958 	return mfiStatus;
959 }
960 /**
961  * megasas_fire_cmd_gen2 -     Sends command to the FW
962  * @frame_phys_addr :          Physical address of cmd
963  * @frame_count :              Number of frames for the command
964  * @regs :                     MFI register set
965  */
966 static inline void
967 megasas_fire_cmd_gen2(struct megasas_instance *instance,
968 			dma_addr_t frame_phys_addr,
969 			u32 frame_count,
970 			struct megasas_register_set __iomem *regs)
971 {
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&instance->hba_lock, flags);
975 	writel((frame_phys_addr | (frame_count<<1))|1,
976 			&(regs)->inbound_queue_port);
977 	spin_unlock_irqrestore(&instance->hba_lock, flags);
978 }
979 
980 /**
981  * megasas_adp_reset_gen2 -	For controller reset
982  * @regs:				MFI register set
983  */
984 static int
985 megasas_adp_reset_gen2(struct megasas_instance *instance,
986 			struct megasas_register_set __iomem *reg_set)
987 {
988 	u32 retry = 0 ;
989 	u32 HostDiag;
990 	u32 __iomem *seq_offset = &reg_set->seq_offset;
991 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
992 
993 	if (instance->instancet == &megasas_instance_template_skinny) {
994 		seq_offset = &reg_set->fusion_seq_offset;
995 		hostdiag_offset = &reg_set->fusion_host_diag;
996 	}
997 
998 	writel(0, seq_offset);
999 	writel(4, seq_offset);
1000 	writel(0xb, seq_offset);
1001 	writel(2, seq_offset);
1002 	writel(7, seq_offset);
1003 	writel(0xd, seq_offset);
1004 
1005 	msleep(1000);
1006 
1007 	HostDiag = (u32)readl(hostdiag_offset);
1008 
1009 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1010 		msleep(100);
1011 		HostDiag = (u32)readl(hostdiag_offset);
1012 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1013 					retry, HostDiag);
1014 
1015 		if (retry++ >= 100)
1016 			return 1;
1017 
1018 	}
1019 
1020 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1021 
1022 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1023 
1024 	ssleep(10);
1025 
1026 	HostDiag = (u32)readl(hostdiag_offset);
1027 	while (HostDiag & DIAG_RESET_ADAPTER) {
1028 		msleep(100);
1029 		HostDiag = (u32)readl(hostdiag_offset);
1030 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1031 				retry, HostDiag);
1032 
1033 		if (retry++ >= 1000)
1034 			return 1;
1035 
1036 	}
1037 	return 0;
1038 }
1039 
1040 /**
1041  * megasas_check_reset_gen2 -	For controller reset check
1042  * @regs:				MFI register set
1043  */
1044 static int
1045 megasas_check_reset_gen2(struct megasas_instance *instance,
1046 		struct megasas_register_set __iomem *regs)
1047 {
1048 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1049 		return 1;
1050 
1051 	return 0;
1052 }
1053 
1054 static struct megasas_instance_template megasas_instance_template_gen2 = {
1055 
1056 	.fire_cmd = megasas_fire_cmd_gen2,
1057 	.enable_intr = megasas_enable_intr_gen2,
1058 	.disable_intr = megasas_disable_intr_gen2,
1059 	.clear_intr = megasas_clear_intr_gen2,
1060 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1061 	.adp_reset = megasas_adp_reset_gen2,
1062 	.check_reset = megasas_check_reset_gen2,
1063 	.service_isr = megasas_isr,
1064 	.tasklet = megasas_complete_cmd_dpc,
1065 	.init_adapter = megasas_init_adapter_mfi,
1066 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1067 	.issue_dcmd = megasas_issue_dcmd,
1068 };
1069 
1070 /**
1071 *	This is the end of set of functions & definitions
1072 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1073 */
1074 
1075 /*
1076  * Template added for TB (Fusion)
1077  */
1078 extern struct megasas_instance_template megasas_instance_template_fusion;
1079 
1080 /**
1081  * megasas_issue_polled -	Issues a polling command
1082  * @instance:			Adapter soft state
1083  * @cmd:			Command packet to be issued
1084  *
1085  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1086  */
1087 int
1088 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1089 {
1090 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1091 
1092 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1093 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1094 
1095 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1096 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1097 			__func__, __LINE__);
1098 		return DCMD_NOT_FIRED;
1099 	}
1100 
1101 	instance->instancet->issue_dcmd(instance, cmd);
1102 
1103 	return wait_and_poll(instance, cmd, instance->requestorId ?
1104 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1105 }
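
/*
 * Hedged usage sketch for megasas_issue_polled() (illustrative only; DCMD
 * setup omitted):
 *
 *	if (megasas_issue_polled(instance, cmd) == DCMD_SUCCESS)
 *		...consume the DMA buffer filled by FW...
 *	else
 *		...handle DCMD_NOT_FIRED/DCMD_TIMEOUT/DCMD_FAILED...
 */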
1106 
1107 /**
1108  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1109  * @instance:			Adapter soft state
1110  * @cmd:			Command to be issued
1111  * @timeout:			Timeout in seconds
1112  *
1113  * This function waits on an event for the command to be returned from ISR.
1114  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1115  * Used to issue ioctl commands.
1116  */
1117 int
1118 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1119 			  struct megasas_cmd *cmd, int timeout)
1120 {
1121 	int ret = 0;
1122 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1123 
1124 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1125 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1126 			__func__, __LINE__);
1127 		return DCMD_NOT_FIRED;
1128 	}
1129 
1130 	instance->instancet->issue_dcmd(instance, cmd);
1131 
1132 	if (timeout) {
1133 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1134 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1135 		if (!ret) {
1136 			dev_err(&instance->pdev->dev,
1137 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1138 				cmd->frame->dcmd.opcode, __func__);
1139 			return DCMD_TIMEOUT;
1140 		}
1141 	} else
1142 		wait_event(instance->int_cmd_wait_q,
1143 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1144 
1145 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1146 		DCMD_SUCCESS : DCMD_FAILED;
1147 }
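
/*
 * Hedged usage sketch for megasas_issue_blocked_cmd() (illustrative; frame
 * setup omitted and error handling is adapter specific):
 *
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	switch (ret) {
 *	case DCMD_SUCCESS:
 *		break;
 *	case DCMD_TIMEOUT:
 *		break;
 *	default:
 *		break;
 *	}
 *
 * A DCMD_TIMEOUT is often escalated to a controller reset (OCR) rather than
 * simply freeing the command, since FW may still own the frame.
 */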
1148 
1149 /**
1150  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1151  * @instance:				Adapter soft state
1152  * @cmd_to_abort:			Previously issued cmd to be aborted
1153  * @timeout:				Timeout in seconds
1154  *
1155  * MFI firmware can abort previously issued AEN command (automatic event
1156  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1157  * cmd and waits for return status.
1158  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1159  */
1160 static int
1161 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1162 				struct megasas_cmd *cmd_to_abort, int timeout)
1163 {
1164 	struct megasas_cmd *cmd;
1165 	struct megasas_abort_frame *abort_fr;
1166 	int ret = 0;
1167 	u32 opcode;
1168 
1169 	cmd = megasas_get_cmd(instance);
1170 
1171 	if (!cmd)
1172 		return -1;
1173 
1174 	abort_fr = &cmd->frame->abort;
1175 
1176 	/*
1177 	 * Prepare and issue the abort frame
1178 	 */
1179 	abort_fr->cmd = MFI_CMD_ABORT;
1180 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1181 	abort_fr->flags = cpu_to_le16(0);
1182 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1183 	abort_fr->abort_mfi_phys_addr_lo =
1184 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1185 	abort_fr->abort_mfi_phys_addr_hi =
1186 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1187 
1188 	cmd->sync_cmd = 1;
1189 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1190 
1191 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1192 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1193 			__func__, __LINE__);
1194 		return DCMD_NOT_FIRED;
1195 	}
1196 
1197 	instance->instancet->issue_dcmd(instance, cmd);
1198 
1199 	if (timeout) {
1200 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1201 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1202 		if (!ret) {
1203 			opcode = cmd_to_abort->frame->dcmd.opcode;
1204 			dev_err(&instance->pdev->dev,
1205 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1206 				opcode,  __func__);
1207 			return DCMD_TIMEOUT;
1208 		}
1209 	} else
1210 		wait_event(instance->abort_cmd_wait_q,
1211 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1212 
1213 	cmd->sync_cmd = 0;
1214 
1215 	megasas_return_cmd(instance, cmd);
1216 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1217 		DCMD_SUCCESS : DCMD_FAILED;
1218 }
1219 
1220 /**
1221  * megasas_make_sgl32 -	Prepares 32-bit SGL
1222  * @instance:		Adapter soft state
1223  * @scp:		SCSI command from the mid-layer
1224  * @mfi_sgl:		SGL to be filled in
1225  *
1226  * If successful, this function returns the number of SG elements. Otherwise,
1227  * it returns -1.
1228  */
1229 static int
1230 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1231 		   union megasas_sgl *mfi_sgl)
1232 {
1233 	int i;
1234 	int sge_count;
1235 	struct scatterlist *os_sgl;
1236 
1237 	sge_count = scsi_dma_map(scp);
1238 	BUG_ON(sge_count < 0);
1239 
1240 	if (sge_count) {
1241 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1242 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1243 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1244 		}
1245 	}
1246 	return sge_count;
1247 }
1248 
1249 /**
1250  * megasas_make_sgl64 -	Prepares 64-bit SGL
1251  * @instance:		Adapter soft state
1252  * @scp:		SCSI command from the mid-layer
1253  * @mfi_sgl:		SGL to be filled in
1254  *
1255  * If successful, this function returns the number of SG elements. Otherwise,
1256  * it returns -1.
1257  */
1258 static int
1259 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1260 		   union megasas_sgl *mfi_sgl)
1261 {
1262 	int i;
1263 	int sge_count;
1264 	struct scatterlist *os_sgl;
1265 
1266 	sge_count = scsi_dma_map(scp);
1267 	BUG_ON(sge_count < 0);
1268 
1269 	if (sge_count) {
1270 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1271 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1272 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1273 		}
1274 	}
1275 	return sge_count;
1276 }
1277 
1278 /**
1279  * megasas_make_sgl_skinny - Prepares IEEE SGL
1280  * @instance:           Adapter soft state
1281  * @scp:                SCSI command from the mid-layer
1282  * @mfi_sgl:            SGL to be filled in
1283  *
1284  * If successful, this function returns the number of SG elements. Otherwise,
1285  * it returns -1.
1286  */
1287 static int
1288 megasas_make_sgl_skinny(struct megasas_instance *instance,
1289 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1290 {
1291 	int i;
1292 	int sge_count;
1293 	struct scatterlist *os_sgl;
1294 
1295 	sge_count = scsi_dma_map(scp);
1296 
1297 	if (sge_count) {
1298 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1299 			mfi_sgl->sge_skinny[i].length =
1300 				cpu_to_le32(sg_dma_len(os_sgl));
1301 			mfi_sgl->sge_skinny[i].phys_addr =
1302 				cpu_to_le64(sg_dma_address(os_sgl));
1303 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1304 		}
1305 	}
1306 	return sge_count;
1307 }
1308 
1309  /**
1310  * megasas_get_frame_count - Computes the number of frames
1311  * @sge_count		: number of sg elements
1312  * @frame_type		: type of frame - io or pthru frame
1313  *
1314  * Returns the number of frames required for the given number of sge's (sge_count)
1315  */
1316 
1317 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1318 			u8 sge_count, u8 frame_type)
1319 {
1320 	int num_cnt;
1321 	int sge_bytes;
1322 	u32 sge_sz;
1323 	u32 frame_count = 0;
1324 
1325 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1326 	    sizeof(struct megasas_sge32);
1327 
1328 	if (instance->flag_ieee) {
1329 		sge_sz = sizeof(struct megasas_sge_skinny);
1330 	}
1331 
1332 	/*
1333 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1334 	 * 3 SGEs for 32-bit SGLs for ldio &
1335 	 * 1 SGEs for 64-bit SGLs and
1336 	 * 2 SGEs for 32-bit SGLs for pthru frame
1337 	 */
1338 	if (unlikely(frame_type == PTHRU_FRAME)) {
1339 		if (instance->flag_ieee == 1) {
1340 			num_cnt = sge_count - 1;
1341 		} else if (IS_DMA64)
1342 			num_cnt = sge_count - 1;
1343 		else
1344 			num_cnt = sge_count - 2;
1345 	} else {
1346 		if (instance->flag_ieee == 1) {
1347 			num_cnt = sge_count - 1;
1348 		} else if (IS_DMA64)
1349 			num_cnt = sge_count - 2;
1350 		else
1351 			num_cnt = sge_count - 3;
1352 	}
1353 
1354 	if (num_cnt > 0) {
1355 		sge_bytes = sge_sz * num_cnt;
1356 
1357 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1358 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1359 	}
1360 	/* Main frame */
1361 	frame_count += 1;
1362 
1363 	if (frame_count > 7)
1364 		frame_count = 8;
1365 	return frame_count;
1366 }
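
/*
 * Worked example (hedged; assumes a 16-byte skinny/IEEE SGE and the 64-byte
 * MEGAMFI_FRAME_SIZE): an IEEE LDIO with sge_count = 10 gives num_cnt = 9 and
 * sge_bytes = 144, so the extra SGEs need ceil(144 / 64) = 3 frames and the
 * returned frame_count is 3 + 1 (main frame) = 4.
 */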
1367 
1368 /**
1369  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1370  * @instance:		Adapter soft state
1371  * @scp:		SCSI command
1372  * @cmd:		Command to be prepared in
1373  *
1374  * This function prepares CDB commands. These are typically pass-through
1375  * commands to the devices.
1376  */
1377 static int
1378 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1379 		   struct megasas_cmd *cmd)
1380 {
1381 	u32 is_logical;
1382 	u32 device_id;
1383 	u16 flags = 0;
1384 	struct megasas_pthru_frame *pthru;
1385 
1386 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1387 	device_id = MEGASAS_DEV_INDEX(scp);
1388 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1389 
1390 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1391 		flags = MFI_FRAME_DIR_WRITE;
1392 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1393 		flags = MFI_FRAME_DIR_READ;
1394 	else if (scp->sc_data_direction == DMA_NONE)
1395 		flags = MFI_FRAME_DIR_NONE;
1396 
1397 	if (instance->flag_ieee == 1) {
1398 		flags |= MFI_FRAME_IEEE;
1399 	}
1400 
1401 	/*
1402 	 * Prepare the DCDB frame
1403 	 */
1404 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1405 	pthru->cmd_status = 0x0;
1406 	pthru->scsi_status = 0x0;
1407 	pthru->target_id = device_id;
1408 	pthru->lun = scp->device->lun;
1409 	pthru->cdb_len = scp->cmd_len;
1410 	pthru->timeout = 0;
1411 	pthru->pad_0 = 0;
1412 	pthru->flags = cpu_to_le16(flags);
1413 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1414 
1415 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1416 
1417 	/*
1418 	 * If the command is for the tape device, set the
1419 	 * pthru timeout to the os layer timeout value.
1420 	 */
1421 	if (scp->device->type == TYPE_TAPE) {
1422 		if ((scp->request->timeout / HZ) > 0xFFFF)
1423 			pthru->timeout = cpu_to_le16(0xFFFF);
1424 		else
1425 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1426 	}
1427 
1428 	/*
1429 	 * Construct SGL
1430 	 */
1431 	if (instance->flag_ieee == 1) {
1432 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1433 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1434 						      &pthru->sgl);
1435 	} else if (IS_DMA64) {
1436 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1437 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1438 						      &pthru->sgl);
1439 	} else
1440 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1441 						      &pthru->sgl);
1442 
1443 	if (pthru->sge_count > instance->max_num_sge) {
1444 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1445 			pthru->sge_count);
1446 		return 0;
1447 	}
1448 
1449 	/*
1450 	 * Sense info specific
1451 	 */
1452 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1453 	pthru->sense_buf_phys_addr_hi =
1454 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1455 	pthru->sense_buf_phys_addr_lo =
1456 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1457 
1458 	/*
1459 	 * Compute the total number of frames this command consumes. FW uses
1460 	 * this number to pull sufficient number of frames from host memory.
1461 	 */
1462 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1463 							PTHRU_FRAME);
1464 
1465 	return cmd->frame_count;
1466 }
1467 
1468 /**
1469  * megasas_build_ldio -	Prepares IOs to logical devices
1470  * @instance:		Adapter soft state
1471  * @scp:		SCSI command
1472  * @cmd:		Command to be prepared
1473  *
1474  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1475  */
1476 static int
1477 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1478 		   struct megasas_cmd *cmd)
1479 {
1480 	u32 device_id;
1481 	u8 sc = scp->cmnd[0];
1482 	u16 flags = 0;
1483 	struct megasas_io_frame *ldio;
1484 
1485 	device_id = MEGASAS_DEV_INDEX(scp);
1486 	ldio = (struct megasas_io_frame *)cmd->frame;
1487 
1488 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1489 		flags = MFI_FRAME_DIR_WRITE;
1490 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1491 		flags = MFI_FRAME_DIR_READ;
1492 
1493 	if (instance->flag_ieee == 1) {
1494 		flags |= MFI_FRAME_IEEE;
1495 	}
1496 
1497 	/*
1498 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1499 	 */
1500 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1501 	ldio->cmd_status = 0x0;
1502 	ldio->scsi_status = 0x0;
1503 	ldio->target_id = device_id;
1504 	ldio->timeout = 0;
1505 	ldio->reserved_0 = 0;
1506 	ldio->pad_0 = 0;
1507 	ldio->flags = cpu_to_le16(flags);
1508 	ldio->start_lba_hi = 0;
1509 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1510 
1511 	/*
1512 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1513 	 */
1514 	if (scp->cmd_len == 6) {
1515 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1516 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1517 						 ((u32) scp->cmnd[2] << 8) |
1518 						 (u32) scp->cmnd[3]);
1519 
1520 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1521 	}
1522 
1523 	/*
1524 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1525 	 */
1526 	else if (scp->cmd_len == 10) {
1527 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1528 					      ((u32) scp->cmnd[7] << 8));
1529 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1530 						 ((u32) scp->cmnd[3] << 16) |
1531 						 ((u32) scp->cmnd[4] << 8) |
1532 						 (u32) scp->cmnd[5]);
1533 	}
1534 
1535 	/*
1536 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1537 	 */
1538 	else if (scp->cmd_len == 12) {
1539 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1540 					      ((u32) scp->cmnd[7] << 16) |
1541 					      ((u32) scp->cmnd[8] << 8) |
1542 					      (u32) scp->cmnd[9]);
1543 
1544 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1545 						 ((u32) scp->cmnd[3] << 16) |
1546 						 ((u32) scp->cmnd[4] << 8) |
1547 						 (u32) scp->cmnd[5]);
1548 	}
1549 
1550 	/*
1551 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1552 	 */
1553 	else if (scp->cmd_len == 16) {
1554 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1555 					      ((u32) scp->cmnd[11] << 16) |
1556 					      ((u32) scp->cmnd[12] << 8) |
1557 					      (u32) scp->cmnd[13]);
1558 
1559 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1560 						 ((u32) scp->cmnd[7] << 16) |
1561 						 ((u32) scp->cmnd[8] << 8) |
1562 						 (u32) scp->cmnd[9]);
1563 
1564 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1565 						 ((u32) scp->cmnd[3] << 16) |
1566 						 ((u32) scp->cmnd[4] << 8) |
1567 						 (u32) scp->cmnd[5]);
1568 
1569 	}
1570 
1571 	/*
1572 	 * Construct SGL
1573 	 */
1574 	if (instance->flag_ieee) {
1575 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1576 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1577 					      &ldio->sgl);
1578 	} else if (IS_DMA64) {
1579 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1580 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1581 	} else
1582 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1583 
1584 	if (ldio->sge_count > instance->max_num_sge) {
1585 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1586 			ldio->sge_count);
1587 		return 0;
1588 	}
1589 
1590 	/*
1591 	 * Sense info specific
1592 	 */
1593 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1594 	ldio->sense_buf_phys_addr_hi = 0;
1595 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1596 
1597 	/*
1598 	 * Compute the total number of frames this command consumes. FW uses
1599 	 * this number to pull sufficient number of frames from host memory.
1600 	 */
1601 	cmd->frame_count = megasas_get_frame_count(instance,
1602 			ldio->sge_count, IO_FRAME);
1603 
1604 	return cmd->frame_count;
1605 }
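
/*
 * Worked example (hedged) of the 10-byte CDB decode above: for a READ(10)
 * with cmnd[] = { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 }
 * the code sets start_lba_lo = 0x00123456 and lba_count = 0x0008, i.e. an
 * 8-block read starting at LBA 0x123456.
 */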
1606 
1607 /**
1608  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1609  *				and whether it's RW or non RW
1610  * @cmd:			SCSI command
1611  *
1612  */
1613 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1614 {
1615 	int ret;
1616 
1617 	switch (cmd->cmnd[0]) {
1618 	case READ_10:
1619 	case WRITE_10:
1620 	case READ_12:
1621 	case WRITE_12:
1622 	case READ_6:
1623 	case WRITE_6:
1624 	case READ_16:
1625 	case WRITE_16:
1626 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1627 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1628 		break;
1629 	default:
1630 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1631 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1632 	}
1633 	return ret;
1634 }
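
/*
 * For example (illustrative): a READ_10 issued to a logical drive maps to
 * READ_WRITE_LDIO, while an INQUIRY sent to a system PD maps to
 * NON_READ_WRITE_SYSPDIO.
 */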
1635 
1636  /**
1637  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1638  *					in FW
1639  * @instance:				Adapter soft state
1640  */
1641 static inline void
1642 megasas_dump_pending_frames(struct megasas_instance *instance)
1643 {
1644 	struct megasas_cmd *cmd;
1645 	int i,n;
1646 	union megasas_sgl *mfi_sgl;
1647 	struct megasas_io_frame *ldio;
1648 	struct megasas_pthru_frame *pthru;
1649 	u32 sgcount;
1650 	u16 max_cmd = instance->max_fw_cmds;
1651 
1652 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1653 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1654 	if (IS_DMA64)
1655 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1656 	else
1657 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1658 
1659 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1660 	for (i = 0; i < max_cmd; i++) {
1661 		cmd = instance->cmd_list[i];
1662 		if (!cmd->scmd)
1663 			continue;
1664 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1665 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1666 			ldio = (struct megasas_io_frame *)cmd->frame;
1667 			mfi_sgl = &ldio->sgl;
1668 			sgcount = ldio->sge_count;
1669 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1670 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1671 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1672 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1673 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1674 		} else {
1675 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1676 			mfi_sgl = &pthru->sgl;
1677 			sgcount = pthru->sge_count;
1678 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1679 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1680 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1681 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1682 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1683 		}
1684 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1685 			for (n = 0; n < sgcount; n++) {
1686 				if (IS_DMA64)
1687 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1688 						le32_to_cpu(mfi_sgl->sge64[n].length),
1689 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1690 				else
1691 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1692 						le32_to_cpu(mfi_sgl->sge32[n].length),
1693 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1694 			}
1695 		}
1696 	} /*for max_cmd*/
1697 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1698 	for (i = 0; i < max_cmd; i++) {
1699 
1700 		cmd = instance->cmd_list[i];
1701 
1702 		if (cmd->sync_cmd == 1)
1703 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1704 	}
1705 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1706 }
1707 
1708 u32
1709 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1710 			    struct scsi_cmnd *scmd)
1711 {
1712 	struct megasas_cmd *cmd;
1713 	u32 frame_count;
1714 
1715 	cmd = megasas_get_cmd(instance);
1716 	if (!cmd)
1717 		return SCSI_MLQUEUE_HOST_BUSY;
1718 
1719 	/*
1720 	 * Logical drive command
1721 	 */
1722 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1723 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1724 	else
1725 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1726 
1727 	if (!frame_count)
1728 		goto out_return_cmd;
1729 
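	/*
	 * Cross-link the SCSI command and the MFI command so that the
	 * completion and abort paths can find one another via scmd->SCp.ptr.
	 */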
1730 	cmd->scmd = scmd;
1731 	scmd->SCp.ptr = (char *)cmd;
1732 
1733 	/*
1734 	 * Issue the command to the FW
1735 	 */
1736 	atomic_inc(&instance->fw_outstanding);
1737 
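	/*
	 * fire_cmd() is passed the extra frame count, i.e. the total number
	 * of frames minus the first one.
	 */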
1738 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1739 				cmd->frame_count-1, instance->reg_set);
1740 
1741 	return 0;
1742 out_return_cmd:
1743 	megasas_return_cmd(instance, cmd);
1744 	return SCSI_MLQUEUE_HOST_BUSY;
1745 }
1746 
1747 
/**
 * megasas_queue_command -	Queue entry point
 * @shost:			SCSI host handle
 * @scmd:			SCSI command to be queued
 */
1753 static int
1754 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1755 {
1756 	struct megasas_instance *instance;
1757 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1758 
1759 	instance = (struct megasas_instance *)
1760 	    scmd->device->host->hostdata;
1761 
1762 	if (instance->unload == 1) {
1763 		scmd->result = DID_NO_CONNECT << 16;
1764 		scmd->scsi_done(scmd);
1765 		return 0;
1766 	}
1767 
1768 	if (instance->issuepend_done == 0)
1769 		return SCSI_MLQUEUE_HOST_BUSY;
1770 
1771 
1772 	/* Check for an mpio path and adjust behavior */
1773 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1774 		if (megasas_check_mpio_paths(instance, scmd) ==
1775 		    (DID_REQUEUE << 16)) {
1776 			return SCSI_MLQUEUE_HOST_BUSY;
1777 		} else {
1778 			scmd->result = DID_NO_CONNECT << 16;
1779 			scmd->scsi_done(scmd);
1780 			return 0;
1781 		}
1782 	}
1783 
1784 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1785 		scmd->result = DID_NO_CONNECT << 16;
1786 		scmd->scsi_done(scmd);
1787 		return 0;
1788 	}
1789 
1790 	mr_device_priv_data = scmd->device->hostdata;
1791 	if (!mr_device_priv_data) {
1792 		scmd->result = DID_NO_CONNECT << 16;
1793 		scmd->scsi_done(scmd);
1794 		return 0;
1795 	}
1796 
1797 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1798 		return SCSI_MLQUEUE_HOST_BUSY;
1799 
1800 	if (mr_device_priv_data->tm_busy)
1801 		return SCSI_MLQUEUE_DEVICE_BUSY;
1802 
1803 
1804 	scmd->result = 0;
1805 
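	/*
	 * Reject logical devices beyond the firmware supported VD count;
	 * MegaRAID logical drives are exposed only at LUN 0.
	 */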
1806 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1807 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1808 		scmd->device->lun)) {
1809 		scmd->result = DID_BAD_TARGET << 16;
1810 		goto out_done;
1811 	}
1812 
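	/*
	 * When the firmware does not advertise SYNCHRONIZE_CACHE support for
	 * logical drives, complete the command in the driver as a no-op.
	 */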
1813 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1814 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1815 	    (!instance->fw_sync_cache_support)) {
1816 		scmd->result = DID_OK << 16;
1817 		goto out_done;
1818 	}
1819 
1820 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1821 
1822  out_done:
1823 	scmd->scsi_done(scmd);
1824 	return 0;
1825 }
1826 
1827 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1828 {
1829 	int i;
1830 
1831 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1832 
1833 		if ((megasas_mgmt_info.instance[i]) &&
1834 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1835 			return megasas_mgmt_info.instance[i];
1836 	}
1837 
1838 	return NULL;
1839 }
1840 
/*
 * megasas_set_dynamic_target_properties -
 * Device properties set by the driver that are not static and therefore
 * must be refreshed after an OCR:
 *
 * set tm_capable.
 * set dma alignment (only for EEDP protection enabled VD).
 *
 * @sdev: OS provided scsi device
 * @is_target_prop: true if FW provided target properties
 *
 * Returns void
 */
1853 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1854 					   bool is_target_prop)
1855 {
1856 	u16 pd_index = 0, ld;
1857 	u32 device_id;
1858 	struct megasas_instance *instance;
1859 	struct fusion_context *fusion;
1860 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1861 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1862 	struct MR_LD_RAID *raid;
1863 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1864 
1865 	instance = megasas_lookup_instance(sdev->host->host_no);
1866 	fusion = instance->ctrl_context;
1867 	mr_device_priv_data = sdev->hostdata;
1868 
1869 	if (!fusion || !mr_device_priv_data)
1870 		return;
1871 
1872 	if (MEGASAS_IS_LOGICAL(sdev)) {
1873 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1874 					+ sdev->id;
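		/* RAID maps are double buffered; map_id selects the current copy. */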
1875 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1876 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1877 		if (ld >= instance->fw_supported_vd_count)
1878 			return;
1879 		raid = MR_LdRaidGet(ld, local_map_ptr);
1880 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1883 
1884 		mr_device_priv_data->is_tm_capable =
1885 			raid->capability.tmCapable;
1886 	} else if (instance->use_seqnum_jbod_fp) {
1887 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1888 			sdev->id;
1889 		pd_sync = (void *)fusion->pd_seq_sync
1890 				[(instance->pd_seq_map_id - 1) & 1];
1891 		mr_device_priv_data->is_tm_capable =
1892 			pd_sync->seq[pd_index].capability.tmCapable;
1893 	}
1894 
1895 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
		/*
		 * If FW provides a target reset timeout value, the driver
		 * uses it. If not set, fall back to the default values.
		 */
1900 		mr_device_priv_data->target_reset_tmo =
1901 			min_t(u8, instance->max_reset_tmo,
1902 			      instance->tgt_prop->reset_tmo);
1903 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1904 	} else {
1905 		mr_device_priv_data->target_reset_tmo =
1906 						MEGASAS_DEFAULT_TM_TIMEOUT;
1907 		mr_device_priv_data->task_abort_tmo =
1908 						MEGASAS_DEFAULT_TM_TIMEOUT;
1909 	}
1910 }
1911 
/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVMe device provided by MR firmware.
 *
 * MR firmware provides the value in KB; the caller of this function
 * converts KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * NVMe page size. With a 4K page size,
 * MR firmware provides the value 128, since 32 * 4K = 128K.
 *
 * @sdev:				scsi device
 * @max_io_size:			maximum io transfer size in bytes
 *
 */
1928 static inline void
1929 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1930 {
1931 	struct megasas_instance *instance;
1932 	u32 mr_nvme_pg_size;
1933 
1934 	instance = (struct megasas_instance *)sdev->host->hostdata;
1935 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1936 				MR_DEFAULT_NVME_PAGE_SIZE);
1937 
1938 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1939 
1940 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
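	/* Keep SG elements from crossing an NVMe device page boundary. */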
1941 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1942 }
1943 
1944 
/*
 * megasas_set_static_target_properties -
 * Device properties set by the driver that are static and do not need to
 * be updated after an OCR:
 *
 * set io timeout
 * set device queue depth
 * set nvme device properties. see - megasas_set_nvme_device_properties
 *
 * @sdev:				scsi device
 * @is_target_prop:			true, if fw provided target properties.
 */
1957 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1958 						 bool is_target_prop)
1959 {
1960 	u8 interface_type;
1961 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1962 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1963 	u32 tgt_device_qd;
1964 	struct megasas_instance *instance;
1965 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1966 
1967 	instance = megasas_lookup_instance(sdev->host->host_no);
1968 	mr_device_priv_data = sdev->hostdata;
1969 	interface_type  = mr_device_priv_data->interface_type;
1970 
1971 	/*
1972 	 * The RAID firmware may require extended timeouts.
1973 	 */
1974 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1975 
1976 	switch (interface_type) {
1977 	case SAS_PD:
1978 		device_qd = MEGASAS_SAS_QD;
1979 		break;
1980 	case SATA_PD:
1981 		device_qd = MEGASAS_SATA_QD;
1982 		break;
1983 	case NVME_PD:
1984 		device_qd = MEGASAS_NVME_QD;
1985 		break;
1986 	}
1987 
1988 	if (is_target_prop) {
1989 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1990 		if (tgt_device_qd &&
1991 		    (tgt_device_qd <= instance->host->can_queue))
1992 			device_qd = tgt_device_qd;
1993 
		/* max_io_size_kb will be non-zero for NVMe-based VDs
		 * and system PDs.
		 */
1997 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1998 	}
1999 
2000 	if (instance->nvme_page_size && max_io_size_kb)
2001 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2002 
2003 	scsi_change_queue_depth(sdev, device_qd);
2004 
2005 }
2006 
2007 
2008 static int megasas_slave_configure(struct scsi_device *sdev)
2009 {
2010 	u16 pd_index = 0;
2011 	struct megasas_instance *instance;
2012 	int ret_target_prop = DCMD_FAILED;
2013 	bool is_target_prop = false;
2014 
2015 	instance = megasas_lookup_instance(sdev->host->host_no);
2016 	if (instance->pd_list_not_supported) {
2017 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2018 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2019 				sdev->id;
2020 			if (instance->pd_list[pd_index].driveState !=
2021 				MR_PD_STATE_SYSTEM)
2022 				return -ENXIO;
2023 		}
2024 	}
2025 
2026 	mutex_lock(&instance->reset_mutex);
2027 	/* Send DCMD to Firmware and cache the information */
2028 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2029 		megasas_get_pd_info(instance, sdev);
2030 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
	 */
2034 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2035 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2036 
2037 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2038 	megasas_set_static_target_properties(sdev, is_target_prop);
2039 
2040 	/* This sdev property may change post OCR */
2041 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2042 
2043 	mutex_unlock(&instance->reset_mutex);
2044 
2045 	return 0;
2046 }
2047 
2048 static int megasas_slave_alloc(struct scsi_device *sdev)
2049 {
2050 	u16 pd_index = 0;
2051 	struct megasas_instance *instance ;
2052 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2053 
2054 	instance = megasas_lookup_instance(sdev->host->host_no);
2055 	if (!MEGASAS_IS_LOGICAL(sdev)) {
		/*
		 * Expose only SYSTEM PDs to the OS scan
		 */
2059 		pd_index =
2060 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2061 			sdev->id;
2062 		if ((instance->pd_list_not_supported ||
2063 			instance->pd_list[pd_index].driveState ==
2064 			MR_PD_STATE_SYSTEM)) {
2065 			goto scan_target;
2066 		}
2067 		return -ENXIO;
2068 	}
2069 
2070 scan_target:
2071 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2072 					GFP_KERNEL);
2073 	if (!mr_device_priv_data)
2074 		return -ENOMEM;
2075 	sdev->hostdata = mr_device_priv_data;
2076 
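	/*
	 * Initialize the per-device RAID1 LD I/O hint counter from the
	 * adapter default.
	 */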
2077 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2078 		   instance->r1_ldio_hint_default);
2079 	return 0;
2080 }
2081 
2082 static void megasas_slave_destroy(struct scsi_device *sdev)
2083 {
2084 	kfree(sdev->hostdata);
2085 	sdev->hostdata = NULL;
2086 }
2087 
/*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
 *                                       the adapter has been killed
 * @instance:				Adapter soft state
 *
 */
2094 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2095 {
2096 	int i;
2097 	struct megasas_cmd *cmd_mfi;
2098 	struct megasas_cmd_fusion *cmd_fusion;
2099 	struct fusion_context *fusion = instance->ctrl_context;
2100 
2101 	/* Find all outstanding ioctls */
2102 	if (fusion) {
2103 		for (i = 0; i < instance->max_fw_cmds; i++) {
2104 			cmd_fusion = fusion->cmd_list[i];
2105 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2106 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2107 				if (cmd_mfi->sync_cmd &&
2108 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2109 					cmd_mfi->frame->hdr.cmd_status =
2110 							MFI_STAT_WRONG_STATE;
2111 					megasas_complete_cmd(instance,
2112 							     cmd_mfi, DID_OK);
2113 				}
2114 			}
2115 		}
2116 	} else {
2117 		for (i = 0; i < instance->max_fw_cmds; i++) {
2118 			cmd_mfi = instance->cmd_list[i];
2119 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2120 				MFI_CMD_ABORT)
2121 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2122 		}
2123 	}
2124 }
2125 
2126 
2127 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2128 {
2129 	/* Set critical error to block I/O & ioctls in case caller didn't */
2130 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2131 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2132 	msleep(1000);
2133 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2134 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2135 		(instance->adapter_type != MFI_SERIES)) {
2136 		if (!instance->requestorId) {
2137 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2138 			/* Flush */
2139 			readl(&instance->reg_set->doorbell);
2140 		}
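		/*
		 * For an SR-IOV VF with a peer present, do not stop the
		 * adapter; just invalidate the local LD ID map.
		 */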
2141 		if (instance->requestorId && instance->peerIsPresent)
2142 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2143 	} else {
2144 		writel(MFI_STOP_ADP,
2145 			&instance->reg_set->inbound_doorbell);
2146 	}
2147 	/* Complete outstanding ioctls when adapter is killed */
2148 	megasas_complete_outstanding_ioctls(instance);
2149 }
2150 
/**
 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
 *					restored to max value
 * @instance:			Adapter soft state
 *
 */
2157 void
2158 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2159 {
2160 	unsigned long flags;
2161 
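	/*
	 * Restore can_queue only after the throttle has been in effect for at
	 * least 5 seconds and outstanding I/O has drained below the throttle
	 * depth.
	 */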
2162 	if (instance->flag & MEGASAS_FW_BUSY
2163 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2164 	    && atomic_read(&instance->fw_outstanding) <
2165 	    instance->throttlequeuedepth + 1) {
2166 
2167 		spin_lock_irqsave(instance->host->host_lock, flags);
2168 		instance->flag &= ~MEGASAS_FW_BUSY;
2169 
2170 		instance->host->can_queue = instance->cur_can_queue;
2171 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2172 	}
2173 }
2174 
/**
 * megasas_complete_cmd_dpc	 -	Completes outstanding commands
 * @instance_addr:			Address of adapter soft state
 *
 * Tasklet to complete cmds
 */
2181 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2182 {
2183 	u32 producer;
2184 	u32 consumer;
2185 	u32 context;
2186 	struct megasas_cmd *cmd;
2187 	struct megasas_instance *instance =
2188 				(struct megasas_instance *)instance_addr;
2189 	unsigned long flags;
2190 
	/* If we have already declared the adapter dead, do not complete cmds */
2192 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2193 		return;
2194 
2195 	spin_lock_irqsave(&instance->completion_lock, flags);
2196 
2197 	producer = le32_to_cpu(*instance->producer);
2198 	consumer = le32_to_cpu(*instance->consumer);
2199 
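	/*
	 * Walk the reply ring from consumer to producer; each entry holds the
	 * context (command index) of a completed frame.
	 */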
2200 	while (consumer != producer) {
2201 		context = le32_to_cpu(instance->reply_queue[consumer]);
2202 		if (context >= instance->max_fw_cmds) {
2203 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2204 				context);
2205 			BUG();
2206 		}
2207 
2208 		cmd = instance->cmd_list[context];
2209 
2210 		megasas_complete_cmd(instance, cmd, DID_OK);
2211 
2212 		consumer++;
2213 		if (consumer == (instance->max_fw_cmds + 1)) {
2214 			consumer = 0;
2215 		}
2216 	}
2217 
2218 	*instance->consumer = cpu_to_le32(producer);
2219 
2220 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2221 
2222 	/*
2223 	 * Check if we can restore can_queue
2224 	 */
2225 	megasas_check_and_restore_queue_depth(instance);
2226 }
2227 
2228 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2229 
2230 /**
2231  * megasas_start_timer - Initializes sriov heartbeat timer object
2232  * @instance:		Adapter soft state
2233  *
2234  */
2235 void megasas_start_timer(struct megasas_instance *instance)
2236 {
2237 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2238 
2239 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2240 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2241 	add_timer(timer);
2242 }
2243 
2244 static void
2245 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2246 
2247 static void
2248 process_fw_state_change_wq(struct work_struct *work);
2249 
2250 static void megasas_do_ocr(struct megasas_instance *instance)
2251 {
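	/*
	 * For the 1064R/PERC5/Verde ZCR adapters, mark the reset as in
	 * progress by writing the signature into the consumer index.
	 */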
2252 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2253 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2254 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2255 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2256 	}
2257 	instance->instancet->disable_intr(instance);
2258 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2259 	instance->issuepend_done = 0;
2260 
2261 	atomic_set(&instance->fw_outstanding, 0);
2262 	megasas_internal_reset_defer_cmds(instance);
2263 	process_fw_state_change_wq(&instance->work_init);
2264 }
2265 
2266 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2267 					    int initial)
2268 {
2269 	struct megasas_cmd *cmd;
2270 	struct megasas_dcmd_frame *dcmd;
2271 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2272 	dma_addr_t new_affiliation_111_h;
2273 	int ld, retval = 0;
2274 	u8 thisVf;
2275 
2276 	cmd = megasas_get_cmd(instance);
2277 
2278 	if (!cmd) {
2279 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2280 		       "Failed to get cmd for scsi%d\n",
2281 			instance->host->host_no);
2282 		return -ENOMEM;
2283 	}
2284 
2285 	dcmd = &cmd->frame->dcmd;
2286 
2287 	if (!instance->vf_affiliation_111) {
2288 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2289 		       "affiliation for scsi%d\n", instance->host->host_no);
2290 		megasas_return_cmd(instance, cmd);
2291 		return -ENOMEM;
2292 	}
2293 
	if (initial)
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2297 	else {
2298 		new_affiliation_111 =
2299 			dma_alloc_coherent(&instance->pdev->dev,
2300 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2301 					   &new_affiliation_111_h, GFP_KERNEL);
2302 		if (!new_affiliation_111) {
2303 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2304 			       "memory for new affiliation for scsi%d\n",
2305 			       instance->host->host_no);
2306 			megasas_return_cmd(instance, cmd);
2307 			return -ENOMEM;
2308 		}
2309 	}
2310 
2311 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2312 
2313 	dcmd->cmd = MFI_CMD_DCMD;
2314 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2315 	dcmd->sge_count = 1;
2316 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2317 	dcmd->timeout = 0;
2318 	dcmd->pad_0 = 0;
2319 	dcmd->data_xfer_len =
2320 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2321 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2322 
2323 	if (initial)
2324 		dcmd->sgl.sge32[0].phys_addr =
2325 			cpu_to_le32(instance->vf_affiliation_111_h);
2326 	else
2327 		dcmd->sgl.sge32[0].phys_addr =
2328 			cpu_to_le32(new_affiliation_111_h);
2329 
2330 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2331 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2332 
2333 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2334 	       "scsi%d\n", instance->host->host_no);
2335 
2336 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2337 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2338 		       " failed with status 0x%x for scsi%d\n",
2339 		       dcmd->cmd_status, instance->host->host_no);
2340 		retval = 1; /* Do a scan if we couldn't get affiliation */
2341 		goto out;
2342 	}
2343 
2344 	if (!initial) {
2345 		thisVf = new_affiliation_111->thisVf;
2346 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2347 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2348 			    new_affiliation_111->map[ld].policy[thisVf]) {
2349 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2350 				       "Got new LD/VF affiliation for scsi%d\n",
2351 				       instance->host->host_no);
2352 				memcpy(instance->vf_affiliation_111,
2353 				       new_affiliation_111,
2354 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2355 				retval = 1;
2356 				goto out;
2357 			}
2358 	}
2359 out:
2360 	if (new_affiliation_111) {
2361 		dma_free_coherent(&instance->pdev->dev,
2362 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2363 				    new_affiliation_111,
2364 				    new_affiliation_111_h);
2365 	}
2366 
2367 	megasas_return_cmd(instance, cmd);
2368 
2369 	return retval;
2370 }
2371 
2372 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2373 					    int initial)
2374 {
2375 	struct megasas_cmd *cmd;
2376 	struct megasas_dcmd_frame *dcmd;
2377 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2378 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2379 	dma_addr_t new_affiliation_h;
2380 	int i, j, retval = 0, found = 0, doscan = 0;
2381 	u8 thisVf;
2382 
2383 	cmd = megasas_get_cmd(instance);
2384 
2385 	if (!cmd) {
2386 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2387 		       "Failed to get cmd for scsi%d\n",
2388 		       instance->host->host_no);
2389 		return -ENOMEM;
2390 	}
2391 
2392 	dcmd = &cmd->frame->dcmd;
2393 
2394 	if (!instance->vf_affiliation) {
2395 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2396 		       "affiliation for scsi%d\n", instance->host->host_no);
2397 		megasas_return_cmd(instance, cmd);
2398 		return -ENOMEM;
2399 	}
2400 
2401 	if (initial)
2402 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2403 		       sizeof(struct MR_LD_VF_AFFILIATION));
2404 	else {
2405 		new_affiliation =
2406 			dma_alloc_coherent(&instance->pdev->dev,
2407 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2408 					   &new_affiliation_h, GFP_KERNEL);
2409 		if (!new_affiliation) {
2410 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2411 			       "memory for new affiliation for scsi%d\n",
2412 			       instance->host->host_no);
2413 			megasas_return_cmd(instance, cmd);
2414 			return -ENOMEM;
2415 		}
2416 	}
2417 
2418 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2419 
2420 	dcmd->cmd = MFI_CMD_DCMD;
2421 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2422 	dcmd->sge_count = 1;
2423 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2424 	dcmd->timeout = 0;
2425 	dcmd->pad_0 = 0;
2426 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2427 		sizeof(struct MR_LD_VF_AFFILIATION));
2428 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2429 
2430 	if (initial)
2431 		dcmd->sgl.sge32[0].phys_addr =
2432 			cpu_to_le32(instance->vf_affiliation_h);
2433 	else
2434 		dcmd->sgl.sge32[0].phys_addr =
2435 			cpu_to_le32(new_affiliation_h);
2436 
2437 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2438 		sizeof(struct MR_LD_VF_AFFILIATION));
2439 
2440 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2441 	       "scsi%d\n", instance->host->host_no);
2442 
2443 
2444 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2445 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2446 		       " failed with status 0x%x for scsi%d\n",
2447 		       dcmd->cmd_status, instance->host->host_no);
2448 		retval = 1; /* Do a scan if we couldn't get affiliation */
2449 		goto out;
2450 	}
2451 
2452 	if (!initial) {
2453 		if (!new_affiliation->ldCount) {
2454 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2455 			       "affiliation for passive path for scsi%d\n",
2456 			       instance->host->host_no);
2457 			retval = 1;
2458 			goto out;
2459 		}
2460 		newmap = new_affiliation->map;
2461 		savedmap = instance->vf_affiliation->map;
2462 		thisVf = new_affiliation->thisVf;
2463 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2464 			found = 0;
2465 			for (j = 0; j < instance->vf_affiliation->ldCount;
2466 			     j++) {
2467 				if (newmap->ref.targetId ==
2468 				    savedmap->ref.targetId) {
2469 					found = 1;
2470 					if (newmap->policy[thisVf] !=
2471 					    savedmap->policy[thisVf]) {
2472 						doscan = 1;
2473 						goto out;
2474 					}
2475 				}
2476 				savedmap = (struct MR_LD_VF_MAP *)
2477 					((unsigned char *)savedmap +
2478 					 savedmap->size);
2479 			}
2480 			if (!found && newmap->policy[thisVf] !=
2481 			    MR_LD_ACCESS_HIDDEN) {
2482 				doscan = 1;
2483 				goto out;
2484 			}
2485 			newmap = (struct MR_LD_VF_MAP *)
2486 				((unsigned char *)newmap + newmap->size);
2487 		}
2488 
2489 		newmap = new_affiliation->map;
2490 		savedmap = instance->vf_affiliation->map;
2491 
2492 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2493 			found = 0;
2494 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2495 				if (savedmap->ref.targetId ==
2496 				    newmap->ref.targetId) {
2497 					found = 1;
2498 					if (savedmap->policy[thisVf] !=
2499 					    newmap->policy[thisVf]) {
2500 						doscan = 1;
2501 						goto out;
2502 					}
2503 				}
2504 				newmap = (struct MR_LD_VF_MAP *)
2505 					((unsigned char *)newmap +
2506 					 newmap->size);
2507 			}
2508 			if (!found && savedmap->policy[thisVf] !=
2509 			    MR_LD_ACCESS_HIDDEN) {
2510 				doscan = 1;
2511 				goto out;
2512 			}
2513 			savedmap = (struct MR_LD_VF_MAP *)
2514 				((unsigned char *)savedmap +
2515 				 savedmap->size);
2516 		}
2517 	}
2518 out:
2519 	if (doscan) {
2520 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2521 		       "affiliation for scsi%d\n", instance->host->host_no);
2522 		memcpy(instance->vf_affiliation, new_affiliation,
2523 		       new_affiliation->size);
2524 		retval = 1;
2525 	}
2526 
2527 	if (new_affiliation)
2528 		dma_free_coherent(&instance->pdev->dev,
2529 				    (MAX_LOGICAL_DRIVES + 1) *
2530 				    sizeof(struct MR_LD_VF_AFFILIATION),
2531 				    new_affiliation, new_affiliation_h);
2532 	megasas_return_cmd(instance, cmd);
2533 
2534 	return retval;
2535 }
2536 
2537 /* This function will get the current SR-IOV LD/VF affiliation */
2538 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2539 	int initial)
2540 {
2541 	int retval;
2542 
2543 	if (instance->PlasmaFW111)
2544 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2545 	else
2546 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2547 	return retval;
2548 }
2549 
2550 /* This function will tell FW to start the SR-IOV heartbeat */
2551 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2552 					 int initial)
2553 {
2554 	struct megasas_cmd *cmd;
2555 	struct megasas_dcmd_frame *dcmd;
2556 	int retval = 0;
2557 
2558 	cmd = megasas_get_cmd(instance);
2559 
2560 	if (!cmd) {
2561 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2562 		       "Failed to get cmd for scsi%d\n",
2563 		       instance->host->host_no);
2564 		return -ENOMEM;
2565 	}
2566 
2567 	dcmd = &cmd->frame->dcmd;
2568 
2569 	if (initial) {
2570 		instance->hb_host_mem =
2571 			dma_alloc_coherent(&instance->pdev->dev,
2572 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2573 					   &instance->hb_host_mem_h,
2574 					   GFP_KERNEL);
2575 		if (!instance->hb_host_mem) {
2576 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2577 			       " memory for heartbeat host memory for scsi%d\n",
2578 			       instance->host->host_no);
2579 			retval = -ENOMEM;
2580 			goto out;
2581 		}
2582 	}
2583 
2584 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2585 
2586 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2587 	dcmd->cmd = MFI_CMD_DCMD;
2588 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2589 	dcmd->sge_count = 1;
2590 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2591 	dcmd->timeout = 0;
2592 	dcmd->pad_0 = 0;
2593 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2594 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2595 
2596 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2597 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2598 
2599 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2600 	       instance->host->host_no);
2601 
2602 	if ((instance->adapter_type != MFI_SERIES) &&
2603 	    !instance->mask_interrupts)
2604 		retval = megasas_issue_blocked_cmd(instance, cmd,
2605 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2606 	else
2607 		retval = megasas_issue_polled(instance, cmd);
2608 
2609 	if (retval) {
2610 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2611 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2612 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2613 			"timed out" : "failed", instance->host->host_no);
2614 		retval = 1;
2615 	}
2616 
2617 out:
2618 	megasas_return_cmd(instance, cmd);
2619 
2620 	return retval;
2621 }
2622 
2623 /* Handler for SR-IOV heartbeat */
2624 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2625 {
2626 	struct megasas_instance *instance =
2627 		from_timer(instance, t, sriov_heartbeat_timer);
2628 
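	/*
	 * If the FW counter has advanced since the last check, acknowledge it
	 * and re-arm the timer; otherwise the firmware has stopped responding,
	 * so schedule the recovery work.
	 */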
2629 	if (instance->hb_host_mem->HB.fwCounter !=
2630 	    instance->hb_host_mem->HB.driverCounter) {
2631 		instance->hb_host_mem->HB.driverCounter =
2632 			instance->hb_host_mem->HB.fwCounter;
2633 		mod_timer(&instance->sriov_heartbeat_timer,
2634 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2635 	} else {
2636 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2637 		       "completed for scsi%d\n", instance->host->host_no);
2638 		schedule_work(&instance->work_init);
2639 	}
2640 }
2641 
2642 /**
2643  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2644  * @instance:				Adapter soft state
2645  *
 * This function waits for up to resetwaittime seconds for the FW to complete
 * all its outstanding commands. Returns an error if one or more IOs are still
 * pending after this time period, in which case it also kills the adapter.
2649  */
2650 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2651 {
2652 	int i, sl, outstanding;
2653 	u32 reset_index;
2654 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2655 	unsigned long flags;
2656 	struct list_head clist_local;
2657 	struct megasas_cmd *reset_cmd;
2658 	u32 fw_state;
2659 
2660 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2661 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2662 		__func__, __LINE__);
2663 		return FAILED;
2664 	}
2665 
2666 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2667 
2668 		INIT_LIST_HEAD(&clist_local);
2669 		spin_lock_irqsave(&instance->hba_lock, flags);
2670 		list_splice_init(&instance->internal_reset_pending_q,
2671 				&clist_local);
2672 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2673 
2674 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2675 		for (i = 0; i < wait_time; i++) {
2676 			msleep(1000);
2677 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2678 				break;
2679 		}
2680 
2681 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2682 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2683 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2684 			return FAILED;
2685 		}
2686 
2687 		reset_index = 0;
2688 		while (!list_empty(&clist_local)) {
2689 			reset_cmd = list_entry((&clist_local)->next,
2690 						struct megasas_cmd, list);
2691 			list_del_init(&reset_cmd->list);
2692 			if (reset_cmd->scmd) {
2693 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2694 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2695 					reset_index, reset_cmd,
2696 					reset_cmd->scmd->cmnd[0]);
2697 
2698 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2699 				megasas_return_cmd(instance, reset_cmd);
2700 			} else if (reset_cmd->sync_cmd) {
				dev_notice(&instance->pdev->dev, "%p synch cmds"
						" reset queue\n",
						reset_cmd);
2704 
2705 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2706 				instance->instancet->fire_cmd(instance,
2707 						reset_cmd->frame_phys_addr,
2708 						0, instance->reg_set);
2709 			} else {
				dev_notice(&instance->pdev->dev, "%p unexpected"
					" cmds list\n",
					reset_cmd);
2713 			}
2714 			reset_index++;
2715 		}
2716 
2717 		return SUCCESS;
2718 	}
2719 
2720 	for (i = 0; i < resetwaittime; i++) {
2721 		outstanding = atomic_read(&instance->fw_outstanding);
2722 
2723 		if (!outstanding)
2724 			break;
2725 
2726 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
			       "commands to complete\n", i, outstanding);
			/*
			 * Call the cmd completion routine so that cmds are
			 * completed directly without depending on the isr.
			 */
2733 			megasas_complete_cmd_dpc((unsigned long)instance);
2734 		}
2735 
2736 		msleep(1000);
2737 	}
2738 
2739 	i = 0;
2740 	outstanding = atomic_read(&instance->fw_outstanding);
2741 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2742 
2743 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2744 		goto no_outstanding;
2745 
2746 	if (instance->disableOnlineCtrlReset)
2747 		goto kill_hba_and_failed;
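	/*
	 * Attempt up to three OCRs; after each one give the FW about five
	 * seconds to settle before re-reading its state and the outstanding
	 * I/O count.
	 */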
2748 	do {
2749 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2750 			dev_info(&instance->pdev->dev,
2751 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2752 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2753 			if (i == 3)
2754 				goto kill_hba_and_failed;
2755 			megasas_do_ocr(instance);
2756 
2757 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2758 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2759 				__func__, __LINE__);
2760 				return FAILED;
2761 			}
2762 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2763 				__func__, __LINE__);
2764 
2765 			for (sl = 0; sl < 10; sl++)
2766 				msleep(500);
2767 
2768 			outstanding = atomic_read(&instance->fw_outstanding);
2769 
2770 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2771 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2772 				goto no_outstanding;
2773 		}
2774 		i++;
2775 	} while (i <= 3);
2776 
2777 no_outstanding:
2778 
2779 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2780 		__func__, __LINE__);
2781 	return SUCCESS;
2782 
2783 kill_hba_and_failed:
2784 
2785 	/* Reset not supported, kill adapter */
2786 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2787 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2788 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2789 		atomic_read(&instance->fw_outstanding));
2790 	megasas_dump_pending_frames(instance);
2791 	megaraid_sas_kill_hba(instance);
2792 
2793 	return FAILED;
2794 }
2795 
2796 /**
2797  * megasas_generic_reset -	Generic reset routine
2798  * @scmd:			Mid-layer SCSI command
2799  *
2800  * This routine implements a generic reset handler for device, bus and host
2801  * reset requests. Device, bus and host specific reset handlers can use this
2802  * function after they do their specific tasks.
2803  */
2804 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2805 {
2806 	int ret_val;
2807 	struct megasas_instance *instance;
2808 
2809 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2810 
2811 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2812 		 scmd->cmnd[0], scmd->retries);
2813 
2814 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2815 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2816 		return FAILED;
2817 	}
2818 
2819 	ret_val = megasas_wait_for_outstanding(instance);
2820 	if (ret_val == SUCCESS)
2821 		dev_notice(&instance->pdev->dev, "reset successful\n");
2822 	else
2823 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2824 
2825 	return ret_val;
2826 }
2827 
2828 /**
2829  * megasas_reset_timer - quiesce the adapter if required
2830  * @scmd:		scsi cmnd
2831  *
2832  * Sets the FW busy flag and reduces the host->can_queue if the
2833  * cmd has not been completed within the timeout period.
2834  */
static enum blk_eh_timer_return
megasas_reset_timer(struct scsi_cmnd *scmd)
2837 {
2838 	struct megasas_instance *instance;
2839 	unsigned long flags;
2840 
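	/*
	 * If the command has been outstanding for more than twice the SCSI
	 * command timeout, stop re-arming the timer and let the SCSI error
	 * handler take over.
	 */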
2841 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2842 				(scmd_timeout * 2) * HZ)) {
2843 		return BLK_EH_DONE;
2844 	}
2845 
2846 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2847 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* FW appears busy; throttle I/O by reducing can_queue */
2849 		spin_lock_irqsave(instance->host->host_lock, flags);
2850 
2851 		instance->host->can_queue = instance->throttlequeuedepth;
2852 		instance->last_time = jiffies;
2853 		instance->flag |= MEGASAS_FW_BUSY;
2854 
2855 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2856 	}
2857 	return BLK_EH_RESET_TIMER;
2858 }
2859 
2860 /**
2861  * megasas_dump -	This function will print hexdump of provided buffer.
2862  * @buf:		Buffer to be dumped
2863  * @sz:		Size in bytes
2864  * @format:		Different formats of dumping e.g. format=n will
2865  *			cause only 'n' 32 bit words to be dumped in a single
2866  *			line.
2867  */
2868 inline void
2869 megasas_dump(void *buf, int sz, int format)
2870 {
2871 	int i;
2872 	__le32 *buf_loc = (__le32 *)buf;
2873 
2874 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2875 		if ((i % format) == 0) {
2876 			if (i != 0)
2877 				printk(KERN_CONT "\n");
2878 			printk(KERN_CONT "%08x: ", (i * 4));
2879 		}
2880 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2881 	}
2882 	printk(KERN_CONT "\n");
2883 }
2884 
/**
 * megasas_dump_reg_set -	This function will print hexdump of register set
 * @reg_set:			Pointer to the register set to be dumped
 */
2893 inline void
2894 megasas_dump_reg_set(void __iomem *reg_set)
2895 {
2896 	unsigned int i, sz = 256;
2897 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2898 
2899 	for (i = 0; i < (sz / sizeof(u32)); i++)
2900 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2901 }
2902 
2903 /**
2904  * megasas_dump_fusion_io -	This function will print key details
2905  *				of SCSI IO
2906  * @scmd:			SCSI command pointer of SCSI IO
2907  */
2908 void
2909 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2910 {
2911 	struct megasas_cmd_fusion *cmd;
2912 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2913 	struct megasas_instance *instance;
2914 
2915 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2916 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2917 
2918 	scmd_printk(KERN_INFO, scmd,
2919 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2920 		    scmd, scmd->retries, scmd->allowed);
2921 	scsi_print_command(scmd);
2922 
2923 	if (cmd) {
2924 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2925 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2926 		scmd_printk(KERN_INFO, scmd,
2927 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
2928 			    req_desc->SCSIIO.RequestFlags,
2929 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2930 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2931 
2932 		printk(KERN_INFO "IO request frame:\n");
2933 		megasas_dump(cmd->io_request,
2934 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2935 		printk(KERN_INFO "Chain frame:\n");
2936 		megasas_dump(cmd->sg_frame,
2937 			     instance->max_chain_frame_sz, 8);
2938 	}
2939 
2940 }
2941 
2942 /*
2943  * megasas_dump_sys_regs - This function will dump system registers through
2944  *			    sysfs.
2945  * @reg_set:		    Pointer to System register set.
2946  * @buf:		    Buffer to which output is to be written.
2947  * @return:		    Number of bytes written to buffer.
2948  */
2949 static inline ssize_t
2950 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2951 {
2952 	unsigned int i, sz = 256;
2953 	int bytes_wrote = 0;
2954 	char *loc = (char *)buf;
2955 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2956 
	for (i = 0; i < sz / sizeof(u32); i++) {
		bytes_wrote += scnprintf(loc + bytes_wrote,
					 PAGE_SIZE - bytes_wrote,
					 "%08x: %08x\n", (i * 4),
					 readl(&reg[i]));
	}
2962 	return bytes_wrote;
2963 }
2964 
/**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 * @scmd:			Mid-layer SCSI command
 */
2968 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2969 {
2970 	int ret;
2971 	struct megasas_instance *instance;
2972 
2973 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2974 
2975 	scmd_printk(KERN_INFO, scmd,
2976 		"OCR is requested due to IO timeout!!\n");
2977 
2978 	scmd_printk(KERN_INFO, scmd,
2979 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
2980 		scmd->device->host->shost_state,
2981 		scsi_host_busy(scmd->device->host),
2982 		atomic_read(&instance->fw_outstanding));
2983 	/*
2984 	 * First wait for all commands to complete
2985 	 */
2986 	if (instance->adapter_type == MFI_SERIES) {
2987 		ret = megasas_generic_reset(scmd);
2988 	} else {
2989 		megasas_dump_fusion_io(scmd);
2990 		ret = megasas_reset_fusion(scmd->device->host,
2991 				SCSIIO_TIMEOUT_OCR);
2992 	}
2993 
2994 	return ret;
2995 }
2996 
2997 /**
2998  * megasas_task_abort - Issues task abort request to firmware
2999  *			(supported only for fusion adapters)
3000  * @scmd:		SCSI command pointer
3001  */
3002 static int megasas_task_abort(struct scsi_cmnd *scmd)
3003 {
3004 	int ret;
3005 	struct megasas_instance *instance;
3006 
3007 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3008 
3009 	if (instance->adapter_type != MFI_SERIES)
3010 		ret = megasas_task_abort_fusion(scmd);
3011 	else {
3012 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3013 		ret = FAILED;
3014 	}
3015 
3016 	return ret;
3017 }
3018 
/**
 * megasas_reset_target - Issues target reset request to firmware
 *                        (supported only for fusion adapters)
 * @scmd:                 SCSI command pointer
 */
3024 static int megasas_reset_target(struct scsi_cmnd *scmd)
3025 {
3026 	int ret;
3027 	struct megasas_instance *instance;
3028 
3029 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3030 
3031 	if (instance->adapter_type != MFI_SERIES)
3032 		ret = megasas_reset_target_fusion(scmd);
3033 	else {
3034 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3035 		ret = FAILED;
3036 	}
3037 
3038 	return ret;
3039 }
3040 
3041 /**
3042  * megasas_bios_param - Returns disk geometry for a disk
3043  * @sdev:		device handle
3044  * @bdev:		block device
3045  * @capacity:		drive capacity
3046  * @geom:		geometry parameters
3047  */
3048 static int
3049 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3050 		 sector_t capacity, int geom[])
3051 {
3052 	int heads;
3053 	int sectors;
3054 	sector_t cylinders;
3055 	unsigned long tmp;
3056 
3057 	/* Default heads (64) & sectors (32) */
3058 	heads = 64;
3059 	sectors = 32;
3060 
3061 	tmp = heads * sectors;
3062 	cylinders = capacity;
3063 
3064 	sector_div(cylinders, tmp);
3065 
3066 	/*
3067 	 * Handle extended translation size for logical drives > 1Gb
3068 	 */
3069 
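	/* 0x200000 sectors * 512 bytes/sector = 1 GiB */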
3070 	if (capacity >= 0x200000) {
3071 		heads = 255;
3072 		sectors = 63;
3073 		tmp = heads*sectors;
3074 		cylinders = capacity;
3075 		sector_div(cylinders, tmp);
3076 	}
3077 
3078 	geom[0] = heads;
3079 	geom[1] = sectors;
3080 	geom[2] = cylinders;
3081 
3082 	return 0;
3083 }
3084 
3085 static void megasas_aen_polling(struct work_struct *work);
3086 
3087 /**
3088  * megasas_service_aen -	Processes an event notification
3089  * @instance:			Adapter soft state
3090  * @cmd:			AEN command completed by the ISR
3091  *
3092  * For AEN, driver sends a command down to FW that is held by the FW till an
3093  * event occurs. When an event of interest occurs, FW completes the command
3094  * that it was previously holding.
3095  *
 * This routine sends a SIGIO signal to the processes that have registered
 * with the driver for AEN.
3098  */
3099 static void
3100 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3101 {
3102 	unsigned long flags;
3103 
	/*
	 * Don't signal the app if this is just the completion of a previously
	 * registered AEN that was aborted
	 */
3107 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3108 		spin_lock_irqsave(&poll_aen_lock, flags);
3109 		megasas_poll_wait_aen = 1;
3110 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3111 		wake_up(&megasas_poll_wait);
3112 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3113 	}
3114 	else
3115 		cmd->abort_aen = 0;
3116 
3117 	instance->aen_cmd = NULL;
3118 
3119 	megasas_return_cmd(instance, cmd);
3120 
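	/*
	 * Schedule the AEN polling (hotplug) work to rescan devices, unless
	 * the driver is unloading or pending-command replay is still running.
	 */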
3121 	if ((instance->unload == 0) &&
3122 		((instance->issuepend_done == 1))) {
3123 		struct megasas_aen_event *ev;
3124 
3125 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3126 		if (!ev) {
3127 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3128 		} else {
3129 			ev->instance = instance;
3130 			instance->ev = ev;
3131 			INIT_DELAYED_WORK(&ev->hotplug_work,
3132 					  megasas_aen_polling);
3133 			schedule_delayed_work(&ev->hotplug_work, 0);
3134 		}
3135 	}
3136 }
3137 
3138 static ssize_t
3139 fw_crash_buffer_store(struct device *cdev,
3140 	struct device_attribute *attr, const char *buf, size_t count)
3141 {
3142 	struct Scsi_Host *shost = class_to_shost(cdev);
3143 	struct megasas_instance *instance =
3144 		(struct megasas_instance *) shost->hostdata;
3145 	int val = 0;
3146 	unsigned long flags;
3147 
3148 	if (kstrtoint(buf, 0, &val) != 0)
3149 		return -EINVAL;
3150 
3151 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3152 	instance->fw_crash_buffer_offset = val;
3153 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3154 	return strlen(buf);
3155 }
3156 
3157 static ssize_t
3158 fw_crash_buffer_show(struct device *cdev,
3159 	struct device_attribute *attr, char *buf)
3160 {
3161 	struct Scsi_Host *shost = class_to_shost(cdev);
3162 	struct megasas_instance *instance =
3163 		(struct megasas_instance *) shost->hostdata;
3164 	u32 size;
3165 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3166 	unsigned long src_addr;
3167 	unsigned long flags;
3168 	u32 buff_offset;
3169 
3170 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3171 	buff_offset = instance->fw_crash_buffer_offset;
3172 	if (!instance->crash_dump_buf &&
3173 		!((instance->fw_crash_state == AVAILABLE) ||
3174 		(instance->fw_crash_state == COPYING))) {
3175 		dev_err(&instance->pdev->dev,
3176 			"Firmware crash dump is not available\n");
3177 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3178 		return -EINVAL;
3179 	}
3180 
3181 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3182 		dev_err(&instance->pdev->dev,
3183 			"Firmware crash dump offset is out of range\n");
3184 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3185 		return 0;
3186 	}
3187 
3188 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3189 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3190 
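	/*
	 * The crash dump is stored in CRASH_DMA_BUF_SIZE chunks; locate the
	 * chunk holding the requested offset and copy from within it.
	 */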
3191 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3192 		(buff_offset % dmachunk);
3193 	memcpy(buf, (void *)src_addr, size);
3194 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3195 
3196 	return size;
3197 }
3198 
3199 static ssize_t
3200 fw_crash_buffer_size_show(struct device *cdev,
3201 	struct device_attribute *attr, char *buf)
3202 {
3203 	struct Scsi_Host *shost = class_to_shost(cdev);
3204 	struct megasas_instance *instance =
3205 		(struct megasas_instance *) shost->hostdata;
3206 
3207 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3208 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3209 }
3210 
3211 static ssize_t
3212 fw_crash_state_store(struct device *cdev,
3213 	struct device_attribute *attr, const char *buf, size_t count)
3214 {
3215 	struct Scsi_Host *shost = class_to_shost(cdev);
3216 	struct megasas_instance *instance =
3217 		(struct megasas_instance *) shost->hostdata;
3218 	int val = 0;
3219 	unsigned long flags;
3220 
3221 	if (kstrtoint(buf, 0, &val) != 0)
3222 		return -EINVAL;
3223 
	if (val <= AVAILABLE || val > COPY_ERROR) {
3225 		dev_err(&instance->pdev->dev, "application updates invalid "
3226 			"firmware crash state\n");
3227 		return -EINVAL;
3228 	}
3229 
3230 	instance->fw_crash_state = val;
3231 
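	/*
	 * Once the application reports the copy as finished or failed, the
	 * host crash dump buffer can be released.
	 */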
3232 	if ((val == COPIED) || (val == COPY_ERROR)) {
3233 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3234 		megasas_free_host_crash_buffer(instance);
3235 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3236 		if (val == COPY_ERROR)
3237 			dev_info(&instance->pdev->dev, "application failed to "
3238 				"copy Firmware crash dump\n");
3239 		else
3240 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3241 				"copied successfully\n");
3242 	}
3243 	return strlen(buf);
3244 }
3245 
3246 static ssize_t
3247 fw_crash_state_show(struct device *cdev,
3248 	struct device_attribute *attr, char *buf)
3249 {
3250 	struct Scsi_Host *shost = class_to_shost(cdev);
3251 	struct megasas_instance *instance =
3252 		(struct megasas_instance *) shost->hostdata;
3253 
3254 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3255 }
3256 
3257 static ssize_t
3258 page_size_show(struct device *cdev,
3259 	struct device_attribute *attr, char *buf)
3260 {
3261 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3262 }
3263 
3264 static ssize_t
3265 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3266 	char *buf)
3267 {
3268 	struct Scsi_Host *shost = class_to_shost(cdev);
3269 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3270 
3271 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3272 }
3273 
3274 static ssize_t
3275 fw_cmds_outstanding_show(struct device *cdev,
3276 				 struct device_attribute *attr, char *buf)
3277 {
3278 	struct Scsi_Host *shost = class_to_shost(cdev);
3279 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3280 
3281 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3282 }
3283 
3284 static ssize_t
3285 dump_system_regs_show(struct device *cdev,
3286 			       struct device_attribute *attr, char *buf)
3287 {
3288 	struct Scsi_Host *shost = class_to_shost(cdev);
3289 	struct megasas_instance *instance =
3290 			(struct megasas_instance *)shost->hostdata;
3291 
3292 	return megasas_dump_sys_regs(instance->reg_set, buf);
3293 }
3294 
3295 static ssize_t
3296 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3297 			  char *buf)
3298 {
3299 	struct Scsi_Host *shost = class_to_shost(cdev);
3300 	struct megasas_instance *instance =
3301 			(struct megasas_instance *)shost->hostdata;
3302 
3303 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3304 			(unsigned long)instance->map_id);
3305 }
3306 
3307 static DEVICE_ATTR_RW(fw_crash_buffer);
3308 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3309 static DEVICE_ATTR_RW(fw_crash_state);
3310 static DEVICE_ATTR_RO(page_size);
3311 static DEVICE_ATTR_RO(ldio_outstanding);
3312 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3313 static DEVICE_ATTR_RO(dump_system_regs);
3314 static DEVICE_ATTR_RO(raid_map_id);
3315 
3316 static struct device_attribute *megaraid_host_attrs[] = {
3317 	&dev_attr_fw_crash_buffer_size,
3318 	&dev_attr_fw_crash_buffer,
3319 	&dev_attr_fw_crash_state,
3320 	&dev_attr_page_size,
3321 	&dev_attr_ldio_outstanding,
3322 	&dev_attr_fw_cmds_outstanding,
3323 	&dev_attr_dump_system_regs,
3324 	&dev_attr_raid_map_id,
3325 	NULL,
3326 };
3327 
3328 /*
3329  * Scsi host template for megaraid_sas driver
3330  */
3331 static struct scsi_host_template megasas_template = {
3332 
3333 	.module = THIS_MODULE,
3334 	.name = "Avago SAS based MegaRAID driver",
3335 	.proc_name = "megaraid_sas",
3336 	.slave_configure = megasas_slave_configure,
3337 	.slave_alloc = megasas_slave_alloc,
3338 	.slave_destroy = megasas_slave_destroy,
3339 	.queuecommand = megasas_queue_command,
3340 	.eh_target_reset_handler = megasas_reset_target,
3341 	.eh_abort_handler = megasas_task_abort,
3342 	.eh_host_reset_handler = megasas_reset_bus_host,
3343 	.eh_timed_out = megasas_reset_timer,
3344 	.shost_attrs = megaraid_host_attrs,
3345 	.bios_param = megasas_bios_param,
3346 	.change_queue_depth = scsi_change_queue_depth,
3347 	.max_segment_size = 0xffffffff,
3348 	.no_write_same = 1,
3349 };
3350 
3351 /**
3352  * megasas_complete_int_cmd -	Completes an internal command
3353  * @instance:			Adapter soft state
3354  * @cmd:			Command to be completed
3355  *
3356  * The megasas_issue_blocked_cmd() function waits for a command to complete
3357  * after it issues a command. This function wakes up that waiting routine by
3358  * calling wake_up() on the wait queue.
3359  */
3360 static void
3361 megasas_complete_int_cmd(struct megasas_instance *instance,
3362 			 struct megasas_cmd *cmd)
3363 {
3364 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3365 	wake_up(&instance->int_cmd_wait_q);
3366 }
3367 
3368 /**
3369  * megasas_complete_abort -	Completes aborting a command
3370  * @instance:			Adapter soft state
3371  * @cmd:			Cmd that was issued to abort another cmd
3372  *
3373  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3374  * after it issues an abort on a previously issued command. This function
3375  * wakes up all functions waiting on the same wait queue.
3376  */
3377 static void
3378 megasas_complete_abort(struct megasas_instance *instance,
3379 		       struct megasas_cmd *cmd)
3380 {
3381 	if (cmd->sync_cmd) {
3382 		cmd->sync_cmd = 0;
3383 		cmd->cmd_status_drv = 0;
3384 		wake_up(&instance->abort_cmd_wait_q);
3385 	}
3386 }
3387 
3388 /**
3389  * megasas_complete_cmd -	Completes a command
3390  * @instance:			Adapter soft state
3391  * @cmd:			Command to be completed
3392  * @alt_status:			If non-zero, use this value as status to
3393  *				SCSI mid-layer instead of the value returned
3394  *				by the FW. This should be used if caller wants
3395  *				an alternate status (as in the case of aborted
3396  *				commands)
3397  */
3398 void
3399 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3400 		     u8 alt_status)
3401 {
3402 	int exception = 0;
3403 	struct megasas_header *hdr = &cmd->frame->hdr;
3404 	unsigned long flags;
3405 	struct fusion_context *fusion = instance->ctrl_context;
3406 	u32 opcode, status;
3407 
3408 	/* flag for the retry reset */
3409 	cmd->retry_for_fw_reset = 0;
3410 
3411 	if (cmd->scmd)
3412 		cmd->scmd->SCp.ptr = NULL;
3413 
3414 	switch (hdr->cmd) {
3415 	case MFI_CMD_INVALID:
		/*
		 * Some older 1068 controller FW may keep a pended
		 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		 * when booting the kdump kernel. Ignore this command to
		 * prevent a kernel panic on shutdown of the kdump kernel.
		 */
3420 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3421 		       "completed\n");
3422 		dev_warn(&instance->pdev->dev, "If you have a controller "
3423 		       "other than PERC5, please upgrade your firmware\n");
3424 		break;
3425 	case MFI_CMD_PD_SCSI_IO:
3426 	case MFI_CMD_LD_SCSI_IO:
3427 
3428 		/*
3429 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3430 		 * issued either through an IO path or an IOCTL path. If it
3431 		 * was via IOCTL, we will send it to internal completion.
3432 		 */
3433 		if (cmd->sync_cmd) {
3434 			cmd->sync_cmd = 0;
3435 			megasas_complete_int_cmd(instance, cmd);
3436 			break;
3437 		}
3438 		/* fall through */
3439 
3440 	case MFI_CMD_LD_READ:
3441 	case MFI_CMD_LD_WRITE:
3442 
3443 		if (alt_status) {
3444 			cmd->scmd->result = alt_status << 16;
3445 			exception = 1;
3446 		}
3447 
3448 		if (exception) {
3449 
3450 			atomic_dec(&instance->fw_outstanding);
3451 
3452 			scsi_dma_unmap(cmd->scmd);
3453 			cmd->scmd->scsi_done(cmd->scmd);
3454 			megasas_return_cmd(instance, cmd);
3455 
3456 			break;
3457 		}
3458 
3459 		switch (hdr->cmd_status) {
3460 
3461 		case MFI_STAT_OK:
3462 			cmd->scmd->result = DID_OK << 16;
3463 			break;
3464 
3465 		case MFI_STAT_SCSI_IO_FAILED:
3466 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3467 			cmd->scmd->result =
3468 			    (DID_ERROR << 16) | hdr->scsi_status;
3469 			break;
3470 
3471 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3472 
3473 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3474 
3475 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3476 				memset(cmd->scmd->sense_buffer, 0,
3477 				       SCSI_SENSE_BUFFERSIZE);
3478 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3479 				       hdr->sense_len);
3480 
3481 				cmd->scmd->result |= DRIVER_SENSE << 24;
3482 			}
3483 
3484 			break;
3485 
3486 		case MFI_STAT_LD_OFFLINE:
3487 		case MFI_STAT_DEVICE_NOT_FOUND:
3488 			cmd->scmd->result = DID_BAD_TARGET << 16;
3489 			break;
3490 
3491 		default:
3492 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3493 			       hdr->cmd_status);
3494 			cmd->scmd->result = DID_ERROR << 16;
3495 			break;
3496 		}
3497 
3498 		atomic_dec(&instance->fw_outstanding);
3499 
3500 		scsi_dma_unmap(cmd->scmd);
3501 		cmd->scmd->scsi_done(cmd->scmd);
3502 		megasas_return_cmd(instance, cmd);
3503 
3504 		break;
3505 
3506 	case MFI_CMD_SMP:
3507 	case MFI_CMD_STP:
3508 	case MFI_CMD_NVME:
3509 	case MFI_CMD_TOOLBOX:
3510 		megasas_complete_int_cmd(instance, cmd);
3511 		break;
3512 
3513 	case MFI_CMD_DCMD:
3514 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3515 		/* Check for LD map update */
3516 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3517 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3518 			fusion->fast_path_io = 0;
3519 			spin_lock_irqsave(instance->host->host_lock, flags);
3520 			status = cmd->frame->hdr.cmd_status;
3521 			instance->map_update_cmd = NULL;
3522 			if (status != MFI_STAT_OK) {
3523 				if (status != MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3525 					       cmd->frame->hdr.cmd_status);
3526 				else {
3527 					megasas_return_cmd(instance, cmd);
3528 					spin_unlock_irqrestore(
3529 						instance->host->host_lock,
3530 						flags);
3531 					break;
3532 				}
3533 			}
3534 
3535 			megasas_return_cmd(instance, cmd);
3536 
3537 			/*
3538 			 * Set fast path IO to ZERO.
3539 			 * Validate Map will set proper value.
3540 			 * Meanwhile all IOs will go as LD IO.
3541 			 */
3542 			if (status == MFI_STAT_OK &&
3543 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3544 				instance->map_id++;
3545 				fusion->fast_path_io = 1;
3546 			} else {
3547 				fusion->fast_path_io = 0;
3548 			}
3549 
3550 			megasas_sync_map_info(instance);
3551 			spin_unlock_irqrestore(instance->host->host_lock,
3552 					       flags);
3553 			break;
3554 		}
3555 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3556 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3557 			spin_lock_irqsave(&poll_aen_lock, flags);
3558 			megasas_poll_wait_aen = 0;
3559 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3560 		}
3561 
3562 		/* FW has an updated PD sequence */
3563 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3564 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3565 
3566 			spin_lock_irqsave(instance->host->host_lock, flags);
3567 			status = cmd->frame->hdr.cmd_status;
3568 			instance->jbod_seq_cmd = NULL;
3569 			megasas_return_cmd(instance, cmd);
3570 
3571 			if (status == MFI_STAT_OK) {
3572 				instance->pd_seq_map_id++;
3573 				/* Re-register a pd sync seq num cmd */
3574 				if (megasas_sync_pd_seq_num(instance, true))
3575 					instance->use_seqnum_jbod_fp = false;
			} else {
				instance->use_seqnum_jbod_fp = false;
			}
3578 
3579 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3580 			break;
3581 		}
3582 
3583 		/*
3584 		 * See if got an event notification
3585 		 */
3586 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3587 			megasas_service_aen(instance, cmd);
3588 		else
3589 			megasas_complete_int_cmd(instance, cmd);
3590 
3591 		break;
3592 
3593 	case MFI_CMD_ABORT:
3594 		/*
3595 		 * Cmd issued to abort another cmd returned
3596 		 */
3597 		megasas_complete_abort(instance, cmd);
3598 		break;
3599 
3600 	default:
3601 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3602 		       hdr->cmd);
3603 		megasas_complete_int_cmd(instance, cmd);
3604 		break;
3605 	}
3606 }
3607 
3608 /**
3609  * megasas_issue_pending_cmds_again -	issue all pending cmds
3610  *					in FW again because of the fw reset
3611  * @instance:				Adapter soft state
3612  */
3613 static inline void
3614 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3615 {
3616 	struct megasas_cmd *cmd;
3617 	struct list_head clist_local;
3618 	union megasas_evt_class_locale class_locale;
3619 	unsigned long flags;
3620 	u32 seq_num;
3621 
3622 	INIT_LIST_HEAD(&clist_local);
3623 	spin_lock_irqsave(&instance->hba_lock, flags);
3624 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3625 	spin_unlock_irqrestore(&instance->hba_lock, flags);
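	/*
	 * The pending commands were spliced onto the local list above, so
	 * they can be re-fired below without holding hba_lock.
	 */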
3626 
3627 	while (!list_empty(&clist_local)) {
3628 		cmd = list_entry((&clist_local)->next,
3629 					struct megasas_cmd, list);
3630 		list_del_init(&cmd->list);
3631 
3632 		if (cmd->sync_cmd || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
				"detected to be pending during HBA reset\n",
3635 					cmd, cmd->scmd, cmd->sync_cmd);
3636 
3637 			cmd->retry_for_fw_reset++;
3638 
3639 			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
					"was tried multiple times during reset. "
					"Shutting down the HBA\n",
3643 					cmd, cmd->scmd, cmd->sync_cmd);
3644 				instance->instancet->disable_intr(instance);
3645 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3646 				megaraid_sas_kill_hba(instance);
3647 				return;
3648 			}
3649 		}
3650 
3651 		if (cmd->sync_cmd == 1) {
3652 			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev, "unexpected "
3654 					"cmd attached to internal command!\n");
3655 			}
			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
						"on the internal reset queue, "
3658 						"issue it again.\n", cmd);
3659 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3660 			instance->instancet->fire_cmd(instance,
3661 							cmd->frame_phys_addr,
3662 							0, instance->reg_set);
3663 		} else if (cmd->scmd) {
			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
			"detected on the internal queue, issuing it again.\n",
3666 			cmd, cmd->scmd->cmnd[0]);
3667 
3668 			atomic_inc(&instance->fw_outstanding);
3669 			instance->instancet->fire_cmd(instance,
3670 					cmd->frame_phys_addr,
3671 					cmd->frame_count-1, instance->reg_set);
3672 		} else {
			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
				"internal reset defer list during re-issue!\n",
3675 				cmd);
3676 		}
3677 	}
3678 
3679 	if (instance->aen_cmd) {
		dev_notice(&instance->pdev->dev, "aen_cmd pending in deferred processing, returning it\n");
3681 		megasas_return_cmd(instance, instance->aen_cmd);
3682 
3683 		instance->aen_cmd = NULL;
3684 	}
3685 
3686 	/*
3687 	 * Initiate AEN (Asynchronous Event Notification)
3688 	 */
3689 	seq_num = instance->last_seq_num;
3690 	class_locale.members.reserved = 0;
3691 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3692 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3693 
3694 	megasas_register_aen(instance, seq_num, class_locale.word);
3695 }
3696 
/**
 * megasas_internal_reset_defer_cmds -	Defer internal reset pending cmds
 * @instance:				Adapter soft state
 *
 * We move the commands pending at internal reset time to a
 * pending queue. This queue is flushed after the internal reset
 * sequence completes successfully. If the internal reset does not
 * complete in time, the kernel reset handler flushes these commands.
 */
3706 static void
3707 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3708 {
3709 	struct megasas_cmd *cmd;
3710 	int i;
3711 	u16 max_cmd = instance->max_fw_cmds;
3712 	u32 defer_index;
3713 	unsigned long flags;
3714 
3715 	defer_index = 0;
3716 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3717 	for (i = 0; i < max_cmd; i++) {
3718 		cmd = instance->cmd_list[i];
3719 		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
					"to the internal reset defer queue\n",
3722 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3723 
3724 			if (!list_empty(&cmd->list)) {
3725 				dev_notice(&instance->pdev->dev, "ERROR while"
					" moving this cmd:%p, %d %p, it was"
					" discovered on some list?\n",
3728 					cmd, cmd->sync_cmd, cmd->scmd);
3729 
3730 				list_del_init(&cmd->list);
3731 			}
3732 			defer_index++;
3733 			list_add_tail(&cmd->list,
3734 				&instance->internal_reset_pending_q);
3735 		}
3736 	}
3737 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3738 }
3739 
3740 
3741 static void
3742 process_fw_state_change_wq(struct work_struct *work)
3743 {
3744 	struct megasas_instance *instance =
3745 		container_of(work, struct megasas_instance, work_init);
3746 	u32 wait;
3747 	unsigned long flags;
3748 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, unexpected recovery state %x\n",
				atomic_read(&instance->adprecovery));
		return;
	}
3754 
3755 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
					"state, restarting it...\n");
3758 
3759 		instance->instancet->disable_intr(instance);
3760 		atomic_set(&instance->fw_outstanding, 0);
3761 
3762 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3763 		instance->instancet->adp_reset(instance, instance->reg_set);
3764 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3765 
		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
					"initiating next stage...\n");

		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
					"state 2 starting...\n");
3771 
		/* wait for about 30 seconds before starting the second init */
3773 		for (wait = 0; wait < 30; wait++) {
3774 			msleep(1000);
3775 		}
3776 
3777 		if (megasas_transition_to_ready(instance, 1)) {
3778 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3779 
3780 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3781 			megaraid_sas_kill_hba(instance);
			return;
3783 		}
3784 
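		/*
		 * On the 1064R / PERC5 / Verde ZCR controllers resynchronize
		 * the consumer index to the producer; on all other MFI
		 * controllers restart both indexes from zero.
		 */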
3785 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3786 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3787 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3788 			) {
3789 			*instance->consumer = *instance->producer;
3790 		} else {
3791 			*instance->consumer = 0;
3792 			*instance->producer = 0;
3793 		}
3794 
3795 		megasas_issue_init_mfi(instance);
3796 
3797 		spin_lock_irqsave(&instance->hba_lock, flags);
3798 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3799 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3800 		instance->instancet->enable_intr(instance);
3801 
3802 		megasas_issue_pending_cmds_again(instance);
3803 		instance->issuepend_done = 1;
3804 	}
3805 }
3806 
3807 /**
3808  * megasas_deplete_reply_queue -	Processes all completed commands
3809  * @instance:				Adapter soft state
3810  * @alt_status:				Alternate status to be returned to
3811  *					SCSI mid-layer instead of the status
3812  *					returned by the FW
3813  * Note: this must be called with hba lock held
3814  */
3815 static int
3816 megasas_deplete_reply_queue(struct megasas_instance *instance,
3817 					u8 alt_status)
3818 {
3819 	u32 mfiStatus;
3820 	u32 fw_state;
3821 
3822 	if ((mfiStatus = instance->instancet->check_reset(instance,
3823 					instance->reg_set)) == 1) {
3824 		return IRQ_HANDLED;
3825 	}
3826 
3827 	mfiStatus = instance->instancet->clear_intr(instance);
3828 	if (mfiStatus == 0) {
3829 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3830 		if (!instance->msix_vectors)
3831 			return IRQ_NONE;
3832 	}
3833 
3834 	instance->mfiStatus = mfiStatus;
3835 
3836 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3837 		fw_state = instance->instancet->read_fw_status_reg(
3838 				instance) & MFI_STATE_MASK;
3839 
3840 		if (fw_state != MFI_STATE_FAULT) {
3841 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3842 						fw_state);
3843 		}
3844 
3845 		if ((fw_state == MFI_STATE_FAULT) &&
3846 				(instance->disableOnlineCtrlReset == 0)) {
3847 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3848 
3849 			if ((instance->pdev->device ==
3850 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3851 				(instance->pdev->device ==
3852 					PCI_DEVICE_ID_DELL_PERC5) ||
3853 				(instance->pdev->device ==
3854 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3855 
3856 				*instance->consumer =
3857 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3858 			}
3859 
3860 
3861 			instance->instancet->disable_intr(instance);
3862 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3863 			instance->issuepend_done = 0;
3864 
3865 			atomic_set(&instance->fw_outstanding, 0);
3866 			megasas_internal_reset_defer_cmds(instance);
3867 
3868 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3869 					fw_state, atomic_read(&instance->adprecovery));
3870 
3871 			schedule_work(&instance->work_init);
3872 			return IRQ_HANDLED;
3873 
3874 		} else {
3875 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3876 				fw_state, instance->disableOnlineCtrlReset);
3877 		}
3878 	}
3879 
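	/* Defer completion processing of the reply queue to the ISR tasklet */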
3880 	tasklet_schedule(&instance->isr_tasklet);
3881 	return IRQ_HANDLED;
3882 }
3883 /**
 * megasas_isr -	ISR entry point
 * @irq:		IRQ number
 * @devp:		Pointer to the megasas_irq_context registered for this vector
 */
3886 static irqreturn_t megasas_isr(int irq, void *devp)
3887 {
3888 	struct megasas_irq_context *irq_context = devp;
3889 	struct megasas_instance *instance = irq_context->instance;
3890 	unsigned long flags;
3891 	irqreturn_t rc;
3892 
3893 	if (atomic_read(&instance->fw_reset_no_pci_access))
3894 		return IRQ_HANDLED;
3895 
3896 	spin_lock_irqsave(&instance->hba_lock, flags);
3897 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3898 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3899 
3900 	return rc;
3901 }
3902 
3903 /**
3904  * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Non-zero when called from the controller reset (OCR) path
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake states,
 * the driver must take steps to bring it to the ready state. Otherwise, it
 * has to wait for the FW to reach the ready state.
3911  */
3912 int
3913 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3914 {
3915 	int i;
3916 	u8 max_wait;
3917 	u32 fw_state;
3918 	u32 abs_state, curr_abs_state;
3919 
3920 	abs_state = instance->instancet->read_fw_status_reg(instance);
3921 	fw_state = abs_state & MFI_STATE_MASK;
3922 
3923 	if (fw_state != MFI_STATE_READY)
3924 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3925 		       " state\n");
3926 
3927 	while (fw_state != MFI_STATE_READY) {
3928 
3929 		switch (fw_state) {
3930 
3931 		case MFI_STATE_FAULT:
3932 			dev_printk(KERN_ERR, &instance->pdev->dev,
3933 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
3934 				   abs_state & MFI_STATE_FAULT_CODE,
3935 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
3936 			if (ocr) {
3937 				max_wait = MEGASAS_RESET_WAIT_TIME;
3938 				break;
3939 			} else {
3940 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
3941 				megasas_dump_reg_set(instance->reg_set);
3942 				return -ENODEV;
3943 			}
3944 
3945 		case MFI_STATE_WAIT_HANDSHAKE:
3946 			/*
3947 			 * Set the CLR bit in inbound doorbell
3948 			 */
3949 			if ((instance->pdev->device ==
3950 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3951 				(instance->pdev->device ==
3952 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3953 				(instance->adapter_type != MFI_SERIES))
3954 				writel(
3955 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3956 				  &instance->reg_set->doorbell);
3957 			else
3958 				writel(
3959 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3960 					&instance->reg_set->inbound_doorbell);
3961 
3962 			max_wait = MEGASAS_RESET_WAIT_TIME;
3963 			break;
3964 
3965 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3966 			if ((instance->pdev->device ==
3967 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3968 				(instance->pdev->device ==
3969 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3970 				(instance->adapter_type != MFI_SERIES))
3971 				writel(MFI_INIT_HOTPLUG,
3972 				       &instance->reg_set->doorbell);
3973 			else
3974 				writel(MFI_INIT_HOTPLUG,
3975 					&instance->reg_set->inbound_doorbell);
3976 
3977 			max_wait = MEGASAS_RESET_WAIT_TIME;
3978 			break;
3979 
3980 		case MFI_STATE_OPERATIONAL:
3981 			/*
3982 			 * Bring it to READY state; assuming max wait 10 secs
3983 			 */
3984 			instance->instancet->disable_intr(instance);
3985 			if ((instance->pdev->device ==
3986 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3987 				(instance->pdev->device ==
3988 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3989 				(instance->adapter_type != MFI_SERIES)) {
3990 				writel(MFI_RESET_FLAGS,
3991 					&instance->reg_set->doorbell);
3992 
3993 				if (instance->adapter_type != MFI_SERIES) {
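					/* Wait up to 10s (20 ms polls) for FW to ack by clearing doorbell bit 0 */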
3994 					for (i = 0; i < (10 * 1000); i += 20) {
3995 						if (megasas_readl(
3996 							    instance,
3997 							    &instance->
3998 							    reg_set->
3999 							    doorbell) & 1)
4000 							msleep(20);
4001 						else
4002 							break;
4003 					}
4004 				}
4005 			} else
4006 				writel(MFI_RESET_FLAGS,
4007 					&instance->reg_set->inbound_doorbell);
4008 
4009 			max_wait = MEGASAS_RESET_WAIT_TIME;
4010 			break;
4011 
4012 		case MFI_STATE_UNDEFINED:
4013 			/*
4014 			 * This state should not last for more than 2 seconds
4015 			 */
4016 			max_wait = MEGASAS_RESET_WAIT_TIME;
4017 			break;
4018 
4019 		case MFI_STATE_BB_INIT:
4020 			max_wait = MEGASAS_RESET_WAIT_TIME;
4021 			break;
4022 
4023 		case MFI_STATE_FW_INIT:
4024 			max_wait = MEGASAS_RESET_WAIT_TIME;
4025 			break;
4026 
4027 		case MFI_STATE_FW_INIT_2:
4028 			max_wait = MEGASAS_RESET_WAIT_TIME;
4029 			break;
4030 
4031 		case MFI_STATE_DEVICE_SCAN:
4032 			max_wait = MEGASAS_RESET_WAIT_TIME;
4033 			break;
4034 
4035 		case MFI_STATE_FLUSH_CACHE:
4036 			max_wait = MEGASAS_RESET_WAIT_TIME;
4037 			break;
4038 
4039 		default:
4040 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4041 			       fw_state);
4042 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4043 			megasas_dump_reg_set(instance->reg_set);
4044 			return -ENODEV;
4045 		}
4046 
4047 		/*
4048 		 * The cur_state should not last for more than max_wait secs
4049 		 */
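		/* Poll the FW status register every 20 ms (50 polls per second) */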
4050 		for (i = 0; i < max_wait * 50; i++) {
4051 			curr_abs_state = instance->instancet->
4052 				read_fw_status_reg(instance);
4053 
4054 			if (abs_state == curr_abs_state) {
4055 				msleep(20);
4056 			} else
4057 				break;
4058 		}
4059 
4060 		/*
4061 		 * Return error if fw_state hasn't changed after max_wait
4062 		 */
4063 		if (curr_abs_state == abs_state) {
4064 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4065 			       "in %d secs\n", fw_state, max_wait);
4066 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4067 			megasas_dump_reg_set(instance->reg_set);
4068 			return -ENODEV;
4069 		}
4070 
4071 		abs_state = curr_abs_state;
4072 		fw_state = curr_abs_state & MFI_STATE_MASK;
4073 	}
4074 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4075 
4076 	return 0;
4077 }
4078 
4079 /**
4080  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4081  * @instance:				Adapter soft state
4082  */
4083 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4084 {
4085 	int i;
4086 	u16 max_cmd = instance->max_mfi_cmds;
4087 	struct megasas_cmd *cmd;
4088 
4089 	if (!instance->frame_dma_pool)
4090 		return;
4091 
4092 	/*
4093 	 * Return all frames to pool
4094 	 */
4095 	for (i = 0; i < max_cmd; i++) {
4096 
4097 		cmd = instance->cmd_list[i];
4098 
4099 		if (cmd->frame)
4100 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4101 				      cmd->frame_phys_addr);
4102 
4103 		if (cmd->sense)
4104 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4105 				      cmd->sense_phys_addr);
4106 	}
4107 
4108 	/*
4109 	 * Now destroy the pool itself
4110 	 */
4111 	dma_pool_destroy(instance->frame_dma_pool);
4112 	dma_pool_destroy(instance->sense_dma_pool);
4113 
4114 	instance->frame_dma_pool = NULL;
4115 	instance->sense_dma_pool = NULL;
4116 }
4117 
4118 /**
4119  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4120  * @instance:			Adapter soft state
4121  *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet
 * using the DMA pool facility.
4126  */
4127 static int megasas_create_frame_pool(struct megasas_instance *instance)
4128 {
4129 	int i;
4130 	u16 max_cmd;
4131 	u32 frame_count;
4132 	struct megasas_cmd *cmd;
4133 
4134 	max_cmd = instance->max_mfi_cmds;
4135 
	/*
	 * For MFI controllers:
	 * max_num_sge = 60
	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
	 * Total 960 bytes (15 MFI frames of 64 bytes each)
	 *
	 * Fusion adapters require only 3 extra frames:
	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
	 * Total 192 bytes (3 MFI frames of 64 bytes each)
	 */
4147 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4148 			(15 + 1) : (3 + 1);
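	/* The "+ 1" accounts for the first frame, which holds the MFI command header */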
4149 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4150 	/*
4151 	 * Use DMA pool facility provided by PCI layer
4152 	 */
4153 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4154 					&instance->pdev->dev,
4155 					instance->mfi_frame_size, 256, 0);
4156 
4157 	if (!instance->frame_dma_pool) {
4158 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4159 		return -ENOMEM;
4160 	}
4161 
4162 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4163 						   &instance->pdev->dev, 128,
4164 						   4, 0);
4165 
4166 	if (!instance->sense_dma_pool) {
4167 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4168 
4169 		dma_pool_destroy(instance->frame_dma_pool);
4170 		instance->frame_dma_pool = NULL;
4171 
4172 		return -ENOMEM;
4173 	}
4174 
4175 	/*
4176 	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By using cmd->index as the context instead of &cmd, we can always
	 * use a 32-bit context regardless of the architecture.
4179 	 */
4180 	for (i = 0; i < max_cmd; i++) {
4181 
4182 		cmd = instance->cmd_list[i];
4183 
4184 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4185 					    GFP_KERNEL, &cmd->frame_phys_addr);
4186 
4187 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4188 					    GFP_KERNEL, &cmd->sense_phys_addr);
4189 
4190 		/*
4191 		 * megasas_teardown_frame_pool() takes care of freeing
4192 		 * whatever has been allocated
4193 		 */
4194 		if (!cmd->frame || !cmd->sense) {
4195 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4196 			megasas_teardown_frame_pool(instance);
4197 			return -ENOMEM;
4198 		}
4199 
4200 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4201 		cmd->frame->io.pad_0 = 0;
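		/*
		 * In kdump kernels (reset_devices), pre-mark the frame as
		 * MFI_CMD_INVALID so that completions of commands left over
		 * from the crashed kernel are ignored rather than processed
		 * (see the MFI_CMD_INVALID case in megasas_complete_cmd).
		 */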
4202 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4203 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4204 	}
4205 
4206 	return 0;
4207 }
4208 
4209 /**
4210  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4211  * @instance:		Adapter soft state
4212  */
4213 void megasas_free_cmds(struct megasas_instance *instance)
4214 {
4215 	int i;
4216 
4217 	/* First free the MFI frame pool */
4218 	megasas_teardown_frame_pool(instance);
4219 
4220 	/* Free all the commands in the cmd_list */
4221 	for (i = 0; i < instance->max_mfi_cmds; i++)
4222 
4223 		kfree(instance->cmd_list[i]);
4224 
4225 	/* Free the cmd_list buffer itself */
4226 	kfree(instance->cmd_list);
4227 	instance->cmd_list = NULL;
4228 
4229 	INIT_LIST_HEAD(&instance->cmd_pool);
4230 }
4231 
4232 /**
4233  * megasas_alloc_cmds -	Allocates the command packets
4234  * @instance:		Adapter soft state
4235  *
 * Each command that is issued to the FW, whether an IO command from the OS
 * or an internal command like an IOCTL, is wrapped in a local data structure
 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
 * actually issued to the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum, so we always use 32-bit integers for the context.
 * In this driver, the 32-bit values are indices into the cmd_list array.
 * This array is used only to look up the megasas_cmd given the context. The
 * free commands themselves are maintained in a linked list called cmd_pool.
4249  */
4250 int megasas_alloc_cmds(struct megasas_instance *instance)
4251 {
4252 	int i;
4253 	int j;
4254 	u16 max_cmd;
4255 	struct megasas_cmd *cmd;
4256 
4257 	max_cmd = instance->max_mfi_cmds;
4258 
4259 	/*
4260 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4261 	 * Allocate the dynamic array first and then allocate individual
4262 	 * commands.
4263 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}
4272 
4273 	for (i = 0; i < max_cmd; i++) {
4274 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4275 						GFP_KERNEL);
4276 
4277 		if (!instance->cmd_list[i]) {
4278 
4279 			for (j = 0; j < i; j++)
4280 				kfree(instance->cmd_list[j]);
4281 
4282 			kfree(instance->cmd_list);
4283 			instance->cmd_list = NULL;
4284 
4285 			return -ENOMEM;
4286 		}
4287 	}
4288 
4289 	for (i = 0; i < max_cmd; i++) {
4290 		cmd = instance->cmd_list[i];
4291 		memset(cmd, 0, sizeof(struct megasas_cmd));
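		/* cmd->index doubles as the 32-bit frame context (tag), see above */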
4292 		cmd->index = i;
4293 		cmd->scmd = NULL;
4294 		cmd->instance = instance;
4295 
4296 		list_add_tail(&cmd->list, &instance->cmd_pool);
4297 	}
4298 
4299 	/*
4300 	 * Create a frame pool and assign one frame to each cmd
4301 	 */
4302 	if (megasas_create_frame_pool(instance)) {
4303 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4304 		megasas_free_cmds(instance);
4305 		return -ENOMEM;
4306 	}
4307 
4308 	return 0;
4309 }
4310 
/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:				Adapter soft state
 *
 * Return: INITIATE_OCR only for Fusion adapters, and only when driver
 * load/unload is not in progress and the FW is not already under OCR;
 * KILL_ADAPTER for MFI series adapters; IGNORE_TIMEOUT otherwise.
 */
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4320 
4321 	if (instance->adapter_type == MFI_SERIES)
4322 		return KILL_ADAPTER;
4323 	else if (instance->unload ||
4324 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4325 		return IGNORE_TIMEOUT;
4326 	else
4327 		return INITIATE_OCR;
4328 }
4329 
4330 static void
4331 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4332 {
4333 	int ret;
4334 	struct megasas_cmd *cmd;
4335 	struct megasas_dcmd_frame *dcmd;
4336 
4337 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4338 	u16 device_id = 0;
4339 
4340 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4341 	cmd = megasas_get_cmd(instance);
4342 
4343 	if (!cmd) {
4344 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4345 		return;
4346 	}
4347 
4348 	dcmd = &cmd->frame->dcmd;
4349 
4350 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4351 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4352 
4353 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4354 	dcmd->cmd = MFI_CMD_DCMD;
4355 	dcmd->cmd_status = 0xFF;
4356 	dcmd->sge_count = 1;
4357 	dcmd->flags = MFI_FRAME_DIR_READ;
4358 	dcmd->timeout = 0;
4359 	dcmd->pad_0 = 0;
4360 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4361 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4362 
4363 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4364 				 sizeof(struct MR_PD_INFO));
4365 
4366 	if ((instance->adapter_type != MFI_SERIES) &&
4367 	    !instance->mask_interrupts)
4368 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4369 	else
4370 		ret = megasas_issue_polled(instance, cmd);
4371 
4372 	switch (ret) {
4373 	case DCMD_SUCCESS:
4374 		mr_device_priv_data = sdev->hostdata;
4375 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4376 		mr_device_priv_data->interface_type =
4377 				instance->pd_info->state.ddf.pdType.intf;
4378 		break;
4379 
4380 	case DCMD_TIMEOUT:
4381 
4382 		switch (dcmd_timeout_ocr_possible(instance)) {
4383 		case INITIATE_OCR:
4384 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
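			/*
			 * The caller holds reset_mutex; drop it across the
			 * OCR so megasas_reset_fusion() can take it, then
			 * re-acquire before returning.
			 */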
4385 			mutex_unlock(&instance->reset_mutex);
4386 			megasas_reset_fusion(instance->host,
4387 				MFI_IO_TIMEOUT_OCR);
4388 			mutex_lock(&instance->reset_mutex);
4389 			break;
4390 		case KILL_ADAPTER:
4391 			megaraid_sas_kill_hba(instance);
4392 			break;
4393 		case IGNORE_TIMEOUT:
4394 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4395 				__func__, __LINE__);
4396 			break;
4397 		}
4398 
4399 		break;
4400 	}
4401 
4402 	if (ret != DCMD_TIMEOUT)
4403 		megasas_return_cmd(instance, cmd);
4404 
4405 	return;
4406 }
/*
 * megasas_get_pd_list -	Returns FW's pd_list structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure. This information is mainly used to find out the
 * system PDs exposed by the FW.
 */
4416 static int
4417 megasas_get_pd_list(struct megasas_instance *instance)
4418 {
4419 	int ret = 0, pd_index = 0;
4420 	struct megasas_cmd *cmd;
4421 	struct megasas_dcmd_frame *dcmd;
4422 	struct MR_PD_LIST *ci;
4423 	struct MR_PD_ADDRESS *pd_addr;
4424 
4425 	if (instance->pd_list_not_supported) {
4426 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4427 		"not supported by firmware\n");
4428 		return ret;
4429 	}
4430 
4431 	ci = instance->pd_list_buf;
4432 
4433 	cmd = megasas_get_cmd(instance);
4434 
4435 	if (!cmd) {
4436 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4437 		return -ENOMEM;
4438 	}
4439 
4440 	dcmd = &cmd->frame->dcmd;
4441 
4442 	memset(ci, 0, sizeof(*ci));
4443 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4444 
4445 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4446 	dcmd->mbox.b[1] = 0;
4447 	dcmd->cmd = MFI_CMD_DCMD;
4448 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4449 	dcmd->sge_count = 1;
4450 	dcmd->flags = MFI_FRAME_DIR_READ;
4451 	dcmd->timeout = 0;
4452 	dcmd->pad_0 = 0;
4453 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4454 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4455 
4456 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4457 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4458 
4459 	if ((instance->adapter_type != MFI_SERIES) &&
4460 	    !instance->mask_interrupts)
4461 		ret = megasas_issue_blocked_cmd(instance, cmd,
4462 			MFI_IO_TIMEOUT_SECS);
4463 	else
4464 		ret = megasas_issue_polled(instance, cmd);
4465 
4466 	switch (ret) {
4467 	case DCMD_FAILED:
4468 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4469 			"failed/not supported by firmware\n");
4470 
4471 		if (instance->adapter_type != MFI_SERIES)
4472 			megaraid_sas_kill_hba(instance);
4473 		else
4474 			instance->pd_list_not_supported = 1;
4475 		break;
4476 	case DCMD_TIMEOUT:
4477 
4478 		switch (dcmd_timeout_ocr_possible(instance)) {
4479 		case INITIATE_OCR:
4480 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4481 			/*
4482 			 * DCMD failed from AEN path.
4483 			 * AEN path already hold reset_mutex to avoid PCI access
4484 			 * while OCR is in progress.
4485 			 */
4486 			mutex_unlock(&instance->reset_mutex);
4487 			megasas_reset_fusion(instance->host,
4488 						MFI_IO_TIMEOUT_OCR);
4489 			mutex_lock(&instance->reset_mutex);
4490 			break;
4491 		case KILL_ADAPTER:
4492 			megaraid_sas_kill_hba(instance);
4493 			break;
4494 		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4496 				__func__, __LINE__);
4497 			break;
4498 		}
4499 
4500 		break;
4501 
4502 	case DCMD_SUCCESS:
4503 		pd_addr = ci->addr;
4504 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4505 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4506 				 __func__, le32_to_cpu(ci->count));
4507 
4508 		if ((le32_to_cpu(ci->count) >
4509 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4510 			break;
4511 
4512 		memset(instance->local_pd_list, 0,
4513 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4514 
4515 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4516 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4517 					le16_to_cpu(pd_addr->deviceId);
4518 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4519 					pd_addr->scsiDevType;
4520 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4521 					MR_PD_STATE_SYSTEM;
4522 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4523 				dev_info(&instance->pdev->dev,
4524 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4525 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4526 					 pd_addr->scsiDevType);
4527 			pd_addr++;
4528 		}
4529 
4530 		memcpy(instance->pd_list, instance->local_pd_list,
4531 			sizeof(instance->pd_list));
4532 		break;
4533 
4534 	}
4535 
4536 	if (ret != DCMD_TIMEOUT)
4537 		megasas_return_cmd(instance, cmd);
4538 
4539 	return ret;
4540 }
4541 
/*
 * megasas_get_ld_list -	Returns FW's ld_list structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD
 * list structure. This information is mainly used to find out the
 * logical drives (LDs) exported by the FW.
 */
4551 static int
4552 megasas_get_ld_list(struct megasas_instance *instance)
4553 {
4554 	int ret = 0, ld_index = 0, ids = 0;
4555 	struct megasas_cmd *cmd;
4556 	struct megasas_dcmd_frame *dcmd;
4557 	struct MR_LD_LIST *ci;
4558 	dma_addr_t ci_h = 0;
4559 	u32 ld_count;
4560 
4561 	ci = instance->ld_list_buf;
4562 	ci_h = instance->ld_list_buf_h;
4563 
4564 	cmd = megasas_get_cmd(instance);
4565 
4566 	if (!cmd) {
4567 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4568 		return -ENOMEM;
4569 	}
4570 
4571 	dcmd = &cmd->frame->dcmd;
4572 
4573 	memset(ci, 0, sizeof(*ci));
4574 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4575 
4576 	if (instance->supportmax256vd)
4577 		dcmd->mbox.b[0] = 1;
4578 	dcmd->cmd = MFI_CMD_DCMD;
4579 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4580 	dcmd->sge_count = 1;
4581 	dcmd->flags = MFI_FRAME_DIR_READ;
4582 	dcmd->timeout = 0;
4583 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4584 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4585 	dcmd->pad_0  = 0;
4586 
4587 	megasas_set_dma_settings(instance, dcmd, ci_h,
4588 				 sizeof(struct MR_LD_LIST));
4589 
4590 	if ((instance->adapter_type != MFI_SERIES) &&
4591 	    !instance->mask_interrupts)
4592 		ret = megasas_issue_blocked_cmd(instance, cmd,
4593 			MFI_IO_TIMEOUT_SECS);
4594 	else
4595 		ret = megasas_issue_polled(instance, cmd);
4596 
4597 	ld_count = le32_to_cpu(ci->ldCount);
4598 
4599 	switch (ret) {
4600 	case DCMD_FAILED:
4601 		megaraid_sas_kill_hba(instance);
4602 		break;
4603 	case DCMD_TIMEOUT:
4604 
4605 		switch (dcmd_timeout_ocr_possible(instance)) {
4606 		case INITIATE_OCR:
4607 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4608 			/*
4609 			 * DCMD failed from AEN path.
4610 			 * AEN path already hold reset_mutex to avoid PCI access
4611 			 * while OCR is in progress.
4612 			 */
4613 			mutex_unlock(&instance->reset_mutex);
4614 			megasas_reset_fusion(instance->host,
4615 						MFI_IO_TIMEOUT_OCR);
4616 			mutex_lock(&instance->reset_mutex);
4617 			break;
4618 		case KILL_ADAPTER:
4619 			megaraid_sas_kill_hba(instance);
4620 			break;
4621 		case IGNORE_TIMEOUT:
4622 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4623 				__func__, __LINE__);
4624 			break;
4625 		}
4626 
4627 		break;
4628 
4629 	case DCMD_SUCCESS:
4630 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4631 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4632 				 __func__, ld_count);
4633 
4634 		if (ld_count > instance->fw_supported_vd_count)
4635 			break;
4636 
4637 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4638 
4639 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4640 			if (ci->ldList[ld_index].state != 0) {
4641 				ids = ci->ldList[ld_index].ref.targetId;
4642 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4643 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4644 					dev_info(&instance->pdev->dev,
4645 						 "LD%d: targetID: 0x%03x\n",
4646 						 ld_index, ids);
4647 			}
4648 		}
4649 
4650 		break;
4651 	}
4652 
4653 	if (ret != DCMD_TIMEOUT)
4654 		megasas_return_cmd(instance, cmd);
4655 
4656 	return ret;
4657 }
4658 
/**
 * megasas_ld_list_query -	Returns FW's LD target ID list
 * @instance:				Adapter soft state
 * @query_type:				LD list query type
 *
 * Issues an internal command (DCMD) to get the FW's LD target ID list.
 * This information is mainly used to find out the logical drives (LDs)
 * exported by the FW. Falls back to megasas_get_ld_list() if the DCMD
 * is not supported by the firmware.
 */
4668 static int
4669 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4670 {
4671 	int ret = 0, ld_index = 0, ids = 0;
4672 	struct megasas_cmd *cmd;
4673 	struct megasas_dcmd_frame *dcmd;
4674 	struct MR_LD_TARGETID_LIST *ci;
4675 	dma_addr_t ci_h = 0;
4676 	u32 tgtid_count;
4677 
4678 	ci = instance->ld_targetid_list_buf;
4679 	ci_h = instance->ld_targetid_list_buf_h;
4680 
4681 	cmd = megasas_get_cmd(instance);
4682 
4683 	if (!cmd) {
4684 		dev_warn(&instance->pdev->dev,
4685 		         "megasas_ld_list_query: Failed to get cmd\n");
4686 		return -ENOMEM;
4687 	}
4688 
4689 	dcmd = &cmd->frame->dcmd;
4690 
4691 	memset(ci, 0, sizeof(*ci));
4692 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4693 
4694 	dcmd->mbox.b[0] = query_type;
4695 	if (instance->supportmax256vd)
4696 		dcmd->mbox.b[2] = 1;
4697 
4698 	dcmd->cmd = MFI_CMD_DCMD;
4699 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4700 	dcmd->sge_count = 1;
4701 	dcmd->flags = MFI_FRAME_DIR_READ;
4702 	dcmd->timeout = 0;
4703 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4704 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4705 	dcmd->pad_0  = 0;
4706 
4707 	megasas_set_dma_settings(instance, dcmd, ci_h,
4708 				 sizeof(struct MR_LD_TARGETID_LIST));
4709 
4710 	if ((instance->adapter_type != MFI_SERIES) &&
4711 	    !instance->mask_interrupts)
4712 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4713 	else
4714 		ret = megasas_issue_polled(instance, cmd);
4715 
4716 	switch (ret) {
4717 	case DCMD_FAILED:
4718 		dev_info(&instance->pdev->dev,
4719 			"DCMD not supported by firmware - %s %d\n",
4720 				__func__, __LINE__);
4721 		ret = megasas_get_ld_list(instance);
4722 		break;
4723 	case DCMD_TIMEOUT:
4724 		switch (dcmd_timeout_ocr_possible(instance)) {
4725 		case INITIATE_OCR:
4726 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4727 			/*
4728 			 * DCMD failed from AEN path.
4729 			 * AEN path already hold reset_mutex to avoid PCI access
4730 			 * while OCR is in progress.
4731 			 */
4732 			mutex_unlock(&instance->reset_mutex);
4733 			megasas_reset_fusion(instance->host,
4734 						MFI_IO_TIMEOUT_OCR);
4735 			mutex_lock(&instance->reset_mutex);
4736 			break;
4737 		case KILL_ADAPTER:
4738 			megaraid_sas_kill_hba(instance);
4739 			break;
4740 		case IGNORE_TIMEOUT:
4741 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4742 				__func__, __LINE__);
4743 			break;
4744 		}
4745 
4746 		break;
4747 	case DCMD_SUCCESS:
4748 		tgtid_count = le32_to_cpu(ci->count);
4749 
4750 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4751 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4752 				 __func__, tgtid_count);
4753 
4754 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4755 			break;
4756 
4757 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4758 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4759 			ids = ci->targetId[ld_index];
4760 			instance->ld_ids[ids] = ci->targetId[ld_index];
4761 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4762 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4763 					 ld_index, ci->targetId[ld_index]);
4764 		}
4765 
4766 		break;
4767 	}
4768 
4769 	if (ret != DCMD_TIMEOUT)
4770 		megasas_return_cmd(instance, cmd);
4771 
4772 	return ret;
4773 }
4774 
/**
 * megasas_host_device_list_query -	Get the combined device list from FW
 * @instance:			Adapter soft state
 * @is_probe:			Driver probe check
 *
 * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
 * dcmd.mbox              - reserved
 * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
 * Desc:    This DCMD will return the combined device list
 * Status:  MFI_STAT_OK - List returned successfully
 *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
 *                                 disabled
 *
 * Return:			0 if DCMD succeeded
 *				 non-zero if failed
 */
4788 static int
4789 megasas_host_device_list_query(struct megasas_instance *instance,
4790 			       bool is_probe)
4791 {
4792 	int ret, i, target_id;
4793 	struct megasas_cmd *cmd;
4794 	struct megasas_dcmd_frame *dcmd;
4795 	struct MR_HOST_DEVICE_LIST *ci;
4796 	u32 count;
4797 	dma_addr_t ci_h;
4798 
4799 	ci = instance->host_device_list_buf;
4800 	ci_h = instance->host_device_list_buf_h;
4801 
4802 	cmd = megasas_get_cmd(instance);
4803 
4804 	if (!cmd) {
4805 		dev_warn(&instance->pdev->dev,
4806 			 "%s: failed to get cmd\n",
4807 			 __func__);
4808 		return -ENOMEM;
4809 	}
4810 
4811 	dcmd = &cmd->frame->dcmd;
4812 
4813 	memset(ci, 0, sizeof(*ci));
4814 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4815 
4816 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
4817 	dcmd->cmd = MFI_CMD_DCMD;
4818 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4819 	dcmd->sge_count = 1;
4820 	dcmd->flags = MFI_FRAME_DIR_READ;
4821 	dcmd->timeout = 0;
4822 	dcmd->pad_0 = 0;
4823 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4824 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4825 
4826 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4827 
4828 	if (!instance->mask_interrupts) {
4829 		ret = megasas_issue_blocked_cmd(instance, cmd,
4830 						MFI_IO_TIMEOUT_SECS);
4831 	} else {
4832 		ret = megasas_issue_polled(instance, cmd);
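		/* Polled DCMDs must not be re-fired by the OCR refire path */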
4833 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4834 	}
4835 
4836 	switch (ret) {
4837 	case DCMD_SUCCESS:
4838 		/* Fill the internal pd_list and ld_ids array based on
4839 		 * targetIds returned by FW
4840 		 */
4841 		count = le32_to_cpu(ci->count);
4842 
4843 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4844 			break;
4845 
4846 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4847 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4848 				 __func__, count);
4849 
4850 		memset(instance->local_pd_list, 0,
4851 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4852 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4853 		for (i = 0; i < count; i++) {
4854 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4855 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4856 				instance->local_pd_list[target_id].tid = target_id;
4857 				instance->local_pd_list[target_id].driveType =
4858 						ci->host_device_list[i].scsi_type;
4859 				instance->local_pd_list[target_id].driveState =
4860 						MR_PD_STATE_SYSTEM;
4861 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4862 					dev_info(&instance->pdev->dev,
4863 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4864 						 i, target_id, ci->host_device_list[i].scsi_type);
4865 			} else {
4866 				instance->ld_ids[target_id] = target_id;
4867 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4868 					dev_info(&instance->pdev->dev,
4869 						 "Device %d: LD targetID: 0x%03x\n",
4870 						 i, target_id);
4871 			}
4872 		}
4873 
4874 		memcpy(instance->pd_list, instance->local_pd_list,
4875 		       sizeof(instance->pd_list));
4876 		break;
4877 
4878 	case DCMD_TIMEOUT:
4879 		switch (dcmd_timeout_ocr_possible(instance)) {
4880 		case INITIATE_OCR:
4881 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4882 			mutex_unlock(&instance->reset_mutex);
4883 			megasas_reset_fusion(instance->host,
4884 				MFI_IO_TIMEOUT_OCR);
4885 			mutex_lock(&instance->reset_mutex);
4886 			break;
4887 		case KILL_ADAPTER:
4888 			megaraid_sas_kill_hba(instance);
4889 			break;
4890 		case IGNORE_TIMEOUT:
4891 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4892 				 __func__, __LINE__);
4893 			break;
4894 		}
4895 		break;
4896 	case DCMD_FAILED:
4897 		dev_err(&instance->pdev->dev,
4898 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4899 			__func__);
4900 		break;
4901 	}
4902 
4903 	if (ret != DCMD_TIMEOUT)
4904 		megasas_return_cmd(instance, cmd);
4905 
4906 	return ret;
4907 }
4908 
/*
 * megasas_update_ext_vd_details -	Update details w.r.t. Extended VD support
 * @instance:				Controller's soft instance
 */
4913 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4914 {
4915 	struct fusion_context *fusion;
4916 	u32 ventura_map_sz = 0;
4917 
4918 	fusion = instance->ctrl_context;
	/* Nothing to update for MFI based controllers */
4920 	if (!fusion)
4921 		return;
4922 
4923 	instance->supportmax256vd =
4924 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
	/* Below is an additional check to address future FW enhancements */
4926 	if (instance->ctrl_info_buf->max_lds > 64)
4927 		instance->supportmax256vd = 1;
4928 
4929 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4930 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4931 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4932 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4933 	if (instance->supportmax256vd) {
4934 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4935 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4936 	} else {
4937 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4938 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4939 	}
4940 
4941 	dev_info(&instance->pdev->dev,
4942 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
4943 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
4944 		instance->ctrl_info_buf->max_lds);
4945 
4946 	if (instance->max_raid_mapsize) {
4947 		ventura_map_sz = instance->max_raid_mapsize *
4948 						MR_MIN_MAP_SIZE; /* 64k */
4949 		fusion->current_map_sz = ventura_map_sz;
4950 		fusion->max_map_sz = ventura_map_sz;
4951 	} else {
4952 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4953 					(sizeof(struct MR_LD_SPAN_MAP) *
4954 					(instance->fw_supported_vd_count - 1));
4955 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4956 
4957 		fusion->max_map_sz =
4958 			max(fusion->old_map_sz, fusion->new_map_sz);
4959 
4960 		if (instance->supportmax256vd)
4961 			fusion->current_map_sz = fusion->new_map_sz;
4962 		else
4963 			fusion->current_map_sz = fusion->old_map_sz;
4964 	}
4965 	/* irrespective of FW raid maps, driver raid map is constant */
4966 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4967 }
4968 
/*
 * megasas_get_snapdump_properties -	Get snapdump properties from FW
 * @instance:				Adapter soft state
 *
 * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
 * dcmd.hdr.length            - number of bytes to read
 * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
 * Desc:			 Fill in snapdump properties
 * Status:			 MFI_STAT_OK - Command successful
 */
4976 void megasas_get_snapdump_properties(struct megasas_instance *instance)
4977 {
4978 	int ret = 0;
4979 	struct megasas_cmd *cmd;
4980 	struct megasas_dcmd_frame *dcmd;
4981 	struct MR_SNAPDUMP_PROPERTIES *ci;
4982 	dma_addr_t ci_h = 0;
4983 
4984 	ci = instance->snapdump_prop;
4985 	ci_h = instance->snapdump_prop_h;
4986 
4987 	if (!ci)
4988 		return;
4989 
4990 	cmd = megasas_get_cmd(instance);
4991 
4992 	if (!cmd) {
4993 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
4994 		return;
4995 	}
4996 
4997 	dcmd = &cmd->frame->dcmd;
4998 
4999 	memset(ci, 0, sizeof(*ci));
5000 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5001 
5002 	dcmd->cmd = MFI_CMD_DCMD;
5003 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5004 	dcmd->sge_count = 1;
5005 	dcmd->flags = MFI_FRAME_DIR_READ;
5006 	dcmd->timeout = 0;
5007 	dcmd->pad_0 = 0;
5008 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5009 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5010 
5011 	megasas_set_dma_settings(instance, dcmd, ci_h,
5012 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5013 
5014 	if (!instance->mask_interrupts) {
5015 		ret = megasas_issue_blocked_cmd(instance, cmd,
5016 						MFI_IO_TIMEOUT_SECS);
5017 	} else {
5018 		ret = megasas_issue_polled(instance, cmd);
5019 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5020 	}
5021 
5022 	switch (ret) {
5023 	case DCMD_SUCCESS:
5024 		instance->snapdump_wait_time =
5025 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5026 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5027 		break;
5028 
5029 	case DCMD_TIMEOUT:
5030 		switch (dcmd_timeout_ocr_possible(instance)) {
5031 		case INITIATE_OCR:
5032 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5033 			mutex_unlock(&instance->reset_mutex);
5034 			megasas_reset_fusion(instance->host,
5035 				MFI_IO_TIMEOUT_OCR);
5036 			mutex_lock(&instance->reset_mutex);
5037 			break;
5038 		case KILL_ADAPTER:
5039 			megaraid_sas_kill_hba(instance);
5040 			break;
5041 		case IGNORE_TIMEOUT:
5042 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5043 				__func__, __LINE__);
5044 			break;
5045 		}
5046 	}
5047 
5048 	if (ret != DCMD_TIMEOUT)
5049 		megasas_return_cmd(instance, cmd);
5050 }
5051 
5052 /**
 * megasas_get_ctrl_info -	Returns FW's controller structure
5054  * @instance:				Adapter soft state
5055  *
5056  * Issues an internal command (DCMD) to get the FW's controller structure.
5057  * This information is mainly used to find out the maximum IO transfer per
5058  * command supported by the FW.
5059  */
5060 int
5061 megasas_get_ctrl_info(struct megasas_instance *instance)
5062 {
5063 	int ret = 0;
5064 	struct megasas_cmd *cmd;
5065 	struct megasas_dcmd_frame *dcmd;
5066 	struct megasas_ctrl_info *ci;
5067 	dma_addr_t ci_h = 0;
5068 
5069 	ci = instance->ctrl_info_buf;
5070 	ci_h = instance->ctrl_info_buf_h;
5071 
5072 	cmd = megasas_get_cmd(instance);
5073 
5074 	if (!cmd) {
5075 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5076 		return -ENOMEM;
5077 	}
5078 
5079 	dcmd = &cmd->frame->dcmd;
5080 
5081 	memset(ci, 0, sizeof(*ci));
5082 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5083 
5084 	dcmd->cmd = MFI_CMD_DCMD;
5085 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5086 	dcmd->sge_count = 1;
5087 	dcmd->flags = MFI_FRAME_DIR_READ;
5088 	dcmd->timeout = 0;
5089 	dcmd->pad_0 = 0;
5090 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5091 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5092 	dcmd->mbox.b[0] = 1;
5093 
5094 	megasas_set_dma_settings(instance, dcmd, ci_h,
5095 				 sizeof(struct megasas_ctrl_info));
5096 
5097 	if ((instance->adapter_type != MFI_SERIES) &&
5098 	    !instance->mask_interrupts) {
5099 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5100 	} else {
5101 		ret = megasas_issue_polled(instance, cmd);
5102 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5103 	}
5104 
5105 	switch (ret) {
5106 	case DCMD_SUCCESS:
5107 		/* Save required controller information in
5108 		 * CPU endianness format.
5109 		 */
5110 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5111 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5112 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5113 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5114 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5115 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5116 
5117 		/* Update the latest Ext VD info.
		 * From the init path, store current firmware details.
		 * From the OCR path, detect any firmware property changes,
		 * e.g. in case of a firmware upgrade without a system reboot.
5121 		 */
5122 		megasas_update_ext_vd_details(instance);
5123 		instance->support_seqnum_jbod_fp =
5124 			ci->adapterOperations3.useSeqNumJbodFP;
5125 		instance->support_morethan256jbod =
5126 			ci->adapter_operations4.support_pd_map_target_id;
5127 		instance->support_nvme_passthru =
5128 			ci->adapter_operations4.support_nvme_passthru;
5129 		instance->support_pci_lane_margining =
5130 			ci->adapter_operations5.support_pci_lane_margining;
5131 		instance->task_abort_tmo = ci->TaskAbortTO;
5132 		instance->max_reset_tmo = ci->MaxResetTO;
5133 
		/* Check whether controller is iMR or MR */
5135 		instance->is_imr = (ci->memory_size ? 0 : 1);
5136 
5137 		instance->snapdump_wait_time =
5138 			(ci->properties.on_off_properties2.enable_snap_dump ?
5139 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5140 
5141 		instance->enable_fw_dev_list =
5142 			ci->properties.on_off_properties2.enable_fw_dev_list;
5143 
5144 		dev_info(&instance->pdev->dev,
5145 			"controller type\t: %s(%dMB)\n",
5146 			instance->is_imr ? "iMR" : "MR",
5147 			le16_to_cpu(ci->memory_size));
5148 
5149 		instance->disableOnlineCtrlReset =
5150 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5151 		instance->secure_jbod_support =
5152 			ci->adapterOperations3.supportSecurityonJBOD;
5153 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5154 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5155 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5156 			instance->secure_jbod_support ? "Yes" : "No");
5157 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5158 			 instance->support_nvme_passthru ? "Yes" : "No");
5159 		dev_info(&instance->pdev->dev,
5160 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5161 			 instance->task_abort_tmo, instance->max_reset_tmo);
5162 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5163 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5164 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5165 			 instance->support_pci_lane_margining ? "Yes" : "No");
5166 
5167 		break;
5168 
5169 	case DCMD_TIMEOUT:
5170 		switch (dcmd_timeout_ocr_possible(instance)) {
5171 		case INITIATE_OCR:
5172 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5173 			mutex_unlock(&instance->reset_mutex);
5174 			megasas_reset_fusion(instance->host,
5175 				MFI_IO_TIMEOUT_OCR);
5176 			mutex_lock(&instance->reset_mutex);
5177 			break;
5178 		case KILL_ADAPTER:
5179 			megaraid_sas_kill_hba(instance);
5180 			break;
5181 		case IGNORE_TIMEOUT:
5182 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5183 				__func__, __LINE__);
5184 			break;
5185 		}
5186 		break;
5187 	case DCMD_FAILED:
5188 		megaraid_sas_kill_hba(instance);
5189 		break;
5190 
5191 	}
5192 
5193 	if (ret != DCMD_TIMEOUT)
5194 		megasas_return_cmd(instance, cmd);
5195 
5196 	return ret;
5197 }
5198 
5199 /*
5200  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5201  *					to firmware
5202  *
5203  * @instance:				Adapter soft state
5204  * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
5205 					MR_CRASH_BUF_TURN_OFF = 0
5206 					MR_CRASH_BUF_TURN_ON = 1
5207  * @return 0 on success non-zero on failure.
5208  * Issues an internal command (DCMD) to set parameters for crash dump feature.
5209  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5210  * that driver supports crash dump feature. This DCMD will be sent only if
5211  * crash dump feature is supported by the FW.
5212  *
5213  */
5214 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5215 	u8 crash_buf_state)
5216 {
5217 	int ret = 0;
5218 	struct megasas_cmd *cmd;
5219 	struct megasas_dcmd_frame *dcmd;
5220 
5221 	cmd = megasas_get_cmd(instance);
5222 
5223 	if (!cmd) {
5224 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5225 		return -ENOMEM;
5226 	}
5227 
5228 
5229 	dcmd = &cmd->frame->dcmd;
5230 
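	/* Build the DCMD frame: mbox byte 0 carries the requested crash dump
	 * ON/OFF state and the SGE points at the crash dump DMA buffer.
	 */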
5231 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5232 	dcmd->mbox.b[0] = crash_buf_state;
5233 	dcmd->cmd = MFI_CMD_DCMD;
5234 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5235 	dcmd->sge_count = 1;
5236 	dcmd->flags = MFI_FRAME_DIR_NONE;
5237 	dcmd->timeout = 0;
5238 	dcmd->pad_0 = 0;
5239 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5240 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5241 
5242 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5243 				 CRASH_DMA_BUF_SIZE);
5244 
5245 	if ((instance->adapter_type != MFI_SERIES) &&
5246 	    !instance->mask_interrupts)
5247 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5248 	else
5249 		ret = megasas_issue_polled(instance, cmd);
5250 
5251 	if (ret == DCMD_TIMEOUT) {
5252 		switch (dcmd_timeout_ocr_possible(instance)) {
5253 		case INITIATE_OCR:
5254 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5255 			megasas_reset_fusion(instance->host,
5256 					MFI_IO_TIMEOUT_OCR);
5257 			break;
5258 		case KILL_ADAPTER:
5259 			megaraid_sas_kill_hba(instance);
5260 			break;
5261 		case IGNORE_TIMEOUT:
5262 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5263 				__func__, __LINE__);
5264 			break;
5265 		}
5266 	} else
5267 		megasas_return_cmd(instance, cmd);
5268 
5269 	return ret;
5270 }
5271 
5272 /**
5273  * megasas_issue_init_mfi -	Initializes the FW
5274  * @instance:		Adapter soft state
5275  *
5276  * Issues the INIT MFI cmd
5277  */
5278 static int
5279 megasas_issue_init_mfi(struct megasas_instance *instance)
5280 {
5281 	__le32 context;
5282 	struct megasas_cmd *cmd;
5283 	struct megasas_init_frame *init_frame;
5284 	struct megasas_init_queue_info *initq_info;
5285 	dma_addr_t init_frame_h;
5286 	dma_addr_t initq_info_h;
5287 
5288 	/*
5289 	 * Prepare a init frame. Note the init frame points to queue info
5290 	 * structure. Each frame has SGL allocated after first 64 bytes. For
5291 	 * this frame - since we don't need any SGL - we use SGL's space as
5292 	 * queue info structure
5293 	 *
5294 	 * We will not get a NULL command below. We just created the pool.
5295 	 */
5296 	cmd = megasas_get_cmd(instance);
5297 
5298 	init_frame = (struct megasas_init_frame *)cmd->frame;
5299 	initq_info = (struct megasas_init_queue_info *)
5300 		((unsigned long)init_frame + 64);
5301 
5302 	init_frame_h = cmd->frame_phys_addr;
5303 	initq_info_h = init_frame_h + 64;
5304 
5305 	context = init_frame->context;
5306 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5307 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5308 	init_frame->context = context;
5309 
5310 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5311 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5312 
5313 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5314 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5315 
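	/*
	 * Fill the INIT frame: command type, the 64-bit address of the queue
	 * info structure (split into low/high 32-bit halves) and the transfer
	 * length.
	 */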
5316 	init_frame->cmd = MFI_CMD_INIT;
5317 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5318 	init_frame->queue_info_new_phys_addr_lo =
5319 		cpu_to_le32(lower_32_bits(initq_info_h));
5320 	init_frame->queue_info_new_phys_addr_hi =
5321 		cpu_to_le32(upper_32_bits(initq_info_h));
5322 
5323 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5324 
5325 	/*
5326 	 * disable the intr before firing the init frame to FW
5327 	 */
5328 	instance->instancet->disable_intr(instance);
5329 
5330 	/*
5331 	 * Issue the init frame in polled mode
5332 	 */
5333 
5334 	if (megasas_issue_polled(instance, cmd)) {
5335 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5336 		megasas_return_cmd(instance, cmd);
5337 		goto fail_fw_init;
5338 	}
5339 
5340 	megasas_return_cmd(instance, cmd);
5341 
5342 	return 0;
5343 
5344 fail_fw_init:
5345 	return -EINVAL;
5346 }
5347 
5348 static u32
5349 megasas_init_adapter_mfi(struct megasas_instance *instance)
5350 {
5351 	u32 context_sz;
5352 	u32 reply_q_sz;
5353 
5354 	/*
5355 	 * Get various operational parameters from status register
5356 	 */
5357 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5358 	/*
5359 	 * Reduce the max supported cmds by 1. This is to ensure that the
5360 	 * reply_q_sz (1 more than the max cmd that driver may send)
5361 	 * does not exceed max cmds that the FW can support
5362 	 */
5363 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5364 	instance->max_mfi_cmds = instance->max_fw_cmds;
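	/* Bits [23:16] of the FW status register report the maximum number of
	 * SGEs supported per command.
	 */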
5365 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5366 					0x10;
5367 	/*
5368 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5369 	 * are reserved for IOCTL + driver's internal DCMDs.
5370 	 */
5371 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5372 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5373 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5374 			MEGASAS_SKINNY_INT_CMDS);
5375 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5376 	} else {
5377 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5378 			MEGASAS_INT_CMDS);
5379 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5380 	}
5381 
5382 	instance->cur_can_queue = instance->max_scsi_cmds;
5383 	/*
5384 	 * Create a pool of commands
5385 	 */
5386 	if (megasas_alloc_cmds(instance))
5387 		goto fail_alloc_cmds;
5388 
5389 	/*
5390 	 * Allocate memory for reply queue. Length of reply queue should
5391 	 * be _one_ more than the maximum commands handled by the firmware.
5392 	 *
	 * Note: When FW completes commands, it places corresponding context
5394 	 * values in this circular reply queue. This circular queue is a fairly
5395 	 * typical producer-consumer queue. FW is the producer (of completed
5396 	 * commands) and the driver is the consumer.
5397 	 */
5398 	context_sz = sizeof(u32);
5399 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5400 
5401 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5402 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5403 
5404 	if (!instance->reply_queue) {
5405 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5406 		goto fail_reply_queue;
5407 	}
5408 
5409 	if (megasas_issue_init_mfi(instance))
5410 		goto fail_fw_init;
5411 
5412 	if (megasas_get_ctrl_info(instance)) {
5413 		dev_err(&instance->pdev->dev, "(%d): Could get controller info "
5414 			"Fail from %s %d\n", instance->unique_id,
5415 			__func__, __LINE__);
5416 		goto fail_fw_init;
5417 	}
5418 
5419 	instance->fw_support_ieee = 0;
5420 	instance->fw_support_ieee =
5421 		(instance->instancet->read_fw_status_reg(instance) &
5422 		0x04000000);
5423 
5424 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5425 			instance->fw_support_ieee);
5426 
5427 	if (instance->fw_support_ieee)
5428 		instance->flag_ieee = 1;
5429 
5430 	return 0;
5431 
5432 fail_fw_init:
5433 
5434 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5435 			    instance->reply_queue, instance->reply_queue_h);
5436 fail_reply_queue:
5437 	megasas_free_cmds(instance);
5438 
5439 fail_alloc_cmds:
5440 	return 1;
5441 }
5442 
5443 static
5444 void megasas_setup_irq_poll(struct megasas_instance *instance)
5445 {
5446 	struct megasas_irq_context *irq_ctx;
5447 	u32 count, i;
5448 
5449 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5450 
5451 	/* Initialize IRQ poll */
5452 	for (i = 0; i < count; i++) {
5453 		irq_ctx = &instance->irq_context[i];
5454 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5455 		irq_ctx->irq_poll_scheduled = false;
5456 		irq_poll_init(&irq_ctx->irqpoll,
5457 			      instance->threshold_reply_count,
5458 			      megasas_irqpoll);
5459 	}
5460 }
5461 
5462 /*
5463  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5464  * @instance:				Adapter soft state
5465  *
5466  * Do not enable interrupt, only setup ISRs.
5467  *
5468  * Return 0 on success.
5469  */
5470 static int
5471 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5472 {
5473 	struct pci_dev *pdev;
5474 
5475 	pdev = instance->pdev;
5476 	instance->irq_context[0].instance = instance;
5477 	instance->irq_context[0].MSIxIndex = 0;
5478 	if (request_irq(pci_irq_vector(pdev, 0),
5479 			instance->instancet->service_isr, IRQF_SHARED,
5480 			"megasas", &instance->irq_context[0])) {
5481 		dev_err(&instance->pdev->dev,
5482 				"Failed to register IRQ from %s %d\n",
5483 				__func__, __LINE__);
5484 		return -1;
5485 	}
5486 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5487 	instance->low_latency_index_start = 0;
5488 	return 0;
5489 }
5490 
5491 /**
5492  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5493  * @instance:				Adapter soft state
5494  * @is_probe:				Driver probe check
5495  *
5496  * Do not enable interrupt, only setup ISRs.
5497  *
5498  * Return 0 on success.
5499  */
5500 static int
5501 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5502 {
5503 	int i, j;
5504 	struct pci_dev *pdev;
5505 
5506 	pdev = instance->pdev;
5507 
5508 	/* Try MSI-x */
5509 	for (i = 0; i < instance->msix_vectors; i++) {
5510 		instance->irq_context[i].instance = instance;
5511 		instance->irq_context[i].MSIxIndex = i;
5512 		if (request_irq(pci_irq_vector(pdev, i),
5513 			instance->instancet->service_isr, 0, "megasas",
5514 			&instance->irq_context[i])) {
5515 			dev_err(&instance->pdev->dev,
5516 				"Failed to register IRQ for vector %d.\n", i);
5517 			for (j = 0; j < i; j++)
5518 				free_irq(pci_irq_vector(pdev, j),
5519 					 &instance->irq_context[j]);
			/* Retry IRQ registration for IO_APIC */
5521 			instance->msix_vectors = 0;
5522 			instance->msix_load_balance = false;
5523 			if (is_probe) {
5524 				pci_free_irq_vectors(instance->pdev);
5525 				return megasas_setup_irqs_ioapic(instance);
5526 			} else {
5527 				return -1;
5528 			}
5529 		}
5530 	}
5531 
5532 	return 0;
5533 }
5534 
5535 /*
5536  * megasas_destroy_irqs-		unregister interrupts.
5537  * @instance:				Adapter soft state
5538  * return:				void
5539  */
5540 static void
megasas_destroy_irqs(struct megasas_instance *instance)
{
5543 	int i;
5544 	int count;
5545 	struct megasas_irq_context *irq_ctx;
5546 
5547 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
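	/* irq_poll is only initialized for fusion adapters; disable it before
	 * freeing the vectors.
	 */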
5548 	if (instance->adapter_type != MFI_SERIES) {
5549 		for (i = 0; i < count; i++) {
5550 			irq_ctx = &instance->irq_context[i];
5551 			irq_poll_disable(&irq_ctx->irqpoll);
5552 		}
5553 	}
5554 
5555 	if (instance->msix_vectors)
5556 		for (i = 0; i < instance->msix_vectors; i++) {
5557 			free_irq(pci_irq_vector(instance->pdev, i),
5558 				 &instance->irq_context[i]);
5559 		}
5560 	else
5561 		free_irq(pci_irq_vector(instance->pdev, 0),
5562 			 &instance->irq_context[0]);
5563 }
5564 
5565 /**
5566  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5567  * @instance:				Adapter soft state
5568  * @is_probe:				Driver probe check
5569  *
5570  * Return 0 on success.
5571  */
5572 void
5573 megasas_setup_jbod_map(struct megasas_instance *instance)
5574 {
5575 	int i;
5576 	struct fusion_context *fusion = instance->ctrl_context;
5577 	u32 pd_seq_map_sz;
5578 
5579 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5580 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5581 
5582 	instance->use_seqnum_jbod_fp =
5583 		instance->support_seqnum_jbod_fp;
5584 	if (reset_devices || !fusion ||
5585 		!instance->support_seqnum_jbod_fp) {
5586 		dev_info(&instance->pdev->dev,
5587 			"JBOD sequence map is disabled %s %d\n",
5588 			__func__, __LINE__);
5589 		instance->use_seqnum_jbod_fp = false;
5590 		return;
5591 	}
5592 
5593 	if (fusion->pd_seq_sync[0])
5594 		goto skip_alloc;
5595 
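	/* Allocate the DMA-coherent PD sequence number map buffers (one per
	 * JBOD map copy) used by megasas_sync_pd_seq_num() to sync JBOD
	 * sequence numbers with FW.
	 */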
5596 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5597 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5598 			(&instance->pdev->dev, pd_seq_map_sz,
5599 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5600 		if (!fusion->pd_seq_sync[i]) {
5601 			dev_err(&instance->pdev->dev,
5602 				"Failed to allocate memory from %s %d\n",
5603 				__func__, __LINE__);
5604 			if (i == 1) {
5605 				dma_free_coherent(&instance->pdev->dev,
5606 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5607 					fusion->pd_seq_phys[0]);
5608 				fusion->pd_seq_sync[0] = NULL;
5609 			}
5610 			instance->use_seqnum_jbod_fp = false;
5611 			return;
5612 		}
5613 	}
5614 
5615 skip_alloc:
5616 	if (!megasas_sync_pd_seq_num(instance, false) &&
5617 		!megasas_sync_pd_seq_num(instance, true))
5618 		instance->use_seqnum_jbod_fp = true;
5619 	else
5620 		instance->use_seqnum_jbod_fp = false;
5621 }
5622 
5623 static void megasas_setup_reply_map(struct megasas_instance *instance)
5624 {
5625 	const struct cpumask *mask;
5626 	unsigned int queue, cpu, low_latency_index_start;
5627 
5628 	low_latency_index_start = instance->low_latency_index_start;
5629 
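	/* Map each CPU to a reply queue based on the MSI-X affinity masks.
	 * Queues below low_latency_index_start (high IOPS/admin queues) are
	 * skipped. Fall back to simple round-robin assignment if affinity
	 * information is unavailable.
	 */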
5630 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5631 		mask = pci_irq_get_affinity(instance->pdev, queue);
5632 		if (!mask)
5633 			goto fallback;
5634 
5635 		for_each_cpu(cpu, mask)
5636 			instance->reply_map[cpu] = queue;
5637 	}
5638 	return;
5639 
5640 fallback:
5641 	queue = low_latency_index_start;
5642 	for_each_possible_cpu(cpu) {
5643 		instance->reply_map[cpu] = queue;
5644 		if (queue == (instance->msix_vectors - 1))
5645 			queue = low_latency_index_start;
5646 		else
5647 			queue++;
5648 	}
5649 }
5650 
5651 /**
5652  * megasas_get_device_list -	Get the PD and LD device list from FW.
5653  * @instance:			Adapter soft state
5654  * @return:			Success or failure
5655  *
5656  * Issue DCMDs to Firmware to get the PD and LD list.
5657  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5658  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5659  */
5660 static
5661 int megasas_get_device_list(struct megasas_instance *instance)
5662 {
5663 	memset(instance->pd_list, 0,
5664 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5665 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5666 
5667 	if (instance->enable_fw_dev_list) {
5668 		if (megasas_host_device_list_query(instance, true))
5669 			return FAILED;
5670 	} else {
5671 		if (megasas_get_pd_list(instance) < 0) {
5672 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5673 			return FAILED;
5674 		}
5675 
5676 		if (megasas_ld_list_query(instance,
5677 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5678 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5679 			return FAILED;
5680 		}
5681 	}
5682 
5683 	return SUCCESS;
5684 }
5685 
5686 /**
5687  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5688  * @instance:					Adapter soft state
5689  * return:					void
5690  */
5691 static inline void
5692 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5693 {
5694 	int i;
5695 	int local_numa_node;
5696 
5697 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5698 		local_numa_node = dev_to_node(&instance->pdev->dev);
5699 
5700 		for (i = 0; i < instance->low_latency_index_start; i++)
5701 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5702 				cpumask_of_node(local_numa_node));
5703 	}
5704 }
5705 
5706 static int
5707 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5708 {
5709 	int i, irq_flags;
5710 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5711 	struct irq_affinity *descp = &desc;
5712 
5713 	irq_flags = PCI_IRQ_MSIX;
5714 
5715 	if (instance->smp_affinity_enable)
5716 		irq_flags |= PCI_IRQ_AFFINITY;
5717 	else
5718 		descp = NULL;
5719 
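	/* .pre_vectors keeps the first low_latency_index_start vectors out of
	 * the automatic affinity spreading done with PCI_IRQ_AFFINITY.
	 */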
5720 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5721 		instance->low_latency_index_start,
5722 		instance->msix_vectors, irq_flags, descp);
5723 
5724 	return i;
5725 }
5726 
5727 /**
5728  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5729  * @instance:			Adapter soft state
5730  * return:			void
5731  */
5732 static void
5733 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5734 {
5735 	int i;
5736 	unsigned int num_msix_req;
5737 
5738 	i = __megasas_alloc_irq_vectors(instance);
5739 
5740 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5741 	    (i != instance->msix_vectors)) {
5742 		if (instance->msix_vectors)
5743 			pci_free_irq_vectors(instance->pdev);
5744 		/* Disable Balanced IOPS mode and try realloc vectors */
5745 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5746 		instance->low_latency_index_start = 1;
5747 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5748 
5749 		instance->msix_vectors = min(num_msix_req,
5750 				instance->msix_vectors);
5751 
5752 		i = __megasas_alloc_irq_vectors(instance);
5753 
5754 	}
5755 
5756 	dev_info(&instance->pdev->dev,
5757 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5758 
5759 	if (i > 0)
5760 		instance->msix_vectors = i;
5761 	else
5762 		instance->msix_vectors = 0;
5763 
5764 	if (instance->smp_affinity_enable)
5765 		megasas_set_high_iops_queue_affinity_hint(instance);
5766 }
5767 
5768 /**
5769  * megasas_init_fw -	Initializes the FW
5770  * @instance:		Adapter soft state
5771  *
5772  * This is the main function for initializing firmware
5773  */
5774 
5775 static int megasas_init_fw(struct megasas_instance *instance)
5776 {
5777 	u32 max_sectors_1;
5778 	u32 max_sectors_2, tmp_sectors, msix_enable;
5779 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5780 	resource_size_t base_addr;
5781 	void *base_addr_phys;
5782 	struct megasas_ctrl_info *ctrl_info = NULL;
5783 	unsigned long bar_list;
5784 	int i, j, loop;
5785 	struct IOV_111 *iovPtr;
5786 	struct fusion_context *fusion;
5787 	bool intr_coalescing;
5788 	unsigned int num_msix_req;
5789 	u16 lnksta, speed;
5790 
5791 	fusion = instance->ctrl_context;
5792 
5793 	/* Find first memory bar */
5794 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5795 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5796 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5797 					 "megasas: LSI")) {
5798 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5799 		return -EBUSY;
5800 	}
5801 
5802 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5803 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5804 
5805 	if (!instance->reg_set) {
5806 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5807 		goto fail_ioremap;
5808 	}
5809 
5810 	base_addr_phys = &base_addr;
5811 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5812 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5813 		   instance->bar, base_addr_phys, instance->reg_set);
5814 
5815 	if (instance->adapter_type != MFI_SERIES)
5816 		instance->instancet = &megasas_instance_template_fusion;
5817 	else {
5818 		switch (instance->pdev->device) {
5819 		case PCI_DEVICE_ID_LSI_SAS1078R:
5820 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5821 			instance->instancet = &megasas_instance_template_ppc;
5822 			break;
5823 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5824 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5825 			instance->instancet = &megasas_instance_template_gen2;
5826 			break;
5827 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5828 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5829 			instance->instancet = &megasas_instance_template_skinny;
5830 			break;
5831 		case PCI_DEVICE_ID_LSI_SAS1064R:
5832 		case PCI_DEVICE_ID_DELL_PERC5:
5833 		default:
5834 			instance->instancet = &megasas_instance_template_xscale;
5835 			instance->pd_list_not_supported = 1;
5836 			break;
5837 		}
5838 	}
5839 
5840 	if (megasas_transition_to_ready(instance, 0)) {
5841 		dev_info(&instance->pdev->dev,
5842 			 "Failed to transition controller to ready from %s!\n",
5843 			 __func__);
5844 		if (instance->adapter_type != MFI_SERIES) {
5845 			status_reg = instance->instancet->read_fw_status_reg(
5846 					instance);
5847 			if (status_reg & MFI_RESET_ADAPTER) {
5848 				if (megasas_adp_reset_wait_for_ready
5849 					(instance, true, 0) == FAILED)
5850 					goto fail_ready_state;
5851 			} else {
5852 				goto fail_ready_state;
5853 			}
5854 		} else {
5855 			atomic_set(&instance->fw_reset_no_pci_access, 1);
5856 			instance->instancet->adp_reset
5857 				(instance, instance->reg_set);
5858 			atomic_set(&instance->fw_reset_no_pci_access, 0);
5859 
			/* wait for about 30 seconds before retrying */
5861 			ssleep(30);
5862 
5863 			if (megasas_transition_to_ready(instance, 0))
5864 				goto fail_ready_state;
5865 		}
5866 
5867 		dev_info(&instance->pdev->dev,
5868 			 "FW restarted successfully from %s!\n",
5869 			 __func__);
5870 	}
5871 
5872 	megasas_init_ctrl_params(instance);
5873 
5874 	if (megasas_set_dma_mask(instance))
5875 		goto fail_ready_state;
5876 
5877 	if (megasas_alloc_ctrl_mem(instance))
5878 		goto fail_alloc_dma_buf;
5879 
5880 	if (megasas_alloc_ctrl_dma_buffers(instance))
5881 		goto fail_alloc_dma_buf;
5882 
5883 	fusion = instance->ctrl_context;
5884 
5885 	if (instance->adapter_type >= VENTURA_SERIES) {
5886 		scratch_pad_2 =
5887 			megasas_readl(instance,
5888 				      &instance->reg_set->outbound_scratch_pad_2);
5889 		instance->max_raid_mapsize = ((scratch_pad_2 >>
5890 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5891 			MR_MAX_RAID_MAP_SIZE_MASK);
5892 	}
5893 
5894 	switch (instance->adapter_type) {
5895 	case VENTURA_SERIES:
5896 		fusion->pcie_bw_limitation = true;
5897 		break;
5898 	case AERO_SERIES:
5899 		fusion->r56_div_offload = true;
5900 		break;
5901 	default:
5902 		break;
5903 	}
5904 
5905 	/* Check if MSI-X is supported while in ready state */
5906 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5907 		       0x4000000) >> 0x1a;
5908 	if (msix_enable && !msix_disable) {
5909 
5910 		scratch_pad_1 = megasas_readl
5911 			(instance, &instance->reg_set->outbound_scratch_pad_1);
5912 		/* Check max MSI-X vectors */
5913 		if (fusion) {
5914 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5915 				/* Thunderbolt Series*/
5916 				instance->msix_vectors = (scratch_pad_1
5917 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5918 			} else {
5919 				instance->msix_vectors = ((scratch_pad_1
5920 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5921 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5922 
5923 				/*
5924 				 * For Invader series, > 8 MSI-x vectors
5925 				 * supported by FW/HW implies combined
5926 				 * reply queue mode is enabled.
5927 				 * For Ventura series, > 16 MSI-x vectors
5928 				 * supported by FW/HW implies combined
5929 				 * reply queue mode is enabled.
5930 				 */
5931 				switch (instance->adapter_type) {
5932 				case INVADER_SERIES:
5933 					if (instance->msix_vectors > 8)
5934 						instance->msix_combined = true;
5935 					break;
5936 				case AERO_SERIES:
5937 				case VENTURA_SERIES:
5938 					if (instance->msix_vectors > 16)
5939 						instance->msix_combined = true;
5940 					break;
5941 				}
5942 
5943 				if (rdpq_enable)
5944 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
5945 								1 : 0;
5946 
5947 				if (instance->adapter_type >= INVADER_SERIES &&
5948 				    !instance->msix_combined) {
5949 					instance->msix_load_balance = true;
5950 					instance->smp_affinity_enable = false;
5951 				}
5952 
				/* Save reply post host index addresses 1-15 to
				 * local memory. Index 0 is already saved from
				 * reg offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
				 */
5957 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5958 					instance->reply_post_host_index_addr[loop] =
5959 						(u32 __iomem *)
5960 						((u8 __iomem *)instance->reg_set +
5961 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5962 						+ (loop * 0x10));
5963 				}
5964 			}
5965 
5966 			dev_info(&instance->pdev->dev,
5967 				 "firmware supports msix\t: (%d)",
5968 				 instance->msix_vectors);
5969 			if (msix_vectors)
5970 				instance->msix_vectors = min(msix_vectors,
5971 					instance->msix_vectors);
5972 		} else /* MFI adapters */
5973 			instance->msix_vectors = 1;
5974 
5975 
5976 		/*
		 * For Aero (if certain conditions are met), the driver configures
		 * a few additional reply queues with interrupt coalescing enabled.
		 * These queues are called high IOPS queues, and the remaining
		 * reply queues (based on the number of logical CPUs) are termed
		 * low latency queues.
		 *
		 * Total number of reply queues = high IOPS queues + low latency queues
		 *
		 * For the rest of the fusion adapters, 1 additional reply queue
		 * is reserved for management commands and the remaining reply
		 * queues (based on the number of logical CPUs) are used for IOs
		 * and referred to as IO queues.
		 * Total number of reply queues = 1 + IO queues
		 *
		 * MFI adapters support a single MSI-x vector, so a single reply
		 * queue is used for both IO and management commands.
5993 		 */
5994 
5995 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
5996 								true : false;
5997 		if (intr_coalescing &&
5998 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
5999 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6000 			instance->perf_mode = MR_BALANCED_PERF_MODE;
6001 		else
6002 			instance->perf_mode = MR_LATENCY_PERF_MODE;
6003 
6004 
6005 		if (instance->adapter_type == AERO_SERIES) {
6006 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6007 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6008 
6009 			/*
6010 			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6011 			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6012 			 */
6013 			if (speed < 0x4) {
6014 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6015 				fusion->pcie_bw_limitation = true;
6016 			}
6017 
6018 			/*
			 * Performance mode settings provided through the module
			 * parameter perf_mode take effect only for:
			 * 1. The Aero family of adapters.
			 * 2. When the user sets perf_mode in the range 0-2.
6023 			 */
6024 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6025 				(perf_mode <= MR_LATENCY_PERF_MODE))
6026 				instance->perf_mode = perf_mode;
6027 			/*
6028 			 * If intr coalescing is not supported by controller FW, then IOPS
6029 			 * and Balanced modes are not feasible.
6030 			 */
6031 			if (!intr_coalescing)
6032 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6033 
6034 		}
6035 
6036 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6037 			instance->low_latency_index_start =
6038 				MR_HIGH_IOPS_QUEUE_COUNT;
6039 		else
6040 			instance->low_latency_index_start = 1;
6041 
6042 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6043 
6044 		instance->msix_vectors = min(num_msix_req,
6045 				instance->msix_vectors);
6046 
6047 		megasas_alloc_irq_vectors(instance);
6048 		if (!instance->msix_vectors)
6049 			instance->msix_load_balance = false;
6050 	}
6051 	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT based adapters.
6054 	 */
6055 	if (instance->msix_combined) {
6056 		instance->reply_post_host_index_addr[0] =
6057 				(u32 *)((u8 *)instance->reg_set +
6058 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6059 	} else {
6060 		instance->reply_post_host_index_addr[0] =
6061 			(u32 *)((u8 *)instance->reg_set +
6062 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6063 	}
6064 
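	/* MSI-X could not be enabled; fall back to a single legacy (INTx)
	 * vector.
	 */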
6065 	if (!instance->msix_vectors) {
6066 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6067 		if (i < 0)
6068 			goto fail_init_adapter;
6069 	}
6070 
6071 	megasas_setup_reply_map(instance);
6072 
6073 	dev_info(&instance->pdev->dev,
6074 		"current msix/online cpus\t: (%d/%d)\n",
6075 		instance->msix_vectors, (unsigned int)num_online_cpus());
6076 	dev_info(&instance->pdev->dev,
6077 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6078 
6079 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6080 		(unsigned long)instance);
6081 
6082 	/*
	 * Below are the default values for legacy firmware
	 * (non-fusion based controllers).
6085 	 */
6086 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6087 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6088 	/* Get operational params, sge flags, send init cmd to controller */
6089 	if (instance->instancet->init_adapter(instance))
6090 		goto fail_init_adapter;
6091 
6092 	if (instance->adapter_type >= VENTURA_SERIES) {
6093 		scratch_pad_3 =
6094 			megasas_readl(instance,
6095 				      &instance->reg_set->outbound_scratch_pad_3);
6096 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6097 			MR_DEFAULT_NVME_PAGE_SHIFT)
6098 			instance->nvme_page_size =
6099 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6100 
6101 		dev_info(&instance->pdev->dev,
6102 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6103 	}
6104 
6105 	if (instance->msix_vectors ?
6106 		megasas_setup_irqs_msix(instance, 1) :
6107 		megasas_setup_irqs_ioapic(instance))
6108 		goto fail_init_adapter;
6109 
6110 	if (instance->adapter_type != MFI_SERIES)
6111 		megasas_setup_irq_poll(instance);
6112 
6113 	instance->instancet->enable_intr(instance);
6114 
6115 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6116 
6117 	megasas_setup_jbod_map(instance);
6118 
6119 	if (megasas_get_device_list(instance) != SUCCESS) {
6120 		dev_err(&instance->pdev->dev,
6121 			"%s: megasas_get_device_list failed\n",
6122 			__func__);
6123 		goto fail_get_ld_pd_list;
6124 	}
6125 
6126 	/* stream detection initialization */
6127 	if (instance->adapter_type >= VENTURA_SERIES) {
6128 		fusion->stream_detect_by_ld =
6129 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6130 				sizeof(struct LD_STREAM_DETECT *),
6131 				GFP_KERNEL);
6132 		if (!fusion->stream_detect_by_ld) {
6133 			dev_err(&instance->pdev->dev,
6134 				"unable to allocate stream detection for pool of LDs\n");
6135 			goto fail_get_ld_pd_list;
6136 		}
6137 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6138 			fusion->stream_detect_by_ld[i] =
6139 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6140 				GFP_KERNEL);
6141 			if (!fusion->stream_detect_by_ld[i]) {
6142 				dev_err(&instance->pdev->dev,
6143 					"unable to allocate stream detect by LD\n ");
6144 				for (j = 0; j < i; ++j)
6145 					kfree(fusion->stream_detect_by_ld[j]);
6146 				kfree(fusion->stream_detect_by_ld);
6147 				fusion->stream_detect_by_ld = NULL;
6148 				goto fail_get_ld_pd_list;
6149 			}
6150 			fusion->stream_detect_by_ld[i]->mru_bit_map
6151 				= MR_STREAM_BITMAP;
6152 		}
6153 	}
6154 
6155 	/*
6156 	 * Compute the max allowed sectors per IO: The controller info has two
6157 	 * limits on max sectors. Driver should use the minimum of these two.
6158 	 *
6159 	 * 1 << stripe_sz_ops.min = max sectors per strip
6160 	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so that number always ended up as zero.
6163 	 */
6164 	tmp_sectors = 0;
6165 	ctrl_info = instance->ctrl_info_buf;
6166 
6167 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6168 		le16_to_cpu(ctrl_info->max_strips_per_io);
6169 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6170 
6171 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6172 
6173 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6174 	instance->passive = ctrl_info->cluster.passive;
6175 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6176 	instance->UnevenSpanSupport =
6177 		ctrl_info->adapterOperations2.supportUnevenSpans;
6178 	if (instance->UnevenSpanSupport) {
6179 		struct fusion_context *fusion = instance->ctrl_context;
6180 		if (MR_ValidateMapInfo(instance, instance->map_id))
6181 			fusion->fast_path_io = 1;
6182 		else
6183 			fusion->fast_path_io = 0;
6184 
6185 	}
6186 	if (ctrl_info->host_interface.SRIOV) {
6187 		instance->requestorId = ctrl_info->iov.requestorId;
6188 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6189 			if (!ctrl_info->adapterOperations2.activePassive)
6190 			    instance->PlasmaFW111 = 1;
6191 
6192 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6193 			    instance->PlasmaFW111 ? "1.11" : "new");
6194 
6195 			if (instance->PlasmaFW111) {
6196 			    iovPtr = (struct IOV_111 *)
6197 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6198 			    instance->requestorId = iovPtr->requestorId;
6199 			}
6200 		}
6201 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6202 			instance->requestorId);
6203 	}
6204 
6205 	instance->crash_dump_fw_support =
6206 		ctrl_info->adapterOperations3.supportCrashDump;
6207 	instance->crash_dump_drv_support =
6208 		(instance->crash_dump_fw_support &&
6209 		instance->crash_dump_buf);
6210 	if (instance->crash_dump_drv_support)
6211 		megasas_set_crash_dump_params(instance,
6212 			MR_CRASH_BUF_TURN_OFF);
6213 
6214 	else {
6215 		if (instance->crash_dump_buf)
6216 			dma_free_coherent(&instance->pdev->dev,
6217 				CRASH_DMA_BUF_SIZE,
6218 				instance->crash_dump_buf,
6219 				instance->crash_dump_h);
6220 		instance->crash_dump_buf = NULL;
6221 	}
6222 
6223 	if (instance->snapdump_wait_time) {
6224 		megasas_get_snapdump_properties(instance);
6225 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6226 			 instance->snapdump_wait_time);
6227 	}
6228 
6229 	dev_info(&instance->pdev->dev,
6230 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6231 		le16_to_cpu(ctrl_info->pci.vendor_id),
6232 		le16_to_cpu(ctrl_info->pci.device_id),
6233 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6234 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6235 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6236 		instance->UnevenSpanSupport ? "yes" : "no");
6237 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6238 		instance->crash_dump_drv_support ? "yes" : "no");
6239 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6240 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6241 
6242 	instance->max_sectors_per_req = instance->max_num_sge *
6243 						SGE_BUFFER_SIZE / 512;
6244 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6245 		instance->max_sectors_per_req = tmp_sectors;
6246 
6247 	/* Check for valid throttlequeuedepth module parameter */
6248 	if (throttlequeuedepth &&
6249 			throttlequeuedepth <= instance->max_scsi_cmds)
6250 		instance->throttlequeuedepth = throttlequeuedepth;
6251 	else
6252 		instance->throttlequeuedepth =
6253 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6254 
6255 	if ((resetwaittime < 1) ||
6256 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6257 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6258 
6259 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6260 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6261 
6262 	/* Launch SR-IOV heartbeat timer */
6263 	if (instance->requestorId) {
6264 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6265 			megasas_start_timer(instance);
6266 		} else {
6267 			instance->skip_heartbeat_timer_del = 1;
6268 			goto fail_get_ld_pd_list;
6269 		}
6270 	}
6271 
6272 	/*
6273 	 * Create and start watchdog thread which will monitor
6274 	 * controller state every 1 sec and trigger OCR when
6275 	 * it enters fault state
6276 	 */
6277 	if (instance->adapter_type != MFI_SERIES)
6278 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6279 			goto fail_start_watchdog;
6280 
6281 	return 0;
6282 
6283 fail_start_watchdog:
6284 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6285 		del_timer_sync(&instance->sriov_heartbeat_timer);
6286 fail_get_ld_pd_list:
6287 	instance->instancet->disable_intr(instance);
6288 	megasas_destroy_irqs(instance);
6289 fail_init_adapter:
6290 	if (instance->msix_vectors)
6291 		pci_free_irq_vectors(instance->pdev);
6292 	instance->msix_vectors = 0;
6293 fail_alloc_dma_buf:
6294 	megasas_free_ctrl_dma_buffers(instance);
6295 	megasas_free_ctrl_mem(instance);
6296 fail_ready_state:
6297 	iounmap(instance->reg_set);
6298 
6299 fail_ioremap:
6300 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6301 
6302 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6303 		__func__, __LINE__);
6304 	return -EINVAL;
6305 }
6306 
6307 /**
6308  * megasas_release_mfi -	Reverses the FW initialization
6309  * @instance:			Adapter soft state
6310  */
6311 static void megasas_release_mfi(struct megasas_instance *instance)
6312 {
	u32 reply_q_sz = sizeof(u32) * (instance->max_mfi_cmds + 1);
6314 
6315 	if (instance->reply_queue)
6316 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6317 			    instance->reply_queue, instance->reply_queue_h);
6318 
6319 	megasas_free_cmds(instance);
6320 
6321 	iounmap(instance->reg_set);
6322 
6323 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6324 }
6325 
6326 /**
6327  * megasas_get_seq_num -	Gets latest event sequence numbers
6328  * @instance:			Adapter soft state
6329  * @eli:			FW event log sequence numbers information
6330  *
6331  * FW maintains a log of all events in a non-volatile area. Upper layers would
6332  * usually find out the latest sequence number of the events, the seq number at
6333  * the boot etc. They would "read" all the events below the latest seq number
6334  * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
6336  * wait for the events to happen.
6337  */
6338 static int
6339 megasas_get_seq_num(struct megasas_instance *instance,
6340 		    struct megasas_evt_log_info *eli)
6341 {
6342 	struct megasas_cmd *cmd;
6343 	struct megasas_dcmd_frame *dcmd;
6344 	struct megasas_evt_log_info *el_info;
6345 	dma_addr_t el_info_h = 0;
6346 	int ret;
6347 
6348 	cmd = megasas_get_cmd(instance);
6349 
6350 	if (!cmd) {
6351 		return -ENOMEM;
6352 	}
6353 
6354 	dcmd = &cmd->frame->dcmd;
6355 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6356 				     sizeof(struct megasas_evt_log_info),
6357 				     &el_info_h, GFP_KERNEL);
6358 	if (!el_info) {
6359 		megasas_return_cmd(instance, cmd);
6360 		return -ENOMEM;
6361 	}
6362 
6363 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6364 
6365 	dcmd->cmd = MFI_CMD_DCMD;
6366 	dcmd->cmd_status = 0x0;
6367 	dcmd->sge_count = 1;
6368 	dcmd->flags = MFI_FRAME_DIR_READ;
6369 	dcmd->timeout = 0;
6370 	dcmd->pad_0 = 0;
6371 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6372 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6373 
6374 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6375 				 sizeof(struct megasas_evt_log_info));
6376 
6377 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6378 	if (ret != DCMD_SUCCESS) {
6379 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6380 			__func__, __LINE__);
6381 		goto dcmd_failed;
6382 	}
6383 
6384 	/*
	 * Copy the data back into the caller's buffer
6386 	 */
6387 	eli->newest_seq_num = el_info->newest_seq_num;
6388 	eli->oldest_seq_num = el_info->oldest_seq_num;
6389 	eli->clear_seq_num = el_info->clear_seq_num;
6390 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6391 	eli->boot_seq_num = el_info->boot_seq_num;
6392 
6393 dcmd_failed:
6394 	dma_free_coherent(&instance->pdev->dev,
6395 			sizeof(struct megasas_evt_log_info),
6396 			el_info, el_info_h);
6397 
6398 	megasas_return_cmd(instance, cmd);
6399 
6400 	return ret;
6401 }
6402 
6403 /**
6404  * megasas_register_aen -	Registers for asynchronous event notification
6405  * @instance:			Adapter soft state
6406  * @seq_num:			The starting sequence number
 * @class_locale_word:		Class/locale of the event
 *
 * This function subscribes for AEN for events beyond the @seq_num. It requests
 * to be notified if and only if the event is of type @class_locale_word.
6411  */
6412 static int
6413 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6414 		     u32 class_locale_word)
6415 {
6416 	int ret_val;
6417 	struct megasas_cmd *cmd;
6418 	struct megasas_dcmd_frame *dcmd;
6419 	union megasas_evt_class_locale curr_aen;
6420 	union megasas_evt_class_locale prev_aen;
6421 
6422 	/*
	 * If there is an AEN pending already (aen_cmd), check if the
6424 	 * class_locale of that pending AEN is inclusive of the new
6425 	 * AEN request we currently have. If it is, then we don't have
6426 	 * to do anything. In other words, whichever events the current
6427 	 * AEN request is subscribing to, have already been subscribed
6428 	 * to.
6429 	 *
6430 	 * If the old_cmd is _not_ inclusive, then we have to abort
6431 	 * that command, form a class_locale that is superset of both
6432 	 * old and current and re-issue to the FW
6433 	 */
6434 
6435 	curr_aen.word = class_locale_word;
6436 
6437 	if (instance->aen_cmd) {
6438 
6439 		prev_aen.word =
6440 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6441 
6442 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6443 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6444 			dev_info(&instance->pdev->dev,
6445 				 "%s %d out of range class %d send by application\n",
6446 				 __func__, __LINE__, curr_aen.members.class);
6447 			return 0;
6448 		}
6449 
6450 		/*
6451 		 * A class whose enum value is smaller is inclusive of all
6452 		 * higher values. If a PROGRESS (= -1) was previously
6453 		 * registered, then a new registration requests for higher
6454 		 * classes need not be sent to FW. They are automatically
6455 		 * included.
6456 		 *
6457 		 * Locale numbers don't have such hierarchy. They are bitmap
6458 		 * values
6459 		 */
6460 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6461 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6462 		      curr_aen.members.locale)) {
6463 			/*
6464 			 * Previously issued event registration includes
6465 			 * current request. Nothing to do.
6466 			 */
6467 			return 0;
6468 		} else {
6469 			curr_aen.members.locale |= prev_aen.members.locale;
6470 
6471 			if (prev_aen.members.class < curr_aen.members.class)
6472 				curr_aen.members.class = prev_aen.members.class;
6473 
6474 			instance->aen_cmd->abort_aen = 1;
6475 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6476 								  instance->
6477 								  aen_cmd, 30);
6478 
6479 			if (ret_val) {
6480 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6481 				       "previous AEN command\n");
6482 				return ret_val;
6483 			}
6484 		}
6485 	}
6486 
6487 	cmd = megasas_get_cmd(instance);
6488 
6489 	if (!cmd)
6490 		return -ENOMEM;
6491 
6492 	dcmd = &cmd->frame->dcmd;
6493 
6494 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6495 
6496 	/*
6497 	 * Prepare DCMD for aen registration
6498 	 */
6499 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6500 
6501 	dcmd->cmd = MFI_CMD_DCMD;
6502 	dcmd->cmd_status = 0x0;
6503 	dcmd->sge_count = 1;
6504 	dcmd->flags = MFI_FRAME_DIR_READ;
6505 	dcmd->timeout = 0;
6506 	dcmd->pad_0 = 0;
6507 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6508 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6509 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6510 	instance->last_seq_num = seq_num;
6511 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6512 
6513 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6514 				 sizeof(struct megasas_evt_detail));
6515 
6516 	if (instance->aen_cmd != NULL) {
6517 		megasas_return_cmd(instance, cmd);
6518 		return 0;
6519 	}
6520 
6521 	/*
6522 	 * Store reference to the cmd used to register for AEN. When an
6523 	 * application wants us to register for AEN, we have to abort this
6524 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6525 	 */
6526 	instance->aen_cmd = cmd;
6527 
6528 	/*
6529 	 * Issue the aen registration frame
6530 	 */
6531 	instance->instancet->issue_dcmd(instance, cmd);
6532 
6533 	return 0;
6534 }
6535 
/**
 * megasas_get_target_prop - Send DCMD with below details to firmware.
 * @instance:		Adapter soft state
 * @sdev:		OS provided scsi device
 *
 * This DCMD fetches a few properties of the LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
 *
 * dcmd.opcode         - MR_DCMD_DRV_GET_TARGET_PROP
 * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
 *                       0 = system PD, 1 = LD.
 * dcmd.mbox.s[1]      - TargetID for LD/system PD.
 * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
 *
 * Return: 0 on success, non-zero on failure.
 */
6554 int
6555 megasas_get_target_prop(struct megasas_instance *instance,
6556 			struct scsi_device *sdev)
6557 {
6558 	int ret;
6559 	struct megasas_cmd *cmd;
6560 	struct megasas_dcmd_frame *dcmd;
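	/* The FW target ID is derived from the SCSI channel/ID pair; each
	 * logical channel holds MEGASAS_MAX_DEV_PER_CHANNEL devices.
	 */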
6561 	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6562 			sdev->id;
6563 
6564 	cmd = megasas_get_cmd(instance);
6565 
6566 	if (!cmd) {
6567 		dev_err(&instance->pdev->dev,
6568 			"Failed to get cmd %s\n", __func__);
6569 		return -ENOMEM;
6570 	}
6571 
6572 	dcmd = &cmd->frame->dcmd;
6573 
6574 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6575 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6576 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6577 
6578 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6579 	dcmd->cmd = MFI_CMD_DCMD;
6580 	dcmd->cmd_status = 0xFF;
6581 	dcmd->sge_count = 1;
6582 	dcmd->flags = MFI_FRAME_DIR_READ;
6583 	dcmd->timeout = 0;
6584 	dcmd->pad_0 = 0;
6585 	dcmd->data_xfer_len =
6586 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6587 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6588 
6589 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6590 				 sizeof(struct MR_TARGET_PROPERTIES));
6591 
6592 	if ((instance->adapter_type != MFI_SERIES) &&
6593 	    !instance->mask_interrupts)
6594 		ret = megasas_issue_blocked_cmd(instance,
6595 						cmd, MFI_IO_TIMEOUT_SECS);
6596 	else
6597 		ret = megasas_issue_polled(instance, cmd);
6598 
6599 	switch (ret) {
6600 	case DCMD_TIMEOUT:
6601 		switch (dcmd_timeout_ocr_possible(instance)) {
6602 		case INITIATE_OCR:
6603 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6604 			mutex_unlock(&instance->reset_mutex);
6605 			megasas_reset_fusion(instance->host,
6606 					     MFI_IO_TIMEOUT_OCR);
6607 			mutex_lock(&instance->reset_mutex);
6608 			break;
6609 		case KILL_ADAPTER:
6610 			megaraid_sas_kill_hba(instance);
6611 			break;
6612 		case IGNORE_TIMEOUT:
6613 			dev_info(&instance->pdev->dev,
6614 				 "Ignore DCMD timeout: %s %d\n",
6615 				 __func__, __LINE__);
6616 			break;
6617 		}
6618 		break;
6619 
6620 	default:
6621 		megasas_return_cmd(instance, cmd);
6622 	}
6623 	if (ret != DCMD_SUCCESS)
6624 		dev_err(&instance->pdev->dev,
6625 			"return from %s %d return value %d\n",
6626 			__func__, __LINE__, ret);
6627 
6628 	return ret;
6629 }
6630 
6631 /**
6632  * megasas_start_aen -	Subscribes to AEN during driver load time
6633  * @instance:		Adapter soft state
6634  */
6635 static int megasas_start_aen(struct megasas_instance *instance)
6636 {
6637 	struct megasas_evt_log_info eli;
6638 	union megasas_evt_class_locale class_locale;
6639 
6640 	/*
6641 	 * Get the latest sequence number from FW
6642 	 */
6643 	memset(&eli, 0, sizeof(eli));
6644 
6645 	if (megasas_get_seq_num(instance, &eli))
6646 		return -1;
6647 
6648 	/*
6649 	 * Register AEN with FW for latest sequence number plus 1
6650 	 */
6651 	class_locale.members.reserved = 0;
6652 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6653 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6654 
6655 	return megasas_register_aen(instance,
6656 			le32_to_cpu(eli.newest_seq_num) + 1,
6657 			class_locale.word);
6658 }
6659 
6660 /**
6661  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6662  * @instance:		Adapter soft state
6663  */
6664 static int megasas_io_attach(struct megasas_instance *instance)
6665 {
6666 	struct Scsi_Host *host = instance->host;
6667 
6668 	/*
6669 	 * Export parameters required by SCSI mid-layer
6670 	 */
6671 	host->unique_id = instance->unique_id;
6672 	host->can_queue = instance->max_scsi_cmds;
6673 	host->this_id = instance->init_id;
6674 	host->sg_tablesize = instance->max_num_sge;
6675 
6676 	if (instance->fw_support_ieee)
6677 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6678 
6679 	/*
6680 	 * Check if the module parameter value for max_sectors can be used
6681 	 */
6682 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6683 		instance->max_sectors_per_req = max_sectors;
6684 	else {
6685 		if (max_sectors) {
6686 			if (((instance->pdev->device ==
6687 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6688 				(instance->pdev->device ==
6689 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6690 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6691 				instance->max_sectors_per_req = max_sectors;
6692 			} else {
6693 			dev_info(&instance->pdev->dev, "max_sectors should be > 0"
6694 				"and <= %d (or < 1MB for GEN2 controller)\n",
6695 				instance->max_sectors_per_req);
6696 			}
6697 		}
6698 	}
6699 
6700 	host->max_sectors = instance->max_sectors_per_req;
6701 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6702 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6703 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6704 	host->max_lun = MEGASAS_MAX_LUN;
6705 	host->max_cmd_len = 16;
6706 
6707 	/*
6708 	 * Notify the mid-layer about the new controller
6709 	 */
6710 	if (scsi_add_host(host, &instance->pdev->dev)) {
6711 		dev_err(&instance->pdev->dev,
6712 			"Failed to add host from %s %d\n",
6713 			__func__, __LINE__);
6714 		return -ENODEV;
6715 	}
6716 
6717 	return 0;
6718 }
6719 
6720 /**
6721  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6722  *
6723  * @instance:		Adapter soft state
6724  * Description:
6725  *
 * For Ventura, driver/FW will operate with 63-bit DMA addresses.
 *
 * For Invader:
 *	By default, driver/FW will operate with 32-bit DMA addresses
 *	for consistent DMA mapping, but if the 32-bit consistent
 *	DMA mask fails, the driver will try a 63-bit consistent
 *	mask, provided the FW is truly 63-bit DMA capable.
 *
 * For older controllers (Thunderbolt and MFI based adapters),
 *	driver/FW will operate with 32-bit consistent DMA addresses.
6736  */
6737 static int
6738 megasas_set_dma_mask(struct megasas_instance *instance)
6739 {
6740 	u64 consistent_mask;
6741 	struct pci_dev *pdev;
6742 	u32 scratch_pad_1;
6743 
6744 	pdev = instance->pdev;
6745 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6746 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6747 
6748 	if (IS_DMA64) {
6749 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6750 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6751 			goto fail_set_dma_mask;
6752 
6753 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6754 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6755 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6756 			/*
6757 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6758 			 * for FW capable of handling 64 bit DMA.
6759 			 */
6760 			scratch_pad_1 = megasas_readl
6761 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6762 
6763 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6764 				goto fail_set_dma_mask;
6765 			else if (dma_set_mask_and_coherent(&pdev->dev,
6766 							   DMA_BIT_MASK(63)))
6767 				goto fail_set_dma_mask;
6768 		}
6769 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6770 		goto fail_set_dma_mask;
6771 
6772 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6773 		instance->consistent_mask_64bit = false;
6774 	else
6775 		instance->consistent_mask_64bit = true;
6776 
6777 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6778 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6779 		 (instance->consistent_mask_64bit ? "63" : "32"));
6780 
6781 	return 0;
6782 
6783 fail_set_dma_mask:
6784 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6785 	return -1;
6786 
6787 }
6788 
6789 /*
6790  * megasas_set_adapter_type -	Set adapter type.
6791  *				Supported controllers can be divided in
6792  *				different categories-
6793  *					enum MR_ADAPTER_TYPE {
6794  *						MFI_SERIES = 1,
6795  *						THUNDERBOLT_SERIES = 2,
6796  *						INVADER_SERIES = 3,
6797  *						VENTURA_SERIES = 4,
6798  *						AERO_SERIES = 5,
6799  *					};
6800  * @instance:			Adapter soft state
6801  * return:			void
6802  */
6803 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6804 {
6805 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6806 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6807 		instance->adapter_type = MFI_SERIES;
6808 	} else {
6809 		switch (instance->pdev->device) {
6810 		case PCI_DEVICE_ID_LSI_AERO_10E1:
6811 		case PCI_DEVICE_ID_LSI_AERO_10E2:
6812 		case PCI_DEVICE_ID_LSI_AERO_10E5:
6813 		case PCI_DEVICE_ID_LSI_AERO_10E6:
6814 			instance->adapter_type = AERO_SERIES;
6815 			break;
6816 		case PCI_DEVICE_ID_LSI_VENTURA:
6817 		case PCI_DEVICE_ID_LSI_CRUSADER:
6818 		case PCI_DEVICE_ID_LSI_HARPOON:
6819 		case PCI_DEVICE_ID_LSI_TOMCAT:
6820 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6821 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6822 			instance->adapter_type = VENTURA_SERIES;
6823 			break;
6824 		case PCI_DEVICE_ID_LSI_FUSION:
6825 		case PCI_DEVICE_ID_LSI_PLASMA:
6826 			instance->adapter_type = THUNDERBOLT_SERIES;
6827 			break;
6828 		case PCI_DEVICE_ID_LSI_INVADER:
6829 		case PCI_DEVICE_ID_LSI_INTRUDER:
6830 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6831 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6832 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6833 		case PCI_DEVICE_ID_LSI_FURY:
6834 			instance->adapter_type = INVADER_SERIES;
6835 			break;
6836 		default: /* For all other supported controllers */
6837 			instance->adapter_type = MFI_SERIES;
6838 			break;
6839 		}
6840 	}
6841 }
6842 
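/* Allocate the DMA-coherent producer and consumer index words used by the MFI
 * reply queue, and zero-initialize them.
 */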
6843 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6844 {
6845 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6846 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
6847 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6848 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6849 
6850 	if (!instance->producer || !instance->consumer) {
6851 		dev_err(&instance->pdev->dev,
6852 			"Failed to allocate memory for producer, consumer\n");
6853 		return -1;
6854 	}
6855 
6856 	*instance->producer = 0;
6857 	*instance->consumer = 0;
6858 	return 0;
6859 }
6860 
6861 /**
6862  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6863  *				structures which are not common across MFI
6864  *				adapters and fusion adapters.
6865  *				For MFI based adapters, allocate producer and
6866  *				consumer buffers. For fusion adapters, allocate
6867  *				memory for fusion context.
6868  * @instance:			Adapter soft state
6869  * return:			0 for SUCCESS
6870  */
6871 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6872 {
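	/*
	 * reply_map holds a per-CPU reply queue index and is populated
	 * later by megasas_setup_reply_map().
	 */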
6873 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6874 				      GFP_KERNEL);
6875 	if (!instance->reply_map)
6876 		return -ENOMEM;
6877 
6878 	switch (instance->adapter_type) {
6879 	case MFI_SERIES:
6880 		if (megasas_alloc_mfi_ctrl_mem(instance))
6881 			goto fail;
6882 		break;
6883 	case AERO_SERIES:
6884 	case VENTURA_SERIES:
6885 	case THUNDERBOLT_SERIES:
6886 	case INVADER_SERIES:
6887 		if (megasas_alloc_fusion_context(instance))
6888 			goto fail;
6889 		break;
6890 	}
6891 
6892 	return 0;
6893  fail:
6894 	kfree(instance->reply_map);
6895 	instance->reply_map = NULL;
6896 	return -ENOMEM;
6897 }
6898 
6899 /*
6900  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6901  *				producer, consumer buffers for MFI adapters
6902  *
6903  * @instance -			Adapter soft instance
6904  *
6905  */
6906 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6907 {
6908 	kfree(instance->reply_map);
6909 	if (instance->adapter_type == MFI_SERIES) {
6910 		if (instance->producer)
6911 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6912 					    instance->producer,
6913 					    instance->producer_h);
6914 		if (instance->consumer)
6915 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6916 					    instance->consumer,
6917 					    instance->consumer_h);
6918 	} else {
6919 		megasas_free_fusion_context(instance);
6920 	}
6921 }
6922 
6923 /**
6924  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6925  *					driver load time
6926  *
 * @instance:				Adapter soft instance
 * @return:				0 for SUCCESS
6929  */
6930 static inline
6931 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6932 {
6933 	struct pci_dev *pdev = instance->pdev;
6934 	struct fusion_context *fusion = instance->ctrl_context;
6935 
6936 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
6937 			sizeof(struct megasas_evt_detail),
6938 			&instance->evt_detail_h, GFP_KERNEL);
6939 
6940 	if (!instance->evt_detail) {
6941 		dev_err(&instance->pdev->dev,
6942 			"Failed to allocate event detail buffer\n");
6943 		return -ENOMEM;
6944 	}
6945 
6946 	if (fusion) {
6947 		fusion->ioc_init_request =
6948 			dma_alloc_coherent(&pdev->dev,
6949 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
6950 					   &fusion->ioc_init_request_phys,
6951 					   GFP_KERNEL);
6952 
6953 		if (!fusion->ioc_init_request) {
6954 			dev_err(&pdev->dev,
				"Failed to allocate ioc init request buffer\n");
6956 			return -ENOMEM;
6957 		}
6958 
6959 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
6960 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
6961 				&instance->snapdump_prop_h, GFP_KERNEL);
6962 
6963 		if (!instance->snapdump_prop)
6964 			dev_err(&pdev->dev,
6965 				"Failed to allocate snapdump properties buffer\n");
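		/* Snapdump properties are optional; continue without them. */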
6966 
6967 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
6968 							HOST_DEVICE_LIST_SZ,
6969 							&instance->host_device_list_buf_h,
6970 							GFP_KERNEL);
6971 
6972 		if (!instance->host_device_list_buf) {
6973 			dev_err(&pdev->dev,
6974 				"Failed to allocate targetid list buffer\n");
6975 			return -ENOMEM;
6976 		}
6977 
6978 	}
6979 
6980 	instance->pd_list_buf =
6981 		dma_alloc_coherent(&pdev->dev,
6982 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6983 				     &instance->pd_list_buf_h, GFP_KERNEL);
6984 
6985 	if (!instance->pd_list_buf) {
6986 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6987 		return -ENOMEM;
6988 	}
6989 
6990 	instance->ctrl_info_buf =
6991 		dma_alloc_coherent(&pdev->dev,
6992 				     sizeof(struct megasas_ctrl_info),
6993 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
6994 
6995 	if (!instance->ctrl_info_buf) {
6996 		dev_err(&pdev->dev,
6997 			"Failed to allocate controller info buffer\n");
6998 		return -ENOMEM;
6999 	}
7000 
7001 	instance->ld_list_buf =
7002 		dma_alloc_coherent(&pdev->dev,
7003 				     sizeof(struct MR_LD_LIST),
7004 				     &instance->ld_list_buf_h, GFP_KERNEL);
7005 
7006 	if (!instance->ld_list_buf) {
7007 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7008 		return -ENOMEM;
7009 	}
7010 
7011 	instance->ld_targetid_list_buf =
7012 		dma_alloc_coherent(&pdev->dev,
7013 				sizeof(struct MR_LD_TARGETID_LIST),
7014 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7015 
7016 	if (!instance->ld_targetid_list_buf) {
7017 		dev_err(&pdev->dev,
7018 			"Failed to allocate LD targetid list buffer\n");
7019 		return -ENOMEM;
7020 	}
7021 
7022 	if (!reset_devices) {
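		/*
		 * These buffers are skipped in the kdump kernel to save
		 * memory; allocation failures below are logged but are not
		 * fatal.
		 */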
7023 		instance->system_info_buf =
7024 			dma_alloc_coherent(&pdev->dev,
7025 					sizeof(struct MR_DRV_SYSTEM_INFO),
7026 					&instance->system_info_h, GFP_KERNEL);
7027 		instance->pd_info =
7028 			dma_alloc_coherent(&pdev->dev,
7029 					sizeof(struct MR_PD_INFO),
7030 					&instance->pd_info_h, GFP_KERNEL);
7031 		instance->tgt_prop =
7032 			dma_alloc_coherent(&pdev->dev,
7033 					sizeof(struct MR_TARGET_PROPERTIES),
7034 					&instance->tgt_prop_h, GFP_KERNEL);
7035 		instance->crash_dump_buf =
7036 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7037 					&instance->crash_dump_h, GFP_KERNEL);
7038 
7039 		if (!instance->system_info_buf)
7040 			dev_err(&instance->pdev->dev,
7041 				"Failed to allocate system info buffer\n");
7042 
7043 		if (!instance->pd_info)
7044 			dev_err(&instance->pdev->dev,
7045 				"Failed to allocate pd_info buffer\n");
7046 
7047 		if (!instance->tgt_prop)
7048 			dev_err(&instance->pdev->dev,
7049 				"Failed to allocate tgt_prop buffer\n");
7050 
7051 		if (!instance->crash_dump_buf)
7052 			dev_err(&instance->pdev->dev,
7053 				"Failed to allocate crash dump buffer\n");
7054 	}
7055 
7056 	return 0;
7057 }
7058 
7059 /*
7060  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7061  *					during driver load time
7062  *
 * @instance:				Adapter soft instance
7064  *
7065  */
7066 static inline
7067 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7068 {
7069 	struct pci_dev *pdev = instance->pdev;
7070 	struct fusion_context *fusion = instance->ctrl_context;
7071 
7072 	if (instance->evt_detail)
7073 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7074 				    instance->evt_detail,
7075 				    instance->evt_detail_h);
7076 
7077 	if (fusion && fusion->ioc_init_request)
7078 		dma_free_coherent(&pdev->dev,
7079 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7080 				  fusion->ioc_init_request,
7081 				  fusion->ioc_init_request_phys);
7082 
7083 	if (instance->pd_list_buf)
7084 		dma_free_coherent(&pdev->dev,
7085 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7086 				    instance->pd_list_buf,
7087 				    instance->pd_list_buf_h);
7088 
7089 	if (instance->ld_list_buf)
7090 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7091 				    instance->ld_list_buf,
7092 				    instance->ld_list_buf_h);
7093 
7094 	if (instance->ld_targetid_list_buf)
7095 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7096 				    instance->ld_targetid_list_buf,
7097 				    instance->ld_targetid_list_buf_h);
7098 
7099 	if (instance->ctrl_info_buf)
7100 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7101 				    instance->ctrl_info_buf,
7102 				    instance->ctrl_info_buf_h);
7103 
7104 	if (instance->system_info_buf)
7105 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7106 				    instance->system_info_buf,
7107 				    instance->system_info_h);
7108 
7109 	if (instance->pd_info)
7110 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7111 				    instance->pd_info, instance->pd_info_h);
7112 
7113 	if (instance->tgt_prop)
7114 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7115 				    instance->tgt_prop, instance->tgt_prop_h);
7116 
7117 	if (instance->crash_dump_buf)
7118 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7119 				    instance->crash_dump_buf,
7120 				    instance->crash_dump_h);
7121 
7122 	if (instance->snapdump_prop)
7123 		dma_free_coherent(&pdev->dev,
7124 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7125 				  instance->snapdump_prop,
7126 				  instance->snapdump_prop_h);
7127 
7128 	if (instance->host_device_list_buf)
7129 		dma_free_coherent(&pdev->dev,
7130 				  HOST_DEVICE_LIST_SZ,
7131 				  instance->host_device_list_buf,
7132 				  instance->host_device_list_buf_h);
7133 
7134 }
7135 
7136 /*
7137  * megasas_init_ctrl_params -		Initialize controller's instance
7138  *					parameters before FW init
7139  * @instance -				Adapter soft instance
7140  * @return -				void
7141  */
7142 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7143 {
7144 	instance->fw_crash_state = UNAVAILABLE;
7145 
7146 	megasas_poll_wait_aen = 0;
7147 	instance->issuepend_done = 1;
7148 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7149 
7150 	/*
7151 	 * Initialize locks and queues
7152 	 */
7153 	INIT_LIST_HEAD(&instance->cmd_pool);
7154 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7155 
7156 	atomic_set(&instance->fw_outstanding, 0);
7157 	atomic64_set(&instance->total_io_count, 0);
7158 
7159 	init_waitqueue_head(&instance->int_cmd_wait_q);
7160 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7161 
7162 	spin_lock_init(&instance->crashdump_lock);
7163 	spin_lock_init(&instance->mfi_pool_lock);
7164 	spin_lock_init(&instance->hba_lock);
7165 	spin_lock_init(&instance->stream_lock);
7166 	spin_lock_init(&instance->completion_lock);
7167 
7168 	mutex_init(&instance->reset_mutex);
7169 
7170 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7171 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7172 		instance->flag_ieee = 1;
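	/* flag_ieee selects IEEE-format SGLs on the skinny controllers. */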
7173 
7174 	megasas_dbg_lvl = 0;
7175 	instance->flag = 0;
7176 	instance->unload = 1;
7177 	instance->last_time = 0;
7178 	instance->disableOnlineCtrlReset = 1;
7179 	instance->UnevenSpanSupport = 0;
7180 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7181 	instance->msix_load_balance = false;
7182 
7183 	if (instance->adapter_type != MFI_SERIES)
7184 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7185 	else
7186 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7187 }
7188 
7189 /**
7190  * megasas_probe_one -	PCI hotplug entry point
7191  * @pdev:		PCI device structure
7192  * @id:			PCI ids of supported hotplugged adapter
7193  */
7194 static int megasas_probe_one(struct pci_dev *pdev,
7195 			     const struct pci_device_id *id)
7196 {
7197 	int rval, pos;
7198 	struct Scsi_Host *host;
7199 	struct megasas_instance *instance;
7200 	u16 control = 0;
7201 
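	/*
	 * Aero controllers advertise their secure/non-secure mode through
	 * distinct PCI device IDs; the non-secure variants are rejected
	 * below.
	 */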
7202 	switch (pdev->device) {
7203 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7204 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7205 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7206 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7207 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7208 		return 1;
7209 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7210 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7211 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7212 		break;
7213 	}
7214 
7215 	/* Reset MSI-X in the kdump kernel */
7216 	if (reset_devices) {
7217 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7218 		if (pos) {
7219 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7220 					     &control);
7221 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7222 				dev_info(&pdev->dev, "resetting MSI-X\n");
7223 				pci_write_config_word(pdev,
7224 						      pos + PCI_MSIX_FLAGS,
7225 						      control &
7226 						      ~PCI_MSIX_FLAGS_ENABLE);
7227 			}
7228 		}
7229 	}
7230 
7231 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7233 	 */
7234 	rval = pci_enable_device_mem(pdev);
7235 
7236 	if (rval) {
7237 		return rval;
7238 	}
7239 
7240 	pci_set_master(pdev);
7241 
7242 	host = scsi_host_alloc(&megasas_template,
7243 			       sizeof(struct megasas_instance));
7244 
7245 	if (!host) {
7246 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7247 		goto fail_alloc_instance;
7248 	}
7249 
7250 	instance = (struct megasas_instance *)host->hostdata;
7251 	memset(instance, 0, sizeof(*instance));
7252 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7253 
7254 	/*
7255 	 * Initialize PCI related and misc parameters
7256 	 */
7257 	instance->pdev = pdev;
7258 	instance->host = host;
7259 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7260 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7261 
7262 	megasas_set_adapter_type(instance);
7263 
7264 	/*
7265 	 * Initialize MFI Firmware
7266 	 */
7267 	if (megasas_init_fw(instance))
7268 		goto fail_init_mfi;
7269 
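	/*
	 * A non-zero requestorId means the driver is running as an SR-IOV
	 * VF; allocate the LD/VF affiliation buffer used later by
	 * megasas_get_ld_vf_affiliation().
	 */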
7270 	if (instance->requestorId) {
7271 		if (instance->PlasmaFW111) {
7272 			instance->vf_affiliation_111 =
7273 				dma_alloc_coherent(&pdev->dev,
7274 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7275 					&instance->vf_affiliation_111_h,
7276 					GFP_KERNEL);
7277 			if (!instance->vf_affiliation_111)
7278 				dev_warn(&pdev->dev, "Can't allocate "
7279 				       "memory for VF affiliation buffer\n");
7280 		} else {
7281 			instance->vf_affiliation =
7282 				dma_alloc_coherent(&pdev->dev,
7283 					(MAX_LOGICAL_DRIVES + 1) *
7284 					sizeof(struct MR_LD_VF_AFFILIATION),
7285 					&instance->vf_affiliation_h,
7286 					GFP_KERNEL);
7287 			if (!instance->vf_affiliation)
7288 				dev_warn(&pdev->dev, "Can't allocate "
7289 				       "memory for VF affiliation buffer\n");
7290 		}
7291 	}
7292 
7293 	/*
7294 	 * Store instance in PCI softstate
7295 	 */
7296 	pci_set_drvdata(pdev, instance);
7297 
7298 	/*
7299 	 * Add this controller to megasas_mgmt_info structure so that it
7300 	 * can be exported to management applications
7301 	 */
7302 	megasas_mgmt_info.count++;
7303 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7304 	megasas_mgmt_info.max_index++;
7305 
7306 	/*
7307 	 * Register with SCSI mid-layer
7308 	 */
7309 	if (megasas_io_attach(instance))
7310 		goto fail_io_attach;
7311 
7312 	instance->unload = 0;
7313 	/*
7314 	 * Trigger SCSI to scan our drives
7315 	 */
7316 	if (!instance->enable_fw_dev_list ||
7317 	    (instance->host_device_list_buf->count > 0))
7318 		scsi_scan_host(host);
7319 
7320 	/*
7321 	 * Initiate AEN (Asynchronous Event Notification)
7322 	 */
7323 	if (megasas_start_aen(instance)) {
7324 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7325 		goto fail_start_aen;
7326 	}
7327 
7328 	megasas_setup_debugfs(instance);
7329 
7330 	/* Get current SR-IOV LD/VF affiliation */
7331 	if (instance->requestorId)
7332 		megasas_get_ld_vf_affiliation(instance, 1);
7333 
7334 	return 0;
7335 
7336 fail_start_aen:
7337 fail_io_attach:
7338 	megasas_mgmt_info.count--;
7339 	megasas_mgmt_info.max_index--;
7340 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7341 
7342 	instance->instancet->disable_intr(instance);
7343 	megasas_destroy_irqs(instance);
7344 
7345 	if (instance->adapter_type != MFI_SERIES)
7346 		megasas_release_fusion(instance);
7347 	else
7348 		megasas_release_mfi(instance);
7349 	if (instance->msix_vectors)
7350 		pci_free_irq_vectors(instance->pdev);
7351 fail_init_mfi:
7352 	scsi_host_put(host);
7353 fail_alloc_instance:
7354 	pci_disable_device(pdev);
7355 
7356 	return -ENODEV;
7357 }
7358 
7359 /**
7360  * megasas_flush_cache -	Requests FW to flush all its caches
7361  * @instance:			Adapter soft state
7362  */
7363 static void megasas_flush_cache(struct megasas_instance *instance)
7364 {
7365 	struct megasas_cmd *cmd;
7366 	struct megasas_dcmd_frame *dcmd;
7367 
7368 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7369 		return;
7370 
7371 	cmd = megasas_get_cmd(instance);
7372 
7373 	if (!cmd)
7374 		return;
7375 
7376 	dcmd = &cmd->frame->dcmd;
7377 
7378 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7379 
7380 	dcmd->cmd = MFI_CMD_DCMD;
7381 	dcmd->cmd_status = 0x0;
7382 	dcmd->sge_count = 0;
7383 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7384 	dcmd->timeout = 0;
7385 	dcmd->pad_0 = 0;
7386 	dcmd->data_xfer_len = 0;
7387 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7388 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7389 
7390 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7391 			!= DCMD_SUCCESS) {
7392 		dev_err(&instance->pdev->dev,
7393 			"return from %s %d\n", __func__, __LINE__);
7394 		return;
7395 	}
7396 
7397 	megasas_return_cmd(instance, cmd);
7398 }
7399 
7400 /**
7401  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7402  * @instance:				Adapter soft state
7403  * @opcode:				Shutdown/Hibernate
7404  */
7405 static void megasas_shutdown_controller(struct megasas_instance *instance,
7406 					u32 opcode)
7407 {
7408 	struct megasas_cmd *cmd;
7409 	struct megasas_dcmd_frame *dcmd;
7410 
7411 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7412 		return;
7413 
7414 	cmd = megasas_get_cmd(instance);
7415 
7416 	if (!cmd)
7417 		return;
7418 
7419 	if (instance->aen_cmd)
7420 		megasas_issue_blocked_abort_cmd(instance,
7421 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7422 	if (instance->map_update_cmd)
7423 		megasas_issue_blocked_abort_cmd(instance,
7424 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7425 	if (instance->jbod_seq_cmd)
7426 		megasas_issue_blocked_abort_cmd(instance,
7427 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7428 
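	/*
	 * With the outstanding AEN, map update and JBOD sequence commands
	 * aborted above, build and issue the shutdown/hibernate DCMD.
	 */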
7429 	dcmd = &cmd->frame->dcmd;
7430 
7431 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7432 
7433 	dcmd->cmd = MFI_CMD_DCMD;
7434 	dcmd->cmd_status = 0x0;
7435 	dcmd->sge_count = 0;
7436 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7437 	dcmd->timeout = 0;
7438 	dcmd->pad_0 = 0;
7439 	dcmd->data_xfer_len = 0;
7440 	dcmd->opcode = cpu_to_le32(opcode);
7441 
7442 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7443 			!= DCMD_SUCCESS) {
7444 		dev_err(&instance->pdev->dev,
7445 			"return from %s %d\n", __func__, __LINE__);
7446 		return;
7447 	}
7448 
7449 	megasas_return_cmd(instance, cmd);
7450 }
7451 
7452 #ifdef CONFIG_PM
7453 /**
7454  * megasas_suspend -	driver suspend entry point
7455  * @pdev:		PCI device structure
7456  * @state:		PCI power state to suspend routine
7457  */
7458 static int
7459 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7460 {
7461 	struct megasas_instance *instance;
7462 
7463 	instance = pci_get_drvdata(pdev);
7464 
7465 	if (!instance)
7466 		return 0;
7467 
7468 	instance->unload = 1;
7469 
7470 	dev_info(&pdev->dev, "%s is called\n", __func__);
7471 
7472 	/* Shutdown SR-IOV heartbeat timer */
7473 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7474 		del_timer_sync(&instance->sriov_heartbeat_timer);
7475 
7476 	/* Stop the FW fault detection watchdog */
7477 	if (instance->adapter_type != MFI_SERIES)
7478 		megasas_fusion_stop_watchdog(instance);
7479 
7480 	megasas_flush_cache(instance);
7481 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7482 
	/* Cancel the delayed work if it is still queued */
7484 	if (instance->ev != NULL) {
7485 		struct megasas_aen_event *ev = instance->ev;
7486 		cancel_delayed_work_sync(&ev->hotplug_work);
7487 		instance->ev = NULL;
7488 	}
7489 
7490 	tasklet_kill(&instance->isr_tasklet);
7491 
7492 	pci_set_drvdata(instance->pdev, instance);
7493 	instance->instancet->disable_intr(instance);
7494 
7495 	megasas_destroy_irqs(instance);
7496 
7497 	if (instance->msix_vectors)
7498 		pci_free_irq_vectors(instance->pdev);
7499 
7500 	pci_save_state(pdev);
7501 	pci_disable_device(pdev);
7502 
7503 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7504 
7505 	return 0;
7506 }
7507 
7508 /**
7509  * megasas_resume-      driver resume entry point
7510  * @pdev:               PCI device structure
7511  */
7512 static int
7513 megasas_resume(struct pci_dev *pdev)
7514 {
7515 	int rval;
7516 	struct Scsi_Host *host;
7517 	struct megasas_instance *instance;
7518 	int irq_flags = PCI_IRQ_LEGACY;
7519 
7520 	instance = pci_get_drvdata(pdev);
7521 
7522 	if (!instance)
7523 		return 0;
7524 
7525 	host = instance->host;
7526 	pci_set_power_state(pdev, PCI_D0);
7527 	pci_enable_wake(pdev, PCI_D0, 0);
7528 	pci_restore_state(pdev);
7529 
7530 	dev_info(&pdev->dev, "%s is called\n", __func__);
7531 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7533 	 */
7534 	rval = pci_enable_device_mem(pdev);
7535 
7536 	if (rval) {
7537 		dev_err(&pdev->dev, "Enable device failed\n");
7538 		return rval;
7539 	}
7540 
7541 	pci_set_master(pdev);
7542 
7543 	/*
7544 	 * We expect the FW state to be READY
7545 	 */
7546 	if (megasas_transition_to_ready(instance, 0))
7547 		goto fail_ready_state;
7548 
7549 	if (megasas_set_dma_mask(instance))
7550 		goto fail_set_dma_mask;
7551 
7552 	/*
7553 	 * Initialize MFI Firmware
7554 	 */
7555 
7556 	atomic_set(&instance->fw_outstanding, 0);
7557 	atomic_set(&instance->ldio_outstanding, 0);
7558 
7559 	/* Now re-enable MSI-X */
7560 	if (instance->msix_vectors) {
7561 		irq_flags = PCI_IRQ_MSIX;
7562 		if (instance->smp_affinity_enable)
7563 			irq_flags |= PCI_IRQ_AFFINITY;
7564 	}
7565 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
7566 				     instance->msix_vectors ?
7567 				     instance->msix_vectors : 1, irq_flags);
7568 	if (rval < 0)
7569 		goto fail_reenable_msix;
7570 
7571 	megasas_setup_reply_map(instance);
7572 
7573 	if (instance->adapter_type != MFI_SERIES) {
7574 		megasas_reset_reply_desc(instance);
7575 		if (megasas_ioc_init_fusion(instance)) {
7576 			megasas_free_cmds(instance);
7577 			megasas_free_cmds_fusion(instance);
7578 			goto fail_init_mfi;
7579 		}
7580 		if (!megasas_get_map_info(instance))
7581 			megasas_sync_map_info(instance);
7582 	} else {
7583 		*instance->producer = 0;
7584 		*instance->consumer = 0;
7585 		if (megasas_issue_init_mfi(instance))
7586 			goto fail_init_mfi;
7587 	}
7588 
7589 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7590 		goto fail_init_mfi;
7591 
7592 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7593 		     (unsigned long)instance);
7594 
7595 	if (instance->msix_vectors ?
7596 			megasas_setup_irqs_msix(instance, 0) :
7597 			megasas_setup_irqs_ioapic(instance))
7598 		goto fail_init_mfi;
7599 
7600 	if (instance->adapter_type != MFI_SERIES)
7601 		megasas_setup_irq_poll(instance);
7602 
7603 	/* Re-launch SR-IOV heartbeat timer */
7604 	if (instance->requestorId) {
7605 		if (!megasas_sriov_start_heartbeat(instance, 0))
7606 			megasas_start_timer(instance);
7607 		else {
7608 			instance->skip_heartbeat_timer_del = 1;
7609 			goto fail_init_mfi;
7610 		}
7611 	}
7612 
7613 	instance->instancet->enable_intr(instance);
7614 	megasas_setup_jbod_map(instance);
7615 	instance->unload = 0;
7616 
7617 	/*
7618 	 * Initiate AEN (Asynchronous Event Notification)
7619 	 */
7620 	if (megasas_start_aen(instance))
7621 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7622 
7623 	/* Re-launch FW fault watchdog */
7624 	if (instance->adapter_type != MFI_SERIES)
7625 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7626 			goto fail_start_watchdog;
7627 
7628 	return 0;
7629 
7630 fail_start_watchdog:
7631 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7632 		del_timer_sync(&instance->sriov_heartbeat_timer);
7633 fail_init_mfi:
7634 	megasas_free_ctrl_dma_buffers(instance);
7635 	megasas_free_ctrl_mem(instance);
7636 	scsi_host_put(host);
7637 
7638 fail_reenable_msix:
7639 fail_set_dma_mask:
7640 fail_ready_state:
7641 
7642 	pci_disable_device(pdev);
7643 
7644 	return -ENODEV;
7645 }
7646 #else
7647 #define megasas_suspend	NULL
7648 #define megasas_resume	NULL
7649 #endif
7650 
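/*
 * megasas_wait_for_adapter_operational -	Wait for up to twice
 *						MEGASAS_RESET_WAIT_TIME seconds
 *						for the HBA to become
 *						operational or report a
 *						critical error.
 * @instance:				Adapter soft state
 * return:				0 if operational, 1 otherwise
 */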
7651 static inline int
7652 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7653 {
7654 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7655 	int i;
7656 	u8 adp_state;
7657 
7658 	for (i = 0; i < wait_time; i++) {
7659 		adp_state = atomic_read(&instance->adprecovery);
7660 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7661 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7662 			break;
7663 
7664 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7665 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7666 
7667 		msleep(1000);
7668 	}
7669 
7670 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7671 		dev_info(&instance->pdev->dev,
7672 			 "%s HBA failed to become operational, adp_state %d\n",
7673 			 __func__, adp_state);
7674 		return 1;
7675 	}
7676 
7677 	return 0;
7678 }
7679 
7680 /**
 * megasas_detach_one -	PCI hot-unplug entry point
7682  * @pdev:		PCI device structure
7683  */
7684 static void megasas_detach_one(struct pci_dev *pdev)
7685 {
7686 	int i;
7687 	struct Scsi_Host *host;
7688 	struct megasas_instance *instance;
7689 	struct fusion_context *fusion;
7690 	u32 pd_seq_map_sz;
7691 
7692 	instance = pci_get_drvdata(pdev);
7693 
7694 	if (!instance)
7695 		return;
7696 
7697 	host = instance->host;
7698 	fusion = instance->ctrl_context;
7699 
7700 	/* Shutdown SR-IOV heartbeat timer */
7701 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7702 		del_timer_sync(&instance->sriov_heartbeat_timer);
7703 
7704 	/* Stop the FW fault detection watchdog */
7705 	if (instance->adapter_type != MFI_SERIES)
7706 		megasas_fusion_stop_watchdog(instance);
7707 
7708 	if (instance->fw_crash_state != UNAVAILABLE)
7709 		megasas_free_host_crash_buffer(instance);
7710 	scsi_remove_host(instance->host);
7711 	instance->unload = 1;
7712 
7713 	if (megasas_wait_for_adapter_operational(instance))
7714 		goto skip_firing_dcmds;
7715 
7716 	megasas_flush_cache(instance);
7717 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7718 
7719 skip_firing_dcmds:
	/* Cancel the delayed work if it is still queued */
7721 	if (instance->ev != NULL) {
7722 		struct megasas_aen_event *ev = instance->ev;
7723 		cancel_delayed_work_sync(&ev->hotplug_work);
7724 		instance->ev = NULL;
7725 	}
7726 
7727 	/* cancel all wait events */
7728 	wake_up_all(&instance->int_cmd_wait_q);
7729 
7730 	tasklet_kill(&instance->isr_tasklet);
7731 
7732 	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement max_index; the array is allowed to be sparse.
7735 	 */
7736 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7737 		if (megasas_mgmt_info.instance[i] == instance) {
7738 			megasas_mgmt_info.count--;
7739 			megasas_mgmt_info.instance[i] = NULL;
7740 
7741 			break;
7742 		}
7743 	}
7744 
7745 	instance->instancet->disable_intr(instance);
7746 
7747 	megasas_destroy_irqs(instance);
7748 
7749 	if (instance->msix_vectors)
7750 		pci_free_irq_vectors(instance->pdev);
7751 
7752 	if (instance->adapter_type >= VENTURA_SERIES) {
7753 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7754 			kfree(fusion->stream_detect_by_ld[i]);
7755 		kfree(fusion->stream_detect_by_ld);
7756 		fusion->stream_detect_by_ld = NULL;
7757 	}
7758 
7759 
7760 	if (instance->adapter_type != MFI_SERIES) {
7761 		megasas_release_fusion(instance);
7762 			pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7763 				(sizeof(struct MR_PD_CFG_SEQ) *
7764 					(MAX_PHYSICAL_DEVICES - 1));
7765 		for (i = 0; i < 2 ; i++) {
7766 			if (fusion->ld_map[i])
7767 				dma_free_coherent(&instance->pdev->dev,
7768 						  fusion->max_map_sz,
7769 						  fusion->ld_map[i],
7770 						  fusion->ld_map_phys[i]);
7771 			if (fusion->ld_drv_map[i]) {
7772 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7773 					vfree(fusion->ld_drv_map[i]);
7774 				else
7775 					free_pages((ulong)fusion->ld_drv_map[i],
7776 						   fusion->drv_map_pages);
7777 			}
7778 
7779 			if (fusion->pd_seq_sync[i])
7780 				dma_free_coherent(&instance->pdev->dev,
7781 					pd_seq_map_sz,
7782 					fusion->pd_seq_sync[i],
7783 					fusion->pd_seq_phys[i]);
7784 		}
7785 	} else {
7786 		megasas_release_mfi(instance);
7787 	}
7788 
7789 	if (instance->vf_affiliation)
7790 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7791 				    sizeof(struct MR_LD_VF_AFFILIATION),
7792 				    instance->vf_affiliation,
7793 				    instance->vf_affiliation_h);
7794 
7795 	if (instance->vf_affiliation_111)
7796 		dma_free_coherent(&pdev->dev,
7797 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7798 				    instance->vf_affiliation_111,
7799 				    instance->vf_affiliation_111_h);
7800 
7801 	if (instance->hb_host_mem)
7802 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7803 				    instance->hb_host_mem,
7804 				    instance->hb_host_mem_h);
7805 
7806 	megasas_free_ctrl_dma_buffers(instance);
7807 
7808 	megasas_free_ctrl_mem(instance);
7809 
7810 	megasas_destroy_debugfs(instance);
7811 
7812 	scsi_host_put(host);
7813 
7814 	pci_disable_device(pdev);
7815 }
7816 
7817 /**
7818  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
7820  */
7821 static void megasas_shutdown(struct pci_dev *pdev)
7822 {
7823 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7824 
7825 	if (!instance)
7826 		return;
7827 
7828 	instance->unload = 1;
7829 
7830 	if (megasas_wait_for_adapter_operational(instance))
7831 		goto skip_firing_dcmds;
7832 
7833 	megasas_flush_cache(instance);
7834 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7835 
7836 skip_firing_dcmds:
7837 	instance->instancet->disable_intr(instance);
7838 	megasas_destroy_irqs(instance);
7839 
7840 	if (instance->msix_vectors)
7841 		pci_free_irq_vectors(instance->pdev);
7842 }
7843 
7844 /**
7845  * megasas_mgmt_open -	char node "open" entry point
7846  */
7847 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7848 {
7849 	/*
7850 	 * Allow only those users with admin rights
7851 	 */
7852 	if (!capable(CAP_SYS_ADMIN))
7853 		return -EACCES;
7854 
7855 	return 0;
7856 }
7857 
7858 /**
7859  * megasas_mgmt_fasync -	Async notifier registration from applications
7860  *
7861  * This function adds the calling process to a driver global queue. When an
7862  * event occurs, SIGIO will be sent to all processes in this queue.
7863  */
7864 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7865 {
7866 	int rc;
7867 
7868 	mutex_lock(&megasas_async_queue_mutex);
7869 
7870 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7871 
7872 	mutex_unlock(&megasas_async_queue_mutex);
7873 
7874 	if (rc >= 0) {
		/* For a sanity check when we later get the ioctl */
7876 		filep->private_data = filep;
7877 		return 0;
7878 	}
7879 
7880 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7881 
7882 	return rc;
7883 }
7884 
7885 /**
 * megasas_mgmt_poll -	char node "poll" entry point
 */
7888 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7889 {
7890 	__poll_t mask;
7891 	unsigned long flags;
7892 
7893 	poll_wait(file, &megasas_poll_wait, wait);
7894 	spin_lock_irqsave(&poll_aen_lock, flags);
7895 	if (megasas_poll_wait_aen)
7896 		mask = (EPOLLIN | EPOLLRDNORM);
7897 	else
7898 		mask = 0;
7899 	megasas_poll_wait_aen = 0;
7900 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7901 	return mask;
7902 }
7903 
7904 /*
7905  * megasas_set_crash_dump_params_ioctl:
7906  *		Send CRASH_DUMP_MODE DCMD to all controllers
7907  * @cmd:	MFI command frame
7908  */
7909 
7910 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7911 {
7912 	struct megasas_instance *local_instance;
7913 	int i, error = 0;
7914 	int crash_support;
7915 
7916 	crash_support = cmd->frame->dcmd.mbox.w[0];
7917 
7918 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7919 		local_instance = megasas_mgmt_info.instance[i];
7920 		if (local_instance && local_instance->crash_dump_drv_support) {
7921 			if ((atomic_read(&local_instance->adprecovery) ==
7922 				MEGASAS_HBA_OPERATIONAL) &&
7923 				!megasas_set_crash_dump_params(local_instance,
7924 					crash_support)) {
7925 				local_instance->crash_dump_app_support =
7926 					crash_support;
7927 				dev_info(&local_instance->pdev->dev,
7928 					"Application firmware crash "
7929 					"dump mode set success\n");
7930 				error = 0;
7931 			} else {
7932 				dev_info(&local_instance->pdev->dev,
7933 					"Application firmware crash "
7934 					"dump mode set failed\n");
7935 				error = -1;
7936 			}
7937 		}
7938 	}
7939 	return error;
7940 }
7941 
7942 /**
7943  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
7944  * @instance:			Adapter soft state
7945  * @argp:			User's ioctl packet
7946  */
7947 static int
7948 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7949 		      struct megasas_iocpacket __user * user_ioc,
7950 		      struct megasas_iocpacket *ioc)
7951 {
7952 	struct megasas_sge64 *kern_sge64 = NULL;
7953 	struct megasas_sge32 *kern_sge32 = NULL;
7954 	struct megasas_cmd *cmd;
7955 	void *kbuff_arr[MAX_IOCTL_SGE];
7956 	dma_addr_t buf_handle = 0;
7957 	int error = 0, i;
7958 	void *sense = NULL;
7959 	dma_addr_t sense_handle;
7960 	unsigned long *sense_ptr;
7961 	u32 opcode = 0;
7962 
7963 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
7964 
7965 	if (ioc->sge_count > MAX_IOCTL_SGE) {
7966 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
7967 		       ioc->sge_count, MAX_IOCTL_SGE);
7968 		return -EINVAL;
7969 	}
7970 
7971 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7972 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7973 	    !instance->support_nvme_passthru) ||
7974 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
7975 	    !instance->support_pci_lane_margining)) {
7976 		dev_err(&instance->pdev->dev,
7977 			"Received invalid ioctl command 0x%x\n",
7978 			ioc->frame.hdr.cmd);
7979 		return -ENOTSUPP;
7980 	}
7981 
7982 	cmd = megasas_get_cmd(instance);
7983 	if (!cmd) {
7984 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7985 		return -ENOMEM;
7986 	}
7987 
7988 	/*
7989 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
7990 	 * frames into our cmd's frames. cmd->frame's context will get
	 * overwritten when we copy from the user's frames, so restore that
	 * value separately afterwards.
7993 	 */
7994 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7995 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7996 	cmd->frame->hdr.pad_0 = 0;
7997 
7998 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7999 
8000 	if (instance->consistent_mask_64bit)
8001 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8002 				       MFI_FRAME_SENSE64));
8003 	else
8004 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8005 					       MFI_FRAME_SENSE64));
8006 
8007 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8008 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8009 
8010 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8011 		mutex_lock(&instance->reset_mutex);
8012 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8013 			megasas_return_cmd(instance, cmd);
8014 			mutex_unlock(&instance->reset_mutex);
8015 			return -1;
8016 		}
8017 		mutex_unlock(&instance->reset_mutex);
8018 	}
8019 
8020 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8021 		error = megasas_set_crash_dump_params_ioctl(cmd);
8022 		megasas_return_cmd(instance, cmd);
8023 		return error;
8024 	}
8025 
8026 	/*
8027 	 * The management interface between applications and the fw uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes,
	 * etc. are accomplished through different kinds of MFI frames. The
8030 	 * driver needs to care only about substituting user buffers with
8031 	 * kernel buffers in SGLs. The location of SGL is embedded in the
8032 	 * struct iocpacket itself.
8033 	 */
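	/*
	 * Illustrative user-space sketch (hypothetical device node name,
	 * assuming the exported struct megasas_iocpacket layout):
	 *
	 *	int fd = open("/dev/megaraid_sas_ioctl", O_RDWR);
	 *	struct megasas_iocpacket p = { 0 };
	 *	p.host_no = 0;
	 *	p.sge_count = 1;
	 *	p.sgl[0].iov_base = user_buf;
	 *	p.sgl[0].iov_len = user_buf_len;
	 *	ioctl(fd, MEGASAS_IOC_FIRMWARE, &p);
	 *
	 * The loop below then replaces each user iovec with a kernel bounce
	 * buffer before the frame is handed to firmware.
	 */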
8034 	if (instance->consistent_mask_64bit)
8035 		kern_sge64 = (struct megasas_sge64 *)
8036 			((unsigned long)cmd->frame + ioc->sgl_off);
8037 	else
8038 		kern_sge32 = (struct megasas_sge32 *)
8039 			((unsigned long)cmd->frame + ioc->sgl_off);
8040 
8041 	/*
8042 	 * For each user buffer, create a mirror buffer and copy in
8043 	 */
8044 	for (i = 0; i < ioc->sge_count; i++) {
8045 		if (!ioc->sgl[i].iov_len)
8046 			continue;
8047 
8048 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8049 						    ioc->sgl[i].iov_len,
8050 						    &buf_handle, GFP_KERNEL);
8051 		if (!kbuff_arr[i]) {
8052 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8053 			       "kernel SGL buffer for IOCTL\n");
8054 			error = -ENOMEM;
8055 			goto out;
8056 		}
8057 
		/*
		 * Program the bounce buffer address into the SGE, using
		 * 64-bit SGEs only when the coherent DMA mask is 64 bit.
		 */
8062 		if (instance->consistent_mask_64bit) {
8063 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8064 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8065 		} else {
8066 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8067 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8068 		}
8069 
8070 		/*
8071 		 * We created a kernel buffer corresponding to the
8072 		 * user buffer. Now copy in from the user buffer
8073 		 */
8074 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8075 				   (u32) (ioc->sgl[i].iov_len))) {
8076 			error = -EFAULT;
8077 			goto out;
8078 		}
8079 	}
8080 
8081 	if (ioc->sense_len) {
8082 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8083 					     &sense_handle, GFP_KERNEL);
8084 		if (!sense) {
8085 			error = -ENOMEM;
8086 			goto out;
8087 		}
8088 
8089 		sense_ptr =
8090 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8091 		if (instance->consistent_mask_64bit)
8092 			*sense_ptr = cpu_to_le64(sense_handle);
8093 		else
8094 			*sense_ptr = cpu_to_le32(sense_handle);
8095 	}
8096 
8097 	/*
8098 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8099 	 * cmd to the SCSI mid-layer
8100 	 */
8101 	cmd->sync_cmd = 1;
8102 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
8103 		cmd->sync_cmd = 0;
8104 		dev_err(&instance->pdev->dev,
8105 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8106 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8107 			cmd->cmd_status_drv);
8108 		return -EBUSY;
8109 	}
8110 
8111 	cmd->sync_cmd = 0;
8112 
8113 	if (instance->unload == 1) {
		dev_info(&instance->pdev->dev, "Driver unload is in progress; "
			"not submitting data to application\n");
8116 		goto out;
8117 	}
8118 	/*
8119 	 * copy out the kernel buffers to user buffers
8120 	 */
8121 	for (i = 0; i < ioc->sge_count; i++) {
8122 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8123 				 ioc->sgl[i].iov_len)) {
8124 			error = -EFAULT;
8125 			goto out;
8126 		}
8127 	}
8128 
8129 	/*
8130 	 * copy out the sense
8131 	 */
8132 	if (ioc->sense_len) {
8133 		/*
8134 		 * sense_ptr points to the location that has the user
8135 		 * sense buffer address
8136 		 */
8137 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8138 				ioc->sense_off);
8139 
8140 		if (copy_to_user((void __user *)((unsigned long)
8141 				 get_unaligned((unsigned long *)sense_ptr)),
8142 				 sense, ioc->sense_len)) {
8143 			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8144 					"sense data\n");
8145 			error = -EFAULT;
8146 			goto out;
8147 		}
8148 	}
8149 
8150 	/*
8151 	 * copy the status codes returned by the fw
8152 	 */
8153 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8154 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8155 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8156 		error = -EFAULT;
8157 	}
8158 
8159 out:
8160 	if (sense) {
8161 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8162 				    sense, sense_handle);
8163 	}
8164 
8165 	for (i = 0; i < ioc->sge_count; i++) {
8166 		if (kbuff_arr[i]) {
8167 			if (instance->consistent_mask_64bit)
8168 				dma_free_coherent(&instance->pdev->dev,
8169 					le32_to_cpu(kern_sge64[i].length),
8170 					kbuff_arr[i],
8171 					le64_to_cpu(kern_sge64[i].phys_addr));
8172 			else
8173 				dma_free_coherent(&instance->pdev->dev,
8174 					le32_to_cpu(kern_sge32[i].length),
8175 					kbuff_arr[i],
8176 					le32_to_cpu(kern_sge32[i].phys_addr));
8177 			kbuff_arr[i] = NULL;
8178 		}
8179 	}
8180 
8181 	megasas_return_cmd(instance, cmd);
8182 	return error;
8183 }
8184 
8185 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8186 {
8187 	struct megasas_iocpacket __user *user_ioc =
8188 	    (struct megasas_iocpacket __user *)arg;
8189 	struct megasas_iocpacket *ioc;
8190 	struct megasas_instance *instance;
8191 	int error;
8192 
8193 	ioc = memdup_user(user_ioc, sizeof(*ioc));
8194 	if (IS_ERR(ioc))
8195 		return PTR_ERR(ioc);
8196 
8197 	instance = megasas_lookup_instance(ioc->host_no);
8198 	if (!instance) {
8199 		error = -ENODEV;
8200 		goto out_kfree_ioc;
8201 	}
8202 
8203 	/* Block ioctls in VF mode */
8204 	if (instance->requestorId && !allow_vf_ioctls) {
8205 		error = -ENODEV;
8206 		goto out_kfree_ioc;
8207 	}
8208 
8209 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8210 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8211 		error = -ENODEV;
8212 		goto out_kfree_ioc;
8213 	}
8214 
8215 	if (instance->unload == 1) {
8216 		error = -ENODEV;
8217 		goto out_kfree_ioc;
8218 	}
8219 
8220 	if (down_interruptible(&instance->ioctl_sem)) {
8221 		error = -ERESTARTSYS;
8222 		goto out_kfree_ioc;
8223 	}
8224 
8225 	if  (megasas_wait_for_adapter_operational(instance)) {
8226 		error = -ENODEV;
8227 		goto out_up;
8228 	}
8229 
8230 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8231 out_up:
8232 	up(&instance->ioctl_sem);
8233 
8234 out_kfree_ioc:
8235 	kfree(ioc);
8236 	return error;
8237 }
8238 
8239 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8240 {
8241 	struct megasas_instance *instance;
8242 	struct megasas_aen aen;
8243 	int error;
8244 
8245 	if (file->private_data != file) {
8246 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8247 		       "called first\n");
8248 		return -EINVAL;
8249 	}
8250 
8251 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8252 		return -EFAULT;
8253 
8254 	instance = megasas_lookup_instance(aen.host_no);
8255 
8256 	if (!instance)
8257 		return -ENODEV;
8258 
8259 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8260 		return -ENODEV;
8261 	}
8262 
8263 	if (instance->unload == 1) {
8264 		return -ENODEV;
8265 	}
8266 
8267 	if  (megasas_wait_for_adapter_operational(instance))
8268 		return -ENODEV;
8269 
8270 	mutex_lock(&instance->reset_mutex);
8271 	error = megasas_register_aen(instance, aen.seq_num,
8272 				     aen.class_locale_word);
8273 	mutex_unlock(&instance->reset_mutex);
8274 	return error;
8275 }
8276 
8277 /**
8278  * megasas_mgmt_ioctl -	char node ioctl entry point
8279  */
8280 static long
8281 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8282 {
8283 	switch (cmd) {
8284 	case MEGASAS_IOC_FIRMWARE:
8285 		return megasas_mgmt_ioctl_fw(file, arg);
8286 
8287 	case MEGASAS_IOC_GET_AEN:
8288 		return megasas_mgmt_ioctl_aen(file, arg);
8289 	}
8290 
8291 	return -ENOTTY;
8292 }
8293 
8294 #ifdef CONFIG_COMPAT
8295 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8296 {
8297 	struct compat_megasas_iocpacket __user *cioc =
8298 	    (struct compat_megasas_iocpacket __user *)arg;
8299 	struct megasas_iocpacket __user *ioc =
8300 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8301 	int i;
8302 	int error = 0;
8303 	compat_uptr_t ptr;
8304 	u32 local_sense_off;
8305 	u32 local_sense_len;
8306 	u32 user_sense_off;
8307 
8308 	if (clear_user(ioc, sizeof(*ioc)))
8309 		return -EFAULT;
8310 
8311 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8312 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8313 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8314 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8315 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8316 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8317 		return -EFAULT;
8318 
8319 	/*
8320 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8321 	 * sense_len is not null, so prepare the 64bit value under
8322 	 * the same condition.
8323 	 */
8324 	if (get_user(local_sense_off, &ioc->sense_off) ||
8325 		get_user(local_sense_len, &ioc->sense_len) ||
8326 		get_user(user_sense_off, &cioc->sense_off))
8327 		return -EFAULT;
8328 
8329 	if (local_sense_off != user_sense_off)
8330 		return -EINVAL;
8331 
8332 	if (local_sense_len) {
8333 		void __user **sense_ioc_ptr =
8334 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8335 		compat_uptr_t *sense_cioc_ptr =
8336 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8337 		if (get_user(ptr, sense_cioc_ptr) ||
8338 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8339 			return -EFAULT;
8340 	}
8341 
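	/*
	 * Widen each 32-bit iov_base from the compat packet into a native
	 * user pointer; iov_len is copied as a compat_size_t.
	 */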
8342 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8343 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8344 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8345 		    copy_in_user(&ioc->sgl[i].iov_len,
8346 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8347 			return -EFAULT;
8348 	}
8349 
8350 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8351 
8352 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8353 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8354 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8355 		return -EFAULT;
8356 	}
8357 	return error;
8358 }
8359 
8360 static long
8361 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8362 			  unsigned long arg)
8363 {
8364 	switch (cmd) {
8365 	case MEGASAS_IOC_FIRMWARE32:
8366 		return megasas_mgmt_compat_ioctl_fw(file, arg);
8367 	case MEGASAS_IOC_GET_AEN:
8368 		return megasas_mgmt_ioctl_aen(file, arg);
8369 	}
8370 
8371 	return -ENOTTY;
8372 }
8373 #endif
8374 
8375 /*
8376  * File operations structure for management interface
8377  */
8378 static const struct file_operations megasas_mgmt_fops = {
8379 	.owner = THIS_MODULE,
8380 	.open = megasas_mgmt_open,
8381 	.fasync = megasas_mgmt_fasync,
8382 	.unlocked_ioctl = megasas_mgmt_ioctl,
8383 	.poll = megasas_mgmt_poll,
8384 #ifdef CONFIG_COMPAT
8385 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8386 #endif
8387 	.llseek = noop_llseek,
8388 };
8389 
8390 /*
8391  * PCI hotplug support registration structure
8392  */
8393 static struct pci_driver megasas_pci_driver = {
8394 
8395 	.name = "megaraid_sas",
8396 	.id_table = megasas_pci_table,
8397 	.probe = megasas_probe_one,
8398 	.remove = megasas_detach_one,
8399 	.suspend = megasas_suspend,
8400 	.resume = megasas_resume,
8401 	.shutdown = megasas_shutdown,
8402 };
8403 
8404 /*
8405  * Sysfs driver attributes
8406  */
8407 static ssize_t version_show(struct device_driver *dd, char *buf)
8408 {
8409 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8410 			MEGASAS_VERSION);
8411 }
8412 static DRIVER_ATTR_RO(version);
8413 
8414 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8415 {
8416 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8417 		MEGASAS_RELDATE);
8418 }
8419 static DRIVER_ATTR_RO(release_date);
8420 
8421 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8422 {
8423 	return sprintf(buf, "%u\n", support_poll_for_event);
8424 }
8425 static DRIVER_ATTR_RO(support_poll_for_event);
8426 
8427 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8428 {
8429 	return sprintf(buf, "%u\n", support_device_change);
8430 }
8431 static DRIVER_ATTR_RO(support_device_change);
8432 
8433 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8434 {
8435 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8436 }
8437 
8438 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8439 			     size_t count)
8440 {
8441 	int retval = count;
8442 
8443 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8444 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8445 		retval = -EINVAL;
8446 	}
8447 	return retval;
8448 }
8449 static DRIVER_ATTR_RW(dbg_lvl);
8450 
8451 static ssize_t
8452 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8453 {
8454 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8455 }
8456 
8457 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8458 
8459 static ssize_t
8460 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8461 {
8462 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8463 }
8464 
8465 static DRIVER_ATTR_RO(support_pci_lane_margining);
8466 
8467 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8468 {
8469 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8470 	scsi_remove_device(sdev);
8471 	scsi_device_put(sdev);
8472 }
8473 
8474 /**
8475  * megasas_update_device_list -	Update the PD and LD device list from FW
8476  *				after an AEN event notification
8477  * @instance:			Adapter soft state
8478  * @event_type:			Indicates type of event (PD or LD event)
8479  *
8480  * @return:			Success or failure
8481  *
 * Issue DCMDs to the firmware to update the driver's internal device list.
8483  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8484  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8485  */
8486 static
8487 int megasas_update_device_list(struct megasas_instance *instance,
8488 			       int event_type)
8489 {
8490 	int dcmd_ret = DCMD_SUCCESS;
8491 
8492 	if (instance->enable_fw_dev_list) {
8493 		dcmd_ret = megasas_host_device_list_query(instance, false);
8494 		if (dcmd_ret != DCMD_SUCCESS)
8495 			goto out;
8496 	} else {
8497 		if (event_type & SCAN_PD_CHANNEL) {
8498 			dcmd_ret = megasas_get_pd_list(instance);
8499 
8500 			if (dcmd_ret != DCMD_SUCCESS)
8501 				goto out;
8502 		}
8503 
8504 		if (event_type & SCAN_VD_CHANNEL) {
8505 			if (!instance->requestorId ||
8506 			    (instance->requestorId &&
8507 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8508 				dcmd_ret = megasas_ld_list_query(instance,
8509 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8510 				if (dcmd_ret != DCMD_SUCCESS)
8511 					goto out;
8512 			}
8513 		}
8514 	}
8515 
8516 out:
8517 	return dcmd_ret;
8518 }
8519 
8520 /**
8521  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8522  *				after an AEN event notification
8523  * @instance:			Adapter soft state
8524  * @scan_type:			Indicates type of devices (PD/LD) to add
8525  * @return			void
8526  */
8527 static
8528 void megasas_add_remove_devices(struct megasas_instance *instance,
8529 				int scan_type)
8530 {
8531 	int i, j;
8532 	u16 pd_index = 0;
8533 	u16 ld_index = 0;
8534 	u16 channel = 0, id = 0;
8535 	struct Scsi_Host *host;
8536 	struct scsi_device *sdev1;
8537 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8538 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8539 
8540 	host = instance->host;
8541 
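	/*
	 * When the firmware-provided device list is used, each flat
	 * target_id is split into a channel (target_id /
	 * MEGASAS_MAX_DEV_PER_CHANNEL) and an id (target_id %
	 * MEGASAS_MAX_DEV_PER_CHANNEL); logical drives are placed on
	 * channels offset by MEGASAS_MAX_PD_CHANNELS.
	 */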
8542 	if (instance->enable_fw_dev_list) {
8543 		targetid_list = instance->host_device_list_buf;
8544 		for (i = 0; i < targetid_list->count; i++) {
8545 			targetid_entry = &targetid_list->host_device_list[i];
8546 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8547 				channel = le16_to_cpu(targetid_entry->target_id) /
8548 						MEGASAS_MAX_DEV_PER_CHANNEL;
8549 				id = le16_to_cpu(targetid_entry->target_id) %
8550 						MEGASAS_MAX_DEV_PER_CHANNEL;
8551 			} else {
8552 				channel = MEGASAS_MAX_PD_CHANNELS +
8553 					  (le16_to_cpu(targetid_entry->target_id) /
8554 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8555 				id = le16_to_cpu(targetid_entry->target_id) %
8556 						MEGASAS_MAX_DEV_PER_CHANNEL;
8557 			}
8558 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8559 			if (!sdev1) {
8560 				scsi_add_device(host, channel, id, 0);
8561 			} else {
8562 				scsi_device_put(sdev1);
8563 			}
8564 		}
8565 	}
8566 
8567 	if (scan_type & SCAN_PD_CHANNEL) {
8568 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8569 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8570 				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8571 				sdev1 = scsi_device_lookup(host, i, j, 0);
8572 				if (instance->pd_list[pd_index].driveState ==
8573 							MR_PD_STATE_SYSTEM) {
8574 					if (!sdev1)
8575 						scsi_add_device(host, i, j, 0);
8576 					else
8577 						scsi_device_put(sdev1);
8578 				} else {
8579 					if (sdev1)
8580 						megasas_remove_scsi_device(sdev1);
8581 				}
8582 			}
8583 		}
8584 	}
8585 
8586 	if (scan_type & SCAN_VD_CHANNEL) {
8587 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8588 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8589 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8590 				sdev1 = scsi_device_lookup(host,
8591 						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8592 				if (instance->ld_ids[ld_index] != 0xff) {
8593 					if (!sdev1)
8594 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8595 					else
8596 						scsi_device_put(sdev1);
8597 				} else {
8598 					if (sdev1)
8599 						megasas_remove_scsi_device(sdev1);
8600 				}
8601 			}
8602 		}
8603 	}
8604 
8605 }
8606 
8607 static void
8608 megasas_aen_polling(struct work_struct *work)
8609 {
8610 	struct megasas_aen_event *ev =
8611 		container_of(work, struct megasas_aen_event, hotplug_work.work);
8612 	struct megasas_instance *instance = ev->instance;
8613 	union megasas_evt_class_locale class_locale;
8614 	int event_type = 0;
8615 	u32 seq_num;
8616 	int error;
8617 	u8  dcmd_ret = DCMD_SUCCESS;
8618 
8619 	if (!instance) {
8620 		printk(KERN_ERR "invalid instance!\n");
8621 		kfree(ev);
8622 		return;
8623 	}
8624 
8625 	/* Don't run the event workqueue thread if OCR is running */
8626 	mutex_lock(&instance->reset_mutex);
8627 
8628 	instance->ev = NULL;
8629 	if (instance->evt_detail) {
8630 		megasas_decode_evt(instance);
8631 
8632 		switch (le32_to_cpu(instance->evt_detail->code)) {
8633 
8634 		case MR_EVT_PD_INSERTED:
8635 		case MR_EVT_PD_REMOVED:
8636 			event_type = SCAN_PD_CHANNEL;
8637 			break;
8638 
8639 		case MR_EVT_LD_OFFLINE:
8640 		case MR_EVT_CFG_CLEARED:
8641 		case MR_EVT_LD_DELETED:
8642 		case MR_EVT_LD_CREATED:
8643 			event_type = SCAN_VD_CHANNEL;
8644 			break;
8645 
8646 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8647 		case MR_EVT_FOREIGN_CFG_IMPORTED:
8648 		case MR_EVT_LD_STATE_CHANGE:
8649 			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8650 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8651 				instance->host->host_no);
8652 			break;
8653 
8654 		case MR_EVT_CTRL_PROP_CHANGED:
8655 			dcmd_ret = megasas_get_ctrl_info(instance);
8656 			if (dcmd_ret == DCMD_SUCCESS &&
8657 			    instance->snapdump_wait_time) {
8658 				megasas_get_snapdump_properties(instance);
8659 				dev_info(&instance->pdev->dev,
8660 					 "Snap dump wait time\t: %d\n",
8661 					 instance->snapdump_wait_time);
8662 			}
8663 			break;
8664 		default:
8665 			event_type = 0;
8666 			break;
8667 		}
8668 	} else {
8669 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8670 		mutex_unlock(&instance->reset_mutex);
8671 		kfree(ev);
8672 		return;
8673 	}
8674 
8675 	if (event_type)
8676 		dcmd_ret = megasas_update_device_list(instance, event_type);
8677 
8678 	mutex_unlock(&instance->reset_mutex);
8679 
8680 	if (event_type && dcmd_ret == DCMD_SUCCESS)
8681 		megasas_add_remove_devices(instance, event_type);
8682 
8683 	if (dcmd_ret == DCMD_SUCCESS)
8684 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8685 	else
8686 		seq_num = instance->last_seq_num;
8687 
8688 	/* Register AEN with FW for latest sequence number plus 1 */
8689 	class_locale.members.reserved = 0;
8690 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
8691 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
8692 
8693 	if (instance->aen_cmd != NULL) {
8694 		kfree(ev);
8695 		return;
8696 	}
8697 
8698 	mutex_lock(&instance->reset_mutex);
8699 	error = megasas_register_aen(instance, seq_num,
8700 					class_locale.word);
8701 	if (error)
8702 		dev_err(&instance->pdev->dev,
8703 			"register aen failed error %x\n", error);
8704 
8705 	mutex_unlock(&instance->reset_mutex);
8706 	kfree(ev);
8707 }
8708 
8709 /**
8710  * megasas_init - Driver load entry point
8711  */
8712 static int __init megasas_init(void)
8713 {
8714 	int rval;
8715 
8716 	/*
	 * When booted in a kdump kernel, minimize the memory footprint by
	 * disabling a few features
8719 	 */
8720 	if (reset_devices) {
8721 		msix_vectors = 1;
8722 		rdpq_enable = 0;
8723 		dual_qdepth_disable = 1;
8724 	}
8725 
8726 	/*
8727 	 * Announce driver version and other information
8728 	 */
8729 	pr_info("megasas: %s\n", MEGASAS_VERSION);
8730 
8731 	spin_lock_init(&poll_aen_lock);
8732 
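	/* Capability flags exported to management applications via sysfs */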
8733 	support_poll_for_event = 2;
8734 	support_device_change = 1;
8735 	support_nvme_encapsulation = true;
8736 	support_pci_lane_margining = true;
8737 
8738 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
8739 
8740 	/*
8741 	 * Register character device node
8742 	 */
8743 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
8744 
8745 	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to register management char device node\n");
8747 		return rval;
8748 	}
8749 
8750 	megasas_mgmt_majorno = rval;
8751 
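	/* Set up the driver's debugfs entries before any adapters are probed */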
8752 	megasas_init_debugfs();
8753 
8754 	/*
8755 	 * Register ourselves as PCI hotplug module
8756 	 */
8757 	rval = pci_register_driver(&megasas_pci_driver);
8758 
8759 	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
8761 		goto err_pcidrv;
8762 	}
8763 
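	/* Fall back to the default event class if event_log_level is out of range */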
8764 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
8765 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2 (CLASS_CRITICAL); permissible range is -2 to 4\n");
8767 		event_log_level = MFI_EVT_CLASS_CRITICAL;
8768 	}
8769 
8770 	rval = driver_create_file(&megasas_pci_driver.driver,
8771 				  &driver_attr_version);
8772 	if (rval)
8773 		goto err_dcf_attr_ver;
8774 
8775 	rval = driver_create_file(&megasas_pci_driver.driver,
8776 				  &driver_attr_release_date);
8777 	if (rval)
8778 		goto err_dcf_rel_date;
8779 
8780 	rval = driver_create_file(&megasas_pci_driver.driver,
8781 				&driver_attr_support_poll_for_event);
8782 	if (rval)
8783 		goto err_dcf_support_poll_for_event;
8784 
8785 	rval = driver_create_file(&megasas_pci_driver.driver,
8786 				  &driver_attr_dbg_lvl);
8787 	if (rval)
8788 		goto err_dcf_dbg_lvl;
8789 	rval = driver_create_file(&megasas_pci_driver.driver,
8790 				&driver_attr_support_device_change);
8791 	if (rval)
8792 		goto err_dcf_support_device_change;
8793 
8794 	rval = driver_create_file(&megasas_pci_driver.driver,
8795 				  &driver_attr_support_nvme_encapsulation);
8796 	if (rval)
8797 		goto err_dcf_support_nvme_encapsulation;
8798 
8799 	rval = driver_create_file(&megasas_pci_driver.driver,
8800 				  &driver_attr_support_pci_lane_margining);
8801 	if (rval)
8802 		goto err_dcf_support_pci_lane_margining;
8803 
8804 	return rval;
8805 
8806 err_dcf_support_pci_lane_margining:
8807 	driver_remove_file(&megasas_pci_driver.driver,
8808 			   &driver_attr_support_nvme_encapsulation);
8809 
8810 err_dcf_support_nvme_encapsulation:
8811 	driver_remove_file(&megasas_pci_driver.driver,
8812 			   &driver_attr_support_device_change);
8813 
8814 err_dcf_support_device_change:
8815 	driver_remove_file(&megasas_pci_driver.driver,
8816 			   &driver_attr_dbg_lvl);
8817 err_dcf_dbg_lvl:
8818 	driver_remove_file(&megasas_pci_driver.driver,
8819 			&driver_attr_support_poll_for_event);
8820 err_dcf_support_poll_for_event:
8821 	driver_remove_file(&megasas_pci_driver.driver,
8822 			   &driver_attr_release_date);
8823 err_dcf_rel_date:
8824 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8825 err_dcf_attr_ver:
8826 	pci_unregister_driver(&megasas_pci_driver);
8827 err_pcidrv:
8828 	megasas_exit_debugfs();
8829 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8830 	return rval;
8831 }
8832 
8833 /**
8834  * megasas_exit - Driver unload entry point
8835  */
8836 static void __exit megasas_exit(void)
8837 {
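	/*
	 * Remove the driver sysfs attributes, then unregister the PCI
	 * driver, tear down debugfs and release the management chrdev.
	 */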
8838 	driver_remove_file(&megasas_pci_driver.driver,
8839 			   &driver_attr_dbg_lvl);
8840 	driver_remove_file(&megasas_pci_driver.driver,
8841 			&driver_attr_support_poll_for_event);
8842 	driver_remove_file(&megasas_pci_driver.driver,
8843 			&driver_attr_support_device_change);
8844 	driver_remove_file(&megasas_pci_driver.driver,
8845 			   &driver_attr_release_date);
8846 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8847 	driver_remove_file(&megasas_pci_driver.driver,
8848 			   &driver_attr_support_nvme_encapsulation);
8849 	driver_remove_file(&megasas_pci_driver.driver,
8850 			   &driver_attr_support_pci_lane_margining);
8851 
8852 	pci_unregister_driver(&megasas_pci_driver);
8853 	megasas_exit_debugfs();
8854 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8855 }
8856 
8857 module_init(megasas_init);
8858 module_exit(megasas_exit);
8859