1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
49 
50 /*
51  * Number of sectors per IO command
52  * Will be set in megasas_init_mfi if user does not provide
53  */
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 	"Maximum number of sectors per IO command");
58 
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
62 
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
66 
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
70 
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
75 
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180s");
79 
80 int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
83 
84 int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth, enable/disable. Default: enable(1)");
87 
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
91 
92 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "SCSI command timeout (10-90s). Default: 90s. See megasas_reset_timer.");
95 
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
101 		"1 - iops: High iops queues are not allocated &\n\t\t"
102 		"interrupt coalescing is enabled on all queues\n\t\t"
103 		"2 - latency: High iops queues are not allocated &\n\t\t"
104 		"interrupt coalescing is disabled on all queues\n\t\t"
105 		"default mode is 'balanced'"
106 		);
107 
108 int event_log_level = MFI_EVT_CLASS_CRITICAL;
109 module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
111 
112 unsigned int enable_sdev_max_qd;
113 module_param(enable_sdev_max_qd, int, 0444);
114 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
115 
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(MEGASAS_VERSION);
118 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
119 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
120 
121 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
122 static int megasas_get_pd_list(struct megasas_instance *instance);
123 static int megasas_ld_list_query(struct megasas_instance *instance,
124 				 u8 query_type);
125 static int megasas_issue_init_mfi(struct megasas_instance *instance);
126 static int megasas_register_aen(struct megasas_instance *instance,
127 				u32 seq_num, u32 class_locale_word);
128 static void megasas_get_pd_info(struct megasas_instance *instance,
129 				struct scsi_device *sdev);
130 
131 /*
132  * PCI ID table for all supported controllers
133  */
134 static struct pci_device_id megasas_pci_table[] = {
135 
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
137 	/* xscale IOP */
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
139 	/* ppc IOP */
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
141 	/* ppc IOP */
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
143 	/* gen2*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
145 	/* gen2*/
146 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
147 	/* skinny*/
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
149 	/* skinny*/
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
151 	/* xscale IOP, vega */
152 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
153 	/* xscale IOP */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
155 	/* Fusion */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
157 	/* Plasma */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
159 	/* Invader */
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
161 	/* Fury */
162 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
163 	/* Intruder */
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
165 	/* Intruder 24 port*/
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
168 	/* VENTURA */
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
170 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
172 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
173 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
175 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
176 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
177 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
178 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
179 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
180 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
181 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
182 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
183 	{}
184 };
185 
186 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
187 
188 static int megasas_mgmt_majorno;
189 struct megasas_mgmt_info megasas_mgmt_info;
190 static struct fasync_struct *megasas_async_queue;
191 static DEFINE_MUTEX(megasas_async_queue_mutex);
192 
193 static int megasas_poll_wait_aen;
194 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
195 static u32 support_poll_for_event;
196 u32 megasas_dbg_lvl;
197 static u32 support_device_change;
198 static bool support_nvme_encapsulation;
199 static bool support_pci_lane_margining;
200 
201 /* define lock for aen poll */
202 static spinlock_t poll_aen_lock;
203 
204 extern struct dentry *megasas_debugfs_root;
205 extern void megasas_init_debugfs(void);
206 extern void megasas_exit_debugfs(void);
207 extern void megasas_setup_debugfs(struct megasas_instance *instance);
208 extern void megasas_destroy_debugfs(struct megasas_instance *instance);
209 
210 void
211 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
212 		     u8 alt_status);
213 static u32
214 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
215 static int
216 megasas_adp_reset_gen2(struct megasas_instance *instance,
217 		       struct megasas_register_set __iomem *reg_set);
218 static irqreturn_t megasas_isr(int irq, void *devp);
219 static u32
220 megasas_init_adapter_mfi(struct megasas_instance *instance);
221 u32
222 megasas_build_and_issue_cmd(struct megasas_instance *instance,
223 			    struct scsi_cmnd *scmd);
224 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
225 int
226 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
227 	int seconds);
228 void megasas_fusion_ocr_wq(struct work_struct *work);
229 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
230 					 int initial);
231 static int
232 megasas_set_dma_mask(struct megasas_instance *instance);
233 static int
234 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
235 static inline void
236 megasas_free_ctrl_mem(struct megasas_instance *instance);
237 static inline int
238 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
239 static inline void
240 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
241 static inline void
242 megasas_init_ctrl_params(struct megasas_instance *instance);
243 
244 u32 megasas_readl(struct megasas_instance *instance,
245 		  const volatile void __iomem *addr)
246 {
247 	u32 i = 0, ret_val;
248 	/*
	 * Due to a HW erratum in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return a valid value. As a workaround, the driver retries the
	 * readl up to three times until a non-zero value is read.
254 	 */
255 	if (instance->adapter_type == AERO_SERIES) {
256 		do {
257 			ret_val = readl(addr);
258 			i++;
259 		} while (ret_val == 0 && i < 3);
260 		return ret_val;
261 	} else {
262 		return readl(addr);
263 	}
264 }
265 
266 /**
267  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
268  * @instance:			Adapter soft state
269  * @dcmd:			DCMD frame inside MFI command
270  * @dma_addr:			DMA address of buffer to be passed to FW
271  * @dma_len:			Length of DMA buffer to be passed to FW
272  * @return:			void
273  */
274 void megasas_set_dma_settings(struct megasas_instance *instance,
275 			      struct megasas_dcmd_frame *dcmd,
276 			      dma_addr_t dma_addr, u32 dma_len)
277 {
278 	if (instance->consistent_mask_64bit) {
279 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
280 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
281 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
282 
283 	} else {
284 		dcmd->sgl.sge32[0].phys_addr =
285 				cpu_to_le32(lower_32_bits(dma_addr));
286 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
287 		dcmd->flags = cpu_to_le16(dcmd->flags);
288 	}
289 }
290 
291 static void
292 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
293 {
294 	instance->instancet->fire_cmd(instance,
295 		cmd->frame_phys_addr, 0, instance->reg_set);
296 	return;
297 }
298 
299 /**
300  * megasas_get_cmd -	Get a command from the free pool
301  * @instance:		Adapter soft state
302  *
303  * Returns a free command from the pool
304  */
305 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
306 						  *instance)
307 {
308 	unsigned long flags;
309 	struct megasas_cmd *cmd = NULL;
310 
311 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
312 
313 	if (!list_empty(&instance->cmd_pool)) {
314 		cmd = list_entry((&instance->cmd_pool)->next,
315 				 struct megasas_cmd, list);
316 		list_del_init(&cmd->list);
317 	} else {
318 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
319 	}
320 
321 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
322 	return cmd;
323 }
324 
325 /**
326  * megasas_return_cmd -	Return a cmd to free command pool
327  * @instance:		Adapter soft state
328  * @cmd:		Command packet to be returned to free command pool
329  */
330 void
331 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
332 {
333 	unsigned long flags;
334 	u32 blk_tags;
335 	struct megasas_cmd_fusion *cmd_fusion;
336 	struct fusion_context *fusion = instance->ctrl_context;
337 
338 	/* This flag is used only for fusion adapter.
339 	 * Wait for Interrupt for Polled mode DCMD
340 	 */
341 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
342 		return;
343 
344 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
345 
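	/*
	 * On Fusion adapters each MFI command is paired with a fusion
	 * command whose tag sits just above the SCSI command tag space;
	 * return that paired command as well.
	 */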
346 	if (fusion) {
347 		blk_tags = instance->max_scsi_cmds + cmd->index;
348 		cmd_fusion = fusion->cmd_list[blk_tags];
349 		megasas_return_cmd_fusion(instance, cmd_fusion);
350 	}
351 	cmd->scmd = NULL;
352 	cmd->frame_count = 0;
353 	cmd->flags = 0;
354 	memset(cmd->frame, 0, instance->mfi_frame_size);
355 	cmd->frame->io.context = cpu_to_le32(cmd->index);
356 	if (!fusion && reset_devices)
357 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
358 	list_add(&cmd->list, (&instance->cmd_pool)->next);
359 
360 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
361 
362 }
363 
364 static const char *
365 format_timestamp(uint32_t timestamp)
366 {
367 	static char buffer[32];
368 
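	/*
	 * Timestamps with the top byte set are reported relative to
	 * controller boot; otherwise the raw seconds value is printed.
	 */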
369 	if ((timestamp & 0xff000000) == 0xff000000)
370 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
371 		0x00ffffff);
372 	else
373 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
374 	return buffer;
375 }
376 
377 static const char *
378 format_class(int8_t class)
379 {
380 	static char buffer[6];
381 
382 	switch (class) {
383 	case MFI_EVT_CLASS_DEBUG:
384 		return "debug";
385 	case MFI_EVT_CLASS_PROGRESS:
386 		return "progress";
387 	case MFI_EVT_CLASS_INFO:
388 		return "info";
389 	case MFI_EVT_CLASS_WARNING:
390 		return "WARN";
391 	case MFI_EVT_CLASS_CRITICAL:
392 		return "CRIT";
393 	case MFI_EVT_CLASS_FATAL:
394 		return "FATAL";
395 	case MFI_EVT_CLASS_DEAD:
396 		return "DEAD";
397 	default:
398 		snprintf(buffer, sizeof(buffer), "%d", class);
399 		return buffer;
400 	}
401 }
402 
403 /**
 * megasas_decode_evt - Decode FW AEN event and print events at or above
 *			the configured event logging level
 * @instance:			Adapter soft state
 */
408 static void
409 megasas_decode_evt(struct megasas_instance *instance)
410 {
411 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
412 	union megasas_evt_class_locale class_locale;
413 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
414 
415 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
416 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
417 		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
418 		event_log_level = MFI_EVT_CLASS_CRITICAL;
419 	}
420 
421 	if (class_locale.members.class >= event_log_level)
422 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
423 			le32_to_cpu(evt_detail->seq_num),
424 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
425 			(class_locale.members.locale),
426 			format_class(class_locale.members.class),
427 			evt_detail->description);
428 }
429 
/*
 *	The following functions are defined for xscale
 *	(deviceid : 1064R, PERC5) controllers
 */
434 
435 /**
436  * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:			Adapter soft state
438  */
439 static inline void
440 megasas_enable_intr_xscale(struct megasas_instance *instance)
441 {
442 	struct megasas_register_set __iomem *regs;
443 
444 	regs = instance->reg_set;
445 	writel(0, &(regs)->outbound_intr_mask);
446 
447 	/* Dummy readl to force pci flush */
448 	readl(&regs->outbound_intr_mask);
449 }
450 
451 /**
 * megasas_disable_intr_xscale -	Disables interrupts
 * @instance:			Adapter soft state
454  */
455 static inline void
456 megasas_disable_intr_xscale(struct megasas_instance *instance)
457 {
458 	struct megasas_register_set __iomem *regs;
459 	u32 mask = 0x1f;
460 
461 	regs = instance->reg_set;
462 	writel(mask, &regs->outbound_intr_mask);
463 	/* Dummy readl to force pci flush */
464 	readl(&regs->outbound_intr_mask);
465 }
466 
467 /**
468  * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:			Adapter soft state
470  */
471 static u32
472 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
473 {
474 	return readl(&instance->reg_set->outbound_msg_0);
475 }
476 /**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @instance:				Adapter soft state
479  */
480 static int
481 megasas_clear_intr_xscale(struct megasas_instance *instance)
482 {
483 	u32 status;
484 	u32 mfiStatus = 0;
485 	struct megasas_register_set __iomem *regs;
486 	regs = instance->reg_set;
487 
488 	/*
489 	 * Check if it is our interrupt
490 	 */
491 	status = readl(&regs->outbound_intr_status);
492 
493 	if (status & MFI_OB_INTR_STATUS_MASK)
494 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
495 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
496 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
497 
498 	/*
499 	 * Clear the interrupt by writing back the same value
500 	 */
501 	if (mfiStatus)
502 		writel(status, &regs->outbound_intr_status);
503 
504 	/* Dummy readl to force pci flush */
505 	readl(&regs->outbound_intr_status);
506 
507 	return mfiStatus;
508 }
509 
510 /**
511  * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance :			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
515  */
516 static inline void
517 megasas_fire_cmd_xscale(struct megasas_instance *instance,
518 		dma_addr_t frame_phys_addr,
519 		u32 frame_count,
520 		struct megasas_register_set __iomem *regs)
521 {
522 	unsigned long flags;
523 
524 	spin_lock_irqsave(&instance->hba_lock, flags);
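	/*
	 * The inbound queue port takes the frame physical address shifted
	 * right by 3 (i.e. in 8-byte units) OR'd with the frame count.
	 */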
525 	writel((frame_phys_addr >> 3)|(frame_count),
526 	       &(regs)->inbound_queue_port);
527 	spin_unlock_irqrestore(&instance->hba_lock, flags);
528 }
529 
530 /**
531  * megasas_adp_reset_xscale -  For controller reset
 * @instance:                          Adapter soft state
 * @regs:                              MFI register set
533  */
534 static int
535 megasas_adp_reset_xscale(struct megasas_instance *instance,
536 	struct megasas_register_set __iomem *regs)
537 {
538 	u32 i;
539 	u32 pcidata;
540 
541 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
542 
543 	for (i = 0; i < 3; i++)
544 		msleep(1000); /* sleep for 3 secs */
545 	pcidata  = 0;
546 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
547 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
548 	if (pcidata & 0x2) {
549 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
550 		pcidata &= ~0x2;
551 		pci_write_config_dword(instance->pdev,
552 				MFI_1068_PCSR_OFFSET, pcidata);
553 
554 		for (i = 0; i < 2; i++)
555 			msleep(1000); /* need to wait 2 secs again */
556 
557 		pcidata  = 0;
558 		pci_read_config_dword(instance->pdev,
559 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
560 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
561 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
562 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
563 			pcidata = 0;
564 			pci_write_config_dword(instance->pdev,
565 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
566 		}
567 	}
568 	return 0;
569 }
570 
571 /**
572  * megasas_check_reset_xscale -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
574  */
575 static int
576 megasas_check_reset_xscale(struct megasas_instance *instance,
577 		struct megasas_register_set __iomem *regs)
578 {
579 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
580 	    (le32_to_cpu(*instance->consumer) ==
581 		MEGASAS_ADPRESET_INPROG_SIGN))
582 		return 1;
583 	return 0;
584 }
585 
586 static struct megasas_instance_template megasas_instance_template_xscale = {
587 
588 	.fire_cmd = megasas_fire_cmd_xscale,
589 	.enable_intr = megasas_enable_intr_xscale,
590 	.disable_intr = megasas_disable_intr_xscale,
591 	.clear_intr = megasas_clear_intr_xscale,
592 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
593 	.adp_reset = megasas_adp_reset_xscale,
594 	.check_reset = megasas_check_reset_xscale,
595 	.service_isr = megasas_isr,
596 	.tasklet = megasas_complete_cmd_dpc,
597 	.init_adapter = megasas_init_adapter_mfi,
598 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
599 	.issue_dcmd = megasas_issue_dcmd,
600 };
601 
/*
 *	This is the end of the set of functions & definitions specific
 *	to xscale (deviceid : 1064R, PERC5) controllers
 */

/*
 *	The following functions are defined for ppc (deviceid : 0x60)
 *	controllers
 */
611 
612 /**
613  * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:			Adapter soft state
615  */
616 static inline void
617 megasas_enable_intr_ppc(struct megasas_instance *instance)
618 {
619 	struct megasas_register_set __iomem *regs;
620 
621 	regs = instance->reg_set;
622 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
623 
624 	writel(~0x80000000, &(regs)->outbound_intr_mask);
625 
626 	/* Dummy readl to force pci flush */
627 	readl(&regs->outbound_intr_mask);
628 }
629 
630 /**
631  * megasas_disable_intr_ppc -	Disable interrupt
 * @instance:			Adapter soft state
633  */
634 static inline void
635 megasas_disable_intr_ppc(struct megasas_instance *instance)
636 {
637 	struct megasas_register_set __iomem *regs;
638 	u32 mask = 0xFFFFFFFF;
639 
640 	regs = instance->reg_set;
641 	writel(mask, &regs->outbound_intr_mask);
642 	/* Dummy readl to force pci flush */
643 	readl(&regs->outbound_intr_mask);
644 }
645 
646 /**
647  * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:			Adapter soft state
649  */
650 static u32
651 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
652 {
653 	return readl(&instance->reg_set->outbound_scratch_pad_0);
654 }
655 
656 /**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @instance:				Adapter soft state
659  */
660 static int
661 megasas_clear_intr_ppc(struct megasas_instance *instance)
662 {
663 	u32 status, mfiStatus = 0;
664 	struct megasas_register_set __iomem *regs;
665 	regs = instance->reg_set;
666 
667 	/*
668 	 * Check if it is our interrupt
669 	 */
670 	status = readl(&regs->outbound_intr_status);
671 
672 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
673 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
674 
675 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
676 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
677 
678 	/*
679 	 * Clear the interrupt by writing back the same value
680 	 */
681 	writel(status, &regs->outbound_doorbell_clear);
682 
683 	/* Dummy readl to force pci flush */
684 	readl(&regs->outbound_doorbell_clear);
685 
686 	return mfiStatus;
687 }
688 
689 /**
690  * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance :			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
694  */
695 static inline void
696 megasas_fire_cmd_ppc(struct megasas_instance *instance,
697 		dma_addr_t frame_phys_addr,
698 		u32 frame_count,
699 		struct megasas_register_set __iomem *regs)
700 {
701 	unsigned long flags;
702 
703 	spin_lock_irqsave(&instance->hba_lock, flags);
704 	writel((frame_phys_addr | (frame_count<<1))|1,
705 			&(regs)->inbound_queue_port);
706 	spin_unlock_irqrestore(&instance->hba_lock, flags);
707 }
708 
709 /**
710  * megasas_check_reset_ppc -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
712  */
713 static int
714 megasas_check_reset_ppc(struct megasas_instance *instance,
715 			struct megasas_register_set __iomem *regs)
716 {
717 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
718 		return 1;
719 
720 	return 0;
721 }
722 
723 static struct megasas_instance_template megasas_instance_template_ppc = {
724 
725 	.fire_cmd = megasas_fire_cmd_ppc,
726 	.enable_intr = megasas_enable_intr_ppc,
727 	.disable_intr = megasas_disable_intr_ppc,
728 	.clear_intr = megasas_clear_intr_ppc,
729 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
730 	.adp_reset = megasas_adp_reset_xscale,
731 	.check_reset = megasas_check_reset_ppc,
732 	.service_isr = megasas_isr,
733 	.tasklet = megasas_complete_cmd_dpc,
734 	.init_adapter = megasas_init_adapter_mfi,
735 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
736 	.issue_dcmd = megasas_issue_dcmd,
737 };
738 
739 /**
740  * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:			Adapter soft state
742  */
743 static inline void
744 megasas_enable_intr_skinny(struct megasas_instance *instance)
745 {
746 	struct megasas_register_set __iomem *regs;
747 
748 	regs = instance->reg_set;
749 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
750 
751 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
752 
753 	/* Dummy readl to force pci flush */
754 	readl(&regs->outbound_intr_mask);
755 }
756 
757 /**
758  * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:			Adapter soft state
760  */
761 static inline void
762 megasas_disable_intr_skinny(struct megasas_instance *instance)
763 {
764 	struct megasas_register_set __iomem *regs;
765 	u32 mask = 0xFFFFFFFF;
766 
767 	regs = instance->reg_set;
768 	writel(mask, &regs->outbound_intr_mask);
769 	/* Dummy readl to force pci flush */
770 	readl(&regs->outbound_intr_mask);
771 }
772 
773 /**
774  * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:			Adapter soft state
776  */
777 static u32
778 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
779 {
780 	return readl(&instance->reg_set->outbound_scratch_pad_0);
781 }
782 
783 /**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @instance:				Adapter soft state
786  */
787 static int
788 megasas_clear_intr_skinny(struct megasas_instance *instance)
789 {
790 	u32 status;
791 	u32 mfiStatus = 0;
792 	struct megasas_register_set __iomem *regs;
793 	regs = instance->reg_set;
794 
795 	/*
796 	 * Check if it is our interrupt
797 	 */
798 	status = readl(&regs->outbound_intr_status);
799 
800 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
801 		return 0;
802 	}
803 
804 	/*
805 	 * Check if it is our interrupt
806 	 */
807 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
808 	    MFI_STATE_FAULT) {
809 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
810 	} else
811 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
812 
813 	/*
814 	 * Clear the interrupt by writing back the same value
815 	 */
816 	writel(status, &regs->outbound_intr_status);
817 
818 	/*
819 	 * dummy read to flush PCI
820 	 */
821 	readl(&regs->outbound_intr_status);
822 
823 	return mfiStatus;
824 }
825 
826 /**
827  * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance :			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
831  */
832 static inline void
833 megasas_fire_cmd_skinny(struct megasas_instance *instance,
834 			dma_addr_t frame_phys_addr,
835 			u32 frame_count,
836 			struct megasas_register_set __iomem *regs)
837 {
838 	unsigned long flags;
839 
840 	spin_lock_irqsave(&instance->hba_lock, flags);
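	/*
	 * Post the 64-bit frame address as two 32-bit writes: the high
	 * dword first, then the low dword (which also carries the frame
	 * count in its low bits) to complete the post.
	 */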
841 	writel(upper_32_bits(frame_phys_addr),
842 	       &(regs)->inbound_high_queue_port);
843 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
844 	       &(regs)->inbound_low_queue_port);
845 	spin_unlock_irqrestore(&instance->hba_lock, flags);
846 }
847 
848 /**
849  * megasas_check_reset_skinny -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
851  */
852 static int
853 megasas_check_reset_skinny(struct megasas_instance *instance,
854 				struct megasas_register_set __iomem *regs)
855 {
856 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
857 		return 1;
858 
859 	return 0;
860 }
861 
862 static struct megasas_instance_template megasas_instance_template_skinny = {
863 
864 	.fire_cmd = megasas_fire_cmd_skinny,
865 	.enable_intr = megasas_enable_intr_skinny,
866 	.disable_intr = megasas_disable_intr_skinny,
867 	.clear_intr = megasas_clear_intr_skinny,
868 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
869 	.adp_reset = megasas_adp_reset_gen2,
870 	.check_reset = megasas_check_reset_skinny,
871 	.service_isr = megasas_isr,
872 	.tasklet = megasas_complete_cmd_dpc,
873 	.init_adapter = megasas_init_adapter_mfi,
874 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
875 	.issue_dcmd = megasas_issue_dcmd,
876 };
877 
878 
/*
 *	The following functions are defined for gen2 (deviceid : 0x78, 0x79)
 *	controllers
 */
883 
884 /**
885  * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:                  Adapter soft state
887  */
888 static inline void
889 megasas_enable_intr_gen2(struct megasas_instance *instance)
890 {
891 	struct megasas_register_set __iomem *regs;
892 
893 	regs = instance->reg_set;
894 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
895 
896 	/* write ~0x00000005 (4 & 1) to the intr mask*/
897 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
898 
899 	/* Dummy readl to force pci flush */
900 	readl(&regs->outbound_intr_mask);
901 }
902 
903 /**
904  * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:                  Adapter soft state
906  */
907 static inline void
908 megasas_disable_intr_gen2(struct megasas_instance *instance)
909 {
910 	struct megasas_register_set __iomem *regs;
911 	u32 mask = 0xFFFFFFFF;
912 
913 	regs = instance->reg_set;
914 	writel(mask, &regs->outbound_intr_mask);
915 	/* Dummy readl to force pci flush */
916 	readl(&regs->outbound_intr_mask);
917 }
918 
919 /**
920  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:                  Adapter soft state
922  */
923 static u32
924 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
925 {
926 	return readl(&instance->reg_set->outbound_scratch_pad_0);
927 }
928 
929 /**
 * megasas_clear_intr_gen2 -            Check & clear interrupt
 * @instance:                           Adapter soft state
932  */
933 static int
934 megasas_clear_intr_gen2(struct megasas_instance *instance)
935 {
936 	u32 status;
937 	u32 mfiStatus = 0;
938 	struct megasas_register_set __iomem *regs;
939 	regs = instance->reg_set;
940 
941 	/*
942 	 * Check if it is our interrupt
943 	 */
944 	status = readl(&regs->outbound_intr_status);
945 
946 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
947 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
948 	}
949 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
950 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
951 	}
952 
953 	/*
954 	 * Clear the interrupt by writing back the same value
955 	 */
956 	if (mfiStatus)
957 		writel(status, &regs->outbound_doorbell_clear);
958 
959 	/* Dummy readl to force pci flush */
960 	readl(&regs->outbound_intr_status);
961 
962 	return mfiStatus;
963 }
964 /**
965  * megasas_fire_cmd_gen2 -     Sends command to the FW
 * @instance :                 Adapter soft state
 * @frame_phys_addr :          Physical address of cmd
 * @frame_count :              Number of frames for the command
 * @regs :                     MFI register set
969  */
970 static inline void
971 megasas_fire_cmd_gen2(struct megasas_instance *instance,
972 			dma_addr_t frame_phys_addr,
973 			u32 frame_count,
974 			struct megasas_register_set __iomem *regs)
975 {
976 	unsigned long flags;
977 
978 	spin_lock_irqsave(&instance->hba_lock, flags);
979 	writel((frame_phys_addr | (frame_count<<1))|1,
980 			&(regs)->inbound_queue_port);
981 	spin_unlock_irqrestore(&instance->hba_lock, flags);
982 }
983 
984 /**
985  * megasas_adp_reset_gen2 -	For controller reset
 * @instance:				Adapter soft state
 * @reg_set:				MFI register set
987  */
988 static int
989 megasas_adp_reset_gen2(struct megasas_instance *instance,
990 			struct megasas_register_set __iomem *reg_set)
991 {
992 	u32 retry = 0 ;
993 	u32 HostDiag;
994 	u32 __iomem *seq_offset = &reg_set->seq_offset;
995 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
996 
997 	if (instance->instancet == &megasas_instance_template_skinny) {
998 		seq_offset = &reg_set->fusion_seq_offset;
999 		hostdiag_offset = &reg_set->fusion_host_diag;
1000 	}
1001 
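	/*
	 * Write the key sequence to the sequence register; this unlocks
	 * the host diagnostic register (DIAG_WRITE_ENABLE is polled for
	 * below before the reset bit is set).
	 */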
1002 	writel(0, seq_offset);
1003 	writel(4, seq_offset);
1004 	writel(0xb, seq_offset);
1005 	writel(2, seq_offset);
1006 	writel(7, seq_offset);
1007 	writel(0xd, seq_offset);
1008 
1009 	msleep(1000);
1010 
1011 	HostDiag = (u32)readl(hostdiag_offset);
1012 
1013 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1014 		msleep(100);
1015 		HostDiag = (u32)readl(hostdiag_offset);
1016 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1017 					retry, HostDiag);
1018 
1019 		if (retry++ >= 100)
1020 			return 1;
1021 
1022 	}
1023 
1024 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1025 
1026 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1027 
1028 	ssleep(10);
1029 
1030 	HostDiag = (u32)readl(hostdiag_offset);
1031 	while (HostDiag & DIAG_RESET_ADAPTER) {
1032 		msleep(100);
1033 		HostDiag = (u32)readl(hostdiag_offset);
1034 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1035 				retry, HostDiag);
1036 
1037 		if (retry++ >= 1000)
1038 			return 1;
1039 
1040 	}
1041 	return 0;
1042 }
1043 
1044 /**
1045  * megasas_check_reset_gen2 -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
1047  */
1048 static int
1049 megasas_check_reset_gen2(struct megasas_instance *instance,
1050 		struct megasas_register_set __iomem *regs)
1051 {
1052 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1053 		return 1;
1054 
1055 	return 0;
1056 }
1057 
1058 static struct megasas_instance_template megasas_instance_template_gen2 = {
1059 
1060 	.fire_cmd = megasas_fire_cmd_gen2,
1061 	.enable_intr = megasas_enable_intr_gen2,
1062 	.disable_intr = megasas_disable_intr_gen2,
1063 	.clear_intr = megasas_clear_intr_gen2,
1064 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1065 	.adp_reset = megasas_adp_reset_gen2,
1066 	.check_reset = megasas_check_reset_gen2,
1067 	.service_isr = megasas_isr,
1068 	.tasklet = megasas_complete_cmd_dpc,
1069 	.init_adapter = megasas_init_adapter_mfi,
1070 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1071 	.issue_dcmd = megasas_issue_dcmd,
1072 };
1073 
/*
 *	This is the end of the set of functions & definitions
 *	specific to gen2 (deviceid : 0x78, 0x79) controllers
 */
1078 
1079 /*
1080  * Template added for TB (Fusion)
1081  */
1082 extern struct megasas_instance_template megasas_instance_template_fusion;
1083 
1084 /**
1085  * megasas_issue_polled -	Issues a polling command
1086  * @instance:			Adapter soft state
1087  * @cmd:			Command packet to be issued
1088  *
1089  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1090  */
1091 int
1092 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1093 {
1094 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1095 
1096 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1097 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1098 
1099 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1100 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1101 			__func__, __LINE__);
1102 		return DCMD_INIT;
1103 	}
1104 
1105 	instance->instancet->issue_dcmd(instance, cmd);
1106 
1107 	return wait_and_poll(instance, cmd, instance->requestorId ?
1108 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1109 }
1110 
1111 /**
1112  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1113  * @instance:			Adapter soft state
1114  * @cmd:			Command to be issued
1115  * @timeout:			Timeout in seconds
1116  *
1117  * This function waits on an event for the command to be returned from ISR.
1118  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1119  * Used to issue ioctl commands.
1120  */
1121 int
1122 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1123 			  struct megasas_cmd *cmd, int timeout)
1124 {
1125 	int ret = 0;
1126 	cmd->cmd_status_drv = DCMD_INIT;
1127 
1128 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1129 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1130 			__func__, __LINE__);
1131 		return DCMD_INIT;
1132 	}
1133 
1134 	instance->instancet->issue_dcmd(instance, cmd);
1135 
1136 	if (timeout) {
1137 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1138 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1139 		if (!ret) {
1140 			dev_err(&instance->pdev->dev,
1141 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1142 				cmd->frame->dcmd.opcode, __func__);
1143 			return DCMD_TIMEOUT;
1144 		}
1145 	} else
1146 		wait_event(instance->int_cmd_wait_q,
1147 				cmd->cmd_status_drv != DCMD_INIT);
1148 
1149 	return cmd->cmd_status_drv;
1150 }
1151 
1152 /**
1153  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1154  * @instance:				Adapter soft state
1155  * @cmd_to_abort:			Previously issued cmd to be aborted
1156  * @timeout:				Timeout in seconds
1157  *
 * MFI firmware can abort a previously issued AEN command (automatic event
1159  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1160  * cmd and waits for return status.
1161  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1162  */
1163 static int
1164 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1165 				struct megasas_cmd *cmd_to_abort, int timeout)
1166 {
1167 	struct megasas_cmd *cmd;
1168 	struct megasas_abort_frame *abort_fr;
1169 	int ret = 0;
1170 	u32 opcode;
1171 
1172 	cmd = megasas_get_cmd(instance);
1173 
1174 	if (!cmd)
1175 		return -1;
1176 
1177 	abort_fr = &cmd->frame->abort;
1178 
1179 	/*
1180 	 * Prepare and issue the abort frame
1181 	 */
1182 	abort_fr->cmd = MFI_CMD_ABORT;
1183 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1184 	abort_fr->flags = cpu_to_le16(0);
1185 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1186 	abort_fr->abort_mfi_phys_addr_lo =
1187 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1188 	abort_fr->abort_mfi_phys_addr_hi =
1189 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1190 
1191 	cmd->sync_cmd = 1;
1192 	cmd->cmd_status_drv = DCMD_INIT;
1193 
1194 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1195 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1196 			__func__, __LINE__);
1197 		return DCMD_INIT;
1198 	}
1199 
1200 	instance->instancet->issue_dcmd(instance, cmd);
1201 
1202 	if (timeout) {
1203 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1204 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1205 		if (!ret) {
1206 			opcode = cmd_to_abort->frame->dcmd.opcode;
1207 			dev_err(&instance->pdev->dev,
1208 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1209 				opcode,  __func__);
1210 			return DCMD_TIMEOUT;
1211 		}
1212 	} else
1213 		wait_event(instance->abort_cmd_wait_q,
1214 		cmd->cmd_status_drv != DCMD_INIT);
1215 
1216 	cmd->sync_cmd = 0;
1217 
1218 	megasas_return_cmd(instance, cmd);
1219 	return cmd->cmd_status_drv;
1220 }
1221 
1222 /**
1223  * megasas_make_sgl32 -	Prepares 32-bit SGL
1224  * @instance:		Adapter soft state
1225  * @scp:		SCSI command from the mid-layer
1226  * @mfi_sgl:		SGL to be filled in
1227  *
1228  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1230  */
1231 static int
1232 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1233 		   union megasas_sgl *mfi_sgl)
1234 {
1235 	int i;
1236 	int sge_count;
1237 	struct scatterlist *os_sgl;
1238 
1239 	sge_count = scsi_dma_map(scp);
1240 	BUG_ON(sge_count < 0);
1241 
1242 	if (sge_count) {
1243 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1244 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1245 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1246 		}
1247 	}
1248 	return sge_count;
1249 }
1250 
1251 /**
1252  * megasas_make_sgl64 -	Prepares 64-bit SGL
1253  * @instance:		Adapter soft state
1254  * @scp:		SCSI command from the mid-layer
1255  * @mfi_sgl:		SGL to be filled in
1256  *
1257  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1259  */
1260 static int
1261 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1262 		   union megasas_sgl *mfi_sgl)
1263 {
1264 	int i;
1265 	int sge_count;
1266 	struct scatterlist *os_sgl;
1267 
1268 	sge_count = scsi_dma_map(scp);
1269 	BUG_ON(sge_count < 0);
1270 
1271 	if (sge_count) {
1272 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1273 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1274 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1275 		}
1276 	}
1277 	return sge_count;
1278 }
1279 
1280 /**
1281  * megasas_make_sgl_skinny - Prepares IEEE SGL
1282  * @instance:           Adapter soft state
1283  * @scp:                SCSI command from the mid-layer
1284  * @mfi_sgl:            SGL to be filled in
1285  *
1286  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1288  */
1289 static int
1290 megasas_make_sgl_skinny(struct megasas_instance *instance,
1291 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1292 {
1293 	int i;
1294 	int sge_count;
1295 	struct scatterlist *os_sgl;
1296 
1297 	sge_count = scsi_dma_map(scp);
1298 
1299 	if (sge_count) {
1300 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1301 			mfi_sgl->sge_skinny[i].length =
1302 				cpu_to_le32(sg_dma_len(os_sgl));
1303 			mfi_sgl->sge_skinny[i].phys_addr =
1304 				cpu_to_le64(sg_dma_address(os_sgl));
1305 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1306 		}
1307 	}
1308 	return sge_count;
1309 }
1310 
/**
 * megasas_get_frame_count - Computes the number of frames
 * @instance		: Adapter soft state
 * @sge_count		: number of sg elements
 * @frame_type		: type of frame - io or pthru frame
 *
 * Returns the number of frames required for the given number of sg elements
 * (sge_count)
 */
1318 
1319 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1320 			u8 sge_count, u8 frame_type)
1321 {
1322 	int num_cnt;
1323 	int sge_bytes;
1324 	u32 sge_sz;
1325 	u32 frame_count = 0;
1326 
1327 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1328 	    sizeof(struct megasas_sge32);
1329 
1330 	if (instance->flag_ieee) {
1331 		sge_sz = sizeof(struct megasas_sge_skinny);
1332 	}
1333 
1334 	/*
1335 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1336 	 * 3 SGEs for 32-bit SGLs for ldio &
1337 	 * 1 SGEs for 64-bit SGLs and
1338 	 * 2 SGEs for 32-bit SGLs for pthru frame
1339 	 */
1340 	if (unlikely(frame_type == PTHRU_FRAME)) {
1341 		if (instance->flag_ieee == 1) {
1342 			num_cnt = sge_count - 1;
1343 		} else if (IS_DMA64)
1344 			num_cnt = sge_count - 1;
1345 		else
1346 			num_cnt = sge_count - 2;
1347 	} else {
1348 		if (instance->flag_ieee == 1) {
1349 			num_cnt = sge_count - 1;
1350 		} else if (IS_DMA64)
1351 			num_cnt = sge_count - 2;
1352 		else
1353 			num_cnt = sge_count - 3;
1354 	}
1355 
1356 	if (num_cnt > 0) {
1357 		sge_bytes = sge_sz * num_cnt;
1358 
1359 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1360 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1361 	}
1362 	/* Main frame */
1363 	frame_count += 1;
1364 
1365 	if (frame_count > 7)
1366 		frame_count = 8;
1367 	return frame_count;
1368 }
1369 
1370 /**
1371  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1372  * @instance:		Adapter soft state
1373  * @scp:		SCSI command
1374  * @cmd:		Command to be prepared in
1375  *
 * This function prepares CDB commands. These are typically pass-through
1377  * commands to the devices.
1378  */
1379 static int
1380 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1381 		   struct megasas_cmd *cmd)
1382 {
1383 	u32 is_logical;
1384 	u32 device_id;
1385 	u16 flags = 0;
1386 	struct megasas_pthru_frame *pthru;
1387 
1388 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1389 	device_id = MEGASAS_DEV_INDEX(scp);
1390 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1391 
1392 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1393 		flags = MFI_FRAME_DIR_WRITE;
1394 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1395 		flags = MFI_FRAME_DIR_READ;
1396 	else if (scp->sc_data_direction == DMA_NONE)
1397 		flags = MFI_FRAME_DIR_NONE;
1398 
1399 	if (instance->flag_ieee == 1) {
1400 		flags |= MFI_FRAME_IEEE;
1401 	}
1402 
1403 	/*
1404 	 * Prepare the DCDB frame
1405 	 */
1406 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1407 	pthru->cmd_status = 0x0;
1408 	pthru->scsi_status = 0x0;
1409 	pthru->target_id = device_id;
1410 	pthru->lun = scp->device->lun;
1411 	pthru->cdb_len = scp->cmd_len;
1412 	pthru->timeout = 0;
1413 	pthru->pad_0 = 0;
1414 	pthru->flags = cpu_to_le16(flags);
1415 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1416 
1417 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1418 
1419 	/*
1420 	 * If the command is for the tape device, set the
1421 	 * pthru timeout to the os layer timeout value.
1422 	 */
1423 	if (scp->device->type == TYPE_TAPE) {
1424 		if ((scp->request->timeout / HZ) > 0xFFFF)
1425 			pthru->timeout = cpu_to_le16(0xFFFF);
1426 		else
1427 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1428 	}
1429 
1430 	/*
1431 	 * Construct SGL
1432 	 */
1433 	if (instance->flag_ieee == 1) {
1434 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1435 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1436 						      &pthru->sgl);
1437 	} else if (IS_DMA64) {
1438 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1439 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1440 						      &pthru->sgl);
1441 	} else
1442 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1443 						      &pthru->sgl);
1444 
1445 	if (pthru->sge_count > instance->max_num_sge) {
1446 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1447 			pthru->sge_count);
1448 		return 0;
1449 	}
1450 
1451 	/*
1452 	 * Sense info specific
1453 	 */
1454 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1455 	pthru->sense_buf_phys_addr_hi =
1456 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1457 	pthru->sense_buf_phys_addr_lo =
1458 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1459 
1460 	/*
1461 	 * Compute the total number of frames this command consumes. FW uses
1462 	 * this number to pull sufficient number of frames from host memory.
1463 	 */
1464 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1465 							PTHRU_FRAME);
1466 
1467 	return cmd->frame_count;
1468 }
1469 
1470 /**
1471  * megasas_build_ldio -	Prepares IOs to logical devices
1472  * @instance:		Adapter soft state
1473  * @scp:		SCSI command
1474  * @cmd:		Command to be prepared
1475  *
1476  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1477  */
1478 static int
1479 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1480 		   struct megasas_cmd *cmd)
1481 {
1482 	u32 device_id;
1483 	u8 sc = scp->cmnd[0];
1484 	u16 flags = 0;
1485 	struct megasas_io_frame *ldio;
1486 
1487 	device_id = MEGASAS_DEV_INDEX(scp);
1488 	ldio = (struct megasas_io_frame *)cmd->frame;
1489 
1490 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1491 		flags = MFI_FRAME_DIR_WRITE;
1492 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1493 		flags = MFI_FRAME_DIR_READ;
1494 
1495 	if (instance->flag_ieee == 1) {
1496 		flags |= MFI_FRAME_IEEE;
1497 	}
1498 
1499 	/*
1500 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1501 	 */
1502 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1503 	ldio->cmd_status = 0x0;
1504 	ldio->scsi_status = 0x0;
1505 	ldio->target_id = device_id;
1506 	ldio->timeout = 0;
1507 	ldio->reserved_0 = 0;
1508 	ldio->pad_0 = 0;
1509 	ldio->flags = cpu_to_le16(flags);
1510 	ldio->start_lba_hi = 0;
1511 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1512 
1513 	/*
1514 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1515 	 */
1516 	if (scp->cmd_len == 6) {
1517 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1518 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1519 						 ((u32) scp->cmnd[2] << 8) |
1520 						 (u32) scp->cmnd[3]);
1521 
1522 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1523 	}
1524 
1525 	/*
1526 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1527 	 */
1528 	else if (scp->cmd_len == 10) {
1529 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1530 					      ((u32) scp->cmnd[7] << 8));
1531 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1532 						 ((u32) scp->cmnd[3] << 16) |
1533 						 ((u32) scp->cmnd[4] << 8) |
1534 						 (u32) scp->cmnd[5]);
1535 	}
1536 
1537 	/*
1538 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1539 	 */
1540 	else if (scp->cmd_len == 12) {
1541 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1542 					      ((u32) scp->cmnd[7] << 16) |
1543 					      ((u32) scp->cmnd[8] << 8) |
1544 					      (u32) scp->cmnd[9]);
1545 
1546 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1547 						 ((u32) scp->cmnd[3] << 16) |
1548 						 ((u32) scp->cmnd[4] << 8) |
1549 						 (u32) scp->cmnd[5]);
1550 	}
1551 
1552 	/*
1553 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1554 	 */
1555 	else if (scp->cmd_len == 16) {
1556 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1557 					      ((u32) scp->cmnd[11] << 16) |
1558 					      ((u32) scp->cmnd[12] << 8) |
1559 					      (u32) scp->cmnd[13]);
1560 
1561 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1562 						 ((u32) scp->cmnd[7] << 16) |
1563 						 ((u32) scp->cmnd[8] << 8) |
1564 						 (u32) scp->cmnd[9]);
1565 
1566 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1567 						 ((u32) scp->cmnd[3] << 16) |
1568 						 ((u32) scp->cmnd[4] << 8) |
1569 						 (u32) scp->cmnd[5]);
1570 
1571 	}
1572 
1573 	/*
1574 	 * Construct SGL
1575 	 */
1576 	if (instance->flag_ieee) {
1577 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1578 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1579 					      &ldio->sgl);
1580 	} else if (IS_DMA64) {
1581 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1582 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1583 	} else
1584 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1585 
1586 	if (ldio->sge_count > instance->max_num_sge) {
1587 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1588 			ldio->sge_count);
1589 		return 0;
1590 	}
1591 
1592 	/*
1593 	 * Sense info specific
1594 	 */
1595 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1596 	ldio->sense_buf_phys_addr_hi = 0;
1597 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1598 
1599 	/*
1600 	 * Compute the total number of frames this command consumes. FW uses
1601 	 * this number to pull sufficient number of frames from host memory.
1602 	 */
1603 	cmd->frame_count = megasas_get_frame_count(instance,
1604 			ldio->sge_count, IO_FRAME);
1605 
1606 	return cmd->frame_count;
1607 }
1608 
1609 /**
1610  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1611  *				and whether it's RW or non RW
 * @cmd:			SCSI command
 */
1615 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1616 {
1617 	int ret;
1618 
1619 	switch (cmd->cmnd[0]) {
1620 	case READ_10:
1621 	case WRITE_10:
1622 	case READ_12:
1623 	case WRITE_12:
1624 	case READ_6:
1625 	case WRITE_6:
1626 	case READ_16:
1627 	case WRITE_16:
1628 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1629 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1630 		break;
1631 	default:
1632 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1633 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1634 	}
1635 	return ret;
1636 }
1637 
1638  /**
1639  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1640  *					in FW
1641  * @instance:				Adapter soft state
1642  */
1643 static inline void
1644 megasas_dump_pending_frames(struct megasas_instance *instance)
1645 {
1646 	struct megasas_cmd *cmd;
1647 	int i,n;
1648 	union megasas_sgl *mfi_sgl;
1649 	struct megasas_io_frame *ldio;
1650 	struct megasas_pthru_frame *pthru;
1651 	u32 sgcount;
1652 	u16 max_cmd = instance->max_fw_cmds;
1653 
1654 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1655 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1656 	if (IS_DMA64)
1657 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1658 	else
1659 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1660 
1661 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1662 	for (i = 0; i < max_cmd; i++) {
1663 		cmd = instance->cmd_list[i];
1664 		if (!cmd->scmd)
1665 			continue;
1666 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1667 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1668 			ldio = (struct megasas_io_frame *)cmd->frame;
1669 			mfi_sgl = &ldio->sgl;
1670 			sgcount = ldio->sge_count;
1671 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1672 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1673 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1674 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1675 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1676 		} else {
1677 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1678 			mfi_sgl = &pthru->sgl;
1679 			sgcount = pthru->sge_count;
1680 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1681 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1682 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1683 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1684 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1685 		}
1686 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1687 			for (n = 0; n < sgcount; n++) {
1688 				if (IS_DMA64)
1689 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1690 						le32_to_cpu(mfi_sgl->sge64[n].length),
1691 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1692 				else
1693 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1694 						le32_to_cpu(mfi_sgl->sge32[n].length),
1695 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1696 			}
1697 		}
1698 	} /*for max_cmd*/
1699 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1700 	for (i = 0; i < max_cmd; i++) {
1701 
1702 		cmd = instance->cmd_list[i];
1703 
1704 		if (cmd->sync_cmd == 1)
1705 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1706 	}
1707 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1708 }
1709 
1710 u32
1711 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1712 			    struct scsi_cmnd *scmd)
1713 {
1714 	struct megasas_cmd *cmd;
1715 	u32 frame_count;
1716 
1717 	cmd = megasas_get_cmd(instance);
1718 	if (!cmd)
1719 		return SCSI_MLQUEUE_HOST_BUSY;
1720 
1721 	/*
1722 	 * Logical drive command
1723 	 */
1724 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1725 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1726 	else
1727 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1728 
1729 	if (!frame_count)
1730 		goto out_return_cmd;
1731 
1732 	cmd->scmd = scmd;
1733 	scmd->SCp.ptr = (char *)cmd;
1734 
1735 	/*
1736 	 * Issue the command to the FW
1737 	 */
1738 	atomic_inc(&instance->fw_outstanding);
1739 
1740 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1741 				cmd->frame_count-1, instance->reg_set);
1742 
1743 	return 0;
1744 out_return_cmd:
1745 	megasas_return_cmd(instance, cmd);
1746 	return SCSI_MLQUEUE_HOST_BUSY;
1747 }
1748 
1749 
1750 /**
1751  * megasas_queue_command -	Queue entry point
1752  * @scmd:			SCSI command to be queued
1753  * @done:			Callback entry point
1754  */
1755 static int
1756 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1757 {
1758 	struct megasas_instance *instance;
1759 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1760 
1761 	instance = (struct megasas_instance *)
1762 	    scmd->device->host->hostdata;
1763 
1764 	if (instance->unload == 1) {
1765 		scmd->result = DID_NO_CONNECT << 16;
1766 		scmd->scsi_done(scmd);
1767 		return 0;
1768 	}
1769 
1770 	if (instance->issuepend_done == 0)
1771 		return SCSI_MLQUEUE_HOST_BUSY;
1772 
1773 
1774 	/* Check for an mpio path and adjust behavior */
1775 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1776 		if (megasas_check_mpio_paths(instance, scmd) ==
1777 		    (DID_REQUEUE << 16)) {
1778 			return SCSI_MLQUEUE_HOST_BUSY;
1779 		} else {
1780 			scmd->result = DID_NO_CONNECT << 16;
1781 			scmd->scsi_done(scmd);
1782 			return 0;
1783 		}
1784 	}
1785 
1786 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1787 		scmd->result = DID_NO_CONNECT << 16;
1788 		scmd->scsi_done(scmd);
1789 		return 0;
1790 	}
1791 
1792 	mr_device_priv_data = scmd->device->hostdata;
1793 	if (!mr_device_priv_data) {
1794 		scmd->result = DID_NO_CONNECT << 16;
1795 		scmd->scsi_done(scmd);
1796 		return 0;
1797 	}
1798 
1799 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1800 		return SCSI_MLQUEUE_HOST_BUSY;
1801 
1802 	if (mr_device_priv_data->tm_busy)
1803 		return SCSI_MLQUEUE_DEVICE_BUSY;
1804 
1805 
1806 	scmd->result = 0;
1807 
1808 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1809 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1810 		scmd->device->lun)) {
1811 		scmd->result = DID_BAD_TARGET << 16;
1812 		goto out_done;
1813 	}
1814 
1815 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1816 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1817 	    (!instance->fw_sync_cache_support)) {
1818 		scmd->result = DID_OK << 16;
1819 		goto out_done;
1820 	}
1821 
1822 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1823 
1824  out_done:
1825 	scmd->scsi_done(scmd);
1826 	return 0;
1827 }
1828 
1829 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1830 {
1831 	int i;
1832 
1833 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1834 
1835 		if ((megasas_mgmt_info.instance[i]) &&
1836 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1837 			return megasas_mgmt_info.instance[i];
1838 	}
1839 
1840 	return NULL;
1841 }
1842 
1843 /*
1844 * megasas_set_dynamic_target_properties -
1845 * Device property set by driver may not be static and it is required to be
1846 * updated after OCR
1847 *
1848 * set tm_capable.
1849 * set dma alignment (only for eedp protection enable vd).
1850 *
1851 * @sdev: OS provided scsi device
1852 *
1853 * Returns void
1854 */
1855 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1856 					   bool is_target_prop)
1857 {
1858 	u16 pd_index = 0, ld;
1859 	u32 device_id;
1860 	struct megasas_instance *instance;
1861 	struct fusion_context *fusion;
1862 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1863 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1864 	struct MR_LD_RAID *raid;
1865 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1866 
1867 	instance = megasas_lookup_instance(sdev->host->host_no);
1868 	fusion = instance->ctrl_context;
1869 	mr_device_priv_data = sdev->hostdata;
1870 
1871 	if (!fusion || !mr_device_priv_data)
1872 		return;
1873 
1874 	if (MEGASAS_IS_LOGICAL(sdev)) {
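		/*
		 * Derive the firmware VD target index from the LD channel
		 * parity and the SCSI target id, then look it up in the
		 * cached RAID map.
		 */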
1875 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1876 					+ sdev->id;
1877 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1878 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1879 		if (ld >= instance->fw_supported_vd_count)
1880 			return;
1881 		raid = MR_LdRaidGet(ld, local_map_ptr);
1882 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1885 
1886 		mr_device_priv_data->is_tm_capable =
1887 			raid->capability.tmCapable;
1888 
1889 		if (!raid->flags.isEPD)
1890 			sdev->no_write_same = 1;
1891 
1892 	} else if (instance->use_seqnum_jbod_fp) {
1893 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1894 			sdev->id;
1895 		pd_sync = (void *)fusion->pd_seq_sync
1896 				[(instance->pd_seq_map_id - 1) & 1];
1897 		mr_device_priv_data->is_tm_capable =
1898 			pd_sync->seq[pd_index].capability.tmCapable;
1899 	}
1900 
1901 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1902 		/*
1903 		 * If FW provides a target reset timeout value, driver will use
1904 		 * it. If not set, fallback to default values.
1905 		 */
1906 		mr_device_priv_data->target_reset_tmo =
1907 			min_t(u8, instance->max_reset_tmo,
1908 			      instance->tgt_prop->reset_tmo);
1909 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1910 	} else {
1911 		mr_device_priv_data->target_reset_tmo =
1912 						MEGASAS_DEFAULT_TM_TIMEOUT;
1913 		mr_device_priv_data->task_abort_tmo =
1914 						MEGASAS_DEFAULT_TM_TIMEOUT;
1915 	}
1916 }
1917 
1918 /*
1919  * megasas_set_nvme_device_properties -
1920  * set nomerges=2
1921  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1922  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1923  *
1924  * MR firmware provides value in KB. Caller of this function converts
1925  * kb into bytes.
1926  *
1927  * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1928  * MR firmware provides value 128 as (32 * 4K) = 128K.
1929  *
1930  * @sdev:				scsi device
1931  * @max_io_size:				maximum io transfer size
1932  *
1933  */
1934 static inline void
1935 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1936 {
1937 	struct megasas_instance *instance;
1938 	u32 mr_nvme_pg_size;
1939 
1940 	instance = (struct megasas_instance *)sdev->host->hostdata;
1941 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1942 				MR_DEFAULT_NVME_PAGE_SIZE);
1943 
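	/*
	 * max_io_size is in bytes; convert it to 512-byte sectors for the
	 * block layer limit.
	 */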
1944 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1945 
1946 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1947 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1948 }
1949 
1950 /*
1951  * megasas_set_fw_assisted_qd -
1952  * set device queue depth to can_queue
1953  * set device queue depth to fw assisted qd
1954  *
1955  * @sdev:				scsi device
1956  * @is_target_prop			true, if fw provided target properties.
1957  */
1958 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1959 						 bool is_target_prop)
1960 {
1961 	u8 interface_type;
1962 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1963 	u32 tgt_device_qd;
1964 	struct megasas_instance *instance;
1965 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1966 
1967 	instance = megasas_lookup_instance(sdev->host->host_no);
1968 	mr_device_priv_data = sdev->hostdata;
1969 	interface_type  = mr_device_priv_data->interface_type;
1970 
1971 	switch (interface_type) {
1972 	case SAS_PD:
1973 		device_qd = MEGASAS_SAS_QD;
1974 		break;
1975 	case SATA_PD:
1976 		device_qd = MEGASAS_SATA_QD;
1977 		break;
1978 	case NVME_PD:
1979 		device_qd = MEGASAS_NVME_QD;
1980 		break;
1981 	}
1982 
1983 	if (is_target_prop) {
1984 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1985 		if (tgt_device_qd &&
1986 		    (tgt_device_qd <= instance->host->can_queue))
1987 			device_qd = tgt_device_qd;
1988 	}
1989 
1990 	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
1991 		device_qd = instance->host->can_queue;
1992 
1993 	scsi_change_queue_depth(sdev, device_qd);
1994 }
1995 
1996 /*
1997  * megasas_set_static_target_properties -
1998  * Device property set by driver are static and it is not required to be
1999  * updated after OCR.
2000  *
2001  * set io timeout
2002  * set device queue depth
2003  * set nvme device properties. see - megasas_set_nvme_device_properties
2004  *
2005  * @sdev:				scsi device
2006  * @is_target_prop			true, if fw provided target properties.
2007  */
2008 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2009 						 bool is_target_prop)
2010 {
2011 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2012 	struct megasas_instance *instance;
2013 
2014 	instance = megasas_lookup_instance(sdev->host->host_no);
2015 
2016 	/*
2017 	 * The RAID firmware may require extended timeouts.
2018 	 */
2019 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2020 
	/* max_io_size_kb will be set to a non-zero value for
	 * NVMe based VDs and SysPDs.
	 */
2024 	if (is_target_prop)
2025 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2026 
2027 	if (instance->nvme_page_size && max_io_size_kb)
2028 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2029 
2030 	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2031 }
2032 
2033 
2034 static int megasas_slave_configure(struct scsi_device *sdev)
2035 {
2036 	u16 pd_index = 0;
2037 	struct megasas_instance *instance;
2038 	int ret_target_prop = DCMD_FAILED;
2039 	bool is_target_prop = false;
2040 
2041 	instance = megasas_lookup_instance(sdev->host->host_no);
2042 	if (instance->pd_list_not_supported) {
2043 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2044 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2045 				sdev->id;
2046 			if (instance->pd_list[pd_index].driveState !=
2047 				MR_PD_STATE_SYSTEM)
2048 				return -ENXIO;
2049 		}
2050 	}
2051 
2052 	mutex_lock(&instance->reset_mutex);
2053 	/* Send DCMD to Firmware and cache the information */
2054 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2055 		megasas_get_pd_info(instance, sdev);
2056 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
	 */
2060 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2061 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2062 
2063 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2064 	megasas_set_static_target_properties(sdev, is_target_prop);
2065 
2066 	/* This sdev property may change post OCR */
2067 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2068 
2069 	mutex_unlock(&instance->reset_mutex);
2070 
2071 	return 0;
2072 }
2073 
2074 static int megasas_slave_alloc(struct scsi_device *sdev)
2075 {
2076 	u16 pd_index = 0;
	struct megasas_instance *instance;
2078 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2079 
2080 	instance = megasas_lookup_instance(sdev->host->host_no);
2081 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2082 		/*
2083 		 * Open the OS scan to the SYSTEM PD
2084 		 */
2085 		pd_index =
2086 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2087 			sdev->id;
2088 		if ((instance->pd_list_not_supported ||
2089 			instance->pd_list[pd_index].driveState ==
2090 			MR_PD_STATE_SYSTEM)) {
2091 			goto scan_target;
2092 		}
2093 		return -ENXIO;
2094 	}
2095 
2096 scan_target:
2097 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2098 					GFP_KERNEL);
2099 	if (!mr_device_priv_data)
2100 		return -ENOMEM;
2101 	sdev->hostdata = mr_device_priv_data;
2102 
2103 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2104 		   instance->r1_ldio_hint_default);
2105 	return 0;
2106 }
2107 
2108 static void megasas_slave_destroy(struct scsi_device *sdev)
2109 {
2110 	kfree(sdev->hostdata);
2111 	sdev->hostdata = NULL;
2112 }
2113 
2114 /*
2115 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2116 *                                       kill adapter
2117 * @instance:				Adapter soft state
2118 *
2119 */
2120 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2121 {
2122 	int i;
2123 	struct megasas_cmd *cmd_mfi;
2124 	struct megasas_cmd_fusion *cmd_fusion;
2125 	struct fusion_context *fusion = instance->ctrl_context;
2126 
2127 	/* Find all outstanding ioctls */
2128 	if (fusion) {
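		/*
		 * On fusion adapters each MFI ioctl command is tracked via
		 * the fusion command's sync_cmd_idx back-reference, so walk
		 * the fusion command list to find it.
		 */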
2129 		for (i = 0; i < instance->max_fw_cmds; i++) {
2130 			cmd_fusion = fusion->cmd_list[i];
2131 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2132 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2133 				if (cmd_mfi->sync_cmd &&
2134 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2135 					cmd_mfi->frame->hdr.cmd_status =
2136 							MFI_STAT_WRONG_STATE;
2137 					megasas_complete_cmd(instance,
2138 							     cmd_mfi, DID_OK);
2139 				}
2140 			}
2141 		}
2142 	} else {
2143 		for (i = 0; i < instance->max_fw_cmds; i++) {
2144 			cmd_mfi = instance->cmd_list[i];
2145 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2146 				MFI_CMD_ABORT)
2147 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2148 		}
2149 	}
2150 }
2151 
2152 
2153 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2154 {
2155 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2156 		dev_warn(&instance->pdev->dev,
2157 			 "Adapter already dead, skipping kill HBA\n");
2158 		return;
2159 	}
2160 
2161 	/* Set critical error to block I/O & ioctls in case caller didn't */
2162 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
	/* Wait 1 second to ensure IOs or ioctls being built have been posted */
2164 	msleep(1000);
2165 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2166 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2167 		(instance->adapter_type != MFI_SERIES)) {
2168 		if (!instance->requestorId) {
2169 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2170 			/* Flush */
2171 			readl(&instance->reg_set->doorbell);
2172 		}
2173 		if (instance->requestorId && instance->peerIsPresent)
2174 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2175 	} else {
2176 		writel(MFI_STOP_ADP,
2177 			&instance->reg_set->inbound_doorbell);
2178 	}
2179 	/* Complete outstanding ioctls when adapter is killed */
2180 	megasas_complete_outstanding_ioctls(instance);
2181 }
2182 
2183  /**
2184   * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2185   *					restored to max value
2186   * @instance:			Adapter soft state
2187   *
2188   */
2189 void
2190 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2191 {
2192 	unsigned long flags;
2193 
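	/*
	 * Restore can_queue only once the FW busy flag is set, at least
	 * five seconds have passed since throttling started, and the
	 * outstanding command count has dropped below the throttled depth.
	 */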
2194 	if (instance->flag & MEGASAS_FW_BUSY
2195 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2196 	    && atomic_read(&instance->fw_outstanding) <
2197 	    instance->throttlequeuedepth + 1) {
2198 
2199 		spin_lock_irqsave(instance->host->host_lock, flags);
2200 		instance->flag &= ~MEGASAS_FW_BUSY;
2201 
2202 		instance->host->can_queue = instance->cur_can_queue;
2203 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2204 	}
2205 }
2206 
2207 /**
2208  * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
2209  * @instance_addr:			Address of adapter soft state
2210  *
2211  * Tasklet to complete cmds
2212  */
2213 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2214 {
2215 	u32 producer;
2216 	u32 consumer;
2217 	u32 context;
2218 	struct megasas_cmd *cmd;
2219 	struct megasas_instance *instance =
2220 				(struct megasas_instance *)instance_addr;
2221 	unsigned long flags;
2222 
	/* If we have already declared adapter dead, do not complete cmds */
2224 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2225 		return;
2226 
2227 	spin_lock_irqsave(&instance->completion_lock, flags);
2228 
2229 	producer = le32_to_cpu(*instance->producer);
2230 	consumer = le32_to_cpu(*instance->consumer);
2231 
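	/*
	 * Walk the reply queue from consumer to producer; each entry holds
	 * the context (command index) of a completed frame.
	 */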
2232 	while (consumer != producer) {
2233 		context = le32_to_cpu(instance->reply_queue[consumer]);
2234 		if (context >= instance->max_fw_cmds) {
2235 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2236 				context);
2237 			BUG();
2238 		}
2239 
2240 		cmd = instance->cmd_list[context];
2241 
2242 		megasas_complete_cmd(instance, cmd, DID_OK);
2243 
2244 		consumer++;
2245 		if (consumer == (instance->max_fw_cmds + 1)) {
2246 			consumer = 0;
2247 		}
2248 	}
2249 
2250 	*instance->consumer = cpu_to_le32(producer);
2251 
2252 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2253 
2254 	/*
2255 	 * Check if we can restore can_queue
2256 	 */
2257 	megasas_check_and_restore_queue_depth(instance);
2258 }
2259 
2260 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2261 
2262 /**
2263  * megasas_start_timer - Initializes sriov heartbeat timer object
2264  * @instance:		Adapter soft state
2265  *
2266  */
2267 void megasas_start_timer(struct megasas_instance *instance)
2268 {
2269 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2270 
2271 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2272 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2273 	add_timer(timer);
2274 }
2275 
2276 static void
2277 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2278 
2279 static void
2280 process_fw_state_change_wq(struct work_struct *work);
2281 
2282 static void megasas_do_ocr(struct megasas_instance *instance)
2283 {
2284 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2285 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2286 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2287 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2288 	}
2289 	instance->instancet->disable_intr(instance);
2290 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2291 	instance->issuepend_done = 0;
2292 
2293 	atomic_set(&instance->fw_outstanding, 0);
2294 	megasas_internal_reset_defer_cmds(instance);
2295 	process_fw_state_change_wq(&instance->work_init);
2296 }
2297 
2298 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2299 					    int initial)
2300 {
2301 	struct megasas_cmd *cmd;
2302 	struct megasas_dcmd_frame *dcmd;
2303 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2304 	dma_addr_t new_affiliation_111_h;
2305 	int ld, retval = 0;
2306 	u8 thisVf;
2307 
2308 	cmd = megasas_get_cmd(instance);
2309 
2310 	if (!cmd) {
2311 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2312 		       "Failed to get cmd for scsi%d\n",
2313 			instance->host->host_no);
2314 		return -ENOMEM;
2315 	}
2316 
2317 	dcmd = &cmd->frame->dcmd;
2318 
2319 	if (!instance->vf_affiliation_111) {
2320 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2321 		       "affiliation for scsi%d\n", instance->host->host_no);
2322 		megasas_return_cmd(instance, cmd);
2323 		return -ENOMEM;
2324 	}
2325 
	if (initial)
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2329 	else {
2330 		new_affiliation_111 =
2331 			dma_alloc_coherent(&instance->pdev->dev,
2332 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2333 					   &new_affiliation_111_h, GFP_KERNEL);
2334 		if (!new_affiliation_111) {
2335 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2336 			       "memory for new affiliation for scsi%d\n",
2337 			       instance->host->host_no);
2338 			megasas_return_cmd(instance, cmd);
2339 			return -ENOMEM;
2340 		}
2341 	}
2342 
2343 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2344 
2345 	dcmd->cmd = MFI_CMD_DCMD;
2346 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2347 	dcmd->sge_count = 1;
2348 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2349 	dcmd->timeout = 0;
2350 	dcmd->pad_0 = 0;
2351 	dcmd->data_xfer_len =
2352 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2353 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2354 
2355 	if (initial)
2356 		dcmd->sgl.sge32[0].phys_addr =
2357 			cpu_to_le32(instance->vf_affiliation_111_h);
2358 	else
2359 		dcmd->sgl.sge32[0].phys_addr =
2360 			cpu_to_le32(new_affiliation_111_h);
2361 
2362 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2363 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2364 
2365 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2366 	       "scsi%d\n", instance->host->host_no);
2367 
2368 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2369 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2370 		       " failed with status 0x%x for scsi%d\n",
2371 		       dcmd->cmd_status, instance->host->host_no);
2372 		retval = 1; /* Do a scan if we couldn't get affiliation */
2373 		goto out;
2374 	}
2375 
2376 	if (!initial) {
2377 		thisVf = new_affiliation_111->thisVf;
2378 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2379 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2380 			    new_affiliation_111->map[ld].policy[thisVf]) {
2381 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2382 				       "Got new LD/VF affiliation for scsi%d\n",
2383 				       instance->host->host_no);
2384 				memcpy(instance->vf_affiliation_111,
2385 				       new_affiliation_111,
2386 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2387 				retval = 1;
2388 				goto out;
2389 			}
2390 	}
2391 out:
2392 	if (new_affiliation_111) {
2393 		dma_free_coherent(&instance->pdev->dev,
2394 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2395 				    new_affiliation_111,
2396 				    new_affiliation_111_h);
2397 	}
2398 
2399 	megasas_return_cmd(instance, cmd);
2400 
2401 	return retval;
2402 }
2403 
2404 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2405 					    int initial)
2406 {
2407 	struct megasas_cmd *cmd;
2408 	struct megasas_dcmd_frame *dcmd;
2409 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2410 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2411 	dma_addr_t new_affiliation_h;
2412 	int i, j, retval = 0, found = 0, doscan = 0;
2413 	u8 thisVf;
2414 
2415 	cmd = megasas_get_cmd(instance);
2416 
2417 	if (!cmd) {
2418 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2419 		       "Failed to get cmd for scsi%d\n",
2420 		       instance->host->host_no);
2421 		return -ENOMEM;
2422 	}
2423 
2424 	dcmd = &cmd->frame->dcmd;
2425 
2426 	if (!instance->vf_affiliation) {
2427 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2428 		       "affiliation for scsi%d\n", instance->host->host_no);
2429 		megasas_return_cmd(instance, cmd);
2430 		return -ENOMEM;
2431 	}
2432 
2433 	if (initial)
2434 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2435 		       sizeof(struct MR_LD_VF_AFFILIATION));
2436 	else {
2437 		new_affiliation =
2438 			dma_alloc_coherent(&instance->pdev->dev,
2439 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2440 					   &new_affiliation_h, GFP_KERNEL);
2441 		if (!new_affiliation) {
2442 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2443 			       "memory for new affiliation for scsi%d\n",
2444 			       instance->host->host_no);
2445 			megasas_return_cmd(instance, cmd);
2446 			return -ENOMEM;
2447 		}
2448 	}
2449 
2450 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2451 
2452 	dcmd->cmd = MFI_CMD_DCMD;
2453 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2454 	dcmd->sge_count = 1;
2455 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2456 	dcmd->timeout = 0;
2457 	dcmd->pad_0 = 0;
2458 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2459 		sizeof(struct MR_LD_VF_AFFILIATION));
2460 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2461 
2462 	if (initial)
2463 		dcmd->sgl.sge32[0].phys_addr =
2464 			cpu_to_le32(instance->vf_affiliation_h);
2465 	else
2466 		dcmd->sgl.sge32[0].phys_addr =
2467 			cpu_to_le32(new_affiliation_h);
2468 
2469 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2470 		sizeof(struct MR_LD_VF_AFFILIATION));
2471 
2472 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2473 	       "scsi%d\n", instance->host->host_no);
2474 
2475 
2476 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2477 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2478 		       " failed with status 0x%x for scsi%d\n",
2479 		       dcmd->cmd_status, instance->host->host_no);
2480 		retval = 1; /* Do a scan if we couldn't get affiliation */
2481 		goto out;
2482 	}
2483 
2484 	if (!initial) {
2485 		if (!new_affiliation->ldCount) {
2486 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2487 			       "affiliation for passive path for scsi%d\n",
2488 			       instance->host->host_no);
2489 			retval = 1;
2490 			goto out;
2491 		}
2492 		newmap = new_affiliation->map;
2493 		savedmap = instance->vf_affiliation->map;
2494 		thisVf = new_affiliation->thisVf;
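		/*
		 * First pass: compare every LD in the new map against the
		 * saved map, looking for policy changes or newly visible LDs.
		 */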
2495 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2496 			found = 0;
2497 			for (j = 0; j < instance->vf_affiliation->ldCount;
2498 			     j++) {
2499 				if (newmap->ref.targetId ==
2500 				    savedmap->ref.targetId) {
2501 					found = 1;
2502 					if (newmap->policy[thisVf] !=
2503 					    savedmap->policy[thisVf]) {
2504 						doscan = 1;
2505 						goto out;
2506 					}
2507 				}
2508 				savedmap = (struct MR_LD_VF_MAP *)
2509 					((unsigned char *)savedmap +
2510 					 savedmap->size);
2511 			}
2512 			if (!found && newmap->policy[thisVf] !=
2513 			    MR_LD_ACCESS_HIDDEN) {
2514 				doscan = 1;
2515 				goto out;
2516 			}
2517 			newmap = (struct MR_LD_VF_MAP *)
2518 				((unsigned char *)newmap + newmap->size);
2519 		}
2520 
2521 		newmap = new_affiliation->map;
2522 		savedmap = instance->vf_affiliation->map;
2523 
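		/*
		 * Second pass: compare every LD in the saved map against the
		 * new map, catching LDs that disappeared or changed policy.
		 */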
2524 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2525 			found = 0;
2526 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2527 				if (savedmap->ref.targetId ==
2528 				    newmap->ref.targetId) {
2529 					found = 1;
2530 					if (savedmap->policy[thisVf] !=
2531 					    newmap->policy[thisVf]) {
2532 						doscan = 1;
2533 						goto out;
2534 					}
2535 				}
2536 				newmap = (struct MR_LD_VF_MAP *)
2537 					((unsigned char *)newmap +
2538 					 newmap->size);
2539 			}
2540 			if (!found && savedmap->policy[thisVf] !=
2541 			    MR_LD_ACCESS_HIDDEN) {
2542 				doscan = 1;
2543 				goto out;
2544 			}
2545 			savedmap = (struct MR_LD_VF_MAP *)
2546 				((unsigned char *)savedmap +
2547 				 savedmap->size);
2548 		}
2549 	}
2550 out:
2551 	if (doscan) {
2552 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2553 		       "affiliation for scsi%d\n", instance->host->host_no);
2554 		memcpy(instance->vf_affiliation, new_affiliation,
2555 		       new_affiliation->size);
2556 		retval = 1;
2557 	}
2558 
2559 	if (new_affiliation)
2560 		dma_free_coherent(&instance->pdev->dev,
2561 				    (MAX_LOGICAL_DRIVES + 1) *
2562 				    sizeof(struct MR_LD_VF_AFFILIATION),
2563 				    new_affiliation, new_affiliation_h);
2564 	megasas_return_cmd(instance, cmd);
2565 
2566 	return retval;
2567 }
2568 
2569 /* This function will get the current SR-IOV LD/VF affiliation */
2570 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2571 	int initial)
2572 {
2573 	int retval;
2574 
2575 	if (instance->PlasmaFW111)
2576 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2577 	else
2578 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2579 	return retval;
2580 }
2581 
2582 /* This function will tell FW to start the SR-IOV heartbeat */
2583 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2584 					 int initial)
2585 {
2586 	struct megasas_cmd *cmd;
2587 	struct megasas_dcmd_frame *dcmd;
2588 	int retval = 0;
2589 
2590 	cmd = megasas_get_cmd(instance);
2591 
2592 	if (!cmd) {
2593 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2594 		       "Failed to get cmd for scsi%d\n",
2595 		       instance->host->host_no);
2596 		return -ENOMEM;
2597 	}
2598 
2599 	dcmd = &cmd->frame->dcmd;
2600 
2601 	if (initial) {
2602 		instance->hb_host_mem =
2603 			dma_alloc_coherent(&instance->pdev->dev,
2604 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2605 					   &instance->hb_host_mem_h,
2606 					   GFP_KERNEL);
2607 		if (!instance->hb_host_mem) {
2608 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2609 			       " memory for heartbeat host memory for scsi%d\n",
2610 			       instance->host->host_no);
2611 			retval = -ENOMEM;
2612 			goto out;
2613 		}
2614 	}
2615 
2616 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2617 
2618 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2619 	dcmd->cmd = MFI_CMD_DCMD;
2620 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2621 	dcmd->sge_count = 1;
2622 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2623 	dcmd->timeout = 0;
2624 	dcmd->pad_0 = 0;
2625 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2626 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2627 
2628 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2629 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2630 
2631 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2632 	       instance->host->host_no);
2633 
2634 	if ((instance->adapter_type != MFI_SERIES) &&
2635 	    !instance->mask_interrupts)
2636 		retval = megasas_issue_blocked_cmd(instance, cmd,
2637 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2638 	else
2639 		retval = megasas_issue_polled(instance, cmd);
2640 
2641 	if (retval) {
2642 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2643 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2644 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2645 			"timed out" : "failed", instance->host->host_no);
2646 		retval = 1;
2647 	}
2648 
2649 out:
2650 	megasas_return_cmd(instance, cmd);
2651 
2652 	return retval;
2653 }
2654 
2655 /* Handler for SR-IOV heartbeat */
2656 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2657 {
2658 	struct megasas_instance *instance =
2659 		from_timer(instance, t, sriov_heartbeat_timer);
2660 
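	/*
	 * If the FW heartbeat counter advanced since the last check, mirror
	 * it into driverCounter and re-arm the timer; otherwise treat the FW
	 * as unresponsive and schedule recovery work.
	 */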
2661 	if (instance->hb_host_mem->HB.fwCounter !=
2662 	    instance->hb_host_mem->HB.driverCounter) {
2663 		instance->hb_host_mem->HB.driverCounter =
2664 			instance->hb_host_mem->HB.fwCounter;
2665 		mod_timer(&instance->sriov_heartbeat_timer,
2666 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2667 	} else {
2668 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2669 		       "completed for scsi%d\n", instance->host->host_no);
2670 		schedule_work(&instance->work_init);
2671 	}
2672 }
2673 
2674 /**
2675  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2676  * @instance:				Adapter soft state
2677  *
 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for the FW to
 * complete all its outstanding commands. If IOs are still pending after the
 * wait and OCR retries, the controller is marked dead and FAILED is returned.
2681  */
2682 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2683 {
2684 	int i, sl, outstanding;
2685 	u32 reset_index;
2686 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2687 	unsigned long flags;
2688 	struct list_head clist_local;
2689 	struct megasas_cmd *reset_cmd;
2690 	u32 fw_state;
2691 
2692 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2693 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2694 		__func__, __LINE__);
2695 		return FAILED;
2696 	}
2697 
2698 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2699 
2700 		INIT_LIST_HEAD(&clist_local);
2701 		spin_lock_irqsave(&instance->hba_lock, flags);
2702 		list_splice_init(&instance->internal_reset_pending_q,
2703 				&clist_local);
2704 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2705 
2706 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2707 		for (i = 0; i < wait_time; i++) {
2708 			msleep(1000);
2709 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2710 				break;
2711 		}
2712 
2713 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2714 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2715 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2716 			return FAILED;
2717 		}
2718 
2719 		reset_index = 0;
2720 		while (!list_empty(&clist_local)) {
2721 			reset_cmd = list_entry((&clist_local)->next,
2722 						struct megasas_cmd, list);
2723 			list_del_init(&reset_cmd->list);
2724 			if (reset_cmd->scmd) {
2725 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2726 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2727 					reset_index, reset_cmd,
2728 					reset_cmd->scmd->cmnd[0]);
2729 
2730 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2731 				megasas_return_cmd(instance, reset_cmd);
2732 			} else if (reset_cmd->sync_cmd) {
2733 				dev_notice(&instance->pdev->dev, "%p synch cmds"
2734 						"reset queue\n",
2735 						reset_cmd);
2736 
2737 				reset_cmd->cmd_status_drv = DCMD_INIT;
2738 				instance->instancet->fire_cmd(instance,
2739 						reset_cmd->frame_phys_addr,
2740 						0, instance->reg_set);
2741 			} else {
2742 				dev_notice(&instance->pdev->dev, "%p unexpected"
2743 					"cmds lst\n",
2744 					reset_cmd);
2745 			}
2746 			reset_index++;
2747 		}
2748 
2749 		return SUCCESS;
2750 	}
2751 
2752 	for (i = 0; i < resetwaittime; i++) {
2753 		outstanding = atomic_read(&instance->fw_outstanding);
2754 
2755 		if (!outstanding)
2756 			break;
2757 
2758 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2759 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2760 			       "commands to complete\n",i,outstanding);
2761 			/*
2762 			 * Call cmd completion routine. Cmd to be
2763 			 * be completed directly without depending on isr.
2764 			 */
2765 			megasas_complete_cmd_dpc((unsigned long)instance);
2766 		}
2767 
2768 		msleep(1000);
2769 	}
2770 
2771 	i = 0;
2772 	outstanding = atomic_read(&instance->fw_outstanding);
2773 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2774 
2775 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2776 		goto no_outstanding;
2777 
2778 	if (instance->disableOnlineCtrlReset)
2779 		goto kill_hba_and_failed;
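	/*
	 * Attempt OCR up to three times while the FW is faulted or commands
	 * remain outstanding; after that, kill the HBA and fail.
	 */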
2780 	do {
2781 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2782 			dev_info(&instance->pdev->dev,
2783 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2784 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2785 			if (i == 3)
2786 				goto kill_hba_and_failed;
2787 			megasas_do_ocr(instance);
2788 
2789 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2790 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2791 				__func__, __LINE__);
2792 				return FAILED;
2793 			}
2794 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2795 				__func__, __LINE__);
2796 
2797 			for (sl = 0; sl < 10; sl++)
2798 				msleep(500);
2799 
2800 			outstanding = atomic_read(&instance->fw_outstanding);
2801 
2802 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2803 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2804 				goto no_outstanding;
2805 		}
2806 		i++;
2807 	} while (i <= 3);
2808 
2809 no_outstanding:
2810 
2811 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2812 		__func__, __LINE__);
2813 	return SUCCESS;
2814 
2815 kill_hba_and_failed:
2816 
2817 	/* Reset not supported, kill adapter */
2818 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2819 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2820 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2821 		atomic_read(&instance->fw_outstanding));
2822 	megasas_dump_pending_frames(instance);
2823 	megaraid_sas_kill_hba(instance);
2824 
2825 	return FAILED;
2826 }
2827 
2828 /**
2829  * megasas_generic_reset -	Generic reset routine
2830  * @scmd:			Mid-layer SCSI command
2831  *
2832  * This routine implements a generic reset handler for device, bus and host
2833  * reset requests. Device, bus and host specific reset handlers can use this
2834  * function after they do their specific tasks.
2835  */
2836 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2837 {
2838 	int ret_val;
2839 	struct megasas_instance *instance;
2840 
2841 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2842 
2843 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2844 		 scmd->cmnd[0], scmd->retries);
2845 
2846 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2847 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2848 		return FAILED;
2849 	}
2850 
2851 	ret_val = megasas_wait_for_outstanding(instance);
2852 	if (ret_val == SUCCESS)
2853 		dev_notice(&instance->pdev->dev, "reset successful\n");
2854 	else
2855 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2856 
2857 	return ret_val;
2858 }
2859 
2860 /**
2861  * megasas_reset_timer - quiesce the adapter if required
2862  * @scmd:		scsi cmnd
2863  *
2864  * Sets the FW busy flag and reduces the host->can_queue if the
2865  * cmd has not been completed within the timeout period.
2866  */
static enum blk_eh_timer_return
megasas_reset_timer(struct scsi_cmnd *scmd)
2869 {
2870 	struct megasas_instance *instance;
2871 	unsigned long flags;
2872 
2873 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2874 				(scmd_timeout * 2) * HZ)) {
2875 		return BLK_EH_DONE;
2876 	}
2877 
2878 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2879 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* Mark the FW busy and throttle further IO */
2881 		spin_lock_irqsave(instance->host->host_lock, flags);
2882 
2883 		instance->host->can_queue = instance->throttlequeuedepth;
2884 		instance->last_time = jiffies;
2885 		instance->flag |= MEGASAS_FW_BUSY;
2886 
2887 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2888 	}
2889 	return BLK_EH_RESET_TIMER;
2890 }
2891 
2892 /**
2893  * megasas_dump -	This function will print hexdump of provided buffer.
2894  * @buf:		Buffer to be dumped
2895  * @sz:		Size in bytes
2896  * @format:		Different formats of dumping e.g. format=n will
2897  *			cause only 'n' 32 bit words to be dumped in a single
2898  *			line.
2899  */
2900 inline void
2901 megasas_dump(void *buf, int sz, int format)
2902 {
2903 	int i;
2904 	__le32 *buf_loc = (__le32 *)buf;
2905 
2906 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2907 		if ((i % format) == 0) {
2908 			if (i != 0)
2909 				printk(KERN_CONT "\n");
2910 			printk(KERN_CONT "%08x: ", (i * 4));
2911 		}
2912 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2913 	}
2914 	printk(KERN_CONT "\n");
2915 }
2916 
2917 /**
2918  * megasas_dump_reg_set -	This function will print hexdump of register set
2919  * @buf:			Buffer to be dumped
2920  * @sz:				Size in bytes
2921  * @format:			Different formats of dumping e.g. format=n will
2922  *				cause only 'n' 32 bit words to be dumped in a
2923  *				single line.
2924  */
2925 inline void
2926 megasas_dump_reg_set(void __iomem *reg_set)
2927 {
2928 	unsigned int i, sz = 256;
2929 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2930 
2931 	for (i = 0; i < (sz / sizeof(u32)); i++)
2932 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2933 }
2934 
2935 /**
2936  * megasas_dump_fusion_io -	This function will print key details
2937  *				of SCSI IO
2938  * @scmd:			SCSI command pointer of SCSI IO
2939  */
2940 void
2941 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2942 {
2943 	struct megasas_cmd_fusion *cmd;
2944 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2945 	struct megasas_instance *instance;
2946 
2947 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2948 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2949 
2950 	scmd_printk(KERN_INFO, scmd,
2951 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2952 		    scmd, scmd->retries, scmd->allowed);
2953 	scsi_print_command(scmd);
2954 
2955 	if (cmd) {
2956 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2957 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2958 		scmd_printk(KERN_INFO, scmd,
2959 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
2960 			    req_desc->SCSIIO.RequestFlags,
2961 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2962 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2963 
2964 		printk(KERN_INFO "IO request frame:\n");
2965 		megasas_dump(cmd->io_request,
2966 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2967 		printk(KERN_INFO "Chain frame:\n");
2968 		megasas_dump(cmd->sg_frame,
2969 			     instance->max_chain_frame_sz, 8);
2970 	}
2971 
2972 }
2973 
2974 /*
2975  * megasas_dump_sys_regs - This function will dump system registers through
2976  *			    sysfs.
2977  * @reg_set:		    Pointer to System register set.
2978  * @buf:		    Buffer to which output is to be written.
2979  * @return:		    Number of bytes written to buffer.
2980  */
2981 static inline ssize_t
2982 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2983 {
2984 	unsigned int i, sz = 256;
2985 	int bytes_wrote = 0;
2986 	char *loc = (char *)buf;
2987 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2988 
2989 	for (i = 0; i < sz / sizeof(u32); i++) {
2990 		bytes_wrote += scnprintf(loc + bytes_wrote,
2991 					 PAGE_SIZE - bytes_wrote,
2992 					 "%08x: %08x\n", (i * 4),
2993 					 readl(&reg[i]));
2994 	}
2995 	return bytes_wrote;
2996 }
2997 
2998 /**
2999  * megasas_reset_bus_host -	Bus & host reset handler entry point
3000  */
3001 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3002 {
3003 	int ret;
3004 	struct megasas_instance *instance;
3005 
3006 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3007 
3008 	scmd_printk(KERN_INFO, scmd,
3009 		"OCR is requested due to IO timeout!!\n");
3010 
3011 	scmd_printk(KERN_INFO, scmd,
3012 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3013 		scmd->device->host->shost_state,
3014 		scsi_host_busy(scmd->device->host),
3015 		atomic_read(&instance->fw_outstanding));
3016 	/*
3017 	 * First wait for all commands to complete
3018 	 */
3019 	if (instance->adapter_type == MFI_SERIES) {
3020 		ret = megasas_generic_reset(scmd);
3021 	} else {
3022 		megasas_dump_fusion_io(scmd);
3023 		ret = megasas_reset_fusion(scmd->device->host,
3024 				SCSIIO_TIMEOUT_OCR);
3025 	}
3026 
3027 	return ret;
3028 }
3029 
3030 /**
3031  * megasas_task_abort - Issues task abort request to firmware
3032  *			(supported only for fusion adapters)
3033  * @scmd:		SCSI command pointer
3034  */
3035 static int megasas_task_abort(struct scsi_cmnd *scmd)
3036 {
3037 	int ret;
3038 	struct megasas_instance *instance;
3039 
3040 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3041 
3042 	if (instance->adapter_type != MFI_SERIES)
3043 		ret = megasas_task_abort_fusion(scmd);
3044 	else {
3045 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3046 		ret = FAILED;
3047 	}
3048 
3049 	return ret;
3050 }
3051 
3052 /**
 * megasas_reset_target -   Issues target reset request to firmware
3054  *                        (supported only for fusion adapters)
3055  * @scmd:                 SCSI command pointer
3056  */
3057 static int megasas_reset_target(struct scsi_cmnd *scmd)
3058 {
3059 	int ret;
3060 	struct megasas_instance *instance;
3061 
3062 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3063 
3064 	if (instance->adapter_type != MFI_SERIES)
3065 		ret = megasas_reset_target_fusion(scmd);
3066 	else {
3067 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3068 		ret = FAILED;
3069 	}
3070 
3071 	return ret;
3072 }
3073 
3074 /**
3075  * megasas_bios_param - Returns disk geometry for a disk
3076  * @sdev:		device handle
3077  * @bdev:		block device
3078  * @capacity:		drive capacity
3079  * @geom:		geometry parameters
3080  */
3081 static int
3082 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3083 		 sector_t capacity, int geom[])
3084 {
3085 	int heads;
3086 	int sectors;
3087 	sector_t cylinders;
3088 	unsigned long tmp;
3089 
3090 	/* Default heads (64) & sectors (32) */
3091 	heads = 64;
3092 	sectors = 32;
3093 
3094 	tmp = heads * sectors;
3095 	cylinders = capacity;
3096 
3097 	sector_div(cylinders, tmp);
3098 
3099 	/*
3100 	 * Handle extended translation size for logical drives > 1Gb
3101 	 */
3102 
3103 	if (capacity >= 0x200000) {
3104 		heads = 255;
3105 		sectors = 63;
3106 		tmp = heads*sectors;
3107 		cylinders = capacity;
3108 		sector_div(cylinders, tmp);
3109 	}
3110 
3111 	geom[0] = heads;
3112 	geom[1] = sectors;
3113 	geom[2] = cylinders;
3114 
3115 	return 0;
3116 }
3117 
3118 static void megasas_aen_polling(struct work_struct *work);
3119 
3120 /**
3121  * megasas_service_aen -	Processes an event notification
3122  * @instance:			Adapter soft state
3123  * @cmd:			AEN command completed by the ISR
3124  *
3125  * For AEN, driver sends a command down to FW that is held by the FW till an
3126  * event occurs. When an event of interest occurs, FW completes the command
3127  * that it was previously holding.
3128  *
 * This routine sends a SIGIO signal to processes that have registered with
 * the driver for AEN.
3131  */
3132 static void
3133 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3134 {
3135 	unsigned long flags;
3136 
3137 	/*
3138 	 * Don't signal app if it is just an aborted previously registered aen
3139 	 */
3140 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3141 		spin_lock_irqsave(&poll_aen_lock, flags);
3142 		megasas_poll_wait_aen = 1;
3143 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3144 		wake_up(&megasas_poll_wait);
3145 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3146 	}
3147 	else
3148 		cmd->abort_aen = 0;
3149 
3150 	instance->aen_cmd = NULL;
3151 
3152 	megasas_return_cmd(instance, cmd);
3153 
3154 	if ((instance->unload == 0) &&
3155 		((instance->issuepend_done == 1))) {
3156 		struct megasas_aen_event *ev;
3157 
3158 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3159 		if (!ev) {
3160 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3161 		} else {
3162 			ev->instance = instance;
3163 			instance->ev = ev;
3164 			INIT_DELAYED_WORK(&ev->hotplug_work,
3165 					  megasas_aen_polling);
3166 			schedule_delayed_work(&ev->hotplug_work, 0);
3167 		}
3168 	}
3169 }
3170 
3171 static ssize_t
3172 fw_crash_buffer_store(struct device *cdev,
3173 	struct device_attribute *attr, const char *buf, size_t count)
3174 {
3175 	struct Scsi_Host *shost = class_to_shost(cdev);
3176 	struct megasas_instance *instance =
3177 		(struct megasas_instance *) shost->hostdata;
3178 	int val = 0;
3179 	unsigned long flags;
3180 
3181 	if (kstrtoint(buf, 0, &val) != 0)
3182 		return -EINVAL;
3183 
3184 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3185 	instance->fw_crash_buffer_offset = val;
3186 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3187 	return strlen(buf);
3188 }
3189 
3190 static ssize_t
3191 fw_crash_buffer_show(struct device *cdev,
3192 	struct device_attribute *attr, char *buf)
3193 {
3194 	struct Scsi_Host *shost = class_to_shost(cdev);
3195 	struct megasas_instance *instance =
3196 		(struct megasas_instance *) shost->hostdata;
3197 	u32 size;
3198 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3199 	unsigned long chunk_left_bytes;
3200 	unsigned long src_addr;
3201 	unsigned long flags;
3202 	u32 buff_offset;
3203 
3204 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3205 	buff_offset = instance->fw_crash_buffer_offset;
3206 	if (!instance->crash_dump_buf &&
3207 		!((instance->fw_crash_state == AVAILABLE) ||
3208 		(instance->fw_crash_state == COPYING))) {
3209 		dev_err(&instance->pdev->dev,
3210 			"Firmware crash dump is not available\n");
3211 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3212 		return -EINVAL;
3213 	}
3214 
3215 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3216 		dev_err(&instance->pdev->dev,
3217 			"Firmware crash dump offset is out of range\n");
3218 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3219 		return 0;
3220 	}
3221 
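	/*
	 * The crash dump is stored in CRASH_DMA_BUF_SIZE chunks; copy at
	 * most the remainder of the current chunk, capped below PAGE_SIZE,
	 * starting at the requested offset.
	 */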
3222 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3223 	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3224 	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3225 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3226 
3227 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3228 		(buff_offset % dmachunk);
3229 	memcpy(buf, (void *)src_addr, size);
3230 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3231 
3232 	return size;
3233 }
3234 
3235 static ssize_t
3236 fw_crash_buffer_size_show(struct device *cdev,
3237 	struct device_attribute *attr, char *buf)
3238 {
3239 	struct Scsi_Host *shost = class_to_shost(cdev);
3240 	struct megasas_instance *instance =
3241 		(struct megasas_instance *) shost->hostdata;
3242 
3243 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3244 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3245 }
3246 
3247 static ssize_t
3248 fw_crash_state_store(struct device *cdev,
3249 	struct device_attribute *attr, const char *buf, size_t count)
3250 {
3251 	struct Scsi_Host *shost = class_to_shost(cdev);
3252 	struct megasas_instance *instance =
3253 		(struct megasas_instance *) shost->hostdata;
3254 	int val = 0;
3255 	unsigned long flags;
3256 
3257 	if (kstrtoint(buf, 0, &val) != 0)
3258 		return -EINVAL;
3259 
3260 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3261 		dev_err(&instance->pdev->dev, "application updates invalid "
3262 			"firmware crash state\n");
3263 		return -EINVAL;
3264 	}
3265 
3266 	instance->fw_crash_state = val;
3267 
3268 	if ((val == COPIED) || (val == COPY_ERROR)) {
3269 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3270 		megasas_free_host_crash_buffer(instance);
3271 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3272 		if (val == COPY_ERROR)
3273 			dev_info(&instance->pdev->dev, "application failed to "
3274 				"copy Firmware crash dump\n");
3275 		else
3276 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3277 				"copied successfully\n");
3278 	}
3279 	return strlen(buf);
3280 }
3281 
3282 static ssize_t
3283 fw_crash_state_show(struct device *cdev,
3284 	struct device_attribute *attr, char *buf)
3285 {
3286 	struct Scsi_Host *shost = class_to_shost(cdev);
3287 	struct megasas_instance *instance =
3288 		(struct megasas_instance *) shost->hostdata;
3289 
3290 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3291 }
3292 
3293 static ssize_t
3294 page_size_show(struct device *cdev,
3295 	struct device_attribute *attr, char *buf)
3296 {
3297 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3298 }
3299 
3300 static ssize_t
3301 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3302 	char *buf)
3303 {
3304 	struct Scsi_Host *shost = class_to_shost(cdev);
3305 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3306 
3307 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3308 }
3309 
3310 static ssize_t
3311 fw_cmds_outstanding_show(struct device *cdev,
3312 				 struct device_attribute *attr, char *buf)
3313 {
3314 	struct Scsi_Host *shost = class_to_shost(cdev);
3315 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3316 
3317 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3318 }
3319 
3320 static ssize_t
3321 enable_sdev_max_qd_show(struct device *cdev,
3322 	struct device_attribute *attr, char *buf)
3323 {
3324 	struct Scsi_Host *shost = class_to_shost(cdev);
3325 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3326 
3327 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3328 }
3329 
3330 static ssize_t
3331 enable_sdev_max_qd_store(struct device *cdev,
3332 	struct device_attribute *attr, const char *buf, size_t count)
3333 {
3334 	struct Scsi_Host *shost = class_to_shost(cdev);
3335 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3336 	u32 val = 0;
3337 	bool is_target_prop;
3338 	int ret_target_prop = DCMD_FAILED;
3339 	struct scsi_device *sdev;
3340 
3341 	if (kstrtou32(buf, 0, &val) != 0) {
3342 		pr_err("megasas: could not set enable_sdev_max_qd\n");
3343 		return -EINVAL;
3344 	}
3345 
3346 	mutex_lock(&instance->reset_mutex);
3347 	if (val)
3348 		instance->enable_sdev_max_qd = true;
3349 	else
3350 		instance->enable_sdev_max_qd = false;
3351 
3352 	shost_for_each_device(sdev, shost) {
3353 		ret_target_prop = megasas_get_target_prop(instance, sdev);
3354 		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3355 		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3356 	}
3357 	mutex_unlock(&instance->reset_mutex);
3358 
3359 	return strlen(buf);
3360 }
3361 
3362 static ssize_t
3363 dump_system_regs_show(struct device *cdev,
3364 			       struct device_attribute *attr, char *buf)
3365 {
3366 	struct Scsi_Host *shost = class_to_shost(cdev);
3367 	struct megasas_instance *instance =
3368 			(struct megasas_instance *)shost->hostdata;
3369 
3370 	return megasas_dump_sys_regs(instance->reg_set, buf);
3371 }
3372 
3373 static ssize_t
3374 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3375 			  char *buf)
3376 {
3377 	struct Scsi_Host *shost = class_to_shost(cdev);
3378 	struct megasas_instance *instance =
3379 			(struct megasas_instance *)shost->hostdata;
3380 
3381 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3382 			(unsigned long)instance->map_id);
3383 }
3384 
3385 static DEVICE_ATTR_RW(fw_crash_buffer);
3386 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3387 static DEVICE_ATTR_RW(fw_crash_state);
3388 static DEVICE_ATTR_RO(page_size);
3389 static DEVICE_ATTR_RO(ldio_outstanding);
3390 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3391 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3392 static DEVICE_ATTR_RO(dump_system_regs);
3393 static DEVICE_ATTR_RO(raid_map_id);
3394 
3395 static struct device_attribute *megaraid_host_attrs[] = {
3396 	&dev_attr_fw_crash_buffer_size,
3397 	&dev_attr_fw_crash_buffer,
3398 	&dev_attr_fw_crash_state,
3399 	&dev_attr_page_size,
3400 	&dev_attr_ldio_outstanding,
3401 	&dev_attr_fw_cmds_outstanding,
3402 	&dev_attr_enable_sdev_max_qd,
3403 	&dev_attr_dump_system_regs,
3404 	&dev_attr_raid_map_id,
3405 	NULL,
3406 };
3407 
3408 /*
3409  * Scsi host template for megaraid_sas driver
3410  */
3411 static struct scsi_host_template megasas_template = {
3412 
3413 	.module = THIS_MODULE,
3414 	.name = "Avago SAS based MegaRAID driver",
3415 	.proc_name = "megaraid_sas",
3416 	.slave_configure = megasas_slave_configure,
3417 	.slave_alloc = megasas_slave_alloc,
3418 	.slave_destroy = megasas_slave_destroy,
3419 	.queuecommand = megasas_queue_command,
3420 	.eh_target_reset_handler = megasas_reset_target,
3421 	.eh_abort_handler = megasas_task_abort,
3422 	.eh_host_reset_handler = megasas_reset_bus_host,
3423 	.eh_timed_out = megasas_reset_timer,
3424 	.shost_attrs = megaraid_host_attrs,
3425 	.bios_param = megasas_bios_param,
3426 	.change_queue_depth = scsi_change_queue_depth,
3427 	.max_segment_size = 0xffffffff,
3428 };
3429 
3430 /**
3431  * megasas_complete_int_cmd -	Completes an internal command
3432  * @instance:			Adapter soft state
3433  * @cmd:			Command to be completed
3434  *
 * megasas_issue_blocked_cmd() sleeps on int_cmd_wait_q after issuing a
 * command. This function records the completion status and wakes up that
 * waiting routine by calling wake_up() on the wait queue.
3438  */
3439 static void
3440 megasas_complete_int_cmd(struct megasas_instance *instance,
3441 			 struct megasas_cmd *cmd)
3442 {
	if (cmd->cmd_status_drv == DCMD_INIT)
		cmd->cmd_status_drv =
			(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
			DCMD_SUCCESS : DCMD_FAILED;
3447 
3448 	wake_up(&instance->int_cmd_wait_q);
3449 }
3450 
3451 /**
3452  * megasas_complete_abort -	Completes aborting a command
3453  * @instance:			Adapter soft state
3454  * @cmd:			Cmd that was issued to abort another cmd
3455  *
 * megasas_issue_blocked_abort_cmd() waits on abort_cmd_wait_q after it
 * issues an abort on a previously issued command. This function wakes up
 * that waiter by calling wake_up() on the wait queue.
3459  */
3460 static void
3461 megasas_complete_abort(struct megasas_instance *instance,
3462 		       struct megasas_cmd *cmd)
3463 {
3464 	if (cmd->sync_cmd) {
3465 		cmd->sync_cmd = 0;
3466 		cmd->cmd_status_drv = DCMD_SUCCESS;
3467 		wake_up(&instance->abort_cmd_wait_q);
3468 	}
3469 }
3470 
3471 /**
3472  * megasas_complete_cmd -	Completes a command
3473  * @instance:			Adapter soft state
3474  * @cmd:			Command to be completed
3475  * @alt_status:			If non-zero, use this value as status to
3476  *				SCSI mid-layer instead of the value returned
3477  *				by the FW. This should be used if caller wants
3478  *				an alternate status (as in the case of aborted
3479  *				commands)
3480  */
3481 void
3482 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3483 		     u8 alt_status)
3484 {
3485 	int exception = 0;
3486 	struct megasas_header *hdr = &cmd->frame->hdr;
3487 	unsigned long flags;
3488 	struct fusion_context *fusion = instance->ctrl_context;
3489 	u32 opcode, status;
3490 
3491 	/* flag for the retry reset */
3492 	cmd->retry_for_fw_reset = 0;
3493 
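	/* Drop the back-reference from the SCSI command to this MFI cmd */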
3494 	if (cmd->scmd)
3495 		cmd->scmd->SCp.ptr = NULL;
3496 
3497 	switch (hdr->cmd) {
3498 	case MFI_CMD_INVALID:
		/*
		 * Some older 1068 controller FW may keep a pending
		 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		 * when booting the kdump kernel.  Ignore this command to
		 * prevent a kernel panic on shutdown of the kdump kernel.
		 */
3503 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3504 		       "completed\n");
3505 		dev_warn(&instance->pdev->dev, "If you have a controller "
3506 		       "other than PERC5, please upgrade your firmware\n");
3507 		break;
3508 	case MFI_CMD_PD_SCSI_IO:
3509 	case MFI_CMD_LD_SCSI_IO:
3510 
3511 		/*
3512 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3513 		 * issued either through an IO path or an IOCTL path. If it
3514 		 * was via IOCTL, we will send it to internal completion.
3515 		 */
3516 		if (cmd->sync_cmd) {
3517 			cmd->sync_cmd = 0;
3518 			megasas_complete_int_cmd(instance, cmd);
3519 			break;
3520 		}
3521 		/* fall through */
3522 
3523 	case MFI_CMD_LD_READ:
3524 	case MFI_CMD_LD_WRITE:
3525 
3526 		if (alt_status) {
3527 			cmd->scmd->result = alt_status << 16;
3528 			exception = 1;
3529 		}
3530 
3531 		if (exception) {
3532 
3533 			atomic_dec(&instance->fw_outstanding);
3534 
3535 			scsi_dma_unmap(cmd->scmd);
3536 			cmd->scmd->scsi_done(cmd->scmd);
3537 			megasas_return_cmd(instance, cmd);
3538 
3539 			break;
3540 		}
3541 
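		/* Translate the MFI completion status into a SCSI result */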
3542 		switch (hdr->cmd_status) {
3543 
3544 		case MFI_STAT_OK:
3545 			cmd->scmd->result = DID_OK << 16;
3546 			break;
3547 
3548 		case MFI_STAT_SCSI_IO_FAILED:
3549 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3550 			cmd->scmd->result =
3551 			    (DID_ERROR << 16) | hdr->scsi_status;
3552 			break;
3553 
3554 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3555 
3556 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3557 
3558 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3559 				memset(cmd->scmd->sense_buffer, 0,
3560 				       SCSI_SENSE_BUFFERSIZE);
3561 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3562 				       hdr->sense_len);
3563 
3564 				cmd->scmd->result |= DRIVER_SENSE << 24;
3565 			}
3566 
3567 			break;
3568 
3569 		case MFI_STAT_LD_OFFLINE:
3570 		case MFI_STAT_DEVICE_NOT_FOUND:
3571 			cmd->scmd->result = DID_BAD_TARGET << 16;
3572 			break;
3573 
3574 		default:
3575 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3576 			       hdr->cmd_status);
3577 			cmd->scmd->result = DID_ERROR << 16;
3578 			break;
3579 		}
3580 
3581 		atomic_dec(&instance->fw_outstanding);
3582 
3583 		scsi_dma_unmap(cmd->scmd);
3584 		cmd->scmd->scsi_done(cmd->scmd);
3585 		megasas_return_cmd(instance, cmd);
3586 
3587 		break;
3588 
3589 	case MFI_CMD_SMP:
3590 	case MFI_CMD_STP:
3591 	case MFI_CMD_NVME:
3592 	case MFI_CMD_TOOLBOX:
3593 		megasas_complete_int_cmd(instance, cmd);
3594 		break;
3595 
3596 	case MFI_CMD_DCMD:
3597 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3598 		/* Check for LD map update */
3599 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3600 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3601 			fusion->fast_path_io = 0;
3602 			spin_lock_irqsave(instance->host->host_lock, flags);
3603 			status = cmd->frame->hdr.cmd_status;
3604 			instance->map_update_cmd = NULL;
3605 			if (status != MFI_STAT_OK) {
3606 				if (status != MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3608 					       cmd->frame->hdr.cmd_status);
3609 				else {
3610 					megasas_return_cmd(instance, cmd);
3611 					spin_unlock_irqrestore(
3612 						instance->host->host_lock,
3613 						flags);
3614 					break;
3615 				}
3616 			}
3617 
3618 			megasas_return_cmd(instance, cmd);
3619 
			/*
			 * Keep fast path IO disabled until
			 * MR_ValidateMapInfo() accepts the new map;
			 * meanwhile all IOs go through the LD path.
			 */
3625 			if (status == MFI_STAT_OK &&
3626 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3627 				instance->map_id++;
3628 				fusion->fast_path_io = 1;
3629 			} else {
3630 				fusion->fast_path_io = 0;
3631 			}
3632 
3633 			megasas_sync_map_info(instance);
3634 			spin_unlock_irqrestore(instance->host->host_lock,
3635 					       flags);
3636 			break;
3637 		}
3638 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3639 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3640 			spin_lock_irqsave(&poll_aen_lock, flags);
3641 			megasas_poll_wait_aen = 0;
3642 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3643 		}
3644 
3645 		/* FW has an updated PD sequence */
3646 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3647 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3648 
3649 			spin_lock_irqsave(instance->host->host_lock, flags);
3650 			status = cmd->frame->hdr.cmd_status;
3651 			instance->jbod_seq_cmd = NULL;
3652 			megasas_return_cmd(instance, cmd);
3653 
3654 			if (status == MFI_STAT_OK) {
3655 				instance->pd_seq_map_id++;
3656 				/* Re-register a pd sync seq num cmd */
3657 				if (megasas_sync_pd_seq_num(instance, true))
3658 					instance->use_seqnum_jbod_fp = false;
3659 			} else
3660 				instance->use_seqnum_jbod_fp = false;
3661 
3662 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3663 			break;
3664 		}
3665 
		/*
		 * Check whether this is an event notification (AEN)
		 */
3669 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3670 			megasas_service_aen(instance, cmd);
3671 		else
3672 			megasas_complete_int_cmd(instance, cmd);
3673 
3674 		break;
3675 
3676 	case MFI_CMD_ABORT:
3677 		/*
3678 		 * Cmd issued to abort another cmd returned
3679 		 */
3680 		megasas_complete_abort(instance, cmd);
3681 		break;
3682 
3683 	default:
3684 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3685 		       hdr->cmd);
3686 		megasas_complete_int_cmd(instance, cmd);
3687 		break;
3688 	}
3689 }
3690 
3691 /**
 * megasas_issue_pending_cmds_again -	re-issue the cmds that were pending
 *					in FW at the time of the FW reset
3694  * @instance:				Adapter soft state
3695  */
3696 static inline void
3697 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3698 {
3699 	struct megasas_cmd *cmd;
3700 	struct list_head clist_local;
3701 	union megasas_evt_class_locale class_locale;
3702 	unsigned long flags;
3703 	u32 seq_num;
3704 
3705 	INIT_LIST_HEAD(&clist_local);
3706 	spin_lock_irqsave(&instance->hba_lock, flags);
3707 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3708 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3709 
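	/*
	 * Re-fire each deferred command.  A command that is still found
	 * pending after three reset attempts indicates an unrecoverable
	 * adapter, so the HBA is taken offline.
	 */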
3710 	while (!list_empty(&clist_local)) {
3711 		cmd = list_entry((&clist_local)->next,
3712 					struct megasas_cmd, list);
3713 		list_del_init(&cmd->list);
3714 
3715 		if (cmd->sync_cmd || cmd->scmd) {
			dev_notice(&instance->pdev->dev,
				   "command %p, %p:%d detected to be pending during HBA reset\n",
				   cmd, cmd->scmd, cmd->sync_cmd);
3719 
3720 			cmd->retry_for_fw_reset++;
3721 
3722 			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev,
					   "cmd %p, %p:%d was tried multiple times during reset. Shutting down the HBA\n",
					   cmd, cmd->scmd, cmd->sync_cmd);
3727 				instance->instancet->disable_intr(instance);
3728 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3729 				megaraid_sas_kill_hba(instance);
3730 				return;
3731 			}
3732 		}
3733 
3734 		if (cmd->sync_cmd == 1) {
3735 			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev,
					   "unexpected cmd attached to internal command!\n");
3738 			}
			dev_notice(&instance->pdev->dev,
				   "%p synchronous cmd on the internal reset queue, issue it again.\n",
				   cmd);
3742 			cmd->cmd_status_drv = DCMD_INIT;
3743 			instance->instancet->fire_cmd(instance,
3744 							cmd->frame_phys_addr,
3745 							0, instance->reg_set);
3746 		} else if (cmd->scmd) {
			dev_notice(&instance->pdev->dev,
				   "%p scsi cmd [%02x] detected on the internal queue, issue again.\n",
				   cmd, cmd->scmd->cmnd[0]);
3750 
3751 			atomic_inc(&instance->fw_outstanding);
3752 			instance->instancet->fire_cmd(instance,
3753 					cmd->frame_phys_addr,
3754 					cmd->frame_count-1, instance->reg_set);
3755 		} else {
			dev_notice(&instance->pdev->dev,
				   "%p unexpected cmd on the internal reset defer list during re-issue\n",
				   cmd);
3759 		}
3760 	}
3761 
3762 	if (instance->aen_cmd) {
3763 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3764 		megasas_return_cmd(instance, instance->aen_cmd);
3765 
3766 		instance->aen_cmd = NULL;
3767 	}
3768 
3769 	/*
3770 	 * Initiate AEN (Asynchronous Event Notification)
3771 	 */
3772 	seq_num = instance->last_seq_num;
3773 	class_locale.members.reserved = 0;
3774 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3775 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3776 
3777 	megasas_register_aen(instance, seq_num, class_locale.word);
3778 }
3779 
/**
 * megasas_internal_reset_defer_cmds -	Move pending cmds to a deferred queue
 * @instance:				Adapter soft state
 *
 * Commands outstanding at the time of an internal reset are moved to a
 * deferred queue. This queue is flushed after the internal reset sequence
 * completes successfully. If the internal reset does not complete in time,
 * the kernel reset handler flushes these commands.
 */
3789 static void
3790 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3791 {
3792 	struct megasas_cmd *cmd;
3793 	int i;
3794 	u16 max_cmd = instance->max_fw_cmds;
3795 	u32 defer_index;
3796 	unsigned long flags;
3797 
3798 	defer_index = 0;
3799 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3800 	for (i = 0; i < max_cmd; i++) {
3801 		cmd = instance->cmd_list[i];
3802 		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev,
				   "moving cmd[%d]:%p:%d:%p to the defer queue as internal\n",
				   defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3806 
3807 			if (!list_empty(&cmd->list)) {
				dev_notice(&instance->pdev->dev,
					   "ERROR while moving this cmd:%p, %d %p, it was discovered on some list?\n",
					   cmd, cmd->sync_cmd, cmd->scmd);
3812 
3813 				list_del_init(&cmd->list);
3814 			}
3815 			defer_index++;
3816 			list_add_tail(&cmd->list,
3817 				&instance->internal_reset_pending_q);
3818 		}
3819 	}
3820 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3821 }
3822 
3823 
3824 static void
3825 process_fw_state_change_wq(struct work_struct *work)
3826 {
3827 	struct megasas_instance *instance =
3828 		container_of(work, struct megasas_instance, work_init);
3829 	u32 wait;
3830 	unsigned long flags;
3831 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return;
	}
3837 
3838 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev,
			   "FW detected to be in fault state, restarting it...\n");
3841 
3842 		instance->instancet->disable_intr(instance);
3843 		atomic_set(&instance->fw_outstanding, 0);
3844 
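		/*
		 * Block PCI register access from other contexts while the
		 * adapter reset sequence is being driven.
		 */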
3845 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3846 		instance->instancet->adp_reset(instance, instance->reg_set);
3847 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3848 
		dev_notice(&instance->pdev->dev,
			   "FW restarted successfully, initiating next stage...\n");

		dev_notice(&instance->pdev->dev,
			   "HBA recovery state machine, state 2 starting...\n");
3854 
		/* wait about 30 seconds before starting the second init */
		for (wait = 0; wait < 30; wait++)
			msleep(1000);
3859 
3860 		if (megasas_transition_to_ready(instance, 1)) {
3861 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3862 
3863 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3864 			megaraid_sas_kill_hba(instance);
			return;
3866 		}
3867 
3868 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3869 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3870 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3871 			) {
3872 			*instance->consumer = *instance->producer;
3873 		} else {
3874 			*instance->consumer = 0;
3875 			*instance->producer = 0;
3876 		}
3877 
3878 		megasas_issue_init_mfi(instance);
3879 
3880 		spin_lock_irqsave(&instance->hba_lock, flags);
3881 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3882 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3883 		instance->instancet->enable_intr(instance);
3884 
3885 		megasas_issue_pending_cmds_again(instance);
3886 		instance->issuepend_done = 1;
3887 	}
3888 }
3889 
3890 /**
3891  * megasas_deplete_reply_queue -	Processes all completed commands
3892  * @instance:				Adapter soft state
3893  * @alt_status:				Alternate status to be returned to
3894  *					SCSI mid-layer instead of the status
3895  *					returned by the FW
3896  * Note: this must be called with hba lock held
3897  */
3898 static int
3899 megasas_deplete_reply_queue(struct megasas_instance *instance,
3900 					u8 alt_status)
3901 {
3902 	u32 mfiStatus;
3903 	u32 fw_state;
3904 
3905 	if ((mfiStatus = instance->instancet->check_reset(instance,
3906 					instance->reg_set)) == 1) {
3907 		return IRQ_HANDLED;
3908 	}
3909 
3910 	mfiStatus = instance->instancet->clear_intr(instance);
3911 	if (mfiStatus == 0) {
3912 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3913 		if (!instance->msix_vectors)
3914 			return IRQ_NONE;
3915 	}
3916 
3917 	instance->mfiStatus = mfiStatus;
3918 
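	/*
	 * A firmware state change with the FW in FAULT kicks off the
	 * adapter reset state machine via the work_init worker.
	 */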
3919 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3920 		fw_state = instance->instancet->read_fw_status_reg(
3921 				instance) & MFI_STATE_MASK;
3922 
3923 		if (fw_state != MFI_STATE_FAULT) {
3924 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3925 						fw_state);
3926 		}
3927 
3928 		if ((fw_state == MFI_STATE_FAULT) &&
3929 				(instance->disableOnlineCtrlReset == 0)) {
3930 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3931 
3932 			if ((instance->pdev->device ==
3933 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3934 				(instance->pdev->device ==
3935 					PCI_DEVICE_ID_DELL_PERC5) ||
3936 				(instance->pdev->device ==
3937 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3938 
3939 				*instance->consumer =
3940 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3941 			}
3942 
3943 
3944 			instance->instancet->disable_intr(instance);
3945 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3946 			instance->issuepend_done = 0;
3947 
3948 			atomic_set(&instance->fw_outstanding, 0);
3949 			megasas_internal_reset_defer_cmds(instance);
3950 
3951 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3952 					fw_state, atomic_read(&instance->adprecovery));
3953 
3954 			schedule_work(&instance->work_init);
3955 			return IRQ_HANDLED;
3956 
3957 		} else {
3958 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3959 				fw_state, instance->disableOnlineCtrlReset);
3960 		}
3961 	}
3962 
3963 	tasklet_schedule(&instance->isr_tasklet);
3964 	return IRQ_HANDLED;
3965 }
/**
 * megasas_isr - isr entry point
 * @irq:	IRQ number
 * @devp:	IRQ context (struct megasas_irq_context *)
 */
3969 static irqreturn_t megasas_isr(int irq, void *devp)
3970 {
3971 	struct megasas_irq_context *irq_context = devp;
3972 	struct megasas_instance *instance = irq_context->instance;
3973 	unsigned long flags;
3974 	irqreturn_t rc;
3975 
3976 	if (atomic_read(&instance->fw_reset_no_pci_access))
3977 		return IRQ_HANDLED;
3978 
3979 	spin_lock_irqsave(&instance->hba_lock, flags);
3980 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3981 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3982 
3983 	return rc;
3984 }
3985 
/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Set when called from the OCR path
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in an operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it just has to wait for the FW to reach the ready state.
 */
3995 int
3996 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3997 {
3998 	int i;
3999 	u8 max_wait;
4000 	u32 fw_state;
4001 	u32 abs_state, curr_abs_state;
4002 
4003 	abs_state = instance->instancet->read_fw_status_reg(instance);
4004 	fw_state = abs_state & MFI_STATE_MASK;
4005 
4006 	if (fw_state != MFI_STATE_READY)
4007 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4008 		       " state\n");
4009 
4010 	while (fw_state != MFI_STATE_READY) {
4011 
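		/*
		 * Each state below determines the doorbell write (if any)
		 * needed to nudge the FW forward and how long to wait for
		 * the FW to leave that state.
		 */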
4012 		switch (fw_state) {
4013 
4014 		case MFI_STATE_FAULT:
4015 			dev_printk(KERN_ERR, &instance->pdev->dev,
4016 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4017 				   abs_state & MFI_STATE_FAULT_CODE,
4018 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4019 			if (ocr) {
4020 				max_wait = MEGASAS_RESET_WAIT_TIME;
4021 				break;
4022 			} else {
4023 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4024 				megasas_dump_reg_set(instance->reg_set);
4025 				return -ENODEV;
4026 			}
4027 
4028 		case MFI_STATE_WAIT_HANDSHAKE:
4029 			/*
4030 			 * Set the CLR bit in inbound doorbell
4031 			 */
4032 			if ((instance->pdev->device ==
4033 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4034 				(instance->pdev->device ==
4035 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4036 				(instance->adapter_type != MFI_SERIES))
4037 				writel(
4038 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4039 				  &instance->reg_set->doorbell);
4040 			else
4041 				writel(
4042 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4043 					&instance->reg_set->inbound_doorbell);
4044 
4045 			max_wait = MEGASAS_RESET_WAIT_TIME;
4046 			break;
4047 
4048 		case MFI_STATE_BOOT_MESSAGE_PENDING:
4049 			if ((instance->pdev->device ==
4050 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4051 				(instance->pdev->device ==
4052 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4053 				(instance->adapter_type != MFI_SERIES))
4054 				writel(MFI_INIT_HOTPLUG,
4055 				       &instance->reg_set->doorbell);
4056 			else
4057 				writel(MFI_INIT_HOTPLUG,
4058 					&instance->reg_set->inbound_doorbell);
4059 
4060 			max_wait = MEGASAS_RESET_WAIT_TIME;
4061 			break;
4062 
4063 		case MFI_STATE_OPERATIONAL:
4064 			/*
4065 			 * Bring it to READY state; assuming max wait 10 secs
4066 			 */
4067 			instance->instancet->disable_intr(instance);
4068 			if ((instance->pdev->device ==
4069 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4070 				(instance->pdev->device ==
4071 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4072 				(instance->adapter_type != MFI_SERIES)) {
4073 				writel(MFI_RESET_FLAGS,
4074 					&instance->reg_set->doorbell);
4075 
4076 				if (instance->adapter_type != MFI_SERIES) {
4077 					for (i = 0; i < (10 * 1000); i += 20) {
4078 						if (megasas_readl(
4079 							    instance,
4080 							    &instance->
4081 							    reg_set->
4082 							    doorbell) & 1)
4083 							msleep(20);
4084 						else
4085 							break;
4086 					}
4087 				}
4088 			} else
4089 				writel(MFI_RESET_FLAGS,
4090 					&instance->reg_set->inbound_doorbell);
4091 
4092 			max_wait = MEGASAS_RESET_WAIT_TIME;
4093 			break;
4094 
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
		case MFI_STATE_FW_INIT:
		case MFI_STATE_FW_INIT_2:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			/*
			 * Transient states; simply wait for the FW to move on
			 */
			max_wait = MEGASAS_RESET_WAIT_TIME;
			break;
4121 
4122 		default:
4123 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4124 			       fw_state);
4125 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4126 			megasas_dump_reg_set(instance->reg_set);
4127 			return -ENODEV;
4128 		}
4129 
4130 		/*
4131 		 * The cur_state should not last for more than max_wait secs
4132 		 */
		for (i = 0; i < max_wait * 50; i++) {
			curr_abs_state = instance->instancet->
				read_fw_status_reg(instance);

			if (abs_state != curr_abs_state)
				break;

			msleep(20);
		}
4142 
4143 		/*
4144 		 * Return error if fw_state hasn't changed after max_wait
4145 		 */
4146 		if (curr_abs_state == abs_state) {
4147 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4148 			       "in %d secs\n", fw_state, max_wait);
4149 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4150 			megasas_dump_reg_set(instance->reg_set);
4151 			return -ENODEV;
4152 		}
4153 
4154 		abs_state = curr_abs_state;
4155 		fw_state = curr_abs_state & MFI_STATE_MASK;
4156 	}
4157 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4158 
4159 	return 0;
4160 }
4161 
4162 /**
4163  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4164  * @instance:				Adapter soft state
4165  */
4166 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4167 {
4168 	int i;
4169 	u16 max_cmd = instance->max_mfi_cmds;
4170 	struct megasas_cmd *cmd;
4171 
4172 	if (!instance->frame_dma_pool)
4173 		return;
4174 
4175 	/*
4176 	 * Return all frames to pool
4177 	 */
4178 	for (i = 0; i < max_cmd; i++) {
4179 
4180 		cmd = instance->cmd_list[i];
4181 
4182 		if (cmd->frame)
4183 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4184 				      cmd->frame_phys_addr);
4185 
4186 		if (cmd->sense)
4187 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4188 				      cmd->sense_phys_addr);
4189 	}
4190 
4191 	/*
4192 	 * Now destroy the pool itself
4193 	 */
4194 	dma_pool_destroy(instance->frame_dma_pool);
4195 	dma_pool_destroy(instance->sense_dma_pool);
4196 
4197 	instance->frame_dma_pool = NULL;
4198 	instance->sense_dma_pool = NULL;
4199 }
4200 
4201 /**
4202  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4203  * @instance:			Adapter soft state
4204  *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet
 * using the DMA pool facility.
4209  */
4210 static int megasas_create_frame_pool(struct megasas_instance *instance)
4211 {
4212 	int i;
4213 	u16 max_cmd;
4214 	u32 frame_count;
4215 	struct megasas_cmd *cmd;
4216 
4217 	max_cmd = instance->max_mfi_cmds;
4218 
	/*
	 * For MFI controllers:
	 * max_num_sge = 60
	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
	 * Total 960 bytes (15 MFI frames of 64 bytes each)
	 *
	 * Fusion adapters require only 3 extra frames:
	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
	 * Total 192 bytes (3 MFI frames of 64 bytes each)
	 */
4230 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4231 			(15 + 1) : (3 + 1);
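	/* +1 accounts for the base MFI frame; the rest hold the SG list */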
4232 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
	/*
	 * Use the DMA pool facility for the per-command frame buffers
	 */
4236 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4237 					&instance->pdev->dev,
4238 					instance->mfi_frame_size, 256, 0);
4239 
4240 	if (!instance->frame_dma_pool) {
4241 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4242 		return -ENOMEM;
4243 	}
4244 
4245 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4246 						   &instance->pdev->dev, 128,
4247 						   4, 0);
4248 
4249 	if (!instance->sense_dma_pool) {
4250 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4251 
4252 		dma_pool_destroy(instance->frame_dma_pool);
4253 		instance->frame_dma_pool = NULL;
4254 
4255 		return -ENOMEM;
4256 	}
4257 
4258 	/*
4259 	 * Allocate and attach a frame to each of the commands in cmd_list.
4260 	 * By making cmd->index as the context instead of the &cmd, we can
4261 	 * always use 32bit context regardless of the architecture
4262 	 */
4263 	for (i = 0; i < max_cmd; i++) {
4264 
4265 		cmd = instance->cmd_list[i];
4266 
4267 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4268 					    GFP_KERNEL, &cmd->frame_phys_addr);
4269 
4270 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4271 					    GFP_KERNEL, &cmd->sense_phys_addr);
4272 
4273 		/*
4274 		 * megasas_teardown_frame_pool() takes care of freeing
4275 		 * whatever has been allocated
4276 		 */
4277 		if (!cmd->frame || !cmd->sense) {
4278 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4279 			megasas_teardown_frame_pool(instance);
4280 			return -ENOMEM;
4281 		}
4282 
4283 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4284 		cmd->frame->io.pad_0 = 0;
4285 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4286 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4287 	}
4288 
4289 	return 0;
4290 }
4291 
4292 /**
4293  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4294  * @instance:		Adapter soft state
4295  */
4296 void megasas_free_cmds(struct megasas_instance *instance)
4297 {
4298 	int i;
4299 
4300 	/* First free the MFI frame pool */
4301 	megasas_teardown_frame_pool(instance);
4302 
4303 	/* Free all the commands in the cmd_list */
4304 	for (i = 0; i < instance->max_mfi_cmds; i++)
4305 
4306 		kfree(instance->cmd_list[i]);
4307 
4308 	/* Free the cmd_list buffer itself */
4309 	kfree(instance->cmd_list);
4310 	instance->cmd_list = NULL;
4311 
4312 	INIT_LIST_HEAD(&instance->cmd_pool);
4313 }
4314 
4315 /**
4316  * megasas_alloc_cmds -	Allocates the command packets
4317  * @instance:		Adapter soft state
4318  *
4319  * Each command that is issued to the FW, whether IO commands from the OS or
4320  * internal commands like IOCTLs, are wrapped in local data structure called
4321  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4322  * the FW.
4323  *
4324  * Each frame has a 32-bit field called context (tag). This context is used
4325  * to get back the megasas_cmd from the frame when a frame gets completed in
4326  * the ISR. Typically the address of the megasas_cmd itself would be used as
4327  * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum. We always use 32 bit integers for the context. In
4329  * this driver, the 32 bit values are the indices into an array cmd_list.
4330  * This array is used only to look up the megasas_cmd given the context. The
4331  * free commands themselves are maintained in a linked list called cmd_pool.
4332  */
4333 int megasas_alloc_cmds(struct megasas_instance *instance)
4334 {
4335 	int i;
4336 	int j;
4337 	u16 max_cmd;
4338 	struct megasas_cmd *cmd;
4339 
4340 	max_cmd = instance->max_mfi_cmds;
4341 
4342 	/*
4343 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4344 	 * Allocate the dynamic array first and then allocate individual
4345 	 * commands.
4346 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}
4355 
4356 	for (i = 0; i < max_cmd; i++) {
4357 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4358 						GFP_KERNEL);
4359 
4360 		if (!instance->cmd_list[i]) {
4361 
4362 			for (j = 0; j < i; j++)
4363 				kfree(instance->cmd_list[j]);
4364 
4365 			kfree(instance->cmd_list);
4366 			instance->cmd_list = NULL;
4367 
4368 			return -ENOMEM;
4369 		}
4370 	}
4371 
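	/*
	 * Zero every command, record its index (used as the 32-bit frame
	 * context) and add it to the free command pool.
	 */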
4372 	for (i = 0; i < max_cmd; i++) {
4373 		cmd = instance->cmd_list[i];
4374 		memset(cmd, 0, sizeof(struct megasas_cmd));
4375 		cmd->index = i;
4376 		cmd->scmd = NULL;
4377 		cmd->instance = instance;
4378 
4379 		list_add_tail(&cmd->list, &instance->cmd_pool);
4380 	}
4381 
4382 	/*
4383 	 * Create a frame pool and assign one frame to each cmd
4384 	 */
4385 	if (megasas_create_frame_pool(instance)) {
4386 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4387 		megasas_free_cmds(instance);
4388 		return -ENOMEM;
4389 	}
4390 
4391 	return 0;
4392 }
4393 
/*
 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
 * @instance:				Adapter soft state
 *
 * Returns KILL_ADAPTER for MFI adapters, IGNORE_TIMEOUT if driver
 * load/unload is in progress or OCR is not possible, and INITIATE_OCR
 * otherwise (Fusion adapters only).
 */
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4404 	if (instance->adapter_type == MFI_SERIES)
4405 		return KILL_ADAPTER;
4406 	else if (instance->unload ||
4407 			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4408 				 &instance->reset_flags))
4409 		return IGNORE_TIMEOUT;
4410 	else
4411 		return INITIATE_OCR;
4412 }
4413 
4414 static void
4415 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4416 {
4417 	int ret;
4418 	struct megasas_cmd *cmd;
4419 	struct megasas_dcmd_frame *dcmd;
4420 
4421 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4422 	u16 device_id = 0;
4423 
4424 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4425 	cmd = megasas_get_cmd(instance);
4426 
4427 	if (!cmd) {
4428 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4429 		return;
4430 	}
4431 
4432 	dcmd = &cmd->frame->dcmd;
4433 
4434 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4435 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4436 
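	/* Build a read DCMD asking FW for this device's MR_PD_INFO */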
4437 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4438 	dcmd->cmd = MFI_CMD_DCMD;
4439 	dcmd->cmd_status = 0xFF;
4440 	dcmd->sge_count = 1;
4441 	dcmd->flags = MFI_FRAME_DIR_READ;
4442 	dcmd->timeout = 0;
4443 	dcmd->pad_0 = 0;
4444 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4445 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4446 
4447 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4448 				 sizeof(struct MR_PD_INFO));
4449 
4450 	if ((instance->adapter_type != MFI_SERIES) &&
4451 	    !instance->mask_interrupts)
4452 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4453 	else
4454 		ret = megasas_issue_polled(instance, cmd);
4455 
4456 	switch (ret) {
4457 	case DCMD_SUCCESS:
4458 		mr_device_priv_data = sdev->hostdata;
4459 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4460 		mr_device_priv_data->interface_type =
4461 				instance->pd_info->state.ddf.pdType.intf;
4462 		break;
4463 
4464 	case DCMD_TIMEOUT:
4465 
4466 		switch (dcmd_timeout_ocr_possible(instance)) {
4467 		case INITIATE_OCR:
4468 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4469 			mutex_unlock(&instance->reset_mutex);
4470 			megasas_reset_fusion(instance->host,
4471 				MFI_IO_TIMEOUT_OCR);
4472 			mutex_lock(&instance->reset_mutex);
4473 			break;
4474 		case KILL_ADAPTER:
4475 			megaraid_sas_kill_hba(instance);
4476 			break;
4477 		case IGNORE_TIMEOUT:
4478 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4479 				__func__, __LINE__);
4480 			break;
4481 		}
4482 
4483 		break;
4484 	}
4485 
4486 	if (ret != DCMD_TIMEOUT)
4487 		megasas_return_cmd(instance, cmd);
4488 
4489 	return;
4490 }
/*
 * megasas_get_pd_list -	Returns FW's pd_list structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out the
 * SYSTEM (JBOD) physical drives exposed by the FW.
 */
4500 static int
4501 megasas_get_pd_list(struct megasas_instance *instance)
4502 {
4503 	int ret = 0, pd_index = 0;
4504 	struct megasas_cmd *cmd;
4505 	struct megasas_dcmd_frame *dcmd;
4506 	struct MR_PD_LIST *ci;
4507 	struct MR_PD_ADDRESS *pd_addr;
4508 
4509 	if (instance->pd_list_not_supported) {
4510 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4511 		"not supported by firmware\n");
4512 		return ret;
4513 	}
4514 
4515 	ci = instance->pd_list_buf;
4516 
4517 	cmd = megasas_get_cmd(instance);
4518 
4519 	if (!cmd) {
4520 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4521 		return -ENOMEM;
4522 	}
4523 
4524 	dcmd = &cmd->frame->dcmd;
4525 
4526 	memset(ci, 0, sizeof(*ci));
4527 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4528 
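	/*
	 * Query only the PDs exposed to the host; the response lands in the
	 * preallocated pd_list DMA buffer (instance->pd_list_buf).
	 */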
4529 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4530 	dcmd->mbox.b[1] = 0;
4531 	dcmd->cmd = MFI_CMD_DCMD;
4532 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4533 	dcmd->sge_count = 1;
4534 	dcmd->flags = MFI_FRAME_DIR_READ;
4535 	dcmd->timeout = 0;
4536 	dcmd->pad_0 = 0;
4537 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4538 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4539 
4540 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4541 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4542 
4543 	if ((instance->adapter_type != MFI_SERIES) &&
4544 	    !instance->mask_interrupts)
4545 		ret = megasas_issue_blocked_cmd(instance, cmd,
4546 			MFI_IO_TIMEOUT_SECS);
4547 	else
4548 		ret = megasas_issue_polled(instance, cmd);
4549 
4550 	switch (ret) {
4551 	case DCMD_FAILED:
4552 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4553 			"failed/not supported by firmware\n");
4554 
4555 		if (instance->adapter_type != MFI_SERIES)
4556 			megaraid_sas_kill_hba(instance);
4557 		else
4558 			instance->pd_list_not_supported = 1;
4559 		break;
4560 	case DCMD_TIMEOUT:
4561 
4562 		switch (dcmd_timeout_ocr_possible(instance)) {
4563 		case INITIATE_OCR:
4564 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			/*
			 * DCMD timed out from the AEN path.
			 * The AEN path already holds reset_mutex to avoid
			 * PCI access while OCR is in progress.
			 */
4570 			mutex_unlock(&instance->reset_mutex);
4571 			megasas_reset_fusion(instance->host,
4572 						MFI_IO_TIMEOUT_OCR);
4573 			mutex_lock(&instance->reset_mutex);
4574 			break;
4575 		case KILL_ADAPTER:
4576 			megaraid_sas_kill_hba(instance);
4577 			break;
4578 		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
				__func__, __LINE__);
4581 			break;
4582 		}
4583 
4584 		break;
4585 
4586 	case DCMD_SUCCESS:
4587 		pd_addr = ci->addr;
4588 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4589 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4590 				 __func__, le32_to_cpu(ci->count));
4591 
4592 		if ((le32_to_cpu(ci->count) >
4593 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4594 			break;
4595 
4596 		memset(instance->local_pd_list, 0,
4597 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4598 
4599 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4600 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4601 					le16_to_cpu(pd_addr->deviceId);
4602 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4603 					pd_addr->scsiDevType;
4604 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4605 					MR_PD_STATE_SYSTEM;
4606 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4607 				dev_info(&instance->pdev->dev,
4608 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4609 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4610 					 pd_addr->scsiDevType);
4611 			pd_addr++;
4612 		}
4613 
4614 		memcpy(instance->pd_list, instance->local_pd_list,
4615 			sizeof(instance->pd_list));
4616 		break;
4617 
4618 	}
4619 
4620 	if (ret != DCMD_TIMEOUT)
4621 		megasas_return_cmd(instance, cmd);
4622 
4623 	return ret;
4624 }
4625 
/*
 * megasas_get_ld_list -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
 * list.  The returned target IDs are used to populate instance->ld_ids.
 */
4635 static int
4636 megasas_get_ld_list(struct megasas_instance *instance)
4637 {
4638 	int ret = 0, ld_index = 0, ids = 0;
4639 	struct megasas_cmd *cmd;
4640 	struct megasas_dcmd_frame *dcmd;
4641 	struct MR_LD_LIST *ci;
4642 	dma_addr_t ci_h = 0;
4643 	u32 ld_count;
4644 
4645 	ci = instance->ld_list_buf;
4646 	ci_h = instance->ld_list_buf_h;
4647 
4648 	cmd = megasas_get_cmd(instance);
4649 
4650 	if (!cmd) {
4651 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4652 		return -ENOMEM;
4653 	}
4654 
4655 	dcmd = &cmd->frame->dcmd;
4656 
4657 	memset(ci, 0, sizeof(*ci));
4658 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4659 
4660 	if (instance->supportmax256vd)
4661 		dcmd->mbox.b[0] = 1;
4662 	dcmd->cmd = MFI_CMD_DCMD;
4663 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4664 	dcmd->sge_count = 1;
4665 	dcmd->flags = MFI_FRAME_DIR_READ;
4666 	dcmd->timeout = 0;
4667 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4668 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4669 	dcmd->pad_0  = 0;
4670 
4671 	megasas_set_dma_settings(instance, dcmd, ci_h,
4672 				 sizeof(struct MR_LD_LIST));
4673 
4674 	if ((instance->adapter_type != MFI_SERIES) &&
4675 	    !instance->mask_interrupts)
4676 		ret = megasas_issue_blocked_cmd(instance, cmd,
4677 			MFI_IO_TIMEOUT_SECS);
4678 	else
4679 		ret = megasas_issue_polled(instance, cmd);
4680 
4681 	ld_count = le32_to_cpu(ci->ldCount);
4682 
4683 	switch (ret) {
4684 	case DCMD_FAILED:
4685 		megaraid_sas_kill_hba(instance);
4686 		break;
4687 	case DCMD_TIMEOUT:
4688 
4689 		switch (dcmd_timeout_ocr_possible(instance)) {
4690 		case INITIATE_OCR:
4691 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			/*
			 * DCMD timed out from the AEN path.
			 * The AEN path already holds reset_mutex to avoid
			 * PCI access while OCR is in progress.
			 */
4697 			mutex_unlock(&instance->reset_mutex);
4698 			megasas_reset_fusion(instance->host,
4699 						MFI_IO_TIMEOUT_OCR);
4700 			mutex_lock(&instance->reset_mutex);
4701 			break;
4702 		case KILL_ADAPTER:
4703 			megaraid_sas_kill_hba(instance);
4704 			break;
4705 		case IGNORE_TIMEOUT:
4706 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4707 				__func__, __LINE__);
4708 			break;
4709 		}
4710 
4711 		break;
4712 
4713 	case DCMD_SUCCESS:
4714 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4715 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4716 				 __func__, ld_count);
4717 
4718 		if (ld_count > instance->fw_supported_vd_count)
4719 			break;
4720 
4721 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4722 
4723 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4724 			if (ci->ldList[ld_index].state != 0) {
4725 				ids = ci->ldList[ld_index].ref.targetId;
4726 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4727 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4728 					dev_info(&instance->pdev->dev,
4729 						 "LD%d: targetID: 0x%03x\n",
4730 						 ld_index, ids);
4731 			}
4732 		}
4733 
4734 		break;
4735 	}
4736 
4737 	if (ret != DCMD_TIMEOUT)
4738 		megasas_return_cmd(instance, cmd);
4739 
4740 	return ret;
4741 }
4742 
/**
 * megasas_ld_list_query -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 * @query_type:				LD query type
 *
 * Issues an internal command (DCMD) to query the FW for the list of LD
 * target IDs.  If the FW does not support this DCMD, the driver falls
 * back to megasas_get_ld_list().
 */
4752 static int
4753 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4754 {
4755 	int ret = 0, ld_index = 0, ids = 0;
4756 	struct megasas_cmd *cmd;
4757 	struct megasas_dcmd_frame *dcmd;
4758 	struct MR_LD_TARGETID_LIST *ci;
4759 	dma_addr_t ci_h = 0;
4760 	u32 tgtid_count;
4761 
4762 	ci = instance->ld_targetid_list_buf;
4763 	ci_h = instance->ld_targetid_list_buf_h;
4764 
4765 	cmd = megasas_get_cmd(instance);
4766 
4767 	if (!cmd) {
4768 		dev_warn(&instance->pdev->dev,
4769 		         "megasas_ld_list_query: Failed to get cmd\n");
4770 		return -ENOMEM;
4771 	}
4772 
4773 	dcmd = &cmd->frame->dcmd;
4774 
4775 	memset(ci, 0, sizeof(*ci));
4776 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4777 
4778 	dcmd->mbox.b[0] = query_type;
4779 	if (instance->supportmax256vd)
4780 		dcmd->mbox.b[2] = 1;
4781 
4782 	dcmd->cmd = MFI_CMD_DCMD;
4783 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4784 	dcmd->sge_count = 1;
4785 	dcmd->flags = MFI_FRAME_DIR_READ;
4786 	dcmd->timeout = 0;
4787 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4788 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4789 	dcmd->pad_0  = 0;
4790 
4791 	megasas_set_dma_settings(instance, dcmd, ci_h,
4792 				 sizeof(struct MR_LD_TARGETID_LIST));
4793 
4794 	if ((instance->adapter_type != MFI_SERIES) &&
4795 	    !instance->mask_interrupts)
4796 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4797 	else
4798 		ret = megasas_issue_polled(instance, cmd);
4799 
4800 	switch (ret) {
4801 	case DCMD_FAILED:
4802 		dev_info(&instance->pdev->dev,
4803 			"DCMD not supported by firmware - %s %d\n",
4804 				__func__, __LINE__);
4805 		ret = megasas_get_ld_list(instance);
4806 		break;
4807 	case DCMD_TIMEOUT:
4808 		switch (dcmd_timeout_ocr_possible(instance)) {
4809 		case INITIATE_OCR:
4810 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
			/*
			 * DCMD timed out from the AEN path.
			 * The AEN path already holds reset_mutex to avoid
			 * PCI access while OCR is in progress.
			 */
4816 			mutex_unlock(&instance->reset_mutex);
4817 			megasas_reset_fusion(instance->host,
4818 						MFI_IO_TIMEOUT_OCR);
4819 			mutex_lock(&instance->reset_mutex);
4820 			break;
4821 		case KILL_ADAPTER:
4822 			megaraid_sas_kill_hba(instance);
4823 			break;
4824 		case IGNORE_TIMEOUT:
4825 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4826 				__func__, __LINE__);
4827 			break;
4828 		}
4829 
4830 		break;
4831 	case DCMD_SUCCESS:
4832 		tgtid_count = le32_to_cpu(ci->count);
4833 
4834 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4835 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4836 				 __func__, tgtid_count);
4837 
4838 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4839 			break;
4840 
4841 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4842 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4843 			ids = ci->targetId[ld_index];
4844 			instance->ld_ids[ids] = ci->targetId[ld_index];
4845 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4846 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4847 					 ld_index, ci->targetId[ld_index]);
4848 		}
4849 
4850 		break;
4851 	}
4852 
4853 	if (ret != DCMD_TIMEOUT)
4854 		megasas_return_cmd(instance, cmd);
4855 
4856 	return ret;
4857 }
4858 
/**
 * megasas_host_device_list_query -	Get the combined device list from FW
 * @instance:			Adapter soft state
 * @is_probe:			Driver probe check
 *
 * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
 * dcmd.mbox              - reserved
 * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
 * Desc:    This DCMD will return the combined device list
 * Status:  MFI_STAT_OK - List returned successfully
 *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
 *                                 disabled
 * Return:			0 if DCMD succeeded
 *				 non-zero if failed
 */
4872 static int
4873 megasas_host_device_list_query(struct megasas_instance *instance,
4874 			       bool is_probe)
4875 {
4876 	int ret, i, target_id;
4877 	struct megasas_cmd *cmd;
4878 	struct megasas_dcmd_frame *dcmd;
4879 	struct MR_HOST_DEVICE_LIST *ci;
4880 	u32 count;
4881 	dma_addr_t ci_h;
4882 
4883 	ci = instance->host_device_list_buf;
4884 	ci_h = instance->host_device_list_buf_h;
4885 
4886 	cmd = megasas_get_cmd(instance);
4887 
4888 	if (!cmd) {
4889 		dev_warn(&instance->pdev->dev,
4890 			 "%s: failed to get cmd\n",
4891 			 __func__);
4892 		return -ENOMEM;
4893 	}
4894 
4895 	dcmd = &cmd->frame->dcmd;
4896 
4897 	memset(ci, 0, sizeof(*ci));
4898 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4899 
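	/* mbox.b[0] is 0 for the initial probe query and 1 for later refreshes */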
4900 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
4901 	dcmd->cmd = MFI_CMD_DCMD;
4902 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4903 	dcmd->sge_count = 1;
4904 	dcmd->flags = MFI_FRAME_DIR_READ;
4905 	dcmd->timeout = 0;
4906 	dcmd->pad_0 = 0;
4907 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4908 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4909 
4910 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4911 
4912 	if (!instance->mask_interrupts) {
4913 		ret = megasas_issue_blocked_cmd(instance, cmd,
4914 						MFI_IO_TIMEOUT_SECS);
4915 	} else {
4916 		ret = megasas_issue_polled(instance, cmd);
4917 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4918 	}
4919 
4920 	switch (ret) {
4921 	case DCMD_SUCCESS:
4922 		/* Fill the internal pd_list and ld_ids array based on
4923 		 * targetIds returned by FW
4924 		 */
4925 		count = le32_to_cpu(ci->count);
4926 
4927 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4928 			break;
4929 
4930 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4931 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4932 				 __func__, count);
4933 
4934 		memset(instance->local_pd_list, 0,
4935 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4936 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4937 		for (i = 0; i < count; i++) {
4938 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4939 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4940 				instance->local_pd_list[target_id].tid = target_id;
4941 				instance->local_pd_list[target_id].driveType =
4942 						ci->host_device_list[i].scsi_type;
4943 				instance->local_pd_list[target_id].driveState =
4944 						MR_PD_STATE_SYSTEM;
4945 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4946 					dev_info(&instance->pdev->dev,
4947 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4948 						 i, target_id, ci->host_device_list[i].scsi_type);
4949 			} else {
4950 				instance->ld_ids[target_id] = target_id;
4951 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4952 					dev_info(&instance->pdev->dev,
4953 						 "Device %d: LD targetID: 0x%03x\n",
4954 						 i, target_id);
4955 			}
4956 		}
4957 
4958 		memcpy(instance->pd_list, instance->local_pd_list,
4959 		       sizeof(instance->pd_list));
4960 		break;
4961 
4962 	case DCMD_TIMEOUT:
4963 		switch (dcmd_timeout_ocr_possible(instance)) {
4964 		case INITIATE_OCR:
4965 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4966 			mutex_unlock(&instance->reset_mutex);
4967 			megasas_reset_fusion(instance->host,
4968 				MFI_IO_TIMEOUT_OCR);
4969 			mutex_lock(&instance->reset_mutex);
4970 			break;
4971 		case KILL_ADAPTER:
4972 			megaraid_sas_kill_hba(instance);
4973 			break;
4974 		case IGNORE_TIMEOUT:
4975 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4976 				 __func__, __LINE__);
4977 			break;
4978 		}
4979 		break;
4980 	case DCMD_FAILED:
4981 		dev_err(&instance->pdev->dev,
4982 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4983 			__func__);
4984 		break;
4985 	}
4986 
4987 	if (ret != DCMD_TIMEOUT)
4988 		megasas_return_cmd(instance, cmd);
4989 
4990 	return ret;
4991 }
4992 
/*
 * megasas_update_ext_vd_details : Update details w.r.t. Extended VDs
 * @instance			 : Controller's soft instance
 */
4997 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4998 {
4999 	struct fusion_context *fusion;
5000 	u32 ventura_map_sz = 0;
5001 
5002 	fusion = instance->ctrl_context;
	/* MFI based controllers do not support extended VDs; nothing to do */
5004 	if (!fusion)
5005 		return;
5006 
5007 	instance->supportmax256vd =
5008 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
	/* Additional check to handle future FW enhancements */
5010 	if (instance->ctrl_info_buf->max_lds > 64)
5011 		instance->supportmax256vd = 1;
5012 
5013 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5014 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5015 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5016 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5017 	if (instance->supportmax256vd) {
5018 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5019 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5020 	} else {
5021 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5022 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5023 	}
5024 
5025 	dev_info(&instance->pdev->dev,
5026 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5027 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5028 		instance->ctrl_info_buf->max_lds);
5029 
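	/*
	 * If FW reports a max RAID map size (newer controllers), size the
	 * map buffers from it; otherwise compute the legacy and extended
	 * MR_FW_RAID_MAP sizes and pick the one matching the FW's VD count
	 * support.
	 */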
5030 	if (instance->max_raid_mapsize) {
5031 		ventura_map_sz = instance->max_raid_mapsize *
5032 						MR_MIN_MAP_SIZE; /* 64k */
5033 		fusion->current_map_sz = ventura_map_sz;
5034 		fusion->max_map_sz = ventura_map_sz;
5035 	} else {
5036 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
5037 					(sizeof(struct MR_LD_SPAN_MAP) *
5038 					(instance->fw_supported_vd_count - 1));
5039 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5040 
5041 		fusion->max_map_sz =
5042 			max(fusion->old_map_sz, fusion->new_map_sz);
5043 
5044 		if (instance->supportmax256vd)
5045 			fusion->current_map_sz = fusion->new_map_sz;
5046 		else
5047 			fusion->current_map_sz = fusion->old_map_sz;
5048 	}
5049 	/* irrespective of FW raid maps, driver raid map is constant */
5050 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5051 }
5052 
5053 /*
5054  * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5055  * dcmd.hdr.length            - number of bytes to read
5056  * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5057  * Desc:			 Fill in snapdump properties
5058  * Status:			 MFI_STAT_OK- Command successful
5059  */
5060 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5061 {
5062 	int ret = 0;
5063 	struct megasas_cmd *cmd;
5064 	struct megasas_dcmd_frame *dcmd;
5065 	struct MR_SNAPDUMP_PROPERTIES *ci;
5066 	dma_addr_t ci_h = 0;
5067 
5068 	ci = instance->snapdump_prop;
5069 	ci_h = instance->snapdump_prop_h;
5070 
5071 	if (!ci)
5072 		return;
5073 
5074 	cmd = megasas_get_cmd(instance);
5075 
5076 	if (!cmd) {
5077 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5078 		return;
5079 	}
5080 
5081 	dcmd = &cmd->frame->dcmd;
5082 
5083 	memset(ci, 0, sizeof(*ci));
5084 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5085 
5086 	dcmd->cmd = MFI_CMD_DCMD;
5087 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5088 	dcmd->sge_count = 1;
5089 	dcmd->flags = MFI_FRAME_DIR_READ;
5090 	dcmd->timeout = 0;
5091 	dcmd->pad_0 = 0;
5092 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5093 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5094 
5095 	megasas_set_dma_settings(instance, dcmd, ci_h,
5096 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5097 
5098 	if (!instance->mask_interrupts) {
5099 		ret = megasas_issue_blocked_cmd(instance, cmd,
5100 						MFI_IO_TIMEOUT_SECS);
5101 	} else {
5102 		ret = megasas_issue_polled(instance, cmd);
5103 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5104 	}
5105 
5106 	switch (ret) {
5107 	case DCMD_SUCCESS:
5108 		instance->snapdump_wait_time =
5109 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5110 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5111 		break;
5112 
5113 	case DCMD_TIMEOUT:
5114 		switch (dcmd_timeout_ocr_possible(instance)) {
5115 		case INITIATE_OCR:
5116 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5117 			mutex_unlock(&instance->reset_mutex);
5118 			megasas_reset_fusion(instance->host,
5119 				MFI_IO_TIMEOUT_OCR);
5120 			mutex_lock(&instance->reset_mutex);
5121 			break;
5122 		case KILL_ADAPTER:
5123 			megaraid_sas_kill_hba(instance);
5124 			break;
5125 		case IGNORE_TIMEOUT:
5126 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5127 				__func__, __LINE__);
5128 			break;
5129 		}
5130 	}
5131 
5132 	if (ret != DCMD_TIMEOUT)
5133 		megasas_return_cmd(instance, cmd);
5134 }
5135 
/**
 * megasas_get_ctrl_info -	Returns FW's controller structure
 * @instance:			Adapter soft state
5139  *
5140  * Issues an internal command (DCMD) to get the FW's controller structure.
5141  * This information is mainly used to find out the maximum IO transfer per
5142  * command supported by the FW.
5143  */
5144 int
5145 megasas_get_ctrl_info(struct megasas_instance *instance)
5146 {
5147 	int ret = 0;
5148 	struct megasas_cmd *cmd;
5149 	struct megasas_dcmd_frame *dcmd;
5150 	struct megasas_ctrl_info *ci;
5151 	dma_addr_t ci_h = 0;
5152 
5153 	ci = instance->ctrl_info_buf;
5154 	ci_h = instance->ctrl_info_buf_h;
5155 
5156 	cmd = megasas_get_cmd(instance);
5157 
5158 	if (!cmd) {
5159 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5160 		return -ENOMEM;
5161 	}
5162 
5163 	dcmd = &cmd->frame->dcmd;
5164 
5165 	memset(ci, 0, sizeof(*ci));
5166 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5167 
5168 	dcmd->cmd = MFI_CMD_DCMD;
5169 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5170 	dcmd->sge_count = 1;
5171 	dcmd->flags = MFI_FRAME_DIR_READ;
5172 	dcmd->timeout = 0;
5173 	dcmd->pad_0 = 0;
5174 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5175 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5176 	dcmd->mbox.b[0] = 1;
5177 
5178 	megasas_set_dma_settings(instance, dcmd, ci_h,
5179 				 sizeof(struct megasas_ctrl_info));
5180 
5181 	if ((instance->adapter_type != MFI_SERIES) &&
5182 	    !instance->mask_interrupts) {
5183 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5184 	} else {
5185 		ret = megasas_issue_polled(instance, cmd);
5186 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5187 	}
5188 
5189 	switch (ret) {
5190 	case DCMD_SUCCESS:
5191 		/* Save required controller information in
5192 		 * CPU endianness format.
5193 		 */
5194 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5195 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5196 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5197 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5198 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5199 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5200 
		/* Update the latest Ext VD info.
		 * From Init path, store current firmware details.
		 * From OCR path, detect any firmware property changes,
		 * e.g. after a firmware upgrade without a system reboot.
		 */
5206 		megasas_update_ext_vd_details(instance);
5207 		instance->support_seqnum_jbod_fp =
5208 			ci->adapterOperations3.useSeqNumJbodFP;
5209 		instance->support_morethan256jbod =
5210 			ci->adapter_operations4.support_pd_map_target_id;
5211 		instance->support_nvme_passthru =
5212 			ci->adapter_operations4.support_nvme_passthru;
5213 		instance->support_pci_lane_margining =
5214 			ci->adapter_operations5.support_pci_lane_margining;
5215 		instance->task_abort_tmo = ci->TaskAbortTO;
5216 		instance->max_reset_tmo = ci->MaxResetTO;
5217 
		/* Check whether controller is iMR or MR */
5219 		instance->is_imr = (ci->memory_size ? 0 : 1);
5220 
5221 		instance->snapdump_wait_time =
5222 			(ci->properties.on_off_properties2.enable_snap_dump ?
5223 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5224 
5225 		instance->enable_fw_dev_list =
5226 			ci->properties.on_off_properties2.enable_fw_dev_list;
5227 
5228 		dev_info(&instance->pdev->dev,
5229 			"controller type\t: %s(%dMB)\n",
5230 			instance->is_imr ? "iMR" : "MR",
5231 			le16_to_cpu(ci->memory_size));
5232 
5233 		instance->disableOnlineCtrlReset =
5234 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5235 		instance->secure_jbod_support =
5236 			ci->adapterOperations3.supportSecurityonJBOD;
5237 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5238 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5239 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5240 			instance->secure_jbod_support ? "Yes" : "No");
5241 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5242 			 instance->support_nvme_passthru ? "Yes" : "No");
5243 		dev_info(&instance->pdev->dev,
5244 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5245 			 instance->task_abort_tmo, instance->max_reset_tmo);
5246 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5247 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5248 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5249 			 instance->support_pci_lane_margining ? "Yes" : "No");
5250 
5251 		break;
5252 
5253 	case DCMD_TIMEOUT:
5254 		switch (dcmd_timeout_ocr_possible(instance)) {
5255 		case INITIATE_OCR:
5256 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5257 			mutex_unlock(&instance->reset_mutex);
5258 			megasas_reset_fusion(instance->host,
5259 				MFI_IO_TIMEOUT_OCR);
5260 			mutex_lock(&instance->reset_mutex);
5261 			break;
5262 		case KILL_ADAPTER:
5263 			megaraid_sas_kill_hba(instance);
5264 			break;
5265 		case IGNORE_TIMEOUT:
5266 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5267 				__func__, __LINE__);
5268 			break;
5269 		}
5270 		break;
5271 	case DCMD_FAILED:
5272 		megaraid_sas_kill_hba(instance);
5273 		break;
5274 
5275 	}
5276 
5277 	if (ret != DCMD_TIMEOUT)
5278 		megasas_return_cmd(instance, cmd);
5279 
5280 	return ret;
5281 }
5282 
5283 /*
5284  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5285  *					to firmware
5286  *
5287  * @instance:				Adapter soft state
5288  * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
5289 					MR_CRASH_BUF_TURN_OFF = 0
5290 					MR_CRASH_BUF_TURN_ON = 1
5291  * @return 0 on success non-zero on failure.
5292  * Issues an internal command (DCMD) to set parameters for crash dump feature.
5293  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5294  * that driver supports crash dump feature. This DCMD will be sent only if
5295  * crash dump feature is supported by the FW.
5296  *
5297  */
5298 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5299 	u8 crash_buf_state)
5300 {
5301 	int ret = 0;
5302 	struct megasas_cmd *cmd;
5303 	struct megasas_dcmd_frame *dcmd;
5304 
5305 	cmd = megasas_get_cmd(instance);
5306 
5307 	if (!cmd) {
5308 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5309 		return -ENOMEM;
5310 	}
5311 
5312 
5313 	dcmd = &cmd->frame->dcmd;
5314 
5315 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5316 	dcmd->mbox.b[0] = crash_buf_state;
5317 	dcmd->cmd = MFI_CMD_DCMD;
5318 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5319 	dcmd->sge_count = 1;
5320 	dcmd->flags = MFI_FRAME_DIR_NONE;
5321 	dcmd->timeout = 0;
5322 	dcmd->pad_0 = 0;
5323 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5324 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5325 
5326 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5327 				 CRASH_DMA_BUF_SIZE);
5328 
5329 	if ((instance->adapter_type != MFI_SERIES) &&
5330 	    !instance->mask_interrupts)
5331 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5332 	else
5333 		ret = megasas_issue_polled(instance, cmd);
5334 
5335 	if (ret == DCMD_TIMEOUT) {
5336 		switch (dcmd_timeout_ocr_possible(instance)) {
5337 		case INITIATE_OCR:
5338 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5339 			megasas_reset_fusion(instance->host,
5340 					MFI_IO_TIMEOUT_OCR);
5341 			break;
5342 		case KILL_ADAPTER:
5343 			megaraid_sas_kill_hba(instance);
5344 			break;
5345 		case IGNORE_TIMEOUT:
5346 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5347 				__func__, __LINE__);
5348 			break;
5349 		}
5350 	} else
5351 		megasas_return_cmd(instance, cmd);
5352 
5353 	return ret;
5354 }
5355 
5356 /**
5357  * megasas_issue_init_mfi -	Initializes the FW
5358  * @instance:		Adapter soft state
5359  *
5360  * Issues the INIT MFI cmd
5361  */
5362 static int
5363 megasas_issue_init_mfi(struct megasas_instance *instance)
5364 {
5365 	__le32 context;
5366 	struct megasas_cmd *cmd;
5367 	struct megasas_init_frame *init_frame;
5368 	struct megasas_init_queue_info *initq_info;
5369 	dma_addr_t init_frame_h;
5370 	dma_addr_t initq_info_h;
5371 
5372 	/*
	 * Prepare an init frame. Note the init frame points to the queue info
	 * structure. Each frame has SGL allocated after the first 64 bytes. For
	 * this frame - since we don't need any SGL - we use the SGL's space as
	 * the queue info structure.
5377 	 *
5378 	 * We will not get a NULL command below. We just created the pool.
5379 	 */
5380 	cmd = megasas_get_cmd(instance);
5381 
5382 	init_frame = (struct megasas_init_frame *)cmd->frame;
5383 	initq_info = (struct megasas_init_queue_info *)
5384 		((unsigned long)init_frame + 64);
5385 
5386 	init_frame_h = cmd->frame_phys_addr;
5387 	initq_info_h = init_frame_h + 64;
5388 
5389 	context = init_frame->context;
5390 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5391 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5392 	init_frame->context = context;
5393 
5394 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5395 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5396 
5397 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5398 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5399 
5400 	init_frame->cmd = MFI_CMD_INIT;
5401 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5402 	init_frame->queue_info_new_phys_addr_lo =
5403 		cpu_to_le32(lower_32_bits(initq_info_h));
5404 	init_frame->queue_info_new_phys_addr_hi =
5405 		cpu_to_le32(upper_32_bits(initq_info_h));
5406 
5407 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5408 
5409 	/*
5410 	 * disable the intr before firing the init frame to FW
5411 	 */
5412 	instance->instancet->disable_intr(instance);
5413 
5414 	/*
5415 	 * Issue the init frame in polled mode
5416 	 */
5417 
5418 	if (megasas_issue_polled(instance, cmd)) {
5419 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5420 		megasas_return_cmd(instance, cmd);
5421 		goto fail_fw_init;
5422 	}
5423 
5424 	megasas_return_cmd(instance, cmd);
5425 
5426 	return 0;
5427 
5428 fail_fw_init:
5429 	return -EINVAL;
5430 }
5431 
5432 static u32
5433 megasas_init_adapter_mfi(struct megasas_instance *instance)
5434 {
5435 	u32 context_sz;
5436 	u32 reply_q_sz;
5437 
5438 	/*
5439 	 * Get various operational parameters from status register
5440 	 */
5441 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5442 	/*
5443 	 * Reduce the max supported cmds by 1. This is to ensure that the
5444 	 * reply_q_sz (1 more than the max cmd that driver may send)
5445 	 * does not exceed max cmds that the FW can support
5446 	 */
5447 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5448 	instance->max_mfi_cmds = instance->max_fw_cmds;
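	/*
	 * Bits [23:16] of the firmware status register advertise the maximum
	 * number of SGEs per frame supported by the firmware.
	 */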
5449 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5450 					0x10;
5451 	/*
5452 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5453 	 * are reserved for IOCTL + driver's internal DCMDs.
5454 	 */
5455 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5456 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5457 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5458 			MEGASAS_SKINNY_INT_CMDS);
5459 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5460 	} else {
5461 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5462 			MEGASAS_INT_CMDS);
5463 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5464 	}
5465 
5466 	instance->cur_can_queue = instance->max_scsi_cmds;
5467 	/*
5468 	 * Create a pool of commands
5469 	 */
5470 	if (megasas_alloc_cmds(instance))
5471 		goto fail_alloc_cmds;
5472 
5473 	/*
5474 	 * Allocate memory for reply queue. Length of reply queue should
5475 	 * be _one_ more than the maximum commands handled by the firmware.
5476 	 *
	 * Note: When FW completes commands, it places corresponding context
5478 	 * values in this circular reply queue. This circular queue is a fairly
5479 	 * typical producer-consumer queue. FW is the producer (of completed
5480 	 * commands) and the driver is the consumer.
5481 	 */
5482 	context_sz = sizeof(u32);
5483 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5484 
5485 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5486 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5487 
5488 	if (!instance->reply_queue) {
5489 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5490 		goto fail_reply_queue;
5491 	}
5492 
5493 	if (megasas_issue_init_mfi(instance))
5494 		goto fail_fw_init;
5495 
5496 	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
			"Fail from %s %d\n", instance->unique_id,
5499 			__func__, __LINE__);
5500 		goto fail_fw_init;
5501 	}
5502 
5503 	instance->fw_support_ieee = 0;
5504 	instance->fw_support_ieee =
5505 		(instance->instancet->read_fw_status_reg(instance) &
5506 		0x04000000);
5507 
	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
5509 			instance->fw_support_ieee);
5510 
5511 	if (instance->fw_support_ieee)
5512 		instance->flag_ieee = 1;
5513 
5514 	return 0;
5515 
5516 fail_fw_init:
5517 
5518 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5519 			    instance->reply_queue, instance->reply_queue_h);
5520 fail_reply_queue:
5521 	megasas_free_cmds(instance);
5522 
5523 fail_alloc_cmds:
5524 	return 1;
5525 }
5526 
5527 static
5528 void megasas_setup_irq_poll(struct megasas_instance *instance)
5529 {
5530 	struct megasas_irq_context *irq_ctx;
5531 	u32 count, i;
5532 
5533 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5534 
5535 	/* Initialize IRQ poll */
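	/*
	 * Each vector gets its own irq_poll context: once the ISR has handled
	 * threshold_reply_count completions in hard-IRQ context, the remaining
	 * replies are drained from softirq context via megasas_irqpoll(),
	 * bounding the time spent with the interrupt line busy.
	 */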
5536 	for (i = 0; i < count; i++) {
5537 		irq_ctx = &instance->irq_context[i];
5538 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5539 		irq_ctx->irq_poll_scheduled = false;
5540 		irq_poll_init(&irq_ctx->irqpoll,
5541 			      instance->threshold_reply_count,
5542 			      megasas_irqpoll);
5543 	}
5544 }
5545 
5546 /*
5547  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5548  * @instance:				Adapter soft state
5549  *
5550  * Do not enable interrupt, only setup ISRs.
5551  *
5552  * Return 0 on success.
5553  */
5554 static int
5555 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5556 {
5557 	struct pci_dev *pdev;
5558 
5559 	pdev = instance->pdev;
5560 	instance->irq_context[0].instance = instance;
5561 	instance->irq_context[0].MSIxIndex = 0;
5562 	snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5563 		"megasas", instance->host->host_no);
5564 	if (request_irq(pci_irq_vector(pdev, 0),
5565 			instance->instancet->service_isr, IRQF_SHARED,
5566 			instance->irq_context->name, &instance->irq_context[0])) {
5567 		dev_err(&instance->pdev->dev,
5568 				"Failed to register IRQ from %s %d\n",
5569 				__func__, __LINE__);
5570 		return -1;
5571 	}
5572 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5573 	instance->low_latency_index_start = 0;
5574 	return 0;
5575 }
5576 
5577 /**
5578  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5579  * @instance:				Adapter soft state
5580  * @is_probe:				Driver probe check
5581  *
 * Do not enable interrupts, only set up the ISRs.
 *
 * Return: 0 on success.
5585  */
5586 static int
5587 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5588 {
5589 	int i, j;
5590 	struct pci_dev *pdev;
5591 
5592 	pdev = instance->pdev;
5593 
5594 	/* Try MSI-x */
5595 	for (i = 0; i < instance->msix_vectors; i++) {
5596 		instance->irq_context[i].instance = instance;
5597 		instance->irq_context[i].MSIxIndex = i;
5598 		snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5599 			"megasas", instance->host->host_no, i);
5600 		if (request_irq(pci_irq_vector(pdev, i),
5601 			instance->instancet->service_isr, 0, instance->irq_context[i].name,
5602 			&instance->irq_context[i])) {
5603 			dev_err(&instance->pdev->dev,
5604 				"Failed to register IRQ for vector %d.\n", i);
5605 			for (j = 0; j < i; j++)
5606 				free_irq(pci_irq_vector(pdev, j),
5607 					 &instance->irq_context[j]);
			/* Retry IRQ registration for IO_APIC */
5609 			instance->msix_vectors = 0;
5610 			instance->msix_load_balance = false;
5611 			if (is_probe) {
5612 				pci_free_irq_vectors(instance->pdev);
5613 				return megasas_setup_irqs_ioapic(instance);
5614 			} else {
5615 				return -1;
5616 			}
5617 		}
5618 	}
5619 
5620 	return 0;
5621 }
5622 
5623 /*
5624  * megasas_destroy_irqs-		unregister interrupts.
5625  * @instance:				Adapter soft state
5626  * return:				void
5627  */
5628 static void
5629 megasas_destroy_irqs(struct megasas_instance *instance) {
5630 
5631 	int i;
5632 	int count;
5633 	struct megasas_irq_context *irq_ctx;
5634 
5635 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5636 	if (instance->adapter_type != MFI_SERIES) {
5637 		for (i = 0; i < count; i++) {
5638 			irq_ctx = &instance->irq_context[i];
5639 			irq_poll_disable(&irq_ctx->irqpoll);
5640 		}
5641 	}
5642 
5643 	if (instance->msix_vectors)
5644 		for (i = 0; i < instance->msix_vectors; i++) {
5645 			free_irq(pci_irq_vector(instance->pdev, i),
5646 				 &instance->irq_context[i]);
5647 		}
5648 	else
5649 		free_irq(pci_irq_vector(instance->pdev, 0),
5650 			 &instance->irq_context[0]);
5651 }
5652 
5653 /**
5654  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5655  * @instance:				Adapter soft state
5656  * @is_probe:				Driver probe check
5657  *
5658  * Return 0 on success.
5659  */
5660 void
5661 megasas_setup_jbod_map(struct megasas_instance *instance)
5662 {
5663 	int i;
5664 	struct fusion_context *fusion = instance->ctrl_context;
5665 	u32 pd_seq_map_sz;
5666 
5667 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5668 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5669 
5670 	instance->use_seqnum_jbod_fp =
5671 		instance->support_seqnum_jbod_fp;
5672 	if (reset_devices || !fusion ||
5673 		!instance->support_seqnum_jbod_fp) {
5674 		dev_info(&instance->pdev->dev,
5675 			"JBOD sequence map is disabled %s %d\n",
5676 			__func__, __LINE__);
5677 		instance->use_seqnum_jbod_fp = false;
5678 		return;
5679 	}
5680 
5681 	if (fusion->pd_seq_sync[0])
5682 		goto skip_alloc;
5683 
5684 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5685 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5686 			(&instance->pdev->dev, pd_seq_map_sz,
5687 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5688 		if (!fusion->pd_seq_sync[i]) {
5689 			dev_err(&instance->pdev->dev,
5690 				"Failed to allocate memory from %s %d\n",
5691 				__func__, __LINE__);
5692 			if (i == 1) {
5693 				dma_free_coherent(&instance->pdev->dev,
5694 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5695 					fusion->pd_seq_phys[0]);
5696 				fusion->pd_seq_sync[0] = NULL;
5697 			}
5698 			instance->use_seqnum_jbod_fp = false;
5699 			return;
5700 		}
5701 	}
5702 
5703 skip_alloc:
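	/*
	 * Two syncs are issued: the first (pend flag clear) pulls the current
	 * PD sequence number map from the FW, while the second (pend flag set)
	 * is left outstanding with the FW and completes whenever the map
	 * changes, so the driver knows to refresh its copy.
	 */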
5704 	if (!megasas_sync_pd_seq_num(instance, false) &&
5705 		!megasas_sync_pd_seq_num(instance, true))
5706 		instance->use_seqnum_jbod_fp = true;
5707 	else
5708 		instance->use_seqnum_jbod_fp = false;
5709 }
5710 
5711 static void megasas_setup_reply_map(struct megasas_instance *instance)
5712 {
5713 	const struct cpumask *mask;
5714 	unsigned int queue, cpu, low_latency_index_start;
5715 
5716 	low_latency_index_start = instance->low_latency_index_start;
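
	/*
	 * Map each CPU to the reply queue whose MSI-X interrupt affinity
	 * covers it, so completions are processed close to the submitting CPU.
	 * If affinity information is unavailable, fall back to a simple
	 * round-robin assignment over all possible CPUs.
	 */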
5717 
5718 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5719 		mask = pci_irq_get_affinity(instance->pdev, queue);
5720 		if (!mask)
5721 			goto fallback;
5722 
5723 		for_each_cpu(cpu, mask)
5724 			instance->reply_map[cpu] = queue;
5725 	}
5726 	return;
5727 
5728 fallback:
5729 	queue = low_latency_index_start;
5730 	for_each_possible_cpu(cpu) {
5731 		instance->reply_map[cpu] = queue;
5732 		if (queue == (instance->msix_vectors - 1))
5733 			queue = low_latency_index_start;
5734 		else
5735 			queue++;
5736 	}
5737 }
5738 
5739 /**
5740  * megasas_get_device_list -	Get the PD and LD device list from FW.
5741  * @instance:			Adapter soft state
 * Return:			Success or failure
5743  *
5744  * Issue DCMDs to Firmware to get the PD and LD list.
5745  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5746  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5747  */
5748 static
5749 int megasas_get_device_list(struct megasas_instance *instance)
5750 {
5751 	memset(instance->pd_list, 0,
5752 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5753 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5754 
5755 	if (instance->enable_fw_dev_list) {
5756 		if (megasas_host_device_list_query(instance, true))
5757 			return FAILED;
5758 	} else {
5759 		if (megasas_get_pd_list(instance) < 0) {
5760 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5761 			return FAILED;
5762 		}
5763 
5764 		if (megasas_ld_list_query(instance,
5765 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5766 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5767 			return FAILED;
5768 		}
5769 	}
5770 
5771 	return SUCCESS;
5772 }
5773 
5774 /**
5775  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5776  * @instance:					Adapter soft state
5777  * return:					void
5778  */
5779 static inline void
5780 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5781 {
5782 	int i;
5783 	int local_numa_node;
5784 
5785 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5786 		local_numa_node = dev_to_node(&instance->pdev->dev);
5787 
5788 		for (i = 0; i < instance->low_latency_index_start; i++)
5789 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5790 				cpumask_of_node(local_numa_node));
5791 	}
5792 }
5793 
5794 static int
5795 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5796 {
5797 	int i, irq_flags;
5798 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5799 	struct irq_affinity *descp = &desc;
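	/*
	 * .pre_vectors keeps the first low_latency_index_start vectors (the
	 * high IOPS / management vectors) out of automatic affinity spreading;
	 * PCI_IRQ_AFFINITY lets the PCI core spread the remaining IO vectors
	 * across the online CPUs.
	 */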
5800 
5801 	irq_flags = PCI_IRQ_MSIX;
5802 
5803 	if (instance->smp_affinity_enable)
5804 		irq_flags |= PCI_IRQ_AFFINITY;
5805 	else
5806 		descp = NULL;
5807 
5808 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5809 		instance->low_latency_index_start,
5810 		instance->msix_vectors, irq_flags, descp);
5811 
5812 	return i;
5813 }
5814 
5815 /**
5816  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5817  * @instance:			Adapter soft state
5818  * return:			void
5819  */
5820 static void
5821 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5822 {
5823 	int i;
5824 	unsigned int num_msix_req;
5825 
5826 	i = __megasas_alloc_irq_vectors(instance);
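	/*
	 * Balanced performance mode needs the full vector count (high IOPS
	 * queues plus one per CPU). If the allocation came back short, fall
	 * back to latency mode with a single reserved vector and retry with
	 * the reduced request.
	 */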
5827 
5828 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5829 	    (i != instance->msix_vectors)) {
5830 		if (instance->msix_vectors)
5831 			pci_free_irq_vectors(instance->pdev);
5832 		/* Disable Balanced IOPS mode and try realloc vectors */
5833 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5834 		instance->low_latency_index_start = 1;
5835 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5836 
5837 		instance->msix_vectors = min(num_msix_req,
5838 				instance->msix_vectors);
5839 
5840 		i = __megasas_alloc_irq_vectors(instance);
5841 
5842 	}
5843 
5844 	dev_info(&instance->pdev->dev,
5845 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5846 
5847 	if (i > 0)
5848 		instance->msix_vectors = i;
5849 	else
5850 		instance->msix_vectors = 0;
5851 
5852 	if (instance->smp_affinity_enable)
5853 		megasas_set_high_iops_queue_affinity_hint(instance);
5854 }
5855 
5856 /**
5857  * megasas_init_fw -	Initializes the FW
5858  * @instance:		Adapter soft state
5859  *
5860  * This is the main function for initializing firmware
5861  */
5862 
5863 static int megasas_init_fw(struct megasas_instance *instance)
5864 {
5865 	u32 max_sectors_1;
5866 	u32 max_sectors_2, tmp_sectors, msix_enable;
5867 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5868 	resource_size_t base_addr;
5869 	void *base_addr_phys;
5870 	struct megasas_ctrl_info *ctrl_info = NULL;
5871 	unsigned long bar_list;
5872 	int i, j, loop;
5873 	struct IOV_111 *iovPtr;
5874 	struct fusion_context *fusion;
5875 	bool intr_coalescing;
5876 	unsigned int num_msix_req;
5877 	u16 lnksta, speed;
5878 
5879 	fusion = instance->ctrl_context;
5880 
5881 	/* Find first memory bar */
5882 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5883 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5884 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5885 					 "megasas: LSI")) {
5886 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5887 		return -EBUSY;
5888 	}
5889 
5890 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5891 	instance->reg_set = ioremap(base_addr, 8192);
5892 
5893 	if (!instance->reg_set) {
5894 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5895 		goto fail_ioremap;
5896 	}
5897 
5898 	base_addr_phys = &base_addr;
5899 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5900 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5901 		   instance->bar, base_addr_phys, instance->reg_set);
5902 
5903 	if (instance->adapter_type != MFI_SERIES)
5904 		instance->instancet = &megasas_instance_template_fusion;
5905 	else {
5906 		switch (instance->pdev->device) {
5907 		case PCI_DEVICE_ID_LSI_SAS1078R:
5908 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5909 			instance->instancet = &megasas_instance_template_ppc;
5910 			break;
5911 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5912 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5913 			instance->instancet = &megasas_instance_template_gen2;
5914 			break;
5915 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5916 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5917 			instance->instancet = &megasas_instance_template_skinny;
5918 			break;
5919 		case PCI_DEVICE_ID_LSI_SAS1064R:
5920 		case PCI_DEVICE_ID_DELL_PERC5:
5921 		default:
5922 			instance->instancet = &megasas_instance_template_xscale;
5923 			instance->pd_list_not_supported = 1;
5924 			break;
5925 		}
5926 	}
5927 
5928 	if (megasas_transition_to_ready(instance, 0)) {
5929 		dev_info(&instance->pdev->dev,
5930 			 "Failed to transition controller to ready from %s!\n",
5931 			 __func__);
5932 		if (instance->adapter_type != MFI_SERIES) {
5933 			status_reg = instance->instancet->read_fw_status_reg(
5934 					instance);
5935 			if (status_reg & MFI_RESET_ADAPTER) {
5936 				if (megasas_adp_reset_wait_for_ready
5937 					(instance, true, 0) == FAILED)
5938 					goto fail_ready_state;
5939 			} else {
5940 				goto fail_ready_state;
5941 			}
5942 		} else {
5943 			atomic_set(&instance->fw_reset_no_pci_access, 1);
5944 			instance->instancet->adp_reset
5945 				(instance, instance->reg_set);
5946 			atomic_set(&instance->fw_reset_no_pci_access, 0);
5947 
			/* Wait for about 30 seconds before retrying */
5949 			ssleep(30);
5950 
5951 			if (megasas_transition_to_ready(instance, 0))
5952 				goto fail_ready_state;
5953 		}
5954 
5955 		dev_info(&instance->pdev->dev,
5956 			 "FW restarted successfully from %s!\n",
5957 			 __func__);
5958 	}
5959 
5960 	megasas_init_ctrl_params(instance);
5961 
5962 	if (megasas_set_dma_mask(instance))
5963 		goto fail_ready_state;
5964 
5965 	if (megasas_alloc_ctrl_mem(instance))
5966 		goto fail_alloc_dma_buf;
5967 
5968 	if (megasas_alloc_ctrl_dma_buffers(instance))
5969 		goto fail_alloc_dma_buf;
5970 
5971 	fusion = instance->ctrl_context;
5972 
5973 	if (instance->adapter_type >= VENTURA_SERIES) {
5974 		scratch_pad_2 =
5975 			megasas_readl(instance,
5976 				      &instance->reg_set->outbound_scratch_pad_2);
5977 		instance->max_raid_mapsize = ((scratch_pad_2 >>
5978 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5979 			MR_MAX_RAID_MAP_SIZE_MASK);
5980 	}
5981 
5982 	instance->enable_sdev_max_qd = enable_sdev_max_qd;
5983 
5984 	switch (instance->adapter_type) {
5985 	case VENTURA_SERIES:
5986 		fusion->pcie_bw_limitation = true;
5987 		break;
5988 	case AERO_SERIES:
5989 		fusion->r56_div_offload = true;
5990 		break;
5991 	default:
5992 		break;
5993 	}
5994 
5995 	/* Check if MSI-X is supported while in ready state */
5996 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5997 		       0x4000000) >> 0x1a;
5998 	if (msix_enable && !msix_disable) {
5999 
6000 		scratch_pad_1 = megasas_readl
6001 			(instance, &instance->reg_set->outbound_scratch_pad_1);
6002 		/* Check max MSI-X vectors */
6003 		if (fusion) {
6004 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
6005 				/* Thunderbolt Series*/
6006 				instance->msix_vectors = (scratch_pad_1
6007 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6008 			} else {
6009 				instance->msix_vectors = ((scratch_pad_1
6010 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6011 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6012 
6013 				/*
6014 				 * For Invader series, > 8 MSI-x vectors
6015 				 * supported by FW/HW implies combined
6016 				 * reply queue mode is enabled.
6017 				 * For Ventura series, > 16 MSI-x vectors
6018 				 * supported by FW/HW implies combined
6019 				 * reply queue mode is enabled.
6020 				 */
6021 				switch (instance->adapter_type) {
6022 				case INVADER_SERIES:
6023 					if (instance->msix_vectors > 8)
6024 						instance->msix_combined = true;
6025 					break;
6026 				case AERO_SERIES:
6027 				case VENTURA_SERIES:
6028 					if (instance->msix_vectors > 16)
6029 						instance->msix_combined = true;
6030 					break;
6031 				}
6032 
6033 				if (rdpq_enable)
6034 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6035 								1 : 0;
6036 
6037 				if (instance->adapter_type >= INVADER_SERIES &&
6038 				    !instance->msix_combined) {
6039 					instance->msix_load_balance = true;
6040 					instance->smp_affinity_enable = false;
6041 				}
6042 
				/* Save reply post index addresses 1-15 to
				 * local memory. Index 0 is already saved from
				 * reg offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
				 */
6047 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6048 					instance->reply_post_host_index_addr[loop] =
6049 						(u32 __iomem *)
6050 						((u8 __iomem *)instance->reg_set +
6051 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6052 						+ (loop * 0x10));
6053 				}
6054 			}
6055 
			dev_info(&instance->pdev->dev,
				 "firmware supports msix\t: (%d)\n",
				 instance->msix_vectors);
6059 			if (msix_vectors)
6060 				instance->msix_vectors = min(msix_vectors,
6061 					instance->msix_vectors);
6062 		} else /* MFI adapters */
6063 			instance->msix_vectors = 1;
6064 
6065 
6066 		/*
6067 		 * For Aero (if some conditions are met), driver will configure a
6068 		 * few additional reply queues with interrupt coalescing enabled.
6069 		 * These queues with interrupt coalescing enabled are called
6070 		 * High IOPS queues and rest of reply queues (based on number of
6071 		 * logical CPUs) are termed as Low latency queues.
6072 		 *
6073 		 * Total Number of reply queues = High IOPS queues + low latency queues
6074 		 *
6075 		 * For rest of fusion adapters, 1 additional reply queue will be
6076 		 * reserved for management commands, rest of reply queues
6077 		 * (based on number of logical CPUs) will be used for IOs and
6078 		 * referenced as IO queues.
6079 		 * Total Number of reply queues = 1 + IO queues
6080 		 *
		 * MFI adapters support a single MSI-x vector, so a single
		 * reply queue will be used for both IO and management commands.
6083 		 */
6084 
6085 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6086 								true : false;
6087 		if (intr_coalescing &&
6088 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6089 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6090 			instance->perf_mode = MR_BALANCED_PERF_MODE;
6091 		else
6092 			instance->perf_mode = MR_LATENCY_PERF_MODE;
6093 
6094 
6095 		if (instance->adapter_type == AERO_SERIES) {
6096 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6097 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6098 
6099 			/*
6100 			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6101 			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6102 			 */
6103 			if (speed < 0x4) {
6104 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6105 				fusion->pcie_bw_limitation = true;
6106 			}
6107 
6108 			/*
6109 			 * Performance mode settings provided through module parameter-perf_mode will
6110 			 * take affect only for:
6111 			 * 1. Aero family of adapters.
6112 			 * 2. When user sets module parameter- perf_mode in range of 0-2.
6113 			 */
6114 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6115 				(perf_mode <= MR_LATENCY_PERF_MODE))
6116 				instance->perf_mode = perf_mode;
6117 			/*
6118 			 * If intr coalescing is not supported by controller FW, then IOPS
6119 			 * and Balanced modes are not feasible.
6120 			 */
6121 			if (!intr_coalescing)
6122 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6123 
6124 		}
6125 
6126 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6127 			instance->low_latency_index_start =
6128 				MR_HIGH_IOPS_QUEUE_COUNT;
6129 		else
6130 			instance->low_latency_index_start = 1;
6131 
6132 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6133 
6134 		instance->msix_vectors = min(num_msix_req,
6135 				instance->msix_vectors);
6136 
6137 		megasas_alloc_irq_vectors(instance);
6138 		if (!instance->msix_vectors)
6139 			instance->msix_load_balance = false;
6140 	}
6141 	/*
6142 	 * MSI-X host index 0 is common for all adapter.
6143 	 * It is used for all MPT based Adapters.
6144 	 */
6145 	if (instance->msix_combined) {
6146 		instance->reply_post_host_index_addr[0] =
6147 				(u32 *)((u8 *)instance->reg_set +
6148 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6149 	} else {
6150 		instance->reply_post_host_index_addr[0] =
6151 			(u32 *)((u8 *)instance->reg_set +
6152 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6153 	}
6154 
6155 	if (!instance->msix_vectors) {
6156 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6157 		if (i < 0)
6158 			goto fail_init_adapter;
6159 	}
6160 
6161 	megasas_setup_reply_map(instance);
6162 
6163 	dev_info(&instance->pdev->dev,
6164 		"current msix/online cpus\t: (%d/%d)\n",
6165 		instance->msix_vectors, (unsigned int)num_online_cpus());
6166 	dev_info(&instance->pdev->dev,
6167 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6168 
6169 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6170 		(unsigned long)instance);
6171 
6172 	/*
6173 	 * Below are default value for legacy Firmware.
6174 	 * non-fusion based controllers
6175 	 */
6176 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6177 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6178 	/* Get operational params, sge flags, send init cmd to controller */
6179 	if (instance->instancet->init_adapter(instance))
6180 		goto fail_init_adapter;
6181 
6182 	if (instance->adapter_type >= VENTURA_SERIES) {
6183 		scratch_pad_3 =
6184 			megasas_readl(instance,
6185 				      &instance->reg_set->outbound_scratch_pad_3);
6186 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6187 			MR_DEFAULT_NVME_PAGE_SHIFT)
6188 			instance->nvme_page_size =
6189 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6190 
6191 		dev_info(&instance->pdev->dev,
6192 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6193 	}
6194 
6195 	if (instance->msix_vectors ?
6196 		megasas_setup_irqs_msix(instance, 1) :
6197 		megasas_setup_irqs_ioapic(instance))
6198 		goto fail_init_adapter;
6199 
6200 	if (instance->adapter_type != MFI_SERIES)
6201 		megasas_setup_irq_poll(instance);
6202 
6203 	instance->instancet->enable_intr(instance);
6204 
6205 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6206 
6207 	megasas_setup_jbod_map(instance);
6208 
6209 	if (megasas_get_device_list(instance) != SUCCESS) {
6210 		dev_err(&instance->pdev->dev,
6211 			"%s: megasas_get_device_list failed\n",
6212 			__func__);
6213 		goto fail_get_ld_pd_list;
6214 	}
6215 
6216 	/* stream detection initialization */
6217 	if (instance->adapter_type >= VENTURA_SERIES) {
6218 		fusion->stream_detect_by_ld =
6219 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6220 				sizeof(struct LD_STREAM_DETECT *),
6221 				GFP_KERNEL);
6222 		if (!fusion->stream_detect_by_ld) {
6223 			dev_err(&instance->pdev->dev,
6224 				"unable to allocate stream detection for pool of LDs\n");
6225 			goto fail_get_ld_pd_list;
6226 		}
6227 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6228 			fusion->stream_detect_by_ld[i] =
6229 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6230 				GFP_KERNEL);
6231 			if (!fusion->stream_detect_by_ld[i]) {
				dev_err(&instance->pdev->dev,
					"unable to allocate stream detect by LD\n");
6234 				for (j = 0; j < i; ++j)
6235 					kfree(fusion->stream_detect_by_ld[j]);
6236 				kfree(fusion->stream_detect_by_ld);
6237 				fusion->stream_detect_by_ld = NULL;
6238 				goto fail_get_ld_pd_list;
6239 			}
6240 			fusion->stream_detect_by_ld[i]->mru_bit_map
6241 				= MR_STREAM_BITMAP;
6242 		}
6243 	}
6244 
6245 	/*
6246 	 * Compute the max allowed sectors per IO: The controller info has two
6247 	 * limits on max sectors. Driver should use the minimum of these two.
6248 	 *
6249 	 * 1 << stripe_sz_ops.min = max sectors per strip
6250 	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so the number always ended up as zero.
6253 	 */
6254 	tmp_sectors = 0;
6255 	ctrl_info = instance->ctrl_info_buf;
6256 
6257 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6258 		le16_to_cpu(ctrl_info->max_strips_per_io);
6259 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6260 
6261 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6262 
6263 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6264 	instance->passive = ctrl_info->cluster.passive;
6265 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6266 	instance->UnevenSpanSupport =
6267 		ctrl_info->adapterOperations2.supportUnevenSpans;
6268 	if (instance->UnevenSpanSupport) {
6269 		struct fusion_context *fusion = instance->ctrl_context;
6270 		if (MR_ValidateMapInfo(instance, instance->map_id))
6271 			fusion->fast_path_io = 1;
6272 		else
6273 			fusion->fast_path_io = 0;
6274 
6275 	}
6276 	if (ctrl_info->host_interface.SRIOV) {
6277 		instance->requestorId = ctrl_info->iov.requestorId;
6278 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6279 			if (!ctrl_info->adapterOperations2.activePassive)
6280 			    instance->PlasmaFW111 = 1;
6281 
6282 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6283 			    instance->PlasmaFW111 ? "1.11" : "new");
6284 
6285 			if (instance->PlasmaFW111) {
6286 			    iovPtr = (struct IOV_111 *)
6287 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6288 			    instance->requestorId = iovPtr->requestorId;
6289 			}
6290 		}
6291 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6292 			instance->requestorId);
6293 	}
6294 
6295 	instance->crash_dump_fw_support =
6296 		ctrl_info->adapterOperations3.supportCrashDump;
6297 	instance->crash_dump_drv_support =
6298 		(instance->crash_dump_fw_support &&
6299 		instance->crash_dump_buf);
	if (instance->crash_dump_drv_support) {
		megasas_set_crash_dump_params(instance,
			MR_CRASH_BUF_TURN_OFF);
	} else {
		if (instance->crash_dump_buf)
			dma_free_coherent(&instance->pdev->dev,
				CRASH_DMA_BUF_SIZE,
				instance->crash_dump_buf,
				instance->crash_dump_h);
		instance->crash_dump_buf = NULL;
	}
6312 
6313 	if (instance->snapdump_wait_time) {
6314 		megasas_get_snapdump_properties(instance);
6315 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6316 			 instance->snapdump_wait_time);
6317 	}
6318 
6319 	dev_info(&instance->pdev->dev,
6320 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6321 		le16_to_cpu(ctrl_info->pci.vendor_id),
6322 		le16_to_cpu(ctrl_info->pci.device_id),
6323 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6324 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6325 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6326 		instance->UnevenSpanSupport ? "yes" : "no");
6327 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6328 		instance->crash_dump_drv_support ? "yes" : "no");
6329 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6330 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6331 
6332 	instance->max_sectors_per_req = instance->max_num_sge *
6333 						SGE_BUFFER_SIZE / 512;
6334 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6335 		instance->max_sectors_per_req = tmp_sectors;
6336 
6337 	/* Check for valid throttlequeuedepth module parameter */
6338 	if (throttlequeuedepth &&
6339 			throttlequeuedepth <= instance->max_scsi_cmds)
6340 		instance->throttlequeuedepth = throttlequeuedepth;
6341 	else
6342 		instance->throttlequeuedepth =
6343 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6344 
6345 	if ((resetwaittime < 1) ||
6346 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6347 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6348 
6349 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6350 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6351 
6352 	/* Launch SR-IOV heartbeat timer */
6353 	if (instance->requestorId) {
6354 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6355 			megasas_start_timer(instance);
6356 		} else {
6357 			instance->skip_heartbeat_timer_del = 1;
6358 			goto fail_get_ld_pd_list;
6359 		}
6360 	}
6361 
6362 	/*
6363 	 * Create and start watchdog thread which will monitor
6364 	 * controller state every 1 sec and trigger OCR when
6365 	 * it enters fault state
6366 	 */
6367 	if (instance->adapter_type != MFI_SERIES)
6368 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6369 			goto fail_start_watchdog;
6370 
6371 	return 0;
6372 
6373 fail_start_watchdog:
6374 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6375 		del_timer_sync(&instance->sriov_heartbeat_timer);
6376 fail_get_ld_pd_list:
6377 	instance->instancet->disable_intr(instance);
6378 	megasas_destroy_irqs(instance);
6379 fail_init_adapter:
6380 	if (instance->msix_vectors)
6381 		pci_free_irq_vectors(instance->pdev);
6382 	instance->msix_vectors = 0;
6383 fail_alloc_dma_buf:
6384 	megasas_free_ctrl_dma_buffers(instance);
6385 	megasas_free_ctrl_mem(instance);
6386 fail_ready_state:
6387 	iounmap(instance->reg_set);
6388 
6389 fail_ioremap:
6390 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6391 
6392 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6393 		__func__, __LINE__);
6394 	return -EINVAL;
6395 }
6396 
6397 /**
6398  * megasas_release_mfi -	Reverses the FW initialization
6399  * @instance:			Adapter soft state
6400  */
6401 static void megasas_release_mfi(struct megasas_instance *instance)
6402 {
	u32 reply_q_sz = sizeof(u32) * (instance->max_mfi_cmds + 1);
6404 
6405 	if (instance->reply_queue)
6406 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6407 			    instance->reply_queue, instance->reply_queue_h);
6408 
6409 	megasas_free_cmds(instance);
6410 
6411 	iounmap(instance->reg_set);
6412 
6413 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6414 }
6415 
6416 /**
6417  * megasas_get_seq_num -	Gets latest event sequence numbers
6418  * @instance:			Adapter soft state
6419  * @eli:			FW event log sequence numbers information
6420  *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the seq number at
 * boot etc. They would "read" all the events below the latest seq number
 * by issuing a direct fw cmd (DCMD). For future events (beyond the latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
 * wait for the events to happen.
6427  */
6428 static int
6429 megasas_get_seq_num(struct megasas_instance *instance,
6430 		    struct megasas_evt_log_info *eli)
6431 {
6432 	struct megasas_cmd *cmd;
6433 	struct megasas_dcmd_frame *dcmd;
6434 	struct megasas_evt_log_info *el_info;
6435 	dma_addr_t el_info_h = 0;
6436 	int ret;
6437 
6438 	cmd = megasas_get_cmd(instance);
6439 
6440 	if (!cmd) {
6441 		return -ENOMEM;
6442 	}
6443 
6444 	dcmd = &cmd->frame->dcmd;
6445 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6446 				     sizeof(struct megasas_evt_log_info),
6447 				     &el_info_h, GFP_KERNEL);
6448 	if (!el_info) {
6449 		megasas_return_cmd(instance, cmd);
6450 		return -ENOMEM;
6451 	}
6452 
6453 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6454 
6455 	dcmd->cmd = MFI_CMD_DCMD;
6456 	dcmd->cmd_status = 0x0;
6457 	dcmd->sge_count = 1;
6458 	dcmd->flags = MFI_FRAME_DIR_READ;
6459 	dcmd->timeout = 0;
6460 	dcmd->pad_0 = 0;
6461 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6462 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6463 
6464 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6465 				 sizeof(struct megasas_evt_log_info));
6466 
6467 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6468 	if (ret != DCMD_SUCCESS) {
6469 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6470 			__func__, __LINE__);
6471 		goto dcmd_failed;
6472 	}
6473 
6474 	/*
	 * Copy the data back into the caller's buffer
6476 	 */
6477 	eli->newest_seq_num = el_info->newest_seq_num;
6478 	eli->oldest_seq_num = el_info->oldest_seq_num;
6479 	eli->clear_seq_num = el_info->clear_seq_num;
6480 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6481 	eli->boot_seq_num = el_info->boot_seq_num;
6482 
6483 dcmd_failed:
6484 	dma_free_coherent(&instance->pdev->dev,
6485 			sizeof(struct megasas_evt_log_info),
6486 			el_info, el_info_h);
6487 
6488 	megasas_return_cmd(instance, cmd);
6489 
6490 	return ret;
6491 }
6492 
6493 /**
6494  * megasas_register_aen -	Registers for asynchronous event notification
6495  * @instance:			Adapter soft state
6496  * @seq_num:			The starting sequence number
6497  * @class_locale:		Class of the event
6498  *
6499  * This function subscribes for AEN for events beyond the @seq_num. It requests
6500  * to be notified if and only if the event is of type @class_locale
6501  */
6502 static int
6503 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6504 		     u32 class_locale_word)
6505 {
6506 	int ret_val;
6507 	struct megasas_cmd *cmd;
6508 	struct megasas_dcmd_frame *dcmd;
6509 	union megasas_evt_class_locale curr_aen;
6510 	union megasas_evt_class_locale prev_aen;
6511 
6512 	/*
	 * If there is an AEN pending already (aen_cmd), check if the
6514 	 * class_locale of that pending AEN is inclusive of the new
6515 	 * AEN request we currently have. If it is, then we don't have
6516 	 * to do anything. In other words, whichever events the current
6517 	 * AEN request is subscribing to, have already been subscribed
6518 	 * to.
6519 	 *
6520 	 * If the old_cmd is _not_ inclusive, then we have to abort
6521 	 * that command, form a class_locale that is superset of both
6522 	 * old and current and re-issue to the FW
6523 	 */
6524 
6525 	curr_aen.word = class_locale_word;
6526 
6527 	if (instance->aen_cmd) {
6528 
6529 		prev_aen.word =
6530 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6531 
6532 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6533 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
			dev_info(&instance->pdev->dev,
				 "%s %d out of range class %d sent by application\n",
				 __func__, __LINE__, curr_aen.members.class);
6537 			return 0;
6538 		}
6539 
6540 		/*
6541 		 * A class whose enum value is smaller is inclusive of all
6542 		 * higher values. If a PROGRESS (= -1) was previously
6543 		 * registered, then a new registration requests for higher
6544 		 * classes need not be sent to FW. They are automatically
6545 		 * included.
6546 		 *
6547 		 * Locale numbers don't have such hierarchy. They are bitmap
6548 		 * values
6549 		 */
6550 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6551 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6552 		      curr_aen.members.locale)) {
6553 			/*
6554 			 * Previously issued event registration includes
6555 			 * current request. Nothing to do.
6556 			 */
6557 			return 0;
6558 		} else {
6559 			curr_aen.members.locale |= prev_aen.members.locale;
6560 
6561 			if (prev_aen.members.class < curr_aen.members.class)
6562 				curr_aen.members.class = prev_aen.members.class;
6563 
6564 			instance->aen_cmd->abort_aen = 1;
6565 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6566 								  instance->
6567 								  aen_cmd, 30);
6568 
6569 			if (ret_val) {
6570 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6571 				       "previous AEN command\n");
6572 				return ret_val;
6573 			}
6574 		}
6575 	}
6576 
6577 	cmd = megasas_get_cmd(instance);
6578 
6579 	if (!cmd)
6580 		return -ENOMEM;
6581 
6582 	dcmd = &cmd->frame->dcmd;
6583 
6584 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6585 
6586 	/*
6587 	 * Prepare DCMD for aen registration
6588 	 */
6589 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6590 
6591 	dcmd->cmd = MFI_CMD_DCMD;
6592 	dcmd->cmd_status = 0x0;
6593 	dcmd->sge_count = 1;
6594 	dcmd->flags = MFI_FRAME_DIR_READ;
6595 	dcmd->timeout = 0;
6596 	dcmd->pad_0 = 0;
6597 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6598 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6599 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6600 	instance->last_seq_num = seq_num;
6601 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6602 
6603 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6604 				 sizeof(struct megasas_evt_detail));
6605 
6606 	if (instance->aen_cmd != NULL) {
6607 		megasas_return_cmd(instance, cmd);
6608 		return 0;
6609 	}
6610 
6611 	/*
6612 	 * Store reference to the cmd used to register for AEN. When an
6613 	 * application wants us to register for AEN, we have to abort this
6614 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6615 	 */
6616 	instance->aen_cmd = cmd;
6617 
6618 	/*
6619 	 * Issue the aen registration frame
6620 	 */
6621 	instance->instancet->issue_dcmd(instance, cmd);
6622 
6623 	return 0;
6624 }
6625 
/* megasas_get_target_prop - Send DCMD with below details to firmware.
 *
 * This DCMD fetches a few properties of an LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
6632  *
6633  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6634  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6635  *                       0 = system PD, 1 = LD.
6636  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6637  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6638  *
6639  * @instance:		Adapter soft state
6640  * @sdev:		OS provided scsi device
6641  *
 * Returns 0 on success, non-zero on failure.
6643  */
6644 int
6645 megasas_get_target_prop(struct megasas_instance *instance,
6646 			struct scsi_device *sdev)
6647 {
6648 	int ret;
6649 	struct megasas_cmd *cmd;
6650 	struct megasas_dcmd_frame *dcmd;
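	/*
	 * (channel % 2) folds the two PD channels or the two LD channels into
	 * a single firmware target id range of 2 * MEGASAS_MAX_DEV_PER_CHANNEL
	 * entries; which space is meant is selected via dcmd->mbox.b[0] below.
	 */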
6651 	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6652 			sdev->id;
6653 
6654 	cmd = megasas_get_cmd(instance);
6655 
6656 	if (!cmd) {
6657 		dev_err(&instance->pdev->dev,
6658 			"Failed to get cmd %s\n", __func__);
6659 		return -ENOMEM;
6660 	}
6661 
6662 	dcmd = &cmd->frame->dcmd;
6663 
6664 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6665 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6666 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6667 
6668 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6669 	dcmd->cmd = MFI_CMD_DCMD;
6670 	dcmd->cmd_status = 0xFF;
6671 	dcmd->sge_count = 1;
6672 	dcmd->flags = MFI_FRAME_DIR_READ;
6673 	dcmd->timeout = 0;
6674 	dcmd->pad_0 = 0;
6675 	dcmd->data_xfer_len =
6676 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6677 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6678 
6679 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6680 				 sizeof(struct MR_TARGET_PROPERTIES));
6681 
6682 	if ((instance->adapter_type != MFI_SERIES) &&
6683 	    !instance->mask_interrupts)
6684 		ret = megasas_issue_blocked_cmd(instance,
6685 						cmd, MFI_IO_TIMEOUT_SECS);
6686 	else
6687 		ret = megasas_issue_polled(instance, cmd);
6688 
6689 	switch (ret) {
6690 	case DCMD_TIMEOUT:
6691 		switch (dcmd_timeout_ocr_possible(instance)) {
6692 		case INITIATE_OCR:
6693 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6694 			mutex_unlock(&instance->reset_mutex);
6695 			megasas_reset_fusion(instance->host,
6696 					     MFI_IO_TIMEOUT_OCR);
6697 			mutex_lock(&instance->reset_mutex);
6698 			break;
6699 		case KILL_ADAPTER:
6700 			megaraid_sas_kill_hba(instance);
6701 			break;
6702 		case IGNORE_TIMEOUT:
6703 			dev_info(&instance->pdev->dev,
6704 				 "Ignore DCMD timeout: %s %d\n",
6705 				 __func__, __LINE__);
6706 			break;
6707 		}
6708 		break;
6709 
6710 	default:
6711 		megasas_return_cmd(instance, cmd);
6712 	}
6713 	if (ret != DCMD_SUCCESS)
6714 		dev_err(&instance->pdev->dev,
6715 			"return from %s %d return value %d\n",
6716 			__func__, __LINE__, ret);
6717 
6718 	return ret;
6719 }
6720 
6721 /**
6722  * megasas_start_aen -	Subscribes to AEN during driver load time
6723  * @instance:		Adapter soft state
6724  */
6725 static int megasas_start_aen(struct megasas_instance *instance)
6726 {
6727 	struct megasas_evt_log_info eli;
6728 	union megasas_evt_class_locale class_locale;
6729 
6730 	/*
6731 	 * Get the latest sequence number from FW
6732 	 */
6733 	memset(&eli, 0, sizeof(eli));
6734 
6735 	if (megasas_get_seq_num(instance, &eli))
6736 		return -1;
6737 
6738 	/*
6739 	 * Register AEN with FW for latest sequence number plus 1
6740 	 */
6741 	class_locale.members.reserved = 0;
6742 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6743 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6744 
6745 	return megasas_register_aen(instance,
6746 			le32_to_cpu(eli.newest_seq_num) + 1,
6747 			class_locale.word);
6748 }
6749 
6750 /**
6751  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6752  * @instance:		Adapter soft state
6753  */
6754 static int megasas_io_attach(struct megasas_instance *instance)
6755 {
6756 	struct Scsi_Host *host = instance->host;
6757 
6758 	/*
6759 	 * Export parameters required by SCSI mid-layer
6760 	 */
6761 	host->unique_id = instance->unique_id;
6762 	host->can_queue = instance->max_scsi_cmds;
6763 	host->this_id = instance->init_id;
6764 	host->sg_tablesize = instance->max_num_sge;
6765 
6766 	if (instance->fw_support_ieee)
6767 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6768 
6769 	/*
6770 	 * Check if the module parameter value for max_sectors can be used
6771 	 */
6772 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6773 		instance->max_sectors_per_req = max_sectors;
6774 	else {
6775 		if (max_sectors) {
6776 			if (((instance->pdev->device ==
6777 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6778 				(instance->pdev->device ==
6779 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6780 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6781 				instance->max_sectors_per_req = max_sectors;
			} else {
				dev_info(&instance->pdev->dev,
					 "max_sectors should be > 0 and <= %d (or < 1MB for GEN2 controller)\n",
					 instance->max_sectors_per_req);
			}
6787 		}
6788 	}
6789 
6790 	host->max_sectors = instance->max_sectors_per_req;
6791 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6792 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6793 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6794 	host->max_lun = MEGASAS_MAX_LUN;
6795 	host->max_cmd_len = 16;
6796 
6797 	/*
6798 	 * Notify the mid-layer about the new controller
6799 	 */
6800 	if (scsi_add_host(host, &instance->pdev->dev)) {
6801 		dev_err(&instance->pdev->dev,
6802 			"Failed to add host from %s %d\n",
6803 			__func__, __LINE__);
6804 		return -ENODEV;
6805 	}
6806 
6807 	return 0;
6808 }
6809 
6810 /**
6811  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6812  *
6813  * @instance:		Adapter soft state
6814  * Description:
6815  *
6816  * For Ventura, driver/FW will operate in 63bit DMA addresses.
6817  *
6818  * For invader-
6819  *	By default, driver/FW will operate in 32bit DMA addresses
6820  *	for consistent DMA mapping but if 32 bit consistent
6821  *	DMA mask fails, driver will try with 63 bit consistent
6822  *	mask provided FW is true 63bit DMA capable
6823  *
6824  * For older controllers(Thunderbolt and MFI based adapters)-
6825  *	driver/FW will operate in 32 bit consistent DMA addresses.
6826  */
6827 static int
6828 megasas_set_dma_mask(struct megasas_instance *instance)
6829 {
6830 	u64 consistent_mask;
6831 	struct pci_dev *pdev;
6832 	u32 scratch_pad_1;
6833 
6834 	pdev = instance->pdev;
6835 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6836 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
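
	/*
	 * 63-bit (not full 64-bit) masks are used throughout so that the top
	 * address bit is never set in addresses handed to the firmware. The
	 * streaming mask is tried at 63 bits first; the coherent mask defaults
	 * to 32 bits on pre-Ventura adapters and is only raised to 63 bits if
	 * the FW advertises 64-bit DMA capability in scratch pad 1.
	 */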
6837 
6838 	if (IS_DMA64) {
6839 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6840 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6841 			goto fail_set_dma_mask;
6842 
6843 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6844 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6845 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6846 			/*
6847 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6848 			 * for FW capable of handling 64 bit DMA.
6849 			 */
6850 			scratch_pad_1 = megasas_readl
6851 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6852 
6853 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6854 				goto fail_set_dma_mask;
6855 			else if (dma_set_mask_and_coherent(&pdev->dev,
6856 							   DMA_BIT_MASK(63)))
6857 				goto fail_set_dma_mask;
6858 		}
6859 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6860 		goto fail_set_dma_mask;
6861 
6862 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6863 		instance->consistent_mask_64bit = false;
6864 	else
6865 		instance->consistent_mask_64bit = true;
6866 
6867 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6868 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6869 		 (instance->consistent_mask_64bit ? "63" : "32"));
6870 
6871 	return 0;
6872 
6873 fail_set_dma_mask:
6874 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6875 	return -1;
6876 
6877 }
6878 
6879 /*
6880  * megasas_set_adapter_type -	Set adapter type.
6881  *				Supported controllers can be divided in
6882  *				different categories-
6883  *					enum MR_ADAPTER_TYPE {
6884  *						MFI_SERIES = 1,
6885  *						THUNDERBOLT_SERIES = 2,
6886  *						INVADER_SERIES = 3,
6887  *						VENTURA_SERIES = 4,
6888  *						AERO_SERIES = 5,
6889  *					};
6890  * @instance:			Adapter soft state
6891  * return:			void
6892  */
6893 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6894 {
6895 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6896 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6897 		instance->adapter_type = MFI_SERIES;
6898 	} else {
6899 		switch (instance->pdev->device) {
6900 		case PCI_DEVICE_ID_LSI_AERO_10E1:
6901 		case PCI_DEVICE_ID_LSI_AERO_10E2:
6902 		case PCI_DEVICE_ID_LSI_AERO_10E5:
6903 		case PCI_DEVICE_ID_LSI_AERO_10E6:
6904 			instance->adapter_type = AERO_SERIES;
6905 			break;
6906 		case PCI_DEVICE_ID_LSI_VENTURA:
6907 		case PCI_DEVICE_ID_LSI_CRUSADER:
6908 		case PCI_DEVICE_ID_LSI_HARPOON:
6909 		case PCI_DEVICE_ID_LSI_TOMCAT:
6910 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6911 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6912 			instance->adapter_type = VENTURA_SERIES;
6913 			break;
6914 		case PCI_DEVICE_ID_LSI_FUSION:
6915 		case PCI_DEVICE_ID_LSI_PLASMA:
6916 			instance->adapter_type = THUNDERBOLT_SERIES;
6917 			break;
6918 		case PCI_DEVICE_ID_LSI_INVADER:
6919 		case PCI_DEVICE_ID_LSI_INTRUDER:
6920 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6921 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6922 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6923 		case PCI_DEVICE_ID_LSI_FURY:
6924 			instance->adapter_type = INVADER_SERIES;
6925 			break;
6926 		default: /* For all other supported controllers */
6927 			instance->adapter_type = MFI_SERIES;
6928 			break;
6929 		}
6930 	}
6931 }
6932 
6933 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6934 {
6935 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6936 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
6937 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6938 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6939 
6940 	if (!instance->producer || !instance->consumer) {
6941 		dev_err(&instance->pdev->dev,
6942 			"Failed to allocate memory for producer, consumer\n");
6943 		return -1;
6944 	}
6945 
6946 	*instance->producer = 0;
6947 	*instance->consumer = 0;
6948 	return 0;
6949 }
6950 
6951 /**
6952  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6953  *				structures which are not common across MFI
6954  *				adapters and fusion adapters.
6955  *				For MFI based adapters, allocate producer and
6956  *				consumer buffers. For fusion adapters, allocate
6957  *				memory for fusion context.
6958  * @instance:			Adapter soft state
6959  * return:			0 for SUCCESS
6960  */
6961 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6962 {
6963 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6964 				      GFP_KERNEL);
6965 	if (!instance->reply_map)
6966 		return -ENOMEM;
6967 
6968 	switch (instance->adapter_type) {
6969 	case MFI_SERIES:
6970 		if (megasas_alloc_mfi_ctrl_mem(instance))
6971 			goto fail;
6972 		break;
6973 	case AERO_SERIES:
6974 	case VENTURA_SERIES:
6975 	case THUNDERBOLT_SERIES:
6976 	case INVADER_SERIES:
6977 		if (megasas_alloc_fusion_context(instance))
6978 			goto fail;
6979 		break;
6980 	}
6981 
6982 	return 0;
6983  fail:
6984 	kfree(instance->reply_map);
6985 	instance->reply_map = NULL;
6986 	return -ENOMEM;
6987 }
6988 
6989 /*
6990  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6991  *				producer, consumer buffers for MFI adapters
6992  *
6993  * @instance -			Adapter soft instance
6994  *
6995  */
6996 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6997 {
6998 	kfree(instance->reply_map);
6999 	if (instance->adapter_type == MFI_SERIES) {
7000 		if (instance->producer)
7001 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7002 					    instance->producer,
7003 					    instance->producer_h);
7004 		if (instance->consumer)
7005 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7006 					    instance->consumer,
7007 					    instance->consumer_h);
7008 	} else {
7009 		megasas_free_fusion_context(instance);
7010 	}
7011 }
7012 
7013 /**
7014  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
7015  *					driver load time
7016  *
 * @instance:				Adapter soft instance
 * @return:				0 for SUCCESS
7019  */
7020 static inline
7021 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7022 {
7023 	struct pci_dev *pdev = instance->pdev;
7024 	struct fusion_context *fusion = instance->ctrl_context;
7025 
7026 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7027 			sizeof(struct megasas_evt_detail),
7028 			&instance->evt_detail_h, GFP_KERNEL);
7029 
7030 	if (!instance->evt_detail) {
7031 		dev_err(&instance->pdev->dev,
7032 			"Failed to allocate event detail buffer\n");
7033 		return -ENOMEM;
7034 	}
7035 
7036 	if (fusion) {
7037 		fusion->ioc_init_request =
7038 			dma_alloc_coherent(&pdev->dev,
7039 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7040 					   &fusion->ioc_init_request_phys,
7041 					   GFP_KERNEL);
7042 
7043 		if (!fusion->ioc_init_request) {
			dev_err(&pdev->dev,
				"Failed to allocate ioc init request buffer\n");
7046 			return -ENOMEM;
7047 		}
7048 
7049 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7050 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7051 				&instance->snapdump_prop_h, GFP_KERNEL);
7052 
7053 		if (!instance->snapdump_prop)
7054 			dev_err(&pdev->dev,
7055 				"Failed to allocate snapdump properties buffer\n");
7056 
7057 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7058 							HOST_DEVICE_LIST_SZ,
7059 							&instance->host_device_list_buf_h,
7060 							GFP_KERNEL);
7061 
7062 		if (!instance->host_device_list_buf) {
7063 			dev_err(&pdev->dev,
7064 				"Failed to allocate targetid list buffer\n");
7065 			return -ENOMEM;
7066 		}
7067 
7068 	}
7069 
7070 	instance->pd_list_buf =
7071 		dma_alloc_coherent(&pdev->dev,
7072 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7073 				     &instance->pd_list_buf_h, GFP_KERNEL);
7074 
7075 	if (!instance->pd_list_buf) {
7076 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7077 		return -ENOMEM;
7078 	}
7079 
7080 	instance->ctrl_info_buf =
7081 		dma_alloc_coherent(&pdev->dev,
7082 				     sizeof(struct megasas_ctrl_info),
7083 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7084 
7085 	if (!instance->ctrl_info_buf) {
7086 		dev_err(&pdev->dev,
7087 			"Failed to allocate controller info buffer\n");
7088 		return -ENOMEM;
7089 	}
7090 
7091 	instance->ld_list_buf =
7092 		dma_alloc_coherent(&pdev->dev,
7093 				     sizeof(struct MR_LD_LIST),
7094 				     &instance->ld_list_buf_h, GFP_KERNEL);
7095 
7096 	if (!instance->ld_list_buf) {
7097 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7098 		return -ENOMEM;
7099 	}
7100 
7101 	instance->ld_targetid_list_buf =
7102 		dma_alloc_coherent(&pdev->dev,
7103 				sizeof(struct MR_LD_TARGETID_LIST),
7104 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7105 
7106 	if (!instance->ld_targetid_list_buf) {
7107 		dev_err(&pdev->dev,
7108 			"Failed to allocate LD targetid list buffer\n");
7109 		return -ENOMEM;
7110 	}
7111 
7112 	if (!reset_devices) {
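		/*
		 * The buffers below are optional: they are skipped in the
		 * kdump kernel (reset_devices) to save memory, and a failed
		 * allocation only logs a warning further down.
		 */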
7113 		instance->system_info_buf =
7114 			dma_alloc_coherent(&pdev->dev,
7115 					sizeof(struct MR_DRV_SYSTEM_INFO),
7116 					&instance->system_info_h, GFP_KERNEL);
7117 		instance->pd_info =
7118 			dma_alloc_coherent(&pdev->dev,
7119 					sizeof(struct MR_PD_INFO),
7120 					&instance->pd_info_h, GFP_KERNEL);
7121 		instance->tgt_prop =
7122 			dma_alloc_coherent(&pdev->dev,
7123 					sizeof(struct MR_TARGET_PROPERTIES),
7124 					&instance->tgt_prop_h, GFP_KERNEL);
7125 		instance->crash_dump_buf =
7126 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7127 					&instance->crash_dump_h, GFP_KERNEL);
7128 
7129 		if (!instance->system_info_buf)
7130 			dev_err(&instance->pdev->dev,
7131 				"Failed to allocate system info buffer\n");
7132 
7133 		if (!instance->pd_info)
7134 			dev_err(&instance->pdev->dev,
7135 				"Failed to allocate pd_info buffer\n");
7136 
7137 		if (!instance->tgt_prop)
7138 			dev_err(&instance->pdev->dev,
7139 				"Failed to allocate tgt_prop buffer\n");
7140 
7141 		if (!instance->crash_dump_buf)
7142 			dev_err(&instance->pdev->dev,
7143 				"Failed to allocate crash dump buffer\n");
7144 	}
7145 
7146 	return 0;
7147 }
7148 
7149 /*
7150  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7151  *					during driver load time
7152  *
 * @instance:				Adapter soft instance
7154  *
7155  */
7156 static inline
7157 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7158 {
7159 	struct pci_dev *pdev = instance->pdev;
7160 	struct fusion_context *fusion = instance->ctrl_context;
7161 
7162 	if (instance->evt_detail)
7163 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7164 				    instance->evt_detail,
7165 				    instance->evt_detail_h);
7166 
7167 	if (fusion && fusion->ioc_init_request)
7168 		dma_free_coherent(&pdev->dev,
7169 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7170 				  fusion->ioc_init_request,
7171 				  fusion->ioc_init_request_phys);
7172 
7173 	if (instance->pd_list_buf)
7174 		dma_free_coherent(&pdev->dev,
7175 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7176 				    instance->pd_list_buf,
7177 				    instance->pd_list_buf_h);
7178 
7179 	if (instance->ld_list_buf)
7180 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7181 				    instance->ld_list_buf,
7182 				    instance->ld_list_buf_h);
7183 
7184 	if (instance->ld_targetid_list_buf)
7185 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7186 				    instance->ld_targetid_list_buf,
7187 				    instance->ld_targetid_list_buf_h);
7188 
7189 	if (instance->ctrl_info_buf)
7190 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7191 				    instance->ctrl_info_buf,
7192 				    instance->ctrl_info_buf_h);
7193 
7194 	if (instance->system_info_buf)
7195 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7196 				    instance->system_info_buf,
7197 				    instance->system_info_h);
7198 
7199 	if (instance->pd_info)
7200 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7201 				    instance->pd_info, instance->pd_info_h);
7202 
7203 	if (instance->tgt_prop)
7204 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7205 				    instance->tgt_prop, instance->tgt_prop_h);
7206 
7207 	if (instance->crash_dump_buf)
7208 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7209 				    instance->crash_dump_buf,
7210 				    instance->crash_dump_h);
7211 
7212 	if (instance->snapdump_prop)
7213 		dma_free_coherent(&pdev->dev,
7214 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7215 				  instance->snapdump_prop,
7216 				  instance->snapdump_prop_h);
7217 
7218 	if (instance->host_device_list_buf)
7219 		dma_free_coherent(&pdev->dev,
7220 				  HOST_DEVICE_LIST_SZ,
7221 				  instance->host_device_list_buf,
7222 				  instance->host_device_list_buf_h);
7223 
7224 }
7225 
7226 /*
7227  * megasas_init_ctrl_params -		Initialize controller's instance
7228  *					parameters before FW init
 * @instance:				Adapter soft instance
 * @return:				void
7231  */
7232 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7233 {
7234 	instance->fw_crash_state = UNAVAILABLE;
7235 
7236 	megasas_poll_wait_aen = 0;
7237 	instance->issuepend_done = 1;
7238 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7239 
7240 	/*
7241 	 * Initialize locks and queues
7242 	 */
7243 	INIT_LIST_HEAD(&instance->cmd_pool);
7244 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7245 
7246 	atomic_set(&instance->fw_outstanding, 0);
7247 	atomic64_set(&instance->total_io_count, 0);
7248 
7249 	init_waitqueue_head(&instance->int_cmd_wait_q);
7250 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7251 
7252 	spin_lock_init(&instance->crashdump_lock);
7253 	spin_lock_init(&instance->mfi_pool_lock);
7254 	spin_lock_init(&instance->hba_lock);
7255 	spin_lock_init(&instance->stream_lock);
7256 	spin_lock_init(&instance->completion_lock);
7257 
7258 	mutex_init(&instance->reset_mutex);
7259 
7260 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7261 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7262 		instance->flag_ieee = 1;
7263 
7264 	megasas_dbg_lvl = 0;
7265 	instance->flag = 0;
7266 	instance->unload = 1;
7267 	instance->last_time = 0;
7268 	instance->disableOnlineCtrlReset = 1;
7269 	instance->UnevenSpanSupport = 0;
7270 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7271 	instance->msix_load_balance = false;
7272 
7273 	if (instance->adapter_type != MFI_SERIES)
7274 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7275 	else
7276 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7277 }
7278 
7279 /**
7280  * megasas_probe_one -	PCI hotplug entry point
7281  * @pdev:		PCI device structure
7282  * @id:			PCI ids of supported hotplugged adapter
7283  */
7284 static int megasas_probe_one(struct pci_dev *pdev,
7285 			     const struct pci_device_id *id)
7286 {
7287 	int rval, pos;
7288 	struct Scsi_Host *host;
7289 	struct megasas_instance *instance;
7290 	u16 control = 0;
7291 
7292 	switch (pdev->device) {
7293 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7294 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7295 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7296 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7297 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7298 		return 1;
7299 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7300 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7301 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7302 		break;
7303 	}
7304 
7305 	/* Reset MSI-X in the kdump kernel */
7306 	if (reset_devices) {
7307 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7308 		if (pos) {
7309 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7310 					     &control);
7311 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7312 				dev_info(&pdev->dev, "resetting MSI-X\n");
7313 				pci_write_config_word(pdev,
7314 						      pos + PCI_MSIX_FLAGS,
7315 						      control &
7316 						      ~PCI_MSIX_FLAGS_ENABLE);
7317 			}
7318 		}
7319 	}
7320 
7321 	/*
	 * PCI prepping: enable device, set bus mastering and dma mask
7323 	 */
7324 	rval = pci_enable_device_mem(pdev);
7325 
7326 	if (rval) {
7327 		return rval;
7328 	}
7329 
7330 	pci_set_master(pdev);
7331 
7332 	host = scsi_host_alloc(&megasas_template,
7333 			       sizeof(struct megasas_instance));
7334 
7335 	if (!host) {
7336 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7337 		goto fail_alloc_instance;
7338 	}
7339 
7340 	instance = (struct megasas_instance *)host->hostdata;
7341 	memset(instance, 0, sizeof(*instance));
7342 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7343 
7344 	/*
7345 	 * Initialize PCI related and misc parameters
7346 	 */
7347 	instance->pdev = pdev;
7348 	instance->host = host;
7349 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7350 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7351 
7352 	megasas_set_adapter_type(instance);
7353 
7354 	/*
7355 	 * Initialize MFI Firmware
7356 	 */
7357 	if (megasas_init_fw(instance))
7358 		goto fail_init_mfi;
7359 
7360 	if (instance->requestorId) {
7361 		if (instance->PlasmaFW111) {
7362 			instance->vf_affiliation_111 =
7363 				dma_alloc_coherent(&pdev->dev,
7364 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7365 					&instance->vf_affiliation_111_h,
7366 					GFP_KERNEL);
7367 			if (!instance->vf_affiliation_111)
7368 				dev_warn(&pdev->dev, "Can't allocate "
7369 				       "memory for VF affiliation buffer\n");
7370 		} else {
7371 			instance->vf_affiliation =
7372 				dma_alloc_coherent(&pdev->dev,
7373 					(MAX_LOGICAL_DRIVES + 1) *
7374 					sizeof(struct MR_LD_VF_AFFILIATION),
7375 					&instance->vf_affiliation_h,
7376 					GFP_KERNEL);
7377 			if (!instance->vf_affiliation)
7378 				dev_warn(&pdev->dev, "Can't allocate "
7379 				       "memory for VF affiliation buffer\n");
7380 		}
7381 	}
7382 
7383 	/*
7384 	 * Store instance in PCI softstate
7385 	 */
7386 	pci_set_drvdata(pdev, instance);
7387 
7388 	/*
7389 	 * Add this controller to megasas_mgmt_info structure so that it
7390 	 * can be exported to management applications
7391 	 */
7392 	megasas_mgmt_info.count++;
7393 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7394 	megasas_mgmt_info.max_index++;
7395 
7396 	/*
7397 	 * Register with SCSI mid-layer
7398 	 */
7399 	if (megasas_io_attach(instance))
7400 		goto fail_io_attach;
7401 
7402 	instance->unload = 0;
7403 	/*
7404 	 * Trigger SCSI to scan our drives
7405 	 */
7406 	if (!instance->enable_fw_dev_list ||
7407 	    (instance->host_device_list_buf->count > 0))
7408 		scsi_scan_host(host);
7409 
7410 	/*
7411 	 * Initiate AEN (Asynchronous Event Notification)
7412 	 */
7413 	if (megasas_start_aen(instance)) {
7414 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7415 		goto fail_start_aen;
7416 	}
7417 
7418 	megasas_setup_debugfs(instance);
7419 
7420 	/* Get current SR-IOV LD/VF affiliation */
7421 	if (instance->requestorId)
7422 		megasas_get_ld_vf_affiliation(instance, 1);
7423 
7424 	return 0;
7425 
7426 fail_start_aen:
7427 fail_io_attach:
7428 	megasas_mgmt_info.count--;
7429 	megasas_mgmt_info.max_index--;
7430 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7431 
7432 	instance->instancet->disable_intr(instance);
7433 	megasas_destroy_irqs(instance);
7434 
7435 	if (instance->adapter_type != MFI_SERIES)
7436 		megasas_release_fusion(instance);
7437 	else
7438 		megasas_release_mfi(instance);
7439 	if (instance->msix_vectors)
7440 		pci_free_irq_vectors(instance->pdev);
7441 fail_init_mfi:
7442 	scsi_host_put(host);
7443 fail_alloc_instance:
7444 	pci_disable_device(pdev);
7445 
7446 	return -ENODEV;
7447 }
7448 
7449 /**
7450  * megasas_flush_cache -	Requests FW to flush all its caches
7451  * @instance:			Adapter soft state
7452  */
7453 static void megasas_flush_cache(struct megasas_instance *instance)
7454 {
7455 	struct megasas_cmd *cmd;
7456 	struct megasas_dcmd_frame *dcmd;
7457 
7458 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7459 		return;
7460 
7461 	cmd = megasas_get_cmd(instance);
7462 
7463 	if (!cmd)
7464 		return;
7465 
7466 	dcmd = &cmd->frame->dcmd;
7467 
7468 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7469 
7470 	dcmd->cmd = MFI_CMD_DCMD;
7471 	dcmd->cmd_status = 0x0;
7472 	dcmd->sge_count = 0;
7473 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7474 	dcmd->timeout = 0;
7475 	dcmd->pad_0 = 0;
7476 	dcmd->data_xfer_len = 0;
7477 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
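	/* Request a flush of both the controller cache and the disk caches */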
7478 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7479 
7480 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7481 			!= DCMD_SUCCESS) {
7482 		dev_err(&instance->pdev->dev,
7483 			"return from %s %d\n", __func__, __LINE__);
7484 		return;
7485 	}
7486 
7487 	megasas_return_cmd(instance, cmd);
7488 }
7489 
7490 /**
 * megasas_shutdown_controller -	Instructs FW to shut down the controller
7492  * @instance:				Adapter soft state
7493  * @opcode:				Shutdown/Hibernate
7494  */
7495 static void megasas_shutdown_controller(struct megasas_instance *instance,
7496 					u32 opcode)
7497 {
7498 	struct megasas_cmd *cmd;
7499 	struct megasas_dcmd_frame *dcmd;
7500 
7501 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7502 		return;
7503 
7504 	cmd = megasas_get_cmd(instance);
7505 
7506 	if (!cmd)
7507 		return;
7508 
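	/*
	 * Abort any outstanding internal commands (AEN, RAID map update,
	 * JBOD sequence) so they cannot complete after the controller is
	 * shut down or hibernated.
	 */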
7509 	if (instance->aen_cmd)
7510 		megasas_issue_blocked_abort_cmd(instance,
7511 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7512 	if (instance->map_update_cmd)
7513 		megasas_issue_blocked_abort_cmd(instance,
7514 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7515 	if (instance->jbod_seq_cmd)
7516 		megasas_issue_blocked_abort_cmd(instance,
7517 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7518 
7519 	dcmd = &cmd->frame->dcmd;
7520 
7521 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7522 
7523 	dcmd->cmd = MFI_CMD_DCMD;
7524 	dcmd->cmd_status = 0x0;
7525 	dcmd->sge_count = 0;
7526 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7527 	dcmd->timeout = 0;
7528 	dcmd->pad_0 = 0;
7529 	dcmd->data_xfer_len = 0;
7530 	dcmd->opcode = cpu_to_le32(opcode);
7531 
7532 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7533 			!= DCMD_SUCCESS) {
7534 		dev_err(&instance->pdev->dev,
7535 			"return from %s %d\n", __func__, __LINE__);
7536 		return;
7537 	}
7538 
7539 	megasas_return_cmd(instance, cmd);
7540 }
7541 
7542 #ifdef CONFIG_PM
7543 /**
7544  * megasas_suspend -	driver suspend entry point
7545  * @pdev:		PCI device structure
7546  * @state:		PCI power state to suspend routine
7547  */
7548 static int
7549 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7550 {
7551 	struct megasas_instance *instance;
7552 
7553 	instance = pci_get_drvdata(pdev);
7554 
7555 	if (!instance)
7556 		return 0;
7557 
7558 	instance->unload = 1;
7559 
7560 	dev_info(&pdev->dev, "%s is called\n", __func__);
7561 
7562 	/* Shutdown SR-IOV heartbeat timer */
7563 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7564 		del_timer_sync(&instance->sriov_heartbeat_timer);
7565 
7566 	/* Stop the FW fault detection watchdog */
7567 	if (instance->adapter_type != MFI_SERIES)
7568 		megasas_fusion_stop_watchdog(instance);
7569 
7570 	megasas_flush_cache(instance);
7571 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7572 
	/* cancel the delayed work if it is still queued */
7574 	if (instance->ev != NULL) {
7575 		struct megasas_aen_event *ev = instance->ev;
7576 		cancel_delayed_work_sync(&ev->hotplug_work);
7577 		instance->ev = NULL;
7578 	}
7579 
7580 	tasklet_kill(&instance->isr_tasklet);
7581 
7582 	pci_set_drvdata(instance->pdev, instance);
7583 	instance->instancet->disable_intr(instance);
7584 
7585 	megasas_destroy_irqs(instance);
7586 
7587 	if (instance->msix_vectors)
7588 		pci_free_irq_vectors(instance->pdev);
7589 
7590 	pci_save_state(pdev);
7591 	pci_disable_device(pdev);
7592 
7593 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7594 
7595 	return 0;
7596 }
7597 
7598 /**
7599  * megasas_resume-      driver resume entry point
7600  * @pdev:               PCI device structure
7601  */
7602 static int
7603 megasas_resume(struct pci_dev *pdev)
7604 {
7605 	int rval;
7606 	struct Scsi_Host *host;
7607 	struct megasas_instance *instance;
7608 	u32 status_reg;
7609 
7610 	instance = pci_get_drvdata(pdev);
7611 
7612 	if (!instance)
7613 		return 0;
7614 
7615 	host = instance->host;
7616 	pci_set_power_state(pdev, PCI_D0);
7617 	pci_enable_wake(pdev, PCI_D0, 0);
7618 	pci_restore_state(pdev);
7619 
7620 	dev_info(&pdev->dev, "%s is called\n", __func__);
7621 	/*
	 * PCI prepping: enable device, set bus mastering and dma mask
7623 	 */
7624 	rval = pci_enable_device_mem(pdev);
7625 
7626 	if (rval) {
7627 		dev_err(&pdev->dev, "Enable device failed\n");
7628 		return rval;
7629 	}
7630 
7631 	pci_set_master(pdev);
7632 
7633 	/*
7634 	 * We expect the FW state to be READY
7635 	 */
7636 
7637 	if (megasas_transition_to_ready(instance, 0)) {
7638 		dev_info(&instance->pdev->dev,
7639 			 "Failed to transition controller to ready from %s!\n",
7640 			 __func__);
7641 		if (instance->adapter_type != MFI_SERIES) {
7642 			status_reg =
7643 				instance->instancet->read_fw_status_reg(instance);
7644 			if (!(status_reg & MFI_RESET_ADAPTER) ||
7645 				((megasas_adp_reset_wait_for_ready
7646 				(instance, true, 0)) == FAILED))
7647 				goto fail_ready_state;
7648 		} else {
7649 			atomic_set(&instance->fw_reset_no_pci_access, 1);
7650 			instance->instancet->adp_reset
7651 				(instance, instance->reg_set);
7652 			atomic_set(&instance->fw_reset_no_pci_access, 0);
7653 
7654 			/* waiting for about 30 seconds before retry */
7655 			ssleep(30);
7656 
7657 			if (megasas_transition_to_ready(instance, 0))
7658 				goto fail_ready_state;
7659 		}
7660 
7661 		dev_info(&instance->pdev->dev,
7662 			 "FW restarted successfully from %s!\n",
7663 			 __func__);
7664 	}
7665 	if (megasas_set_dma_mask(instance))
7666 		goto fail_set_dma_mask;
7667 
7668 	/*
7669 	 * Initialize MFI Firmware
7670 	 */
7671 
7672 	atomic_set(&instance->fw_outstanding, 0);
7673 	atomic_set(&instance->ldio_outstanding, 0);
7674 
7675 	/* Now re-enable MSI-X */
7676 	if (instance->msix_vectors)
7677 		megasas_alloc_irq_vectors(instance);
7678 
7679 	if (!instance->msix_vectors) {
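		/* MSI-X is unavailable; fall back to a single legacy INTx vector */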
7680 		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7681 					     PCI_IRQ_LEGACY);
7682 		if (rval < 0)
7683 			goto fail_reenable_msix;
7684 	}
7685 
7686 	megasas_setup_reply_map(instance);
7687 
7688 	if (instance->adapter_type != MFI_SERIES) {
7689 		megasas_reset_reply_desc(instance);
7690 		if (megasas_ioc_init_fusion(instance)) {
7691 			megasas_free_cmds(instance);
7692 			megasas_free_cmds_fusion(instance);
7693 			goto fail_init_mfi;
7694 		}
7695 		if (!megasas_get_map_info(instance))
7696 			megasas_sync_map_info(instance);
7697 	} else {
7698 		*instance->producer = 0;
7699 		*instance->consumer = 0;
7700 		if (megasas_issue_init_mfi(instance))
7701 			goto fail_init_mfi;
7702 	}
7703 
7704 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7705 		goto fail_init_mfi;
7706 
7707 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7708 		     (unsigned long)instance);
7709 
7710 	if (instance->msix_vectors ?
7711 			megasas_setup_irqs_msix(instance, 0) :
7712 			megasas_setup_irqs_ioapic(instance))
7713 		goto fail_init_mfi;
7714 
7715 	if (instance->adapter_type != MFI_SERIES)
7716 		megasas_setup_irq_poll(instance);
7717 
7718 	/* Re-launch SR-IOV heartbeat timer */
7719 	if (instance->requestorId) {
7720 		if (!megasas_sriov_start_heartbeat(instance, 0))
7721 			megasas_start_timer(instance);
7722 		else {
7723 			instance->skip_heartbeat_timer_del = 1;
7724 			goto fail_init_mfi;
7725 		}
7726 	}
7727 
7728 	instance->instancet->enable_intr(instance);
7729 	megasas_setup_jbod_map(instance);
7730 	instance->unload = 0;
7731 
7732 	/*
7733 	 * Initiate AEN (Asynchronous Event Notification)
7734 	 */
7735 	if (megasas_start_aen(instance))
7736 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7737 
7738 	/* Re-launch FW fault watchdog */
7739 	if (instance->adapter_type != MFI_SERIES)
7740 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7741 			goto fail_start_watchdog;
7742 
7743 	return 0;
7744 
7745 fail_start_watchdog:
7746 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7747 		del_timer_sync(&instance->sriov_heartbeat_timer);
7748 fail_init_mfi:
7749 	megasas_free_ctrl_dma_buffers(instance);
7750 	megasas_free_ctrl_mem(instance);
7751 	scsi_host_put(host);
7752 
7753 fail_reenable_msix:
7754 fail_set_dma_mask:
7755 fail_ready_state:
7756 
7757 	pci_disable_device(pdev);
7758 
7759 	return -ENODEV;
7760 }
7761 #else
7762 #define megasas_suspend	NULL
7763 #define megasas_resume	NULL
7764 #endif
7765 
7766 static inline int
7767 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7768 {
7769 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7770 	int i;
7771 	u8 adp_state;
7772 
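	/*
	 * Poll once per second, for up to twice the reset wait time, for
	 * any in-progress controller reset (OCR) to finish.
	 */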
7773 	for (i = 0; i < wait_time; i++) {
7774 		adp_state = atomic_read(&instance->adprecovery);
7775 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7776 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7777 			break;
7778 
7779 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7780 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7781 
7782 		msleep(1000);
7783 	}
7784 
7785 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7786 		dev_info(&instance->pdev->dev,
7787 			 "%s HBA failed to become operational, adp_state %d\n",
7788 			 __func__, adp_state);
7789 		return 1;
7790 	}
7791 
7792 	return 0;
7793 }
7794 
7795 /**
7796  * megasas_detach_one -	PCI hot"un"plug entry point
7797  * @pdev:		PCI device structure
7798  */
7799 static void megasas_detach_one(struct pci_dev *pdev)
7800 {
7801 	int i;
7802 	struct Scsi_Host *host;
7803 	struct megasas_instance *instance;
7804 	struct fusion_context *fusion;
7805 	u32 pd_seq_map_sz;
7806 
7807 	instance = pci_get_drvdata(pdev);
7808 
7809 	if (!instance)
7810 		return;
7811 
7812 	host = instance->host;
7813 	fusion = instance->ctrl_context;
7814 
7815 	/* Shutdown SR-IOV heartbeat timer */
7816 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7817 		del_timer_sync(&instance->sriov_heartbeat_timer);
7818 
7819 	/* Stop the FW fault detection watchdog */
7820 	if (instance->adapter_type != MFI_SERIES)
7821 		megasas_fusion_stop_watchdog(instance);
7822 
7823 	if (instance->fw_crash_state != UNAVAILABLE)
7824 		megasas_free_host_crash_buffer(instance);
7825 	scsi_remove_host(instance->host);
7826 	instance->unload = 1;
7827 
7828 	if (megasas_wait_for_adapter_operational(instance))
7829 		goto skip_firing_dcmds;
7830 
7831 	megasas_flush_cache(instance);
7832 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7833 
7834 skip_firing_dcmds:
	/* cancel the delayed work if it is still queued */
7836 	if (instance->ev != NULL) {
7837 		struct megasas_aen_event *ev = instance->ev;
7838 		cancel_delayed_work_sync(&ev->hotplug_work);
7839 		instance->ev = NULL;
7840 	}
7841 
7842 	/* cancel all wait events */
7843 	wake_up_all(&instance->int_cmd_wait_q);
7844 
7845 	tasklet_kill(&instance->isr_tasklet);
7846 
7847 	/*
7848 	 * Take the instance off the instance array. Note that we will not
7849 	 * decrement the max_index. We let this array be sparse array
7850 	 */
7851 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7852 		if (megasas_mgmt_info.instance[i] == instance) {
7853 			megasas_mgmt_info.count--;
7854 			megasas_mgmt_info.instance[i] = NULL;
7855 
7856 			break;
7857 		}
7858 	}
7859 
7860 	instance->instancet->disable_intr(instance);
7861 
7862 	megasas_destroy_irqs(instance);
7863 
7864 	if (instance->msix_vectors)
7865 		pci_free_irq_vectors(instance->pdev);
7866 
7867 	if (instance->adapter_type >= VENTURA_SERIES) {
7868 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7869 			kfree(fusion->stream_detect_by_ld[i]);
7870 		kfree(fusion->stream_detect_by_ld);
7871 		fusion->stream_detect_by_ld = NULL;
7872 	}
7873 
7874 
	if (instance->adapter_type != MFI_SERIES) {
		megasas_release_fusion(instance);
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
				(sizeof(struct MR_PD_CFG_SEQ) *
				 (MAX_PHYSICAL_DEVICES - 1));
7880 		for (i = 0; i < 2 ; i++) {
7881 			if (fusion->ld_map[i])
7882 				dma_free_coherent(&instance->pdev->dev,
7883 						  fusion->max_map_sz,
7884 						  fusion->ld_map[i],
7885 						  fusion->ld_map_phys[i]);
7886 			if (fusion->ld_drv_map[i]) {
7887 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7888 					vfree(fusion->ld_drv_map[i]);
7889 				else
7890 					free_pages((ulong)fusion->ld_drv_map[i],
7891 						   fusion->drv_map_pages);
7892 			}
7893 
7894 			if (fusion->pd_seq_sync[i])
7895 				dma_free_coherent(&instance->pdev->dev,
7896 					pd_seq_map_sz,
7897 					fusion->pd_seq_sync[i],
7898 					fusion->pd_seq_phys[i]);
7899 		}
7900 	} else {
7901 		megasas_release_mfi(instance);
7902 	}
7903 
7904 	if (instance->vf_affiliation)
7905 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7906 				    sizeof(struct MR_LD_VF_AFFILIATION),
7907 				    instance->vf_affiliation,
7908 				    instance->vf_affiliation_h);
7909 
7910 	if (instance->vf_affiliation_111)
7911 		dma_free_coherent(&pdev->dev,
7912 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7913 				    instance->vf_affiliation_111,
7914 				    instance->vf_affiliation_111_h);
7915 
7916 	if (instance->hb_host_mem)
7917 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7918 				    instance->hb_host_mem,
7919 				    instance->hb_host_mem_h);
7920 
7921 	megasas_free_ctrl_dma_buffers(instance);
7922 
7923 	megasas_free_ctrl_mem(instance);
7924 
7925 	megasas_destroy_debugfs(instance);
7926 
7927 	scsi_host_put(host);
7928 
7929 	pci_disable_device(pdev);
7930 }
7931 
7932 /**
7933  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
7935  */
7936 static void megasas_shutdown(struct pci_dev *pdev)
7937 {
7938 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7939 
7940 	if (!instance)
7941 		return;
7942 
7943 	instance->unload = 1;
7944 
7945 	if (megasas_wait_for_adapter_operational(instance))
7946 		goto skip_firing_dcmds;
7947 
7948 	megasas_flush_cache(instance);
7949 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7950 
7951 skip_firing_dcmds:
7952 	instance->instancet->disable_intr(instance);
7953 	megasas_destroy_irqs(instance);
7954 
7955 	if (instance->msix_vectors)
7956 		pci_free_irq_vectors(instance->pdev);
7957 }
7958 
7959 /**
7960  * megasas_mgmt_open -	char node "open" entry point
7961  */
7962 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7963 {
7964 	/*
7965 	 * Allow only those users with admin rights
7966 	 */
7967 	if (!capable(CAP_SYS_ADMIN))
7968 		return -EACCES;
7969 
7970 	return 0;
7971 }
7972 
7973 /**
7974  * megasas_mgmt_fasync -	Async notifier registration from applications
7975  *
7976  * This function adds the calling process to a driver global queue. When an
7977  * event occurs, SIGIO will be sent to all processes in this queue.
7978  */
7979 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7980 {
7981 	int rc;
7982 
7983 	mutex_lock(&megasas_async_queue_mutex);
7984 
7985 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7986 
7987 	mutex_unlock(&megasas_async_queue_mutex);
7988 
7989 	if (rc >= 0) {
7990 		/* For sanity check when we get ioctl */
7991 		filep->private_data = filep;
7992 		return 0;
7993 	}
7994 
7995 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7996 
7997 	return rc;
7998 }
7999 
8000 /**
 * megasas_mgmt_poll -	char node "poll" entry point
 */
8003 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8004 {
8005 	__poll_t mask;
8006 	unsigned long flags;
8007 
8008 	poll_wait(file, &megasas_poll_wait, wait);
8009 	spin_lock_irqsave(&poll_aen_lock, flags);
8010 	if (megasas_poll_wait_aen)
8011 		mask = (EPOLLIN | EPOLLRDNORM);
8012 	else
8013 		mask = 0;
8014 	megasas_poll_wait_aen = 0;
8015 	spin_unlock_irqrestore(&poll_aen_lock, flags);
8016 	return mask;
8017 }
8018 
8019 /*
8020  * megasas_set_crash_dump_params_ioctl:
8021  *		Send CRASH_DUMP_MODE DCMD to all controllers
8022  * @cmd:	MFI command frame
8023  */
8024 
8025 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8026 {
8027 	struct megasas_instance *local_instance;
8028 	int i, error = 0;
8029 	int crash_support;
8030 
8031 	crash_support = cmd->frame->dcmd.mbox.w[0];
8032 
8033 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8034 		local_instance = megasas_mgmt_info.instance[i];
8035 		if (local_instance && local_instance->crash_dump_drv_support) {
8036 			if ((atomic_read(&local_instance->adprecovery) ==
8037 				MEGASAS_HBA_OPERATIONAL) &&
8038 				!megasas_set_crash_dump_params(local_instance,
8039 					crash_support)) {
8040 				local_instance->crash_dump_app_support =
8041 					crash_support;
8042 				dev_info(&local_instance->pdev->dev,
8043 					"Application firmware crash "
8044 					"dump mode set success\n");
8045 				error = 0;
8046 			} else {
8047 				dev_info(&local_instance->pdev->dev,
8048 					"Application firmware crash "
8049 					"dump mode set failed\n");
8050 				error = -1;
8051 			}
8052 		}
8053 	}
8054 	return error;
8055 }
8056 
8057 /**
8058  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8059  * @instance:			Adapter soft state
 * @user_ioc:			User's ioctl packet (user space copy)
 * @ioc:			Kernel copy of the user's ioctl packet
8061  */
8062 static int
8063 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8064 		      struct megasas_iocpacket __user * user_ioc,
8065 		      struct megasas_iocpacket *ioc)
8066 {
8067 	struct megasas_sge64 *kern_sge64 = NULL;
8068 	struct megasas_sge32 *kern_sge32 = NULL;
8069 	struct megasas_cmd *cmd;
8070 	void *kbuff_arr[MAX_IOCTL_SGE];
8071 	dma_addr_t buf_handle = 0;
8072 	int error = 0, i;
8073 	void *sense = NULL;
8074 	dma_addr_t sense_handle;
8075 	unsigned long *sense_ptr;
8076 	u32 opcode = 0;
8077 	int ret = DCMD_SUCCESS;
8078 
8079 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8080 
8081 	if (ioc->sge_count > MAX_IOCTL_SGE) {
8082 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
8083 		       ioc->sge_count, MAX_IOCTL_SGE);
8084 		return -EINVAL;
8085 	}
8086 
8087 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8088 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8089 	    !instance->support_nvme_passthru) ||
8090 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8091 	    !instance->support_pci_lane_margining)) {
8092 		dev_err(&instance->pdev->dev,
8093 			"Received invalid ioctl command 0x%x\n",
8094 			ioc->frame.hdr.cmd);
8095 		return -ENOTSUPP;
8096 	}
8097 
8098 	cmd = megasas_get_cmd(instance);
8099 	if (!cmd) {
8100 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8101 		return -ENOMEM;
8102 	}
8103 
8104 	/*
8105 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8106 	 * frames into our cmd's frames. cmd->frame's context will get
8107 	 * overwritten when we copy from user's frames. So set that value
8108 	 * alone separately
8109 	 */
8110 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8111 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8112 	cmd->frame->hdr.pad_0 = 0;
8113 
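	/*
	 * Application frames do not use IEEE SGLs; pick 32- or 64-bit SGL
	 * and sense formats to match the coherent DMA mask negotiated in
	 * megasas_set_dma_mask().
	 */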
8114 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8115 
8116 	if (instance->consistent_mask_64bit)
8117 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8118 				       MFI_FRAME_SENSE64));
8119 	else
8120 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8121 					       MFI_FRAME_SENSE64));
8122 
8123 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8124 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8125 
8126 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8127 		mutex_lock(&instance->reset_mutex);
8128 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8129 			megasas_return_cmd(instance, cmd);
8130 			mutex_unlock(&instance->reset_mutex);
8131 			return -1;
8132 		}
8133 		mutex_unlock(&instance->reset_mutex);
8134 	}
8135 
8136 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8137 		error = megasas_set_crash_dump_params_ioctl(cmd);
8138 		megasas_return_cmd(instance, cmd);
8139 		return error;
8140 	}
8141 
8142 	/*
8143 	 * The management interface between applications and the fw uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes,
	 * etc. are accomplished through different kinds of MFI frames. The
8146 	 * driver needs to care only about substituting user buffers with
8147 	 * kernel buffers in SGLs. The location of SGL is embedded in the
8148 	 * struct iocpacket itself.
8149 	 */
8150 	if (instance->consistent_mask_64bit)
8151 		kern_sge64 = (struct megasas_sge64 *)
8152 			((unsigned long)cmd->frame + ioc->sgl_off);
8153 	else
8154 		kern_sge32 = (struct megasas_sge32 *)
8155 			((unsigned long)cmd->frame + ioc->sgl_off);
8156 
8157 	/*
8158 	 * For each user buffer, create a mirror buffer and copy in
8159 	 */
8160 	for (i = 0; i < ioc->sge_count; i++) {
8161 		if (!ioc->sgl[i].iov_len)
8162 			continue;
8163 
8164 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8165 						    ioc->sgl[i].iov_len,
8166 						    &buf_handle, GFP_KERNEL);
8167 		if (!kbuff_arr[i]) {
8168 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8169 			       "kernel SGL buffer for IOCTL\n");
8170 			error = -ENOMEM;
8171 			goto out;
8172 		}
8173 
8174 		/*
8175 		 * We don't change the dma_coherent_mask, so
8176 		 * dma_alloc_coherent only returns 32bit addresses
8177 		 */
8178 		if (instance->consistent_mask_64bit) {
8179 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8180 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8181 		} else {
8182 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8183 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8184 		}
8185 
8186 		/*
8187 		 * We created a kernel buffer corresponding to the
8188 		 * user buffer. Now copy in from the user buffer
8189 		 */
8190 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8191 				   (u32) (ioc->sgl[i].iov_len))) {
8192 			error = -EFAULT;
8193 			goto out;
8194 		}
8195 	}
8196 
8197 	if (ioc->sense_len) {
8198 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8199 					     &sense_handle, GFP_KERNEL);
8200 		if (!sense) {
8201 			error = -ENOMEM;
8202 			goto out;
8203 		}
8204 
8205 		sense_ptr =
8206 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8207 		if (instance->consistent_mask_64bit)
8208 			*sense_ptr = cpu_to_le64(sense_handle);
8209 		else
8210 			*sense_ptr = cpu_to_le32(sense_handle);
8211 	}
8212 
8213 	/*
8214 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8215 	 * cmd to the SCSI mid-layer
8216 	 */
8217 	cmd->sync_cmd = 1;
8218 
8219 	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8220 	switch (ret) {
8221 	case DCMD_INIT:
8222 	case DCMD_BUSY:
8223 		cmd->sync_cmd = 0;
8224 		dev_err(&instance->pdev->dev,
8225 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8226 			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8227 			 cmd->cmd_status_drv);
8228 		error = -EBUSY;
8229 		goto out;
8230 	}
8231 
8232 	cmd->sync_cmd = 0;
8233 
8234 	if (instance->unload == 1) {
		dev_info(&instance->pdev->dev, "Driver unload is in progress, "
			"don't submit data to application\n");
8237 		goto out;
8238 	}
8239 	/*
8240 	 * copy out the kernel buffers to user buffers
8241 	 */
8242 	for (i = 0; i < ioc->sge_count; i++) {
8243 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8244 				 ioc->sgl[i].iov_len)) {
8245 			error = -EFAULT;
8246 			goto out;
8247 		}
8248 	}
8249 
8250 	/*
8251 	 * copy out the sense
8252 	 */
8253 	if (ioc->sense_len) {
8254 		/*
8255 		 * sense_ptr points to the location that has the user
8256 		 * sense buffer address
8257 		 */
8258 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8259 				ioc->sense_off);
8260 
8261 		if (copy_to_user((void __user *)((unsigned long)
8262 				 get_unaligned((unsigned long *)sense_ptr)),
8263 				 sense, ioc->sense_len)) {
			dev_err(&instance->pdev->dev,
				"Failed to copy out sense data to user\n");
8266 			error = -EFAULT;
8267 			goto out;
8268 		}
8269 	}
8270 
8271 	/*
8272 	 * copy the status codes returned by the fw
8273 	 */
8274 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8275 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8276 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8277 		error = -EFAULT;
8278 	}
8279 
8280 out:
8281 	if (sense) {
8282 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8283 				    sense, sense_handle);
8284 	}
8285 
8286 	for (i = 0; i < ioc->sge_count; i++) {
8287 		if (kbuff_arr[i]) {
8288 			if (instance->consistent_mask_64bit)
8289 				dma_free_coherent(&instance->pdev->dev,
8290 					le32_to_cpu(kern_sge64[i].length),
8291 					kbuff_arr[i],
8292 					le64_to_cpu(kern_sge64[i].phys_addr));
8293 			else
8294 				dma_free_coherent(&instance->pdev->dev,
8295 					le32_to_cpu(kern_sge32[i].length),
8296 					kbuff_arr[i],
8297 					le32_to_cpu(kern_sge32[i].phys_addr));
8298 			kbuff_arr[i] = NULL;
8299 		}
8300 	}
8301 
8302 	megasas_return_cmd(instance, cmd);
8303 	return error;
8304 }
8305 
8306 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8307 {
8308 	struct megasas_iocpacket __user *user_ioc =
8309 	    (struct megasas_iocpacket __user *)arg;
8310 	struct megasas_iocpacket *ioc;
8311 	struct megasas_instance *instance;
8312 	int error;
8313 
8314 	ioc = memdup_user(user_ioc, sizeof(*ioc));
8315 	if (IS_ERR(ioc))
8316 		return PTR_ERR(ioc);
8317 
8318 	instance = megasas_lookup_instance(ioc->host_no);
8319 	if (!instance) {
8320 		error = -ENODEV;
8321 		goto out_kfree_ioc;
8322 	}
8323 
8324 	/* Block ioctls in VF mode */
8325 	if (instance->requestorId && !allow_vf_ioctls) {
8326 		error = -ENODEV;
8327 		goto out_kfree_ioc;
8328 	}
8329 
8330 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8331 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8332 		error = -ENODEV;
8333 		goto out_kfree_ioc;
8334 	}
8335 
8336 	if (instance->unload == 1) {
8337 		error = -ENODEV;
8338 		goto out_kfree_ioc;
8339 	}
8340 
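	/*
	 * Throttle concurrent application ioctls; ioctl_sem is released
	 * in the out_up path once the command completes.
	 */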
8341 	if (down_interruptible(&instance->ioctl_sem)) {
8342 		error = -ERESTARTSYS;
8343 		goto out_kfree_ioc;
8344 	}
8345 
8346 	if  (megasas_wait_for_adapter_operational(instance)) {
8347 		error = -ENODEV;
8348 		goto out_up;
8349 	}
8350 
8351 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8352 out_up:
8353 	up(&instance->ioctl_sem);
8354 
8355 out_kfree_ioc:
8356 	kfree(ioc);
8357 	return error;
8358 }
8359 
8360 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8361 {
8362 	struct megasas_instance *instance;
8363 	struct megasas_aen aen;
8364 	int error;
8365 
8366 	if (file->private_data != file) {
8367 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8368 		       "called first\n");
8369 		return -EINVAL;
8370 	}
8371 
8372 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8373 		return -EFAULT;
8374 
8375 	instance = megasas_lookup_instance(aen.host_no);
8376 
8377 	if (!instance)
8378 		return -ENODEV;
8379 
8380 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8381 		return -ENODEV;
8382 	}
8383 
8384 	if (instance->unload == 1) {
8385 		return -ENODEV;
8386 	}
8387 
8388 	if  (megasas_wait_for_adapter_operational(instance))
8389 		return -ENODEV;
8390 
8391 	mutex_lock(&instance->reset_mutex);
8392 	error = megasas_register_aen(instance, aen.seq_num,
8393 				     aen.class_locale_word);
8394 	mutex_unlock(&instance->reset_mutex);
8395 	return error;
8396 }
8397 
8398 /**
8399  * megasas_mgmt_ioctl -	char node ioctl entry point
8400  */
8401 static long
8402 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8403 {
8404 	switch (cmd) {
8405 	case MEGASAS_IOC_FIRMWARE:
8406 		return megasas_mgmt_ioctl_fw(file, arg);
8407 
8408 	case MEGASAS_IOC_GET_AEN:
8409 		return megasas_mgmt_ioctl_aen(file, arg);
8410 	}
8411 
8412 	return -ENOTTY;
8413 }
8414 
8415 #ifdef CONFIG_COMPAT
8416 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8417 {
8418 	struct compat_megasas_iocpacket __user *cioc =
8419 	    (struct compat_megasas_iocpacket __user *)arg;
8420 	struct megasas_iocpacket __user *ioc =
8421 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8422 	int i;
8423 	int error = 0;
8424 	compat_uptr_t ptr;
8425 	u32 local_sense_off;
8426 	u32 local_sense_len;
8427 	u32 user_sense_off;
8428 
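	/*
	 * Build a native megasas_iocpacket in user space (via
	 * compat_alloc_user_space) from the 32-bit layout, fix up the
	 * embedded user pointers, and then reuse the regular ioctl path.
	 */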
8429 	if (clear_user(ioc, sizeof(*ioc)))
8430 		return -EFAULT;
8431 
8432 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8433 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8434 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8435 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8436 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8437 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8438 		return -EFAULT;
8439 
8440 	/*
8441 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
	 * sense_len is non-zero, so prepare the 64-bit value under
8443 	 * the same condition.
8444 	 */
8445 	if (get_user(local_sense_off, &ioc->sense_off) ||
8446 		get_user(local_sense_len, &ioc->sense_len) ||
8447 		get_user(user_sense_off, &cioc->sense_off))
8448 		return -EFAULT;
8449 
8450 	if (local_sense_off != user_sense_off)
8451 		return -EINVAL;
8452 
8453 	if (local_sense_len) {
8454 		void __user **sense_ioc_ptr =
8455 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8456 		compat_uptr_t *sense_cioc_ptr =
8457 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8458 		if (get_user(ptr, sense_cioc_ptr) ||
8459 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8460 			return -EFAULT;
8461 	}
8462 
8463 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8464 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8465 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8466 		    copy_in_user(&ioc->sgl[i].iov_len,
8467 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8468 			return -EFAULT;
8469 	}
8470 
8471 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8472 
8473 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8474 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8475 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8476 		return -EFAULT;
8477 	}
8478 	return error;
8479 }
8480 
8481 static long
8482 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8483 			  unsigned long arg)
8484 {
8485 	switch (cmd) {
8486 	case MEGASAS_IOC_FIRMWARE32:
8487 		return megasas_mgmt_compat_ioctl_fw(file, arg);
8488 	case MEGASAS_IOC_GET_AEN:
8489 		return megasas_mgmt_ioctl_aen(file, arg);
8490 	}
8491 
8492 	return -ENOTTY;
8493 }
8494 #endif
8495 
8496 /*
8497  * File operations structure for management interface
8498  */
8499 static const struct file_operations megasas_mgmt_fops = {
8500 	.owner = THIS_MODULE,
8501 	.open = megasas_mgmt_open,
8502 	.fasync = megasas_mgmt_fasync,
8503 	.unlocked_ioctl = megasas_mgmt_ioctl,
8504 	.poll = megasas_mgmt_poll,
8505 #ifdef CONFIG_COMPAT
8506 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8507 #endif
8508 	.llseek = noop_llseek,
8509 };
8510 
8511 /*
8512  * PCI hotplug support registration structure
8513  */
8514 static struct pci_driver megasas_pci_driver = {
8515 
8516 	.name = "megaraid_sas",
8517 	.id_table = megasas_pci_table,
8518 	.probe = megasas_probe_one,
8519 	.remove = megasas_detach_one,
8520 	.suspend = megasas_suspend,
8521 	.resume = megasas_resume,
8522 	.shutdown = megasas_shutdown,
8523 };
8524 
8525 /*
8526  * Sysfs driver attributes
8527  */
8528 static ssize_t version_show(struct device_driver *dd, char *buf)
8529 {
8530 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8531 			MEGASAS_VERSION);
8532 }
8533 static DRIVER_ATTR_RO(version);
8534 
8535 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8536 {
8537 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8538 		MEGASAS_RELDATE);
8539 }
8540 static DRIVER_ATTR_RO(release_date);
8541 
8542 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8543 {
8544 	return sprintf(buf, "%u\n", support_poll_for_event);
8545 }
8546 static DRIVER_ATTR_RO(support_poll_for_event);
8547 
8548 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8549 {
8550 	return sprintf(buf, "%u\n", support_device_change);
8551 }
8552 static DRIVER_ATTR_RO(support_device_change);
8553 
8554 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8555 {
8556 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8557 }
8558 
8559 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8560 			     size_t count)
8561 {
8562 	int retval = count;
8563 
8564 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8565 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8566 		retval = -EINVAL;
8567 	}
8568 	return retval;
8569 }
8570 static DRIVER_ATTR_RW(dbg_lvl);
8571 
8572 static ssize_t
8573 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8574 {
8575 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8576 }
8577 
8578 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8579 
8580 static ssize_t
8581 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8582 {
8583 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8584 }
8585 
8586 static DRIVER_ATTR_RO(support_pci_lane_margining);
8587 
8588 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8589 {
8590 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8591 	scsi_remove_device(sdev);
8592 	scsi_device_put(sdev);
8593 }
8594 
8595 /**
8596  * megasas_update_device_list -	Update the PD and LD device list from FW
8597  *				after an AEN event notification
8598  * @instance:			Adapter soft state
8599  * @event_type:			Indicates type of event (PD or LD event)
8600  *
8601  * @return:			Success or failure
8602  *
8603  * Issue DCMDs to Firmware to update the internal device list in driver.
8604  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8605  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8606  */
8607 static
8608 int megasas_update_device_list(struct megasas_instance *instance,
8609 			       int event_type)
8610 {
8611 	int dcmd_ret = DCMD_SUCCESS;
8612 
8613 	if (instance->enable_fw_dev_list) {
8614 		dcmd_ret = megasas_host_device_list_query(instance, false);
8615 		if (dcmd_ret != DCMD_SUCCESS)
8616 			goto out;
8617 	} else {
8618 		if (event_type & SCAN_PD_CHANNEL) {
8619 			dcmd_ret = megasas_get_pd_list(instance);
8620 
8621 			if (dcmd_ret != DCMD_SUCCESS)
8622 				goto out;
8623 		}
8624 
8625 		if (event_type & SCAN_VD_CHANNEL) {
8626 			if (!instance->requestorId ||
8627 			    (instance->requestorId &&
8628 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8629 				dcmd_ret = megasas_ld_list_query(instance,
8630 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8631 				if (dcmd_ret != DCMD_SUCCESS)
8632 					goto out;
8633 			}
8634 		}
8635 	}
8636 
8637 out:
8638 	return dcmd_ret;
8639 }
8640 
8641 /**
8642  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8643  *				after an AEN event notification
8644  * @instance:			Adapter soft state
8645  * @scan_type:			Indicates type of devices (PD/LD) to add
8646  * @return			void
8647  */
8648 static
8649 void megasas_add_remove_devices(struct megasas_instance *instance,
8650 				int scan_type)
8651 {
8652 	int i, j;
8653 	u16 pd_index = 0;
8654 	u16 ld_index = 0;
8655 	u16 channel = 0, id = 0;
8656 	struct Scsi_Host *host;
8657 	struct scsi_device *sdev1;
8658 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8659 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8660 
8661 	host = instance->host;
8662 
8663 	if (instance->enable_fw_dev_list) {
8664 		targetid_list = instance->host_device_list_buf;
8665 		for (i = 0; i < targetid_list->count; i++) {
8666 			targetid_entry = &targetid_list->host_device_list[i];
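			/*
			 * System PDs are exposed on the physical-device
			 * channels; all other targets are mapped past
			 * MEGASAS_MAX_PD_CHANNELS onto the VD channels.
			 */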
8667 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8668 				channel = le16_to_cpu(targetid_entry->target_id) /
8669 						MEGASAS_MAX_DEV_PER_CHANNEL;
8670 				id = le16_to_cpu(targetid_entry->target_id) %
8671 						MEGASAS_MAX_DEV_PER_CHANNEL;
8672 			} else {
8673 				channel = MEGASAS_MAX_PD_CHANNELS +
8674 					  (le16_to_cpu(targetid_entry->target_id) /
8675 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8676 				id = le16_to_cpu(targetid_entry->target_id) %
8677 						MEGASAS_MAX_DEV_PER_CHANNEL;
8678 			}
8679 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8680 			if (!sdev1) {
8681 				scsi_add_device(host, channel, id, 0);
8682 			} else {
8683 				scsi_device_put(sdev1);
8684 			}
8685 		}
8686 	}
8687 
8688 	if (scan_type & SCAN_PD_CHANNEL) {
8689 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8690 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8691 				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8692 				sdev1 = scsi_device_lookup(host, i, j, 0);
8693 				if (instance->pd_list[pd_index].driveState ==
8694 							MR_PD_STATE_SYSTEM) {
8695 					if (!sdev1)
8696 						scsi_add_device(host, i, j, 0);
8697 					else
8698 						scsi_device_put(sdev1);
8699 				} else {
8700 					if (sdev1)
8701 						megasas_remove_scsi_device(sdev1);
8702 				}
8703 			}
8704 		}
8705 	}
8706 
	if (scan_type & SCAN_VD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host,
						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}
}

static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	int event_type = 0;
	u32 seq_num;
	int error;
	u8  dcmd_ret = DCMD_SUCCESS;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}

	/* Don't run the event workqueue thread if OCR is running */
	mutex_lock(&instance->reset_mutex);

	instance->ev = NULL;
	if (instance->evt_detail) {
		megasas_decode_evt(instance);

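		/*
		 * Map the AEN event code to the channel types (PD, VD or
		 * both) that need to be rescanned.
		 */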
		switch (le32_to_cpu(instance->evt_detail->code)) {

		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			event_type = SCAN_PD_CHANNEL;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
		case MR_EVT_LD_CREATED:
			event_type = SCAN_VD_CHANNEL;
			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
				instance->host->host_no);
			break;

		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			if (dcmd_ret == DCMD_SUCCESS &&
			    instance->snapdump_wait_time) {
				megasas_get_snapdump_properties(instance);
				dev_info(&instance->pdev->dev,
					 "Snap dump wait time\t: %d\n",
					 instance->snapdump_wait_time);
			}
			break;
		default:
			event_type = 0;
			break;
		}
	} else {
		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
		mutex_unlock(&instance->reset_mutex);
		kfree(ev);
		return;
	}

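	/*
	 * Refresh the driver's device list while still holding reset_mutex;
	 * the SCSI midlayer add/remove below runs only after the mutex has
	 * been dropped.
	 */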
	if (event_type)
		dcmd_ret = megasas_update_device_list(instance, event_type);

	mutex_unlock(&instance->reset_mutex);

	if (event_type && dcmd_ret == DCMD_SUCCESS)
		megasas_add_remove_devices(instance, event_type);

	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

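	/* If an AEN command is already outstanding with FW, don't register another. */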
	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
					class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Booted in a kdump kernel: minimize the memory footprint by
	 * disabling a few features.
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;
	support_pci_lane_margining = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to open device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	megasas_init_debugfs();

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				&driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;
	rval = driver_create_file(&megasas_pci_driver.driver,
				&driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_pci_lane_margining);
	if (rval)
		goto err_dcf_support_pci_lane_margining;

	return rval;

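	/*
	 * Error unwind: tear down the sysfs attributes, PCI driver, debugfs
	 * entries and character device in the reverse order of their
	 * registration above.
	 */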
err_dcf_support_pci_lane_margining:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			&driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			&driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			&driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);