1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
49 
50 /*
51  * Number of sectors per IO command
 * Will be set in megasas_init_mfi if the user does not provide it
53  */
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 	"Maximum number of sectors per IO command");
58 
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
62 
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
66 
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
70 
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
75 
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180s");
79 
80 int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
83 
84 int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
87 
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
91 
92 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "SCSI command timeout (10-90s), default 90s. See megasas_reset_timer.");
95 
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
101 		"1 - iops: High iops queues are not allocated &\n\t\t"
102 		"interrupt coalescing is enabled on all queues\n\t\t"
103 		"2 - latency: High iops queues are not allocated &\n\t\t"
104 		"interrupt coalescing is disabled on all queues\n\t\t"
105 		"default mode is 'balanced'"
106 		);
107 
108 int event_log_level = MFI_EVT_CLASS_CRITICAL;
109 module_param(event_log_level, int, 0644);
110 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
111 
112 unsigned int enable_sdev_max_qd;
113 module_param(enable_sdev_max_qd, int, 0444);
114 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
115 
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(MEGASAS_VERSION);
118 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
119 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
120 
121 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
122 static int megasas_get_pd_list(struct megasas_instance *instance);
123 static int megasas_ld_list_query(struct megasas_instance *instance,
124 				 u8 query_type);
125 static int megasas_issue_init_mfi(struct megasas_instance *instance);
126 static int megasas_register_aen(struct megasas_instance *instance,
127 				u32 seq_num, u32 class_locale_word);
128 static void megasas_get_pd_info(struct megasas_instance *instance,
129 				struct scsi_device *sdev);
130 
131 /*
132  * PCI ID table for all supported controllers
133  */
134 static struct pci_device_id megasas_pci_table[] = {
135 
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
137 	/* xscale IOP */
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
139 	/* ppc IOP */
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
141 	/* ppc IOP */
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
143 	/* gen2*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
145 	/* gen2*/
146 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
147 	/* skinny*/
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
149 	/* skinny*/
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
151 	/* xscale IOP, vega */
152 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
153 	/* xscale IOP */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
155 	/* Fusion */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
157 	/* Plasma */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
159 	/* Invader */
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
161 	/* Fury */
162 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
163 	/* Intruder */
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
165 	/* Intruder 24 port*/
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
168 	/* VENTURA */
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
170 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
172 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
173 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
175 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
176 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
177 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
178 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
179 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
180 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
181 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
182 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
183 	{}
184 };
185 
186 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
187 
188 static int megasas_mgmt_majorno;
189 struct megasas_mgmt_info megasas_mgmt_info;
190 static struct fasync_struct *megasas_async_queue;
191 static DEFINE_MUTEX(megasas_async_queue_mutex);
192 
193 static int megasas_poll_wait_aen;
194 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
195 static u32 support_poll_for_event;
196 u32 megasas_dbg_lvl;
197 static u32 support_device_change;
198 static bool support_nvme_encapsulation;
199 static bool support_pci_lane_margining;
200 
201 /* define lock for aen poll */
202 static spinlock_t poll_aen_lock;
203 
204 extern struct dentry *megasas_debugfs_root;
205 extern void megasas_init_debugfs(void);
206 extern void megasas_exit_debugfs(void);
207 extern void megasas_setup_debugfs(struct megasas_instance *instance);
208 extern void megasas_destroy_debugfs(struct megasas_instance *instance);
209 
210 void
211 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
212 		     u8 alt_status);
213 static u32
214 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
215 static int
216 megasas_adp_reset_gen2(struct megasas_instance *instance,
217 		       struct megasas_register_set __iomem *reg_set);
218 static irqreturn_t megasas_isr(int irq, void *devp);
219 static u32
220 megasas_init_adapter_mfi(struct megasas_instance *instance);
221 u32
222 megasas_build_and_issue_cmd(struct megasas_instance *instance,
223 			    struct scsi_cmnd *scmd);
224 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
225 int
226 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
227 	int seconds);
228 void megasas_fusion_ocr_wq(struct work_struct *work);
229 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
230 					 int initial);
231 static int
232 megasas_set_dma_mask(struct megasas_instance *instance);
233 static int
234 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
235 static inline void
236 megasas_free_ctrl_mem(struct megasas_instance *instance);
237 static inline int
238 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
239 static inline void
240 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
241 static inline void
242 megasas_init_ctrl_params(struct megasas_instance *instance);
243 
244 u32 megasas_readl(struct megasas_instance *instance,
245 		  const volatile void __iomem *addr)
246 {
247 	u32 i = 0, ret_val;
248 	/*
	 * Due to a hardware erratum in Aero controllers, reads of certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and a subsequent read will
	 * return a valid value. As a workaround, the driver retries readl
	 * up to three times until a non-zero value is read.
254 	 */
255 	if (instance->adapter_type == AERO_SERIES) {
256 		do {
257 			ret_val = readl(addr);
258 			i++;
259 		} while (ret_val == 0 && i < 3);
260 		return ret_val;
261 	} else {
262 		return readl(addr);
263 	}
264 }
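/*
 * Note: callers that need the Aero read-retry workaround above should go
 * through megasas_readl() instead of calling readl() directly, e.g.
 * (illustrative sketch):
 *
 *	fw_state = megasas_readl(instance,
 *				 &instance->reg_set->outbound_scratch_pad_0);
 */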
265 
266 /**
267  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
268  * @instance:			Adapter soft state
269  * @dcmd:			DCMD frame inside MFI command
270  * @dma_addr:			DMA address of buffer to be passed to FW
271  * @dma_len:			Length of DMA buffer to be passed to FW
272  * @return:			void
273  */
274 void megasas_set_dma_settings(struct megasas_instance *instance,
275 			      struct megasas_dcmd_frame *dcmd,
276 			      dma_addr_t dma_addr, u32 dma_len)
277 {
278 	if (instance->consistent_mask_64bit) {
279 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
280 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
281 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
282 
283 	} else {
284 		dcmd->sgl.sge32[0].phys_addr =
285 				cpu_to_le32(lower_32_bits(dma_addr));
286 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
287 		dcmd->flags = cpu_to_le16(dcmd->flags);
288 	}
289 }
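/*
 * A typical DCMD caller fills the frame and then sets up the SGL through this
 * helper (illustrative sketch only; the buffer name is a placeholder):
 *
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
 *	megasas_set_dma_settings(instance, dcmd, ci_dma_handle,
 *				 sizeof(struct megasas_ctrl_info));
 */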
290 
291 static void
292 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
293 {
294 	instance->instancet->fire_cmd(instance,
295 		cmd->frame_phys_addr, 0, instance->reg_set);
296 	return;
297 }
298 
299 /**
300  * megasas_get_cmd -	Get a command from the free pool
301  * @instance:		Adapter soft state
302  *
303  * Returns a free command from the pool
304  */
305 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
306 						  *instance)
307 {
308 	unsigned long flags;
309 	struct megasas_cmd *cmd = NULL;
310 
311 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
312 
313 	if (!list_empty(&instance->cmd_pool)) {
314 		cmd = list_entry((&instance->cmd_pool)->next,
315 				 struct megasas_cmd, list);
316 		list_del_init(&cmd->list);
317 	} else {
318 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
319 	}
320 
321 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
322 	return cmd;
323 }
324 
325 /**
326  * megasas_return_cmd -	Return a cmd to free command pool
327  * @instance:		Adapter soft state
328  * @cmd:		Command packet to be returned to free command pool
329  */
330 void
331 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
332 {
333 	unsigned long flags;
334 	u32 blk_tags;
335 	struct megasas_cmd_fusion *cmd_fusion;
336 	struct fusion_context *fusion = instance->ctrl_context;
337 
338 	/* This flag is used only for fusion adapter.
339 	 * Wait for Interrupt for Polled mode DCMD
340 	 */
341 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
342 		return;
343 
344 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
345 
346 	if (fusion) {
347 		blk_tags = instance->max_scsi_cmds + cmd->index;
348 		cmd_fusion = fusion->cmd_list[blk_tags];
349 		megasas_return_cmd_fusion(instance, cmd_fusion);
350 	}
351 	cmd->scmd = NULL;
352 	cmd->frame_count = 0;
353 	cmd->flags = 0;
354 	memset(cmd->frame, 0, instance->mfi_frame_size);
355 	cmd->frame->io.context = cpu_to_le32(cmd->index);
356 	if (!fusion && reset_devices)
357 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
358 	list_add(&cmd->list, (&instance->cmd_pool)->next);
359 
360 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
361 
362 }
363 
364 static const char *
365 format_timestamp(uint32_t timestamp)
366 {
367 	static char buffer[32];
368 
369 	if ((timestamp & 0xff000000) == 0xff000000)
370 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
371 		0x00ffffff);
372 	else
373 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
374 	return buffer;
375 }
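/*
 * Note: format_timestamp() and format_class() return pointers to static
 * buffers, so they are not reentrant; their results are consumed immediately
 * by the event decode path below.
 */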
376 
377 static const char *
378 format_class(int8_t class)
379 {
380 	static char buffer[6];
381 
382 	switch (class) {
383 	case MFI_EVT_CLASS_DEBUG:
384 		return "debug";
385 	case MFI_EVT_CLASS_PROGRESS:
386 		return "progress";
387 	case MFI_EVT_CLASS_INFO:
388 		return "info";
389 	case MFI_EVT_CLASS_WARNING:
390 		return "WARN";
391 	case MFI_EVT_CLASS_CRITICAL:
392 		return "CRIT";
393 	case MFI_EVT_CLASS_FATAL:
394 		return "FATAL";
395 	case MFI_EVT_CLASS_DEAD:
396 		return "DEAD";
397 	default:
398 		snprintf(buffer, sizeof(buffer), "%d", class);
399 		return buffer;
400 	}
401 }
402 
403 /**
 * megasas_decode_evt -	Decode FW AEN event and print critical
 *			event for information
 * @instance:		Adapter soft state
 */
408 static void
409 megasas_decode_evt(struct megasas_instance *instance)
410 {
411 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
412 	union megasas_evt_class_locale class_locale;
413 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
414 
415 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
416 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
417 		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
418 		event_log_level = MFI_EVT_CLASS_CRITICAL;
419 	}
420 
421 	if (class_locale.members.class >= event_log_level)
422 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
423 			le32_to_cpu(evt_detail->seq_num),
424 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
425 			(class_locale.members.locale),
426 			format_class(class_locale.members.class),
427 			evt_detail->description);
428 }
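/*
 * Example of the resulting log line (illustrative values only):
 *
 *	megaraid_sas 0000:02:00.0: 1205 (boot + 37s/0x0020/CRIT) - <description>
 */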
429 
430 /**
431 *	The following functions are defined for xscale
432 *	(deviceid : 1064R, PERC5) controllers
433 */
434 
435 /**
436  * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:			Adapter soft state
438  */
439 static inline void
440 megasas_enable_intr_xscale(struct megasas_instance *instance)
441 {
442 	struct megasas_register_set __iomem *regs;
443 
444 	regs = instance->reg_set;
445 	writel(0, &(regs)->outbound_intr_mask);
446 
447 	/* Dummy readl to force pci flush */
448 	readl(&regs->outbound_intr_mask);
449 }
450 
451 /**
 * megasas_disable_intr_xscale - Disables interrupt
 * @instance:			Adapter soft state
454  */
455 static inline void
456 megasas_disable_intr_xscale(struct megasas_instance *instance)
457 {
458 	struct megasas_register_set __iomem *regs;
459 	u32 mask = 0x1f;
460 
461 	regs = instance->reg_set;
462 	writel(mask, &regs->outbound_intr_mask);
463 	/* Dummy readl to force pci flush */
464 	readl(&regs->outbound_intr_mask);
465 }
466 
467 /**
468  * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:			Adapter soft state
470  */
471 static u32
472 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
473 {
474 	return readl(&instance->reg_set->outbound_msg_0);
475 }
476 /**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @instance:				Adapter soft state
479  */
480 static int
481 megasas_clear_intr_xscale(struct megasas_instance *instance)
482 {
483 	u32 status;
484 	u32 mfiStatus = 0;
485 	struct megasas_register_set __iomem *regs;
486 	regs = instance->reg_set;
487 
488 	/*
489 	 * Check if it is our interrupt
490 	 */
491 	status = readl(&regs->outbound_intr_status);
492 
493 	if (status & MFI_OB_INTR_STATUS_MASK)
494 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
495 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
496 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
497 
498 	/*
499 	 * Clear the interrupt by writing back the same value
500 	 */
501 	if (mfiStatus)
502 		writel(status, &regs->outbound_intr_status);
503 
504 	/* Dummy readl to force pci flush */
505 	readl(&regs->outbound_intr_status);
506 
507 	return mfiStatus;
508 }
509 
510 /**
511  * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance :			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
515  */
516 static inline void
517 megasas_fire_cmd_xscale(struct megasas_instance *instance,
518 		dma_addr_t frame_phys_addr,
519 		u32 frame_count,
520 		struct megasas_register_set __iomem *regs)
521 {
522 	unsigned long flags;
523 
524 	spin_lock_irqsave(&instance->hba_lock, flags);
525 	writel((frame_phys_addr >> 3)|(frame_count),
526 	       &(regs)->inbound_queue_port);
527 	spin_unlock_irqrestore(&instance->hba_lock, flags);
528 }
529 
530 /**
531  * megasas_adp_reset_xscale -  For controller reset
 * @instance:                          Adapter soft state
 * @regs:                              MFI register set
533  */
534 static int
535 megasas_adp_reset_xscale(struct megasas_instance *instance,
536 	struct megasas_register_set __iomem *regs)
537 {
538 	u32 i;
539 	u32 pcidata;
540 
541 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
542 
543 	for (i = 0; i < 3; i++)
544 		msleep(1000); /* sleep for 3 secs */
545 	pcidata  = 0;
546 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
547 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
548 	if (pcidata & 0x2) {
549 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
550 		pcidata &= ~0x2;
551 		pci_write_config_dword(instance->pdev,
552 				MFI_1068_PCSR_OFFSET, pcidata);
553 
554 		for (i = 0; i < 2; i++)
555 			msleep(1000); /* need to wait 2 secs again */
556 
557 		pcidata  = 0;
558 		pci_read_config_dword(instance->pdev,
559 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
560 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
561 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
562 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
563 			pcidata = 0;
564 			pci_write_config_dword(instance->pdev,
565 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
566 		}
567 	}
568 	return 0;
569 }
570 
571 /**
572  * megasas_check_reset_xscale -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
574  */
575 static int
576 megasas_check_reset_xscale(struct megasas_instance *instance,
577 		struct megasas_register_set __iomem *regs)
578 {
579 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
580 	    (le32_to_cpu(*instance->consumer) ==
581 		MEGASAS_ADPRESET_INPROG_SIGN))
582 		return 1;
583 	return 0;
584 }
585 
586 static struct megasas_instance_template megasas_instance_template_xscale = {
587 
588 	.fire_cmd = megasas_fire_cmd_xscale,
589 	.enable_intr = megasas_enable_intr_xscale,
590 	.disable_intr = megasas_disable_intr_xscale,
591 	.clear_intr = megasas_clear_intr_xscale,
592 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
593 	.adp_reset = megasas_adp_reset_xscale,
594 	.check_reset = megasas_check_reset_xscale,
595 	.service_isr = megasas_isr,
596 	.tasklet = megasas_complete_cmd_dpc,
597 	.init_adapter = megasas_init_adapter_mfi,
598 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
599 	.issue_dcmd = megasas_issue_dcmd,
600 };
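/*
 * The per-family templates above and below are plugged into the adapter at
 * init time (elsewhere in this driver), after which all controller access
 * goes through the indirection, e.g. (sketch):
 *
 *	instance->instancet = &megasas_instance_template_xscale;
 *	instance->instancet->enable_intr(instance);
 */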
601 
602 /**
603 *	This is the end of set of functions & definitions specific
604 *	to xscale (deviceid : 1064R, PERC5) controllers
605 */
606 
607 /**
608 *	The following functions are defined for ppc (deviceid : 0x60)
609 *	controllers
610 */
611 
612 /**
613  * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:			Adapter soft state
615  */
616 static inline void
617 megasas_enable_intr_ppc(struct megasas_instance *instance)
618 {
619 	struct megasas_register_set __iomem *regs;
620 
621 	regs = instance->reg_set;
622 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
623 
624 	writel(~0x80000000, &(regs)->outbound_intr_mask);
625 
626 	/* Dummy readl to force pci flush */
627 	readl(&regs->outbound_intr_mask);
628 }
629 
630 /**
631  * megasas_disable_intr_ppc -	Disable interrupt
 * @instance:			Adapter soft state
633  */
634 static inline void
635 megasas_disable_intr_ppc(struct megasas_instance *instance)
636 {
637 	struct megasas_register_set __iomem *regs;
638 	u32 mask = 0xFFFFFFFF;
639 
640 	regs = instance->reg_set;
641 	writel(mask, &regs->outbound_intr_mask);
642 	/* Dummy readl to force pci flush */
643 	readl(&regs->outbound_intr_mask);
644 }
645 
646 /**
647  * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:			Adapter soft state
649  */
650 static u32
651 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
652 {
653 	return readl(&instance->reg_set->outbound_scratch_pad_0);
654 }
655 
656 /**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @instance:				Adapter soft state
659  */
660 static int
661 megasas_clear_intr_ppc(struct megasas_instance *instance)
662 {
663 	u32 status, mfiStatus = 0;
664 	struct megasas_register_set __iomem *regs;
665 	regs = instance->reg_set;
666 
667 	/*
668 	 * Check if it is our interrupt
669 	 */
670 	status = readl(&regs->outbound_intr_status);
671 
672 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
673 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
674 
675 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
676 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
677 
678 	/*
679 	 * Clear the interrupt by writing back the same value
680 	 */
681 	writel(status, &regs->outbound_doorbell_clear);
682 
683 	/* Dummy readl to force pci flush */
684 	readl(&regs->outbound_doorbell_clear);
685 
686 	return mfiStatus;
687 }
688 
689 /**
690  * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance :			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
694  */
695 static inline void
696 megasas_fire_cmd_ppc(struct megasas_instance *instance,
697 		dma_addr_t frame_phys_addr,
698 		u32 frame_count,
699 		struct megasas_register_set __iomem *regs)
700 {
701 	unsigned long flags;
702 
703 	spin_lock_irqsave(&instance->hba_lock, flags);
704 	writel((frame_phys_addr | (frame_count<<1))|1,
705 			&(regs)->inbound_queue_port);
706 	spin_unlock_irqrestore(&instance->hba_lock, flags);
707 }
708 
709 /**
710  * megasas_check_reset_ppc -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
712  */
713 static int
714 megasas_check_reset_ppc(struct megasas_instance *instance,
715 			struct megasas_register_set __iomem *regs)
716 {
717 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
718 		return 1;
719 
720 	return 0;
721 }
722 
723 static struct megasas_instance_template megasas_instance_template_ppc = {
724 
725 	.fire_cmd = megasas_fire_cmd_ppc,
726 	.enable_intr = megasas_enable_intr_ppc,
727 	.disable_intr = megasas_disable_intr_ppc,
728 	.clear_intr = megasas_clear_intr_ppc,
729 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
730 	.adp_reset = megasas_adp_reset_xscale,
731 	.check_reset = megasas_check_reset_ppc,
732 	.service_isr = megasas_isr,
733 	.tasklet = megasas_complete_cmd_dpc,
734 	.init_adapter = megasas_init_adapter_mfi,
735 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
736 	.issue_dcmd = megasas_issue_dcmd,
737 };
738 
739 /**
740  * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:			Adapter soft state
742  */
743 static inline void
744 megasas_enable_intr_skinny(struct megasas_instance *instance)
745 {
746 	struct megasas_register_set __iomem *regs;
747 
748 	regs = instance->reg_set;
749 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
750 
751 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
752 
753 	/* Dummy readl to force pci flush */
754 	readl(&regs->outbound_intr_mask);
755 }
756 
757 /**
758  * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:			Adapter soft state
760  */
761 static inline void
762 megasas_disable_intr_skinny(struct megasas_instance *instance)
763 {
764 	struct megasas_register_set __iomem *regs;
765 	u32 mask = 0xFFFFFFFF;
766 
767 	regs = instance->reg_set;
768 	writel(mask, &regs->outbound_intr_mask);
769 	/* Dummy readl to force pci flush */
770 	readl(&regs->outbound_intr_mask);
771 }
772 
773 /**
774  * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:			Adapter soft state
776  */
777 static u32
778 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
779 {
780 	return readl(&instance->reg_set->outbound_scratch_pad_0);
781 }
782 
783 /**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @instance:				Adapter soft state
786  */
787 static int
788 megasas_clear_intr_skinny(struct megasas_instance *instance)
789 {
790 	u32 status;
791 	u32 mfiStatus = 0;
792 	struct megasas_register_set __iomem *regs;
793 	regs = instance->reg_set;
794 
795 	/*
796 	 * Check if it is our interrupt
797 	 */
798 	status = readl(&regs->outbound_intr_status);
799 
800 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
801 		return 0;
802 	}
803 
	/*
	 * Check if the firmware is in a FAULT state
	 */
807 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
808 	    MFI_STATE_FAULT) {
809 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
810 	} else
811 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
812 
813 	/*
814 	 * Clear the interrupt by writing back the same value
815 	 */
816 	writel(status, &regs->outbound_intr_status);
817 
818 	/*
819 	 * dummy read to flush PCI
820 	 */
821 	readl(&regs->outbound_intr_status);
822 
823 	return mfiStatus;
824 }
825 
826 /**
827  * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance :			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
831  */
832 static inline void
833 megasas_fire_cmd_skinny(struct megasas_instance *instance,
834 			dma_addr_t frame_phys_addr,
835 			u32 frame_count,
836 			struct megasas_register_set __iomem *regs)
837 {
838 	unsigned long flags;
839 
840 	spin_lock_irqsave(&instance->hba_lock, flags);
841 	writel(upper_32_bits(frame_phys_addr),
842 	       &(regs)->inbound_high_queue_port);
843 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
844 	       &(regs)->inbound_low_queue_port);
845 	spin_unlock_irqrestore(&instance->hba_lock, flags);
846 }
847 
848 /**
849  * megasas_check_reset_skinny -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
851  */
852 static int
853 megasas_check_reset_skinny(struct megasas_instance *instance,
854 				struct megasas_register_set __iomem *regs)
855 {
856 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
857 		return 1;
858 
859 	return 0;
860 }
861 
862 static struct megasas_instance_template megasas_instance_template_skinny = {
863 
864 	.fire_cmd = megasas_fire_cmd_skinny,
865 	.enable_intr = megasas_enable_intr_skinny,
866 	.disable_intr = megasas_disable_intr_skinny,
867 	.clear_intr = megasas_clear_intr_skinny,
868 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
869 	.adp_reset = megasas_adp_reset_gen2,
870 	.check_reset = megasas_check_reset_skinny,
871 	.service_isr = megasas_isr,
872 	.tasklet = megasas_complete_cmd_dpc,
873 	.init_adapter = megasas_init_adapter_mfi,
874 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
875 	.issue_dcmd = megasas_issue_dcmd,
876 };
877 
878 
879 /**
880 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
881 *	controllers
882 */
883 
884 /**
885  * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:                  Adapter soft state
887  */
888 static inline void
889 megasas_enable_intr_gen2(struct megasas_instance *instance)
890 {
891 	struct megasas_register_set __iomem *regs;
892 
893 	regs = instance->reg_set;
894 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
895 
	/* write ~0x00000005 (4 | 1) to the intr mask*/
897 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
898 
899 	/* Dummy readl to force pci flush */
900 	readl(&regs->outbound_intr_mask);
901 }
902 
903 /**
904  * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:                  Adapter soft state
906  */
907 static inline void
908 megasas_disable_intr_gen2(struct megasas_instance *instance)
909 {
910 	struct megasas_register_set __iomem *regs;
911 	u32 mask = 0xFFFFFFFF;
912 
913 	regs = instance->reg_set;
914 	writel(mask, &regs->outbound_intr_mask);
915 	/* Dummy readl to force pci flush */
916 	readl(&regs->outbound_intr_mask);
917 }
918 
919 /**
920  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:                  Adapter soft state
922  */
923 static u32
924 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
925 {
926 	return readl(&instance->reg_set->outbound_scratch_pad_0);
927 }
928 
929 /**
 * megasas_clear_intr_gen2 -           Check & clear interrupt
 * @instance:                          Adapter soft state
932  */
933 static int
934 megasas_clear_intr_gen2(struct megasas_instance *instance)
935 {
936 	u32 status;
937 	u32 mfiStatus = 0;
938 	struct megasas_register_set __iomem *regs;
939 	regs = instance->reg_set;
940 
941 	/*
942 	 * Check if it is our interrupt
943 	 */
944 	status = readl(&regs->outbound_intr_status);
945 
946 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
947 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
948 	}
949 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
950 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
951 	}
952 
953 	/*
954 	 * Clear the interrupt by writing back the same value
955 	 */
956 	if (mfiStatus)
957 		writel(status, &regs->outbound_doorbell_clear);
958 
959 	/* Dummy readl to force pci flush */
960 	readl(&regs->outbound_intr_status);
961 
962 	return mfiStatus;
963 }
964 /**
965  * megasas_fire_cmd_gen2 -     Sends command to the FW
 * @instance :                 Adapter soft state
 * @frame_phys_addr :          Physical address of cmd
 * @frame_count :              Number of frames for the command
 * @regs :                     MFI register set
969  */
970 static inline void
971 megasas_fire_cmd_gen2(struct megasas_instance *instance,
972 			dma_addr_t frame_phys_addr,
973 			u32 frame_count,
974 			struct megasas_register_set __iomem *regs)
975 {
976 	unsigned long flags;
977 
978 	spin_lock_irqsave(&instance->hba_lock, flags);
979 	writel((frame_phys_addr | (frame_count<<1))|1,
980 			&(regs)->inbound_queue_port);
981 	spin_unlock_irqrestore(&instance->hba_lock, flags);
982 }
983 
984 /**
985  * megasas_adp_reset_gen2 -	For controller reset
 * @instance:				Adapter soft state
 * @reg_set:				MFI register set
987  */
988 static int
989 megasas_adp_reset_gen2(struct megasas_instance *instance,
990 			struct megasas_register_set __iomem *reg_set)
991 {
992 	u32 retry = 0 ;
993 	u32 HostDiag;
994 	u32 __iomem *seq_offset = &reg_set->seq_offset;
995 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
996 
997 	if (instance->instancet == &megasas_instance_template_skinny) {
998 		seq_offset = &reg_set->fusion_seq_offset;
999 		hostdiag_offset = &reg_set->fusion_host_diag;
1000 	}
1001 
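	/*
	 * Write the key sequence that unlocks the host diagnostic register;
	 * DIAG_WRITE_ENABLE is polled below to confirm the unlock took
	 * effect (description inferred from the code that follows).
	 */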
1002 	writel(0, seq_offset);
1003 	writel(4, seq_offset);
1004 	writel(0xb, seq_offset);
1005 	writel(2, seq_offset);
1006 	writel(7, seq_offset);
1007 	writel(0xd, seq_offset);
1008 
1009 	msleep(1000);
1010 
1011 	HostDiag = (u32)readl(hostdiag_offset);
1012 
1013 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1014 		msleep(100);
1015 		HostDiag = (u32)readl(hostdiag_offset);
1016 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1017 					retry, HostDiag);
1018 
1019 		if (retry++ >= 100)
1020 			return 1;
1021 
1022 	}
1023 
1024 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1025 
1026 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1027 
1028 	ssleep(10);
1029 
1030 	HostDiag = (u32)readl(hostdiag_offset);
1031 	while (HostDiag & DIAG_RESET_ADAPTER) {
1032 		msleep(100);
1033 		HostDiag = (u32)readl(hostdiag_offset);
1034 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1035 				retry, HostDiag);
1036 
1037 		if (retry++ >= 1000)
1038 			return 1;
1039 
1040 	}
1041 	return 0;
1042 }
1043 
1044 /**
1045  * megasas_check_reset_gen2 -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
1047  */
1048 static int
1049 megasas_check_reset_gen2(struct megasas_instance *instance,
1050 		struct megasas_register_set __iomem *regs)
1051 {
1052 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1053 		return 1;
1054 
1055 	return 0;
1056 }
1057 
1058 static struct megasas_instance_template megasas_instance_template_gen2 = {
1059 
1060 	.fire_cmd = megasas_fire_cmd_gen2,
1061 	.enable_intr = megasas_enable_intr_gen2,
1062 	.disable_intr = megasas_disable_intr_gen2,
1063 	.clear_intr = megasas_clear_intr_gen2,
1064 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1065 	.adp_reset = megasas_adp_reset_gen2,
1066 	.check_reset = megasas_check_reset_gen2,
1067 	.service_isr = megasas_isr,
1068 	.tasklet = megasas_complete_cmd_dpc,
1069 	.init_adapter = megasas_init_adapter_mfi,
1070 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1071 	.issue_dcmd = megasas_issue_dcmd,
1072 };
1073 
1074 /**
1075 *	This is the end of set of functions & definitions
1076 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1077 */
1078 
1079 /*
1080  * Template added for TB (Fusion)
1081  */
1082 extern struct megasas_instance_template megasas_instance_template_fusion;
1083 
1084 /**
1085  * megasas_issue_polled -	Issues a polling command
1086  * @instance:			Adapter soft state
1087  * @cmd:			Command packet to be issued
1088  *
1089  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1090  */
1091 int
1092 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1093 {
1094 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1095 
1096 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1097 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1098 
1099 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1100 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1101 			__func__, __LINE__);
1102 		return DCMD_INIT;
1103 	}
1104 
1105 	instance->instancet->issue_dcmd(instance, cmd);
1106 
1107 	return wait_and_poll(instance, cmd, instance->requestorId ?
1108 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1109 }
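/*
 * Typical polled-DCMD usage (illustrative sketch; frame and SGL setup are the
 * caller's responsibility):
 *
 *	cmd = megasas_get_cmd(instance);
 *	... populate cmd->frame->dcmd and its SGL ...
 *	if (megasas_issue_polled(instance, cmd) == DCMD_SUCCESS)
 *		... consume the DMA buffer returned by FW ...
 *	megasas_return_cmd(instance, cmd);
 */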
1110 
1111 /**
1112  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1113  * @instance:			Adapter soft state
1114  * @cmd:			Command to be issued
1115  * @timeout:			Timeout in seconds
1116  *
1117  * This function waits on an event for the command to be returned from ISR.
1118  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1119  * Used to issue ioctl commands.
1120  */
1121 int
1122 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1123 			  struct megasas_cmd *cmd, int timeout)
1124 {
1125 	int ret = 0;
1126 	cmd->cmd_status_drv = DCMD_INIT;
1127 
1128 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1129 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1130 			__func__, __LINE__);
1131 		return DCMD_INIT;
1132 	}
1133 
1134 	instance->instancet->issue_dcmd(instance, cmd);
1135 
1136 	if (timeout) {
1137 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1138 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1139 		if (!ret) {
1140 			dev_err(&instance->pdev->dev,
1141 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1142 				cmd->frame->dcmd.opcode, __func__);
1143 			return DCMD_TIMEOUT;
1144 		}
1145 	} else
1146 		wait_event(instance->int_cmd_wait_q,
1147 				cmd->cmd_status_drv != DCMD_INIT);
1148 
1149 	return cmd->cmd_status_drv;
1150 }
1151 
1152 /**
1153  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1154  * @instance:				Adapter soft state
1155  * @cmd_to_abort:			Previously issued cmd to be aborted
1156  * @timeout:				Timeout in seconds
1157  *
 * MFI firmware can abort previously issued AEN command (automatic event
1159  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1160  * cmd and waits for return status.
1161  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1162  */
1163 static int
1164 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1165 				struct megasas_cmd *cmd_to_abort, int timeout)
1166 {
1167 	struct megasas_cmd *cmd;
1168 	struct megasas_abort_frame *abort_fr;
1169 	int ret = 0;
1170 	u32 opcode;
1171 
1172 	cmd = megasas_get_cmd(instance);
1173 
1174 	if (!cmd)
1175 		return -1;
1176 
1177 	abort_fr = &cmd->frame->abort;
1178 
1179 	/*
1180 	 * Prepare and issue the abort frame
1181 	 */
1182 	abort_fr->cmd = MFI_CMD_ABORT;
1183 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1184 	abort_fr->flags = cpu_to_le16(0);
1185 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1186 	abort_fr->abort_mfi_phys_addr_lo =
1187 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1188 	abort_fr->abort_mfi_phys_addr_hi =
1189 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1190 
1191 	cmd->sync_cmd = 1;
1192 	cmd->cmd_status_drv = DCMD_INIT;
1193 
1194 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1195 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1196 			__func__, __LINE__);
1197 		return DCMD_INIT;
1198 	}
1199 
1200 	instance->instancet->issue_dcmd(instance, cmd);
1201 
1202 	if (timeout) {
1203 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1204 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1205 		if (!ret) {
1206 			opcode = cmd_to_abort->frame->dcmd.opcode;
1207 			dev_err(&instance->pdev->dev,
1208 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1209 				opcode,  __func__);
1210 			return DCMD_TIMEOUT;
1211 		}
1212 	} else
1213 		wait_event(instance->abort_cmd_wait_q,
1214 		cmd->cmd_status_drv != DCMD_INIT);
1215 
1216 	cmd->sync_cmd = 0;
1217 
1218 	megasas_return_cmd(instance, cmd);
1219 	return cmd->cmd_status_drv;
1220 }
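/*
 * This helper is used, for example, to cancel an outstanding AEN registration
 * before a new one is issued (see megasas_register_aen elsewhere in this
 * driver); the abort frame carries no data and only references the command
 * being aborted.
 */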
1221 
1222 /**
1223  * megasas_make_sgl32 -	Prepares 32-bit SGL
1224  * @instance:		Adapter soft state
1225  * @scp:		SCSI command from the mid-layer
1226  * @mfi_sgl:		SGL to be filled in
1227  *
1228  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1230  */
1231 static int
1232 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1233 		   union megasas_sgl *mfi_sgl)
1234 {
1235 	int i;
1236 	int sge_count;
1237 	struct scatterlist *os_sgl;
1238 
1239 	sge_count = scsi_dma_map(scp);
1240 	BUG_ON(sge_count < 0);
1241 
1242 	if (sge_count) {
1243 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1244 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1245 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1246 		}
1247 	}
1248 	return sge_count;
1249 }
1250 
1251 /**
1252  * megasas_make_sgl64 -	Prepares 64-bit SGL
1253  * @instance:		Adapter soft state
1254  * @scp:		SCSI command from the mid-layer
1255  * @mfi_sgl:		SGL to be filled in
1256  *
1257  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1259  */
1260 static int
1261 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1262 		   union megasas_sgl *mfi_sgl)
1263 {
1264 	int i;
1265 	int sge_count;
1266 	struct scatterlist *os_sgl;
1267 
1268 	sge_count = scsi_dma_map(scp);
1269 	BUG_ON(sge_count < 0);
1270 
1271 	if (sge_count) {
1272 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1273 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1274 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1275 		}
1276 	}
1277 	return sge_count;
1278 }
1279 
1280 /**
1281  * megasas_make_sgl_skinny - Prepares IEEE SGL
1282  * @instance:           Adapter soft state
1283  * @scp:                SCSI command from the mid-layer
1284  * @mfi_sgl:            SGL to be filled in
1285  *
1286  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1288  */
1289 static int
1290 megasas_make_sgl_skinny(struct megasas_instance *instance,
1291 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1292 {
1293 	int i;
1294 	int sge_count;
1295 	struct scatterlist *os_sgl;
1296 
1297 	sge_count = scsi_dma_map(scp);
1298 
1299 	if (sge_count) {
1300 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1301 			mfi_sgl->sge_skinny[i].length =
1302 				cpu_to_le32(sg_dma_len(os_sgl));
1303 			mfi_sgl->sge_skinny[i].phys_addr =
1304 				cpu_to_le64(sg_dma_address(os_sgl));
1305 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1306 		}
1307 	}
1308 	return sge_count;
1309 }
1310 
/**
 * megasas_get_frame_count - Computes the number of frames
 * @instance		: Adapter soft state
 * @sge_count		: number of sg elements
 * @frame_type		: type of frame - io or pthru frame
 *
 * Returns the number of frames required for the number of sg elements
 * (sge_count)
 */
1318 
1319 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1320 			u8 sge_count, u8 frame_type)
1321 {
1322 	int num_cnt;
1323 	int sge_bytes;
1324 	u32 sge_sz;
1325 	u32 frame_count = 0;
1326 
1327 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1328 	    sizeof(struct megasas_sge32);
1329 
1330 	if (instance->flag_ieee) {
1331 		sge_sz = sizeof(struct megasas_sge_skinny);
1332 	}
1333 
1334 	/*
1335 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1336 	 * 3 SGEs for 32-bit SGLs for ldio &
1337 	 * 1 SGEs for 64-bit SGLs and
1338 	 * 2 SGEs for 32-bit SGLs for pthru frame
1339 	 */
1340 	if (unlikely(frame_type == PTHRU_FRAME)) {
1341 		if (instance->flag_ieee == 1) {
1342 			num_cnt = sge_count - 1;
1343 		} else if (IS_DMA64)
1344 			num_cnt = sge_count - 1;
1345 		else
1346 			num_cnt = sge_count - 2;
1347 	} else {
1348 		if (instance->flag_ieee == 1) {
1349 			num_cnt = sge_count - 1;
1350 		} else if (IS_DMA64)
1351 			num_cnt = sge_count - 2;
1352 		else
1353 			num_cnt = sge_count - 3;
1354 	}
1355 
1356 	if (num_cnt > 0) {
1357 		sge_bytes = sge_sz * num_cnt;
1358 
1359 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1360 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1361 	}
1362 	/* Main frame */
1363 	frame_count += 1;
1364 
1365 	if (frame_count > 7)
1366 		frame_count = 8;
1367 	return frame_count;
1368 }
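/*
 * Worked example (a sketch, assuming a 16-byte skinny/IEEE SGE and 64-byte
 * MFI frames): an LDIO with sge_count = 9 on an IEEE-capable adapter leaves
 * num_cnt = 8 extra SGEs, i.e. 128 bytes of SGL, which needs two additional
 * frames, so frame_count = 1 (main frame) + 2 = 3.
 */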
1369 
1370 /**
1371  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1372  * @instance:		Adapter soft state
1373  * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * This function prepares CDB commands. These are typically pass-through
 * commands to the devices.
1378  */
1379 static int
1380 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1381 		   struct megasas_cmd *cmd)
1382 {
1383 	u32 is_logical;
1384 	u32 device_id;
1385 	u16 flags = 0;
1386 	struct megasas_pthru_frame *pthru;
1387 
1388 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1389 	device_id = MEGASAS_DEV_INDEX(scp);
1390 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1391 
1392 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1393 		flags = MFI_FRAME_DIR_WRITE;
1394 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1395 		flags = MFI_FRAME_DIR_READ;
1396 	else if (scp->sc_data_direction == DMA_NONE)
1397 		flags = MFI_FRAME_DIR_NONE;
1398 
1399 	if (instance->flag_ieee == 1) {
1400 		flags |= MFI_FRAME_IEEE;
1401 	}
1402 
1403 	/*
1404 	 * Prepare the DCDB frame
1405 	 */
1406 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1407 	pthru->cmd_status = 0x0;
1408 	pthru->scsi_status = 0x0;
1409 	pthru->target_id = device_id;
1410 	pthru->lun = scp->device->lun;
1411 	pthru->cdb_len = scp->cmd_len;
1412 	pthru->timeout = 0;
1413 	pthru->pad_0 = 0;
1414 	pthru->flags = cpu_to_le16(flags);
1415 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1416 
1417 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1418 
1419 	/*
1420 	 * If the command is for the tape device, set the
1421 	 * pthru timeout to the os layer timeout value.
1422 	 */
1423 	if (scp->device->type == TYPE_TAPE) {
1424 		if ((scp->request->timeout / HZ) > 0xFFFF)
1425 			pthru->timeout = cpu_to_le16(0xFFFF);
1426 		else
1427 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1428 	}
1429 
1430 	/*
1431 	 * Construct SGL
1432 	 */
1433 	if (instance->flag_ieee == 1) {
1434 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1435 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1436 						      &pthru->sgl);
1437 	} else if (IS_DMA64) {
1438 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1439 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1440 						      &pthru->sgl);
1441 	} else
1442 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1443 						      &pthru->sgl);
1444 
1445 	if (pthru->sge_count > instance->max_num_sge) {
1446 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1447 			pthru->sge_count);
1448 		return 0;
1449 	}
1450 
1451 	/*
1452 	 * Sense info specific
1453 	 */
1454 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1455 	pthru->sense_buf_phys_addr_hi =
1456 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1457 	pthru->sense_buf_phys_addr_lo =
1458 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1459 
1460 	/*
1461 	 * Compute the total number of frames this command consumes. FW uses
1462 	 * this number to pull sufficient number of frames from host memory.
1463 	 */
1464 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1465 							PTHRU_FRAME);
1466 
1467 	return cmd->frame_count;
1468 }
1469 
1470 /**
1471  * megasas_build_ldio -	Prepares IOs to logical devices
1472  * @instance:		Adapter soft state
1473  * @scp:		SCSI command
1474  * @cmd:		Command to be prepared
1475  *
1476  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1477  */
1478 static int
1479 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1480 		   struct megasas_cmd *cmd)
1481 {
1482 	u32 device_id;
1483 	u8 sc = scp->cmnd[0];
1484 	u16 flags = 0;
1485 	struct megasas_io_frame *ldio;
1486 
1487 	device_id = MEGASAS_DEV_INDEX(scp);
1488 	ldio = (struct megasas_io_frame *)cmd->frame;
1489 
1490 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1491 		flags = MFI_FRAME_DIR_WRITE;
1492 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1493 		flags = MFI_FRAME_DIR_READ;
1494 
1495 	if (instance->flag_ieee == 1) {
1496 		flags |= MFI_FRAME_IEEE;
1497 	}
1498 
1499 	/*
1500 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1501 	 */
1502 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1503 	ldio->cmd_status = 0x0;
1504 	ldio->scsi_status = 0x0;
1505 	ldio->target_id = device_id;
1506 	ldio->timeout = 0;
1507 	ldio->reserved_0 = 0;
1508 	ldio->pad_0 = 0;
1509 	ldio->flags = cpu_to_le16(flags);
1510 	ldio->start_lba_hi = 0;
1511 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1512 
1513 	/*
1514 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1515 	 */
1516 	if (scp->cmd_len == 6) {
1517 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1518 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1519 						 ((u32) scp->cmnd[2] << 8) |
1520 						 (u32) scp->cmnd[3]);
1521 
1522 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1523 	}
1524 
1525 	/*
1526 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1527 	 */
1528 	else if (scp->cmd_len == 10) {
1529 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1530 					      ((u32) scp->cmnd[7] << 8));
1531 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1532 						 ((u32) scp->cmnd[3] << 16) |
1533 						 ((u32) scp->cmnd[4] << 8) |
1534 						 (u32) scp->cmnd[5]);
1535 	}
1536 
1537 	/*
1538 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1539 	 */
1540 	else if (scp->cmd_len == 12) {
1541 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1542 					      ((u32) scp->cmnd[7] << 16) |
1543 					      ((u32) scp->cmnd[8] << 8) |
1544 					      (u32) scp->cmnd[9]);
1545 
1546 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1547 						 ((u32) scp->cmnd[3] << 16) |
1548 						 ((u32) scp->cmnd[4] << 8) |
1549 						 (u32) scp->cmnd[5]);
1550 	}
1551 
1552 	/*
1553 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1554 	 */
1555 	else if (scp->cmd_len == 16) {
1556 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1557 					      ((u32) scp->cmnd[11] << 16) |
1558 					      ((u32) scp->cmnd[12] << 8) |
1559 					      (u32) scp->cmnd[13]);
1560 
1561 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1562 						 ((u32) scp->cmnd[7] << 16) |
1563 						 ((u32) scp->cmnd[8] << 8) |
1564 						 (u32) scp->cmnd[9]);
1565 
1566 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1567 						 ((u32) scp->cmnd[3] << 16) |
1568 						 ((u32) scp->cmnd[4] << 8) |
1569 						 (u32) scp->cmnd[5]);
1570 
1571 	}
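	/*
	 * Example of the CDB decoding above (sketch): a READ(10) CDB of
	 * 28 00 00 00 10 00 00 00 08 00 yields start_lba_lo = 0x1000 and
	 * lba_count = 8.
	 */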
1572 
1573 	/*
1574 	 * Construct SGL
1575 	 */
1576 	if (instance->flag_ieee) {
1577 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1578 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1579 					      &ldio->sgl);
1580 	} else if (IS_DMA64) {
1581 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1582 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1583 	} else
1584 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1585 
1586 	if (ldio->sge_count > instance->max_num_sge) {
1587 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1588 			ldio->sge_count);
1589 		return 0;
1590 	}
1591 
1592 	/*
1593 	 * Sense info specific
1594 	 */
1595 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1596 	ldio->sense_buf_phys_addr_hi = 0;
1597 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1598 
1599 	/*
1600 	 * Compute the total number of frames this command consumes. FW uses
1601 	 * this number to pull sufficient number of frames from host memory.
1602 	 */
1603 	cmd->frame_count = megasas_get_frame_count(instance,
1604 			ldio->sge_count, IO_FRAME);
1605 
1606 	return cmd->frame_count;
1607 }
1608 
1609 /**
1610  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1611  *				and whether it's RW or non RW
 * @cmd:			SCSI command
1613  *
1614  */
1615 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1616 {
1617 	int ret;
1618 
1619 	switch (cmd->cmnd[0]) {
1620 	case READ_10:
1621 	case WRITE_10:
1622 	case READ_12:
1623 	case WRITE_12:
1624 	case READ_6:
1625 	case WRITE_6:
1626 	case READ_16:
1627 	case WRITE_16:
1628 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1629 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1630 		break;
1631 	default:
1632 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1633 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1634 	}
1635 	return ret;
1636 }
1637 
/**
1639  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1640  *					in FW
1641  * @instance:				Adapter soft state
1642  */
1643 static inline void
1644 megasas_dump_pending_frames(struct megasas_instance *instance)
1645 {
1646 	struct megasas_cmd *cmd;
1647 	int i,n;
1648 	union megasas_sgl *mfi_sgl;
1649 	struct megasas_io_frame *ldio;
1650 	struct megasas_pthru_frame *pthru;
1651 	u32 sgcount;
1652 	u16 max_cmd = instance->max_fw_cmds;
1653 
1654 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1655 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1656 	if (IS_DMA64)
1657 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1658 	else
1659 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1660 
1661 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1662 	for (i = 0; i < max_cmd; i++) {
1663 		cmd = instance->cmd_list[i];
1664 		if (!cmd->scmd)
1665 			continue;
1666 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1667 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1668 			ldio = (struct megasas_io_frame *)cmd->frame;
1669 			mfi_sgl = &ldio->sgl;
1670 			sgcount = ldio->sge_count;
1671 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1672 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1673 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1674 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1675 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1676 		} else {
1677 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1678 			mfi_sgl = &pthru->sgl;
1679 			sgcount = pthru->sge_count;
1680 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1681 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1682 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1683 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1684 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1685 		}
1686 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1687 			for (n = 0; n < sgcount; n++) {
1688 				if (IS_DMA64)
1689 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1690 						le32_to_cpu(mfi_sgl->sge64[n].length),
1691 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1692 				else
1693 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1694 						le32_to_cpu(mfi_sgl->sge32[n].length),
1695 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1696 			}
1697 		}
1698 	} /*for max_cmd*/
1699 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1700 	for (i = 0; i < max_cmd; i++) {
1701 
1702 		cmd = instance->cmd_list[i];
1703 
1704 		if (cmd->sync_cmd == 1)
1705 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1706 	}
1707 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1708 }
1709 
1710 u32
1711 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1712 			    struct scsi_cmnd *scmd)
1713 {
1714 	struct megasas_cmd *cmd;
1715 	u32 frame_count;
1716 
1717 	cmd = megasas_get_cmd(instance);
1718 	if (!cmd)
1719 		return SCSI_MLQUEUE_HOST_BUSY;
1720 
1721 	/*
1722 	 * Logical drive command
1723 	 */
1724 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1725 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1726 	else
1727 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1728 
1729 	if (!frame_count)
1730 		goto out_return_cmd;
1731 
1732 	cmd->scmd = scmd;
1733 	scmd->SCp.ptr = (char *)cmd;
1734 
1735 	/*
1736 	 * Issue the command to the FW
1737 	 */
1738 	atomic_inc(&instance->fw_outstanding);
1739 
1740 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1741 				cmd->frame_count-1, instance->reg_set);
1742 
1743 	return 0;
1744 out_return_cmd:
1745 	megasas_return_cmd(instance, cmd);
1746 	return SCSI_MLQUEUE_HOST_BUSY;
1747 }
1748 
1749 
/**
 * megasas_queue_command -	Queue entry point
 * @shost:			adapter SCSI host
 * @scmd:			SCSI command to be queued
 */
1755 static int
1756 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1757 {
1758 	struct megasas_instance *instance;
1759 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1760 
1761 	instance = (struct megasas_instance *)
1762 	    scmd->device->host->hostdata;
1763 
1764 	if (instance->unload == 1) {
1765 		scmd->result = DID_NO_CONNECT << 16;
1766 		scmd->scsi_done(scmd);
1767 		return 0;
1768 	}
1769 
1770 	if (instance->issuepend_done == 0)
1771 		return SCSI_MLQUEUE_HOST_BUSY;
1772 
1773 
1774 	/* Check for an mpio path and adjust behavior */
1775 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1776 		if (megasas_check_mpio_paths(instance, scmd) ==
1777 		    (DID_REQUEUE << 16)) {
1778 			return SCSI_MLQUEUE_HOST_BUSY;
1779 		} else {
1780 			scmd->result = DID_NO_CONNECT << 16;
1781 			scmd->scsi_done(scmd);
1782 			return 0;
1783 		}
1784 	}
1785 
1786 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1787 		scmd->result = DID_NO_CONNECT << 16;
1788 		scmd->scsi_done(scmd);
1789 		return 0;
1790 	}
1791 
1792 	mr_device_priv_data = scmd->device->hostdata;
1793 	if (!mr_device_priv_data) {
1794 		scmd->result = DID_NO_CONNECT << 16;
1795 		scmd->scsi_done(scmd);
1796 		return 0;
1797 	}
1798 
1799 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1800 		return SCSI_MLQUEUE_HOST_BUSY;
1801 
1802 	if (mr_device_priv_data->tm_busy)
1803 		return SCSI_MLQUEUE_DEVICE_BUSY;
1804 
1805 
1806 	scmd->result = 0;
1807 
1808 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1809 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1810 		scmd->device->lun)) {
1811 		scmd->result = DID_BAD_TARGET << 16;
1812 		goto out_done;
1813 	}
1814 
1815 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1816 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1817 	    (!instance->fw_sync_cache_support)) {
1818 		scmd->result = DID_OK << 16;
1819 		goto out_done;
1820 	}
1821 
1822 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1823 
1824  out_done:
1825 	scmd->scsi_done(scmd);
1826 	return 0;
1827 }
1828 
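/**
 * megasas_lookup_instance -	Look up adapter soft state by SCSI host number
 * @host_no:			SCSI host number assigned by the mid-layer
 *
 * Returns the matching megasas_instance, or NULL if no registered adapter
 * has that host number.
 */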
1829 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1830 {
1831 	int i;
1832 
1833 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1834 
1835 		if ((megasas_mgmt_info.instance[i]) &&
1836 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1837 			return megasas_mgmt_info.instance[i];
1838 	}
1839 
1840 	return NULL;
1841 }
1842 
/*
 * megasas_set_dynamic_target_properties -
 * Device properties set by the driver may not be static and need to be
 * updated after an OCR.
 *
 * set tm_capable.
 * set dma alignment (only for EEDP protection enabled VD).
 *
 * @sdev: OS provided scsi device
 * @is_target_prop: true, if firmware provided target properties.
 *
 * Returns void
 */
1855 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1856 					   bool is_target_prop)
1857 {
1858 	u16 pd_index = 0, ld;
1859 	u32 device_id;
1860 	struct megasas_instance *instance;
1861 	struct fusion_context *fusion;
1862 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1863 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1864 	struct MR_LD_RAID *raid;
1865 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1866 
1867 	instance = megasas_lookup_instance(sdev->host->host_no);
1868 	fusion = instance->ctrl_context;
1869 	mr_device_priv_data = sdev->hostdata;
1870 
1871 	if (!fusion || !mr_device_priv_data)
1872 		return;
1873 
1874 	if (MEGASAS_IS_LOGICAL(sdev)) {
1875 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1876 					+ sdev->id;
1877 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1878 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1879 		if (ld >= instance->fw_supported_vd_count)
1880 			return;
1881 		raid = MR_LdRaidGet(ld, local_map_ptr);
1882 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1885 
1886 		mr_device_priv_data->is_tm_capable =
1887 			raid->capability.tmCapable;
1888 
1889 		if (!raid->flags.isEPD)
1890 			sdev->no_write_same = 1;
1891 
1892 	} else if (instance->use_seqnum_jbod_fp) {
1893 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1894 			sdev->id;
1895 		pd_sync = (void *)fusion->pd_seq_sync
1896 				[(instance->pd_seq_map_id - 1) & 1];
1897 		mr_device_priv_data->is_tm_capable =
1898 			pd_sync->seq[pd_index].capability.tmCapable;
1899 	}
1900 
1901 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1902 		/*
1903 		 * If FW provides a target reset timeout value, driver will use
1904 		 * it. If not set, fallback to default values.
1905 		 */
1906 		mr_device_priv_data->target_reset_tmo =
1907 			min_t(u8, instance->max_reset_tmo,
1908 			      instance->tgt_prop->reset_tmo);
1909 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1910 	} else {
1911 		mr_device_priv_data->target_reset_tmo =
1912 						MEGASAS_DEFAULT_TM_TIMEOUT;
1913 		mr_device_priv_data->task_abort_tmo =
1914 						MEGASAS_DEFAULT_TM_TIMEOUT;
1915 	}
1916 }
1917 
/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVMe device provided by MR firmware.
 *
 * MR firmware provides the value in KB. The caller of this function
 * converts KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * NVMe page size. With a 4K page size,
 * MR firmware provides the value 128, since 32 * 4K = 128K.
 *
 * @sdev:				scsi device
 * @max_io_size:			maximum io transfer size in bytes
 *
 */
1934 static inline void
1935 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1936 {
1937 	struct megasas_instance *instance;
1938 	u32 mr_nvme_pg_size;
1939 
1940 	instance = (struct megasas_instance *)sdev->host->hostdata;
1941 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1942 				MR_DEFAULT_NVME_PAGE_SIZE);
1943 
1944 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1945 
1946 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1947 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1948 }
1949 
/*
 * megasas_set_fw_assisted_qd -
 * set device queue depth to the firmware assisted queue depth, or
 * set device queue depth to can_queue when enable_sdev_max_qd is set.
 *
 * @sdev:				scsi device
 * @is_target_prop:			true, if fw provided target properties.
 */
1958 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1959 						 bool is_target_prop)
1960 {
1961 	u8 interface_type;
1962 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1963 	u32 tgt_device_qd;
1964 	struct megasas_instance *instance;
1965 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1966 
1967 	instance = megasas_lookup_instance(sdev->host->host_no);
1968 	mr_device_priv_data = sdev->hostdata;
1969 	interface_type  = mr_device_priv_data->interface_type;
1970 
1971 	switch (interface_type) {
1972 	case SAS_PD:
1973 		device_qd = MEGASAS_SAS_QD;
1974 		break;
1975 	case SATA_PD:
1976 		device_qd = MEGASAS_SATA_QD;
1977 		break;
1978 	case NVME_PD:
1979 		device_qd = MEGASAS_NVME_QD;
1980 		break;
1981 	}
1982 
1983 	if (is_target_prop) {
1984 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1985 		if (tgt_device_qd &&
1986 		    (tgt_device_qd <= instance->host->can_queue))
1987 			device_qd = tgt_device_qd;
1988 	}
1989 
1990 	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
1991 		device_qd = instance->host->can_queue;
1992 
1993 	scsi_change_queue_depth(sdev, device_qd);
1994 }
1995 
/*
 * megasas_set_static_target_properties -
 * Device properties set by the driver are static and do not need to be
 * updated after an OCR.
 *
 * set io timeout
 * set device queue depth
 * set nvme device properties. see - megasas_set_nvme_device_properties
 *
 * @sdev:				scsi device
 * @is_target_prop:			true, if fw provided target properties.
 */
2008 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2009 						 bool is_target_prop)
2010 {
2011 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2012 	struct megasas_instance *instance;
2013 
2014 	instance = megasas_lookup_instance(sdev->host->host_no);
2015 
2016 	/*
2017 	 * The RAID firmware may require extended timeouts.
2018 	 */
2019 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2020 
	/* max_io_size_kb will be set to non-zero for
	 * NVMe based VD and syspd.
	 */
2024 	if (is_target_prop)
2025 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2026 
2027 	if (instance->nvme_page_size && max_io_size_kb)
2028 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2029 
2030 	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2031 }
2032 
2033 
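/**
 * megasas_slave_configure -	Per-device configuration entry point
 * @sdev:			OS provided scsi device
 *
 * Fetches PD info and target properties from firmware (under reset_mutex)
 * and applies the static and dynamic device properties to @sdev.
 */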
2034 static int megasas_slave_configure(struct scsi_device *sdev)
2035 {
2036 	u16 pd_index = 0;
2037 	struct megasas_instance *instance;
2038 	int ret_target_prop = DCMD_FAILED;
2039 	bool is_target_prop = false;
2040 
2041 	instance = megasas_lookup_instance(sdev->host->host_no);
2042 	if (instance->pd_list_not_supported) {
2043 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2044 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2045 				sdev->id;
2046 			if (instance->pd_list[pd_index].driveState !=
2047 				MR_PD_STATE_SYSTEM)
2048 				return -ENXIO;
2049 		}
2050 	}
2051 
2052 	mutex_lock(&instance->reset_mutex);
2053 	/* Send DCMD to Firmware and cache the information */
2054 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2055 		megasas_get_pd_info(instance, sdev);
2056 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
	 */
2060 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2061 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2062 
2063 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2064 	megasas_set_static_target_properties(sdev, is_target_prop);
2065 
2066 	/* This sdev property may change post OCR */
2067 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2068 
2069 	mutex_unlock(&instance->reset_mutex);
2070 
2071 	return 0;
2072 }
2073 
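/**
 * megasas_slave_alloc -	Per-device allocation entry point
 * @sdev:			OS provided scsi device
 *
 * Exposes only system PDs (and logical devices) to the OS and allocates
 * the per-device private data.
 */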
2074 static int megasas_slave_alloc(struct scsi_device *sdev)
2075 {
2076 	u16 pd_index = 0;
	struct megasas_instance *instance;
2078 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2079 
2080 	instance = megasas_lookup_instance(sdev->host->host_no);
2081 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2082 		/*
2083 		 * Open the OS scan to the SYSTEM PD
2084 		 */
2085 		pd_index =
2086 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2087 			sdev->id;
2088 		if ((instance->pd_list_not_supported ||
2089 			instance->pd_list[pd_index].driveState ==
2090 			MR_PD_STATE_SYSTEM)) {
2091 			goto scan_target;
2092 		}
2093 		return -ENXIO;
2094 	}
2095 
2096 scan_target:
2097 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2098 					GFP_KERNEL);
2099 	if (!mr_device_priv_data)
2100 		return -ENOMEM;
2101 	sdev->hostdata = mr_device_priv_data;
2102 
2103 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2104 		   instance->r1_ldio_hint_default);
2105 	return 0;
2106 }
2107 
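/**
 * megasas_slave_destroy -	Per-device destruction entry point
 * @sdev:			OS provided scsi device
 *
 * Frees the per-device private data allocated in megasas_slave_alloc.
 */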
2108 static void megasas_slave_destroy(struct scsi_device *sdev)
2109 {
2110 	kfree(sdev->hostdata);
2111 	sdev->hostdata = NULL;
2112 }
2113 
/*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
 *                                       the adapter has been killed
 * @instance:				Adapter soft state
 *
 */
2120 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2121 {
2122 	int i;
2123 	struct megasas_cmd *cmd_mfi;
2124 	struct megasas_cmd_fusion *cmd_fusion;
2125 	struct fusion_context *fusion = instance->ctrl_context;
2126 
2127 	/* Find all outstanding ioctls */
2128 	if (fusion) {
2129 		for (i = 0; i < instance->max_fw_cmds; i++) {
2130 			cmd_fusion = fusion->cmd_list[i];
2131 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2132 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2133 				if (cmd_mfi->sync_cmd &&
2134 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2135 					cmd_mfi->frame->hdr.cmd_status =
2136 							MFI_STAT_WRONG_STATE;
2137 					megasas_complete_cmd(instance,
2138 							     cmd_mfi, DID_OK);
2139 				}
2140 			}
2141 		}
2142 	} else {
2143 		for (i = 0; i < instance->max_fw_cmds; i++) {
2144 			cmd_mfi = instance->cmd_list[i];
2145 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2146 				MFI_CMD_ABORT)
2147 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2148 		}
2149 	}
2150 }
2151 
2152 
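/**
 * megaraid_sas_kill_hba -	Mark the adapter dead and stop the firmware
 * @instance:			Adapter soft state
 *
 * Blocks further I/O and ioctls, asks the firmware to stop (unless this is
 * an SR-IOV VF) and completes any outstanding ioctls.
 */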
2153 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2154 {
2155 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2156 		dev_warn(&instance->pdev->dev,
2157 			 "Adapter already dead, skipping kill HBA\n");
2158 		return;
2159 	}
2160 
2161 	/* Set critical error to block I/O & ioctls in case caller didn't */
2162 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2163 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2164 	msleep(1000);
2165 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2166 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2167 		(instance->adapter_type != MFI_SERIES)) {
2168 		if (!instance->requestorId) {
2169 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2170 			/* Flush */
2171 			readl(&instance->reg_set->doorbell);
2172 		}
2173 		if (instance->requestorId && instance->peerIsPresent)
2174 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2175 	} else {
2176 		writel(MFI_STOP_ADP,
2177 			&instance->reg_set->inbound_doorbell);
2178 	}
2179 	/* Complete outstanding ioctls when adapter is killed */
2180 	megasas_complete_outstanding_ioctls(instance);
2181 }
2182 
/**
 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
 *					restored to max value
 * @instance:			Adapter soft state
 *
 */
2189 void
2190 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2191 {
2192 	unsigned long flags;
2193 
2194 	if (instance->flag & MEGASAS_FW_BUSY
2195 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2196 	    && atomic_read(&instance->fw_outstanding) <
2197 	    instance->throttlequeuedepth + 1) {
2198 
2199 		spin_lock_irqsave(instance->host->host_lock, flags);
2200 		instance->flag &= ~MEGASAS_FW_BUSY;
2201 
2202 		instance->host->can_queue = instance->cur_can_queue;
2203 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2204 	}
2205 }
2206 
/**
 * megasas_complete_cmd_dpc	 -	Completes commands posted by the FW
 * @instance_addr:			Address of adapter soft state
 *
 * Tasklet to complete cmds from the reply queue
 */
2213 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2214 {
2215 	u32 producer;
2216 	u32 consumer;
2217 	u32 context;
2218 	struct megasas_cmd *cmd;
2219 	struct megasas_instance *instance =
2220 				(struct megasas_instance *)instance_addr;
2221 	unsigned long flags;
2222 
	/* If we have already declared the adapter dead, do not complete cmds */
2224 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2225 		return;
2226 
2227 	spin_lock_irqsave(&instance->completion_lock, flags);
2228 
2229 	producer = le32_to_cpu(*instance->producer);
2230 	consumer = le32_to_cpu(*instance->consumer);
2231 
2232 	while (consumer != producer) {
2233 		context = le32_to_cpu(instance->reply_queue[consumer]);
2234 		if (context >= instance->max_fw_cmds) {
2235 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2236 				context);
2237 			BUG();
2238 		}
2239 
2240 		cmd = instance->cmd_list[context];
2241 
2242 		megasas_complete_cmd(instance, cmd, DID_OK);
2243 
2244 		consumer++;
2245 		if (consumer == (instance->max_fw_cmds + 1)) {
2246 			consumer = 0;
2247 		}
2248 	}
2249 
2250 	*instance->consumer = cpu_to_le32(producer);
2251 
2252 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2253 
2254 	/*
2255 	 * Check if we can restore can_queue
2256 	 */
2257 	megasas_check_and_restore_queue_depth(instance);
2258 }
2259 
2260 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2261 
/**
 * megasas_start_timer - Initializes the SR-IOV heartbeat timer object
 * @instance:		Adapter soft state
 *
 */
2267 void megasas_start_timer(struct megasas_instance *instance)
2268 {
2269 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2270 
2271 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2272 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2273 	add_timer(timer);
2274 }
2275 
2276 static void
2277 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2278 
2279 static void
2280 process_fw_state_change_wq(struct work_struct *work);
2281 
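/*
 * megasas_do_ocr -	Trigger an online controller reset (OCR)
 * @instance:		Adapter soft state
 *
 * Disables interrupts, moves the adapter into the internal reset state
 * machine, defers pending commands and runs the FW state change work.
 */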
2282 static void megasas_do_ocr(struct megasas_instance *instance)
2283 {
2284 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2285 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2286 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2287 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2288 	}
2289 	instance->instancet->disable_intr(instance);
2290 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2291 	instance->issuepend_done = 0;
2292 
2293 	atomic_set(&instance->fw_outstanding, 0);
2294 	megasas_internal_reset_defer_cmds(instance);
2295 	process_fw_state_change_wq(&instance->work_init);
2296 }
2297 
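/*
 * megasas_get_ld_vf_affiliation_111 - Get LD/VF affiliation (PlasmaFW111)
 * @instance:				Adapter soft state
 * @initial:				Set for the first (initial) query
 *
 * Returns 1 if a rescan is needed (affiliation changed or the DCMD failed),
 * 0 if nothing changed, or -ENOMEM on allocation failure.
 */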
2298 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2299 					    int initial)
2300 {
2301 	struct megasas_cmd *cmd;
2302 	struct megasas_dcmd_frame *dcmd;
2303 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2304 	dma_addr_t new_affiliation_111_h;
2305 	int ld, retval = 0;
2306 	u8 thisVf;
2307 
2308 	cmd = megasas_get_cmd(instance);
2309 
2310 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2312 		       "Failed to get cmd for scsi%d\n",
2313 			instance->host->host_no);
2314 		return -ENOMEM;
2315 	}
2316 
2317 	dcmd = &cmd->frame->dcmd;
2318 
2319 	if (!instance->vf_affiliation_111) {
2320 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2321 		       "affiliation for scsi%d\n", instance->host->host_no);
2322 		megasas_return_cmd(instance, cmd);
2323 		return -ENOMEM;
2324 	}
2325 
	if (initial)
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2329 	else {
2330 		new_affiliation_111 =
2331 			dma_alloc_coherent(&instance->pdev->dev,
2332 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2333 					   &new_affiliation_111_h, GFP_KERNEL);
2334 		if (!new_affiliation_111) {
2335 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2336 			       "memory for new affiliation for scsi%d\n",
2337 			       instance->host->host_no);
2338 			megasas_return_cmd(instance, cmd);
2339 			return -ENOMEM;
2340 		}
2341 	}
2342 
2343 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2344 
2345 	dcmd->cmd = MFI_CMD_DCMD;
2346 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2347 	dcmd->sge_count = 1;
2348 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2349 	dcmd->timeout = 0;
2350 	dcmd->pad_0 = 0;
2351 	dcmd->data_xfer_len =
2352 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2353 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2354 
2355 	if (initial)
2356 		dcmd->sgl.sge32[0].phys_addr =
2357 			cpu_to_le32(instance->vf_affiliation_111_h);
2358 	else
2359 		dcmd->sgl.sge32[0].phys_addr =
2360 			cpu_to_le32(new_affiliation_111_h);
2361 
2362 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2363 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2364 
2365 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2366 	       "scsi%d\n", instance->host->host_no);
2367 
2368 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2369 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2370 		       " failed with status 0x%x for scsi%d\n",
2371 		       dcmd->cmd_status, instance->host->host_no);
2372 		retval = 1; /* Do a scan if we couldn't get affiliation */
2373 		goto out;
2374 	}
2375 
2376 	if (!initial) {
2377 		thisVf = new_affiliation_111->thisVf;
2378 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2379 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2380 			    new_affiliation_111->map[ld].policy[thisVf]) {
2381 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2382 				       "Got new LD/VF affiliation for scsi%d\n",
2383 				       instance->host->host_no);
2384 				memcpy(instance->vf_affiliation_111,
2385 				       new_affiliation_111,
2386 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2387 				retval = 1;
2388 				goto out;
2389 			}
2390 	}
2391 out:
2392 	if (new_affiliation_111) {
2393 		dma_free_coherent(&instance->pdev->dev,
2394 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2395 				    new_affiliation_111,
2396 				    new_affiliation_111_h);
2397 	}
2398 
2399 	megasas_return_cmd(instance, cmd);
2400 
2401 	return retval;
2402 }
2403 
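/*
 * megasas_get_ld_vf_affiliation_12 -	Get LD/VF affiliation
 *					(non-PlasmaFW111 firmware)
 * @instance:				Adapter soft state
 * @initial:				Set for the first (initial) query
 *
 * Returns 1 if a rescan is needed, 0 if nothing changed, or -ENOMEM on
 * allocation failure.
 */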
2404 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2405 					    int initial)
2406 {
2407 	struct megasas_cmd *cmd;
2408 	struct megasas_dcmd_frame *dcmd;
2409 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2410 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2411 	dma_addr_t new_affiliation_h;
2412 	int i, j, retval = 0, found = 0, doscan = 0;
2413 	u8 thisVf;
2414 
2415 	cmd = megasas_get_cmd(instance);
2416 
2417 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2419 		       "Failed to get cmd for scsi%d\n",
2420 		       instance->host->host_no);
2421 		return -ENOMEM;
2422 	}
2423 
2424 	dcmd = &cmd->frame->dcmd;
2425 
2426 	if (!instance->vf_affiliation) {
2427 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2428 		       "affiliation for scsi%d\n", instance->host->host_no);
2429 		megasas_return_cmd(instance, cmd);
2430 		return -ENOMEM;
2431 	}
2432 
2433 	if (initial)
2434 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2435 		       sizeof(struct MR_LD_VF_AFFILIATION));
2436 	else {
2437 		new_affiliation =
2438 			dma_alloc_coherent(&instance->pdev->dev,
2439 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2440 					   &new_affiliation_h, GFP_KERNEL);
2441 		if (!new_affiliation) {
2442 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2443 			       "memory for new affiliation for scsi%d\n",
2444 			       instance->host->host_no);
2445 			megasas_return_cmd(instance, cmd);
2446 			return -ENOMEM;
2447 		}
2448 	}
2449 
2450 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2451 
2452 	dcmd->cmd = MFI_CMD_DCMD;
2453 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2454 	dcmd->sge_count = 1;
2455 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2456 	dcmd->timeout = 0;
2457 	dcmd->pad_0 = 0;
2458 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2459 		sizeof(struct MR_LD_VF_AFFILIATION));
2460 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2461 
2462 	if (initial)
2463 		dcmd->sgl.sge32[0].phys_addr =
2464 			cpu_to_le32(instance->vf_affiliation_h);
2465 	else
2466 		dcmd->sgl.sge32[0].phys_addr =
2467 			cpu_to_le32(new_affiliation_h);
2468 
2469 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2470 		sizeof(struct MR_LD_VF_AFFILIATION));
2471 
2472 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2473 	       "scsi%d\n", instance->host->host_no);
2474 
2475 
2476 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2477 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2478 		       " failed with status 0x%x for scsi%d\n",
2479 		       dcmd->cmd_status, instance->host->host_no);
2480 		retval = 1; /* Do a scan if we couldn't get affiliation */
2481 		goto out;
2482 	}
2483 
2484 	if (!initial) {
2485 		if (!new_affiliation->ldCount) {
2486 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2487 			       "affiliation for passive path for scsi%d\n",
2488 			       instance->host->host_no);
2489 			retval = 1;
2490 			goto out;
2491 		}
2492 		newmap = new_affiliation->map;
2493 		savedmap = instance->vf_affiliation->map;
2494 		thisVf = new_affiliation->thisVf;
2495 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2496 			found = 0;
2497 			for (j = 0; j < instance->vf_affiliation->ldCount;
2498 			     j++) {
2499 				if (newmap->ref.targetId ==
2500 				    savedmap->ref.targetId) {
2501 					found = 1;
2502 					if (newmap->policy[thisVf] !=
2503 					    savedmap->policy[thisVf]) {
2504 						doscan = 1;
2505 						goto out;
2506 					}
2507 				}
2508 				savedmap = (struct MR_LD_VF_MAP *)
2509 					((unsigned char *)savedmap +
2510 					 savedmap->size);
2511 			}
2512 			if (!found && newmap->policy[thisVf] !=
2513 			    MR_LD_ACCESS_HIDDEN) {
2514 				doscan = 1;
2515 				goto out;
2516 			}
2517 			newmap = (struct MR_LD_VF_MAP *)
2518 				((unsigned char *)newmap + newmap->size);
2519 		}
2520 
2521 		newmap = new_affiliation->map;
2522 		savedmap = instance->vf_affiliation->map;
2523 
2524 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2525 			found = 0;
2526 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2527 				if (savedmap->ref.targetId ==
2528 				    newmap->ref.targetId) {
2529 					found = 1;
2530 					if (savedmap->policy[thisVf] !=
2531 					    newmap->policy[thisVf]) {
2532 						doscan = 1;
2533 						goto out;
2534 					}
2535 				}
2536 				newmap = (struct MR_LD_VF_MAP *)
2537 					((unsigned char *)newmap +
2538 					 newmap->size);
2539 			}
2540 			if (!found && savedmap->policy[thisVf] !=
2541 			    MR_LD_ACCESS_HIDDEN) {
2542 				doscan = 1;
2543 				goto out;
2544 			}
2545 			savedmap = (struct MR_LD_VF_MAP *)
2546 				((unsigned char *)savedmap +
2547 				 savedmap->size);
2548 		}
2549 	}
2550 out:
2551 	if (doscan) {
2552 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2553 		       "affiliation for scsi%d\n", instance->host->host_no);
2554 		memcpy(instance->vf_affiliation, new_affiliation,
2555 		       new_affiliation->size);
2556 		retval = 1;
2557 	}
2558 
2559 	if (new_affiliation)
2560 		dma_free_coherent(&instance->pdev->dev,
2561 				    (MAX_LOGICAL_DRIVES + 1) *
2562 				    sizeof(struct MR_LD_VF_AFFILIATION),
2563 				    new_affiliation, new_affiliation_h);
2564 	megasas_return_cmd(instance, cmd);
2565 
2566 	return retval;
2567 }
2568 
2569 /* This function will get the current SR-IOV LD/VF affiliation */
2570 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2571 	int initial)
2572 {
2573 	int retval;
2574 
2575 	if (instance->PlasmaFW111)
2576 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2577 	else
2578 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2579 	return retval;
2580 }
2581 
2582 /* This function will tell FW to start the SR-IOV heartbeat */
2583 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2584 					 int initial)
2585 {
2586 	struct megasas_cmd *cmd;
2587 	struct megasas_dcmd_frame *dcmd;
2588 	int retval = 0;
2589 
2590 	cmd = megasas_get_cmd(instance);
2591 
2592 	if (!cmd) {
2593 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2594 		       "Failed to get cmd for scsi%d\n",
2595 		       instance->host->host_no);
2596 		return -ENOMEM;
2597 	}
2598 
2599 	dcmd = &cmd->frame->dcmd;
2600 
2601 	if (initial) {
2602 		instance->hb_host_mem =
2603 			dma_alloc_coherent(&instance->pdev->dev,
2604 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2605 					   &instance->hb_host_mem_h,
2606 					   GFP_KERNEL);
2607 		if (!instance->hb_host_mem) {
2608 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2609 			       " memory for heartbeat host memory for scsi%d\n",
2610 			       instance->host->host_no);
2611 			retval = -ENOMEM;
2612 			goto out;
2613 		}
2614 	}
2615 
2616 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2617 
2618 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2619 	dcmd->cmd = MFI_CMD_DCMD;
2620 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2621 	dcmd->sge_count = 1;
2622 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2623 	dcmd->timeout = 0;
2624 	dcmd->pad_0 = 0;
2625 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2626 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2627 
2628 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2629 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2630 
2631 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2632 	       instance->host->host_no);
2633 
2634 	if ((instance->adapter_type != MFI_SERIES) &&
2635 	    !instance->mask_interrupts)
2636 		retval = megasas_issue_blocked_cmd(instance, cmd,
2637 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2638 	else
2639 		retval = megasas_issue_polled(instance, cmd);
2640 
2641 	if (retval) {
2642 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2643 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2644 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2645 			"timed out" : "failed", instance->host->host_no);
2646 		retval = 1;
2647 	}
2648 
2649 out:
2650 	megasas_return_cmd(instance, cmd);
2651 
2652 	return retval;
2653 }
2654 
2655 /* Handler for SR-IOV heartbeat */
2656 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2657 {
2658 	struct megasas_instance *instance =
2659 		from_timer(instance, t, sriov_heartbeat_timer);
2660 
2661 	if (instance->hb_host_mem->HB.fwCounter !=
2662 	    instance->hb_host_mem->HB.driverCounter) {
2663 		instance->hb_host_mem->HB.driverCounter =
2664 			instance->hb_host_mem->HB.fwCounter;
2665 		mod_timer(&instance->sriov_heartbeat_timer,
2666 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2667 	} else {
2668 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2669 		       "completed for scsi%d\n", instance->host->host_no);
2670 		schedule_work(&instance->work_init);
2671 	}
2672 }
2673 
/**
 * megasas_wait_for_outstanding -	Wait for all outstanding cmds
 * @instance:				Adapter soft state
 *
 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
 * complete all its outstanding commands. Returns FAILED if one or more IOs
 * are still pending after this time period; in that case it also marks the
 * controller dead.
 */
2682 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2683 {
2684 	int i, sl, outstanding;
2685 	u32 reset_index;
2686 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2687 	unsigned long flags;
2688 	struct list_head clist_local;
2689 	struct megasas_cmd *reset_cmd;
2690 	u32 fw_state;
2691 
2692 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2693 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2694 		__func__, __LINE__);
2695 		return FAILED;
2696 	}
2697 
2698 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2699 
2700 		INIT_LIST_HEAD(&clist_local);
2701 		spin_lock_irqsave(&instance->hba_lock, flags);
2702 		list_splice_init(&instance->internal_reset_pending_q,
2703 				&clist_local);
2704 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2705 
2706 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2707 		for (i = 0; i < wait_time; i++) {
2708 			msleep(1000);
2709 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2710 				break;
2711 		}
2712 
2713 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2714 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2715 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2716 			return FAILED;
2717 		}
2718 
2719 		reset_index = 0;
2720 		while (!list_empty(&clist_local)) {
2721 			reset_cmd = list_entry((&clist_local)->next,
2722 						struct megasas_cmd, list);
2723 			list_del_init(&reset_cmd->list);
2724 			if (reset_cmd->scmd) {
2725 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2726 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2727 					reset_index, reset_cmd,
2728 					reset_cmd->scmd->cmnd[0]);
2729 
2730 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2731 				megasas_return_cmd(instance, reset_cmd);
2732 			} else if (reset_cmd->sync_cmd) {
				dev_notice(&instance->pdev->dev, "%p synch cmds"
						" reset queue\n",
						reset_cmd);
2736 
2737 				reset_cmd->cmd_status_drv = DCMD_INIT;
2738 				instance->instancet->fire_cmd(instance,
2739 						reset_cmd->frame_phys_addr,
2740 						0, instance->reg_set);
2741 			} else {
				dev_notice(&instance->pdev->dev, "%p unexpected"
					" cmds list\n",
					reset_cmd);
2745 			}
2746 			reset_index++;
2747 		}
2748 
2749 		return SUCCESS;
2750 	}
2751 
2752 	for (i = 0; i < resetwaittime; i++) {
2753 		outstanding = atomic_read(&instance->fw_outstanding);
2754 
2755 		if (!outstanding)
2756 			break;
2757 
2758 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
			       "commands to complete\n", i, outstanding);
			/*
			 * Call cmd completion routine. Cmd to be
			 * completed directly without depending on isr.
			 */
2765 			megasas_complete_cmd_dpc((unsigned long)instance);
2766 		}
2767 
2768 		msleep(1000);
2769 	}
2770 
2771 	i = 0;
2772 	outstanding = atomic_read(&instance->fw_outstanding);
2773 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2774 
2775 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2776 		goto no_outstanding;
2777 
2778 	if (instance->disableOnlineCtrlReset)
2779 		goto kill_hba_and_failed;
2780 	do {
2781 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2782 			dev_info(&instance->pdev->dev,
2783 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2784 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2785 			if (i == 3)
2786 				goto kill_hba_and_failed;
2787 			megasas_do_ocr(instance);
2788 
2789 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2790 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2791 				__func__, __LINE__);
2792 				return FAILED;
2793 			}
2794 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2795 				__func__, __LINE__);
2796 
2797 			for (sl = 0; sl < 10; sl++)
2798 				msleep(500);
2799 
2800 			outstanding = atomic_read(&instance->fw_outstanding);
2801 
2802 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2803 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2804 				goto no_outstanding;
2805 		}
2806 		i++;
2807 	} while (i <= 3);
2808 
2809 no_outstanding:
2810 
2811 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2812 		__func__, __LINE__);
2813 	return SUCCESS;
2814 
2815 kill_hba_and_failed:
2816 
2817 	/* Reset not supported, kill adapter */
2818 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2819 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2820 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2821 		atomic_read(&instance->fw_outstanding));
2822 	megasas_dump_pending_frames(instance);
2823 	megaraid_sas_kill_hba(instance);
2824 
2825 	return FAILED;
2826 }
2827 
2828 /**
2829  * megasas_generic_reset -	Generic reset routine
2830  * @scmd:			Mid-layer SCSI command
2831  *
2832  * This routine implements a generic reset handler for device, bus and host
2833  * reset requests. Device, bus and host specific reset handlers can use this
2834  * function after they do their specific tasks.
2835  */
2836 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2837 {
2838 	int ret_val;
2839 	struct megasas_instance *instance;
2840 
2841 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2842 
2843 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2844 		 scmd->cmnd[0], scmd->retries);
2845 
2846 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2847 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2848 		return FAILED;
2849 	}
2850 
2851 	ret_val = megasas_wait_for_outstanding(instance);
2852 	if (ret_val == SUCCESS)
2853 		dev_notice(&instance->pdev->dev, "reset successful\n");
2854 	else
2855 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2856 
2857 	return ret_val;
2858 }
2859 
2860 /**
2861  * megasas_reset_timer - quiesce the adapter if required
2862  * @scmd:		scsi cmnd
2863  *
2864  * Sets the FW busy flag and reduces the host->can_queue if the
2865  * cmd has not been completed within the timeout period.
2866  */
2867 static enum
2868 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2869 {
2870 	struct megasas_instance *instance;
2871 	unsigned long flags;
2872 
2873 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2874 				(scmd_timeout * 2) * HZ)) {
2875 		return BLK_EH_DONE;
2876 	}
2877 
2878 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2879 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2880 		/* FW is busy, throttle IO */
2881 		spin_lock_irqsave(instance->host->host_lock, flags);
2882 
2883 		instance->host->can_queue = instance->throttlequeuedepth;
2884 		instance->last_time = jiffies;
2885 		instance->flag |= MEGASAS_FW_BUSY;
2886 
2887 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2888 	}
2889 	return BLK_EH_RESET_TIMER;
2890 }
2891 
2892 /**
2893  * megasas_dump -	This function will print hexdump of provided buffer.
2894  * @buf:		Buffer to be dumped
2895  * @sz:		Size in bytes
2896  * @format:		Different formats of dumping e.g. format=n will
2897  *			cause only 'n' 32 bit words to be dumped in a single
2898  *			line.
2899  */
2900 inline void
2901 megasas_dump(void *buf, int sz, int format)
2902 {
2903 	int i;
2904 	__le32 *buf_loc = (__le32 *)buf;
2905 
2906 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2907 		if ((i % format) == 0) {
2908 			if (i != 0)
2909 				printk(KERN_CONT "\n");
2910 			printk(KERN_CONT "%08x: ", (i * 4));
2911 		}
2912 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2913 	}
2914 	printk(KERN_CONT "\n");
2915 }
2916 
/**
 * megasas_dump_reg_set -	This function will print hexdump of register set
 * @reg_set:			Register set to be dumped
 */
2925 inline void
2926 megasas_dump_reg_set(void __iomem *reg_set)
2927 {
2928 	unsigned int i, sz = 256;
2929 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2930 
2931 	for (i = 0; i < (sz / sizeof(u32)); i++)
2932 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2933 }
2934 
2935 /**
2936  * megasas_dump_fusion_io -	This function will print key details
2937  *				of SCSI IO
2938  * @scmd:			SCSI command pointer of SCSI IO
2939  */
2940 void
2941 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2942 {
2943 	struct megasas_cmd_fusion *cmd;
2944 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2945 	struct megasas_instance *instance;
2946 
2947 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2948 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2949 
2950 	scmd_printk(KERN_INFO, scmd,
2951 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2952 		    scmd, scmd->retries, scmd->allowed);
2953 	scsi_print_command(scmd);
2954 
2955 	if (cmd) {
2956 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2957 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2958 		scmd_printk(KERN_INFO, scmd,
2959 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
2960 			    req_desc->SCSIIO.RequestFlags,
2961 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2962 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2963 
2964 		printk(KERN_INFO "IO request frame:\n");
2965 		megasas_dump(cmd->io_request,
2966 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2967 		printk(KERN_INFO "Chain frame:\n");
2968 		megasas_dump(cmd->sg_frame,
2969 			     instance->max_chain_frame_sz, 8);
2970 	}
2971 
2972 }
2973 
2974 /*
2975  * megasas_dump_sys_regs - This function will dump system registers through
2976  *			    sysfs.
2977  * @reg_set:		    Pointer to System register set.
2978  * @buf:		    Buffer to which output is to be written.
2979  * @return:		    Number of bytes written to buffer.
2980  */
2981 static inline ssize_t
2982 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2983 {
2984 	unsigned int i, sz = 256;
2985 	int bytes_wrote = 0;
2986 	char *loc = (char *)buf;
2987 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2988 
2989 	for (i = 0; i < sz / sizeof(u32); i++) {
		bytes_wrote += snprintf(loc + bytes_wrote,
					PAGE_SIZE - bytes_wrote,
					"%08x: %08x\n", (i * 4),
					readl(&reg[i]));
2993 	}
2994 	return bytes_wrote;
2995 }
2996 
/**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 * @scmd:			Mid-layer SCSI command
 */
3000 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3001 {
3002 	int ret;
3003 	struct megasas_instance *instance;
3004 
3005 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3006 
3007 	scmd_printk(KERN_INFO, scmd,
3008 		"OCR is requested due to IO timeout!!\n");
3009 
3010 	scmd_printk(KERN_INFO, scmd,
3011 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3012 		scmd->device->host->shost_state,
3013 		scsi_host_busy(scmd->device->host),
3014 		atomic_read(&instance->fw_outstanding));
3015 	/*
3016 	 * First wait for all commands to complete
3017 	 */
3018 	if (instance->adapter_type == MFI_SERIES) {
3019 		ret = megasas_generic_reset(scmd);
3020 	} else {
3021 		megasas_dump_fusion_io(scmd);
3022 		ret = megasas_reset_fusion(scmd->device->host,
3023 				SCSIIO_TIMEOUT_OCR);
3024 	}
3025 
3026 	return ret;
3027 }
3028 
3029 /**
3030  * megasas_task_abort - Issues task abort request to firmware
3031  *			(supported only for fusion adapters)
3032  * @scmd:		SCSI command pointer
3033  */
3034 static int megasas_task_abort(struct scsi_cmnd *scmd)
3035 {
3036 	int ret;
3037 	struct megasas_instance *instance;
3038 
3039 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3040 
3041 	if (instance->adapter_type != MFI_SERIES)
3042 		ret = megasas_task_abort_fusion(scmd);
3043 	else {
3044 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3045 		ret = FAILED;
3046 	}
3047 
3048 	return ret;
3049 }
3050 
3051 /**
3052  * megasas_reset_target:  Issues target reset request to firmware
3053  *                        (supported only for fusion adapters)
3054  * @scmd:                 SCSI command pointer
3055  */
3056 static int megasas_reset_target(struct scsi_cmnd *scmd)
3057 {
3058 	int ret;
3059 	struct megasas_instance *instance;
3060 
3061 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3062 
3063 	if (instance->adapter_type != MFI_SERIES)
3064 		ret = megasas_reset_target_fusion(scmd);
3065 	else {
3066 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3067 		ret = FAILED;
3068 	}
3069 
3070 	return ret;
3071 }
3072 
3073 /**
3074  * megasas_bios_param - Returns disk geometry for a disk
3075  * @sdev:		device handle
3076  * @bdev:		block device
3077  * @capacity:		drive capacity
3078  * @geom:		geometry parameters
3079  */
3080 static int
3081 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3082 		 sector_t capacity, int geom[])
3083 {
3084 	int heads;
3085 	int sectors;
3086 	sector_t cylinders;
3087 	unsigned long tmp;
3088 
3089 	/* Default heads (64) & sectors (32) */
3090 	heads = 64;
3091 	sectors = 32;
3092 
3093 	tmp = heads * sectors;
3094 	cylinders = capacity;
3095 
3096 	sector_div(cylinders, tmp);
3097 
3098 	/*
	 * Handle extended translation size for logical drives > 1GB
3100 	 */
3101 
3102 	if (capacity >= 0x200000) {
3103 		heads = 255;
3104 		sectors = 63;
3105 		tmp = heads*sectors;
3106 		cylinders = capacity;
3107 		sector_div(cylinders, tmp);
3108 	}
3109 
3110 	geom[0] = heads;
3111 	geom[1] = sectors;
3112 	geom[2] = cylinders;
3113 
3114 	return 0;
3115 }
3116 
3117 static void megasas_aen_polling(struct work_struct *work);
3118 
3119 /**
3120  * megasas_service_aen -	Processes an event notification
3121  * @instance:			Adapter soft state
3122  * @cmd:			AEN command completed by the ISR
3123  *
3124  * For AEN, driver sends a command down to FW that is held by the FW till an
3125  * event occurs. When an event of interest occurs, FW completes the command
3126  * that it was previously holding.
3127  *
 * This routine sends a SIGIO signal to processes that have registered with the
3129  * driver for AEN.
3130  */
3131 static void
3132 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3133 {
3134 	unsigned long flags;
3135 
3136 	/*
3137 	 * Don't signal app if it is just an aborted previously registered aen
3138 	 */
3139 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3140 		spin_lock_irqsave(&poll_aen_lock, flags);
3141 		megasas_poll_wait_aen = 1;
3142 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3143 		wake_up(&megasas_poll_wait);
3144 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
	} else
		cmd->abort_aen = 0;
3148 
3149 	instance->aen_cmd = NULL;
3150 
3151 	megasas_return_cmd(instance, cmd);
3152 
3153 	if ((instance->unload == 0) &&
3154 		((instance->issuepend_done == 1))) {
3155 		struct megasas_aen_event *ev;
3156 
3157 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3158 		if (!ev) {
3159 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3160 		} else {
3161 			ev->instance = instance;
3162 			instance->ev = ev;
3163 			INIT_DELAYED_WORK(&ev->hotplug_work,
3164 					  megasas_aen_polling);
3165 			schedule_delayed_work(&ev->hotplug_work, 0);
3166 		}
3167 	}
3168 }
3169 
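/*
 * fw_crash_buffer sysfs attribute: the store method selects a byte offset
 * into the firmware crash dump and the show method copies out up to one
 * page of the dump starting at that offset.
 */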
3170 static ssize_t
3171 fw_crash_buffer_store(struct device *cdev,
3172 	struct device_attribute *attr, const char *buf, size_t count)
3173 {
3174 	struct Scsi_Host *shost = class_to_shost(cdev);
3175 	struct megasas_instance *instance =
3176 		(struct megasas_instance *) shost->hostdata;
3177 	int val = 0;
3178 	unsigned long flags;
3179 
3180 	if (kstrtoint(buf, 0, &val) != 0)
3181 		return -EINVAL;
3182 
3183 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3184 	instance->fw_crash_buffer_offset = val;
3185 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3186 	return strlen(buf);
3187 }
3188 
3189 static ssize_t
3190 fw_crash_buffer_show(struct device *cdev,
3191 	struct device_attribute *attr, char *buf)
3192 {
3193 	struct Scsi_Host *shost = class_to_shost(cdev);
3194 	struct megasas_instance *instance =
3195 		(struct megasas_instance *) shost->hostdata;
3196 	u32 size;
3197 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3198 	unsigned long chunk_left_bytes;
3199 	unsigned long src_addr;
3200 	unsigned long flags;
3201 	u32 buff_offset;
3202 
3203 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3204 	buff_offset = instance->fw_crash_buffer_offset;
3205 	if (!instance->crash_dump_buf &&
3206 		!((instance->fw_crash_state == AVAILABLE) ||
3207 		(instance->fw_crash_state == COPYING))) {
3208 		dev_err(&instance->pdev->dev,
3209 			"Firmware crash dump is not available\n");
3210 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3211 		return -EINVAL;
3212 	}
3213 
3214 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3215 		dev_err(&instance->pdev->dev,
3216 			"Firmware crash dump offset is out of range\n");
3217 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3218 		return 0;
3219 	}
3220 
3221 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3222 	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3223 	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3224 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3225 
3226 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3227 		(buff_offset % dmachunk);
3228 	memcpy(buf, (void *)src_addr, size);
3229 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3230 
3231 	return size;
3232 }
3233 
3234 static ssize_t
3235 fw_crash_buffer_size_show(struct device *cdev,
3236 	struct device_attribute *attr, char *buf)
3237 {
3238 	struct Scsi_Host *shost = class_to_shost(cdev);
3239 	struct megasas_instance *instance =
3240 		(struct megasas_instance *) shost->hostdata;
3241 
3242 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3243 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3244 }
3245 
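/*
 * fw_crash_state sysfs attribute: the application updates the crash dump
 * state here; writing COPIED or COPY_ERROR frees the host crash buffer.
 * The show method reports the current state.
 */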
3246 static ssize_t
3247 fw_crash_state_store(struct device *cdev,
3248 	struct device_attribute *attr, const char *buf, size_t count)
3249 {
3250 	struct Scsi_Host *shost = class_to_shost(cdev);
3251 	struct megasas_instance *instance =
3252 		(struct megasas_instance *) shost->hostdata;
3253 	int val = 0;
3254 	unsigned long flags;
3255 
3256 	if (kstrtoint(buf, 0, &val) != 0)
3257 		return -EINVAL;
3258 
3259 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3260 		dev_err(&instance->pdev->dev, "application updates invalid "
3261 			"firmware crash state\n");
3262 		return -EINVAL;
3263 	}
3264 
3265 	instance->fw_crash_state = val;
3266 
3267 	if ((val == COPIED) || (val == COPY_ERROR)) {
3268 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3269 		megasas_free_host_crash_buffer(instance);
3270 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3271 		if (val == COPY_ERROR)
3272 			dev_info(&instance->pdev->dev, "application failed to "
3273 				"copy Firmware crash dump\n");
3274 		else
3275 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3276 				"copied successfully\n");
3277 	}
3278 	return strlen(buf);
3279 }
3280 
3281 static ssize_t
3282 fw_crash_state_show(struct device *cdev,
3283 	struct device_attribute *attr, char *buf)
3284 {
3285 	struct Scsi_Host *shost = class_to_shost(cdev);
3286 	struct megasas_instance *instance =
3287 		(struct megasas_instance *) shost->hostdata;
3288 
3289 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3290 }
3291 
3292 static ssize_t
3293 page_size_show(struct device *cdev,
3294 	struct device_attribute *attr, char *buf)
3295 {
3296 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3297 }
3298 
3299 static ssize_t
3300 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3301 	char *buf)
3302 {
3303 	struct Scsi_Host *shost = class_to_shost(cdev);
3304 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3305 
3306 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3307 }
3308 
3309 static ssize_t
3310 fw_cmds_outstanding_show(struct device *cdev,
3311 				 struct device_attribute *attr, char *buf)
3312 {
3313 	struct Scsi_Host *shost = class_to_shost(cdev);
3314 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3315 
3316 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3317 }
3318 
3319 static ssize_t
3320 enable_sdev_max_qd_show(struct device *cdev,
3321 	struct device_attribute *attr, char *buf)
3322 {
3323 	struct Scsi_Host *shost = class_to_shost(cdev);
3324 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3325 
3326 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3327 }
3328 
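/*
 * enable_sdev_max_qd sysfs attribute: the store method toggles
 * instance->enable_sdev_max_qd and reapplies the firmware assisted queue
 * depth on every device of this host.
 */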
3329 static ssize_t
3330 enable_sdev_max_qd_store(struct device *cdev,
3331 	struct device_attribute *attr, const char *buf, size_t count)
3332 {
3333 	struct Scsi_Host *shost = class_to_shost(cdev);
3334 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3335 	u32 val = 0;
3336 	bool is_target_prop;
3337 	int ret_target_prop = DCMD_FAILED;
3338 	struct scsi_device *sdev;
3339 
3340 	if (kstrtou32(buf, 0, &val) != 0) {
3341 		pr_err("megasas: could not set enable_sdev_max_qd\n");
3342 		return -EINVAL;
3343 	}
3344 
3345 	mutex_lock(&instance->reset_mutex);
3346 	if (val)
3347 		instance->enable_sdev_max_qd = true;
3348 	else
3349 		instance->enable_sdev_max_qd = false;
3350 
3351 	shost_for_each_device(sdev, shost) {
3352 		ret_target_prop = megasas_get_target_prop(instance, sdev);
3353 		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3354 		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3355 	}
3356 	mutex_unlock(&instance->reset_mutex);
3357 
3358 	return strlen(buf);
3359 }
3360 
3361 static ssize_t
3362 dump_system_regs_show(struct device *cdev,
3363 			       struct device_attribute *attr, char *buf)
3364 {
3365 	struct Scsi_Host *shost = class_to_shost(cdev);
3366 	struct megasas_instance *instance =
3367 			(struct megasas_instance *)shost->hostdata;
3368 
3369 	return megasas_dump_sys_regs(instance->reg_set, buf);
3370 }
3371 
3372 static ssize_t
3373 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3374 			  char *buf)
3375 {
3376 	struct Scsi_Host *shost = class_to_shost(cdev);
3377 	struct megasas_instance *instance =
3378 			(struct megasas_instance *)shost->hostdata;
3379 
3380 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3381 			(unsigned long)instance->map_id);
3382 }
3383 
3384 static DEVICE_ATTR_RW(fw_crash_buffer);
3385 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3386 static DEVICE_ATTR_RW(fw_crash_state);
3387 static DEVICE_ATTR_RO(page_size);
3388 static DEVICE_ATTR_RO(ldio_outstanding);
3389 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3390 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3391 static DEVICE_ATTR_RO(dump_system_regs);
3392 static DEVICE_ATTR_RO(raid_map_id);
3393 
3394 static struct device_attribute *megaraid_host_attrs[] = {
3395 	&dev_attr_fw_crash_buffer_size,
3396 	&dev_attr_fw_crash_buffer,
3397 	&dev_attr_fw_crash_state,
3398 	&dev_attr_page_size,
3399 	&dev_attr_ldio_outstanding,
3400 	&dev_attr_fw_cmds_outstanding,
3401 	&dev_attr_enable_sdev_max_qd,
3402 	&dev_attr_dump_system_regs,
3403 	&dev_attr_raid_map_id,
3404 	NULL,
3405 };
3406 
3407 /*
3408  * Scsi host template for megaraid_sas driver
3409  */
3410 static struct scsi_host_template megasas_template = {
3411 
3412 	.module = THIS_MODULE,
3413 	.name = "Avago SAS based MegaRAID driver",
3414 	.proc_name = "megaraid_sas",
3415 	.slave_configure = megasas_slave_configure,
3416 	.slave_alloc = megasas_slave_alloc,
3417 	.slave_destroy = megasas_slave_destroy,
3418 	.queuecommand = megasas_queue_command,
3419 	.eh_target_reset_handler = megasas_reset_target,
3420 	.eh_abort_handler = megasas_task_abort,
3421 	.eh_host_reset_handler = megasas_reset_bus_host,
3422 	.eh_timed_out = megasas_reset_timer,
3423 	.shost_attrs = megaraid_host_attrs,
3424 	.bios_param = megasas_bios_param,
3425 	.change_queue_depth = scsi_change_queue_depth,
3426 	.max_segment_size = 0xffffffff,
3427 };
3428 
3429 /**
3430  * megasas_complete_int_cmd -	Completes an internal command
3431  * @instance:			Adapter soft state
3432  * @cmd:			Command to be completed
3433  *
3434  * The megasas_issue_blocked_cmd() function waits for a command to complete
3435  * after it issues a command. This function wakes up that waiting routine by
3436  * calling wake_up() on the wait queue.
3437  */
3438 static void
3439 megasas_complete_int_cmd(struct megasas_instance *instance,
3440 			 struct megasas_cmd *cmd)
3441 {
3442 	if (cmd->cmd_status_drv == DCMD_INIT)
3443 		cmd->cmd_status_drv =
3444 		(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3445 		DCMD_SUCCESS : DCMD_FAILED;
3446 
3447 	wake_up(&instance->int_cmd_wait_q);
3448 }
3449 
3450 /**
3451  * megasas_complete_abort -	Completes aborting a command
3452  * @instance:			Adapter soft state
3453  * @cmd:			Cmd that was issued to abort another cmd
3454  *
3455  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3456  * after it issues an abort on a previously issued command. This function
3457  * wakes up all functions waiting on the same wait queue.
3458  */
3459 static void
3460 megasas_complete_abort(struct megasas_instance *instance,
3461 		       struct megasas_cmd *cmd)
3462 {
3463 	if (cmd->sync_cmd) {
3464 		cmd->sync_cmd = 0;
3465 		cmd->cmd_status_drv = DCMD_SUCCESS;
3466 		wake_up(&instance->abort_cmd_wait_q);
3467 	}
3468 }
3469 
3470 /**
3471  * megasas_complete_cmd -	Completes a command
3472  * @instance:			Adapter soft state
3473  * @cmd:			Command to be completed
3474  * @alt_status:			If non-zero, use this value as status to
3475  *				SCSI mid-layer instead of the value returned
3476  *				by the FW. This should be used if caller wants
3477  *				an alternate status (as in the case of aborted
3478  *				commands)
3479  */
3480 void
3481 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3482 		     u8 alt_status)
3483 {
3484 	int exception = 0;
3485 	struct megasas_header *hdr = &cmd->frame->hdr;
3486 	unsigned long flags;
3487 	struct fusion_context *fusion = instance->ctrl_context;
3488 	u32 opcode, status;
3489 
3490 	/* flag for the retry reset */
3491 	cmd->retry_for_fw_reset = 0;
3492 
3493 	if (cmd->scmd)
3494 		cmd->scmd->SCp.ptr = NULL;
3495 
3496 	switch (hdr->cmd) {
3497 	case MFI_CMD_INVALID:
		/*
		 * Some older 1068 controller FW may keep a pended
		 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		 * when booting the kdump kernel.  Ignore this command to
		 * prevent a kernel panic on shutdown of the kdump kernel.
		 */
3502 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3503 		       "completed\n");
3504 		dev_warn(&instance->pdev->dev, "If you have a controller "
3505 		       "other than PERC5, please upgrade your firmware\n");
3506 		break;
3507 	case MFI_CMD_PD_SCSI_IO:
3508 	case MFI_CMD_LD_SCSI_IO:
3509 
3510 		/*
3511 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3512 		 * issued either through an IO path or an IOCTL path. If it
3513 		 * was via IOCTL, we will send it to internal completion.
3514 		 */
3515 		if (cmd->sync_cmd) {
3516 			cmd->sync_cmd = 0;
3517 			megasas_complete_int_cmd(instance, cmd);
3518 			break;
3519 		}
3520 		/* fall through */
3521 
3522 	case MFI_CMD_LD_READ:
3523 	case MFI_CMD_LD_WRITE:
3524 
3525 		if (alt_status) {
3526 			cmd->scmd->result = alt_status << 16;
3527 			exception = 1;
3528 		}
3529 
3530 		if (exception) {
3531 
3532 			atomic_dec(&instance->fw_outstanding);
3533 
3534 			scsi_dma_unmap(cmd->scmd);
3535 			cmd->scmd->scsi_done(cmd->scmd);
3536 			megasas_return_cmd(instance, cmd);
3537 
3538 			break;
3539 		}
3540 
3541 		switch (hdr->cmd_status) {
3542 
3543 		case MFI_STAT_OK:
3544 			cmd->scmd->result = DID_OK << 16;
3545 			break;
3546 
3547 		case MFI_STAT_SCSI_IO_FAILED:
3548 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3549 			cmd->scmd->result =
3550 			    (DID_ERROR << 16) | hdr->scsi_status;
3551 			break;
3552 
3553 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3554 
3555 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3556 
3557 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3558 				memset(cmd->scmd->sense_buffer, 0,
3559 				       SCSI_SENSE_BUFFERSIZE);
3560 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3561 				       hdr->sense_len);
3562 
3563 				cmd->scmd->result |= DRIVER_SENSE << 24;
3564 			}
3565 
3566 			break;
3567 
3568 		case MFI_STAT_LD_OFFLINE:
3569 		case MFI_STAT_DEVICE_NOT_FOUND:
3570 			cmd->scmd->result = DID_BAD_TARGET << 16;
3571 			break;
3572 
3573 		default:
3574 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3575 			       hdr->cmd_status);
3576 			cmd->scmd->result = DID_ERROR << 16;
3577 			break;
3578 		}
3579 
3580 		atomic_dec(&instance->fw_outstanding);
3581 
3582 		scsi_dma_unmap(cmd->scmd);
3583 		cmd->scmd->scsi_done(cmd->scmd);
3584 		megasas_return_cmd(instance, cmd);
3585 
3586 		break;
3587 
3588 	case MFI_CMD_SMP:
3589 	case MFI_CMD_STP:
3590 	case MFI_CMD_NVME:
3591 	case MFI_CMD_TOOLBOX:
3592 		megasas_complete_int_cmd(instance, cmd);
3593 		break;
3594 
3595 	case MFI_CMD_DCMD:
3596 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3597 		/* Check for LD map update */
3598 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3599 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3600 			fusion->fast_path_io = 0;
3601 			spin_lock_irqsave(instance->host->host_lock, flags);
3602 			status = cmd->frame->hdr.cmd_status;
3603 			instance->map_update_cmd = NULL;
3604 			if (status != MFI_STAT_OK) {
3605 				if (status != MFI_STAT_NOT_FOUND)
3606 					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
3607 					       cmd->frame->hdr.cmd_status);
3608 				else {
3609 					megasas_return_cmd(instance, cmd);
3610 					spin_unlock_irqrestore(
3611 						instance->host->host_lock,
3612 						flags);
3613 					break;
3614 				}
3615 			}
3616 
3617 			megasas_return_cmd(instance, cmd);
3618 
3619 			/*
3620 			 * Set fast path IO to ZERO.
3621 			 * Validate Map will set proper value.
3622 			 * Meanwhile all IOs will go as LD IO.
3623 			 */
3624 			if (status == MFI_STAT_OK &&
3625 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3626 				instance->map_id++;
3627 				fusion->fast_path_io = 1;
3628 			} else {
3629 				fusion->fast_path_io = 0;
3630 			}
3631 
3632 			megasas_sync_map_info(instance);
3633 			spin_unlock_irqrestore(instance->host->host_lock,
3634 					       flags);
3635 			break;
3636 		}
3637 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3638 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3639 			spin_lock_irqsave(&poll_aen_lock, flags);
3640 			megasas_poll_wait_aen = 0;
3641 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3642 		}
3643 
3644 		/* FW has an updated PD sequence */
3645 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3646 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3647 
3648 			spin_lock_irqsave(instance->host->host_lock, flags);
3649 			status = cmd->frame->hdr.cmd_status;
3650 			instance->jbod_seq_cmd = NULL;
3651 			megasas_return_cmd(instance, cmd);
3652 
3653 			if (status == MFI_STAT_OK) {
3654 				instance->pd_seq_map_id++;
3655 				/* Re-register a pd sync seq num cmd */
3656 				if (megasas_sync_pd_seq_num(instance, true))
3657 					instance->use_seqnum_jbod_fp = false;
3658 			} else
3659 				instance->use_seqnum_jbod_fp = false;
3660 
3661 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3662 			break;
3663 		}
3664 
3665 		/*
		 * See if we got an event notification
3667 		 */
3668 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3669 			megasas_service_aen(instance, cmd);
3670 		else
3671 			megasas_complete_int_cmd(instance, cmd);
3672 
3673 		break;
3674 
3675 	case MFI_CMD_ABORT:
3676 		/*
3677 		 * Cmd issued to abort another cmd returned
3678 		 */
3679 		megasas_complete_abort(instance, cmd);
3680 		break;
3681 
3682 	default:
3683 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3684 		       hdr->cmd);
3685 		megasas_complete_int_cmd(instance, cmd);
3686 		break;
3687 	}
3688 }
3689 
3690 /**
3691  * megasas_issue_pending_cmds_again -	issue all pending cmds
3692  *					in FW again because of the fw reset
3693  * @instance:				Adapter soft state
3694  */
3695 static inline void
3696 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3697 {
3698 	struct megasas_cmd *cmd;
3699 	struct list_head clist_local;
3700 	union megasas_evt_class_locale class_locale;
3701 	unsigned long flags;
3702 	u32 seq_num;
3703 
3704 	INIT_LIST_HEAD(&clist_local);
3705 	spin_lock_irqsave(&instance->hba_lock, flags);
3706 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3707 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3708 
3709 	while (!list_empty(&clist_local)) {
3710 		cmd = list_entry((&clist_local)->next,
3711 					struct megasas_cmd, list);
3712 		list_del_init(&cmd->list);
3713 
3714 		if (cmd->sync_cmd || cmd->scmd) {
3715 			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3716 				"detected to be pending while HBA reset\n",
3717 					cmd, cmd->scmd, cmd->sync_cmd);
3718 
3719 			cmd->retry_for_fw_reset++;
3720 
3721 			if (cmd->retry_for_fw_reset == 3) {
3722 				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3723 					"was tried multiple times during reset."
3724 					"Shutting down the HBA\n",
3725 					cmd, cmd->scmd, cmd->sync_cmd);
3726 				instance->instancet->disable_intr(instance);
3727 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3728 				megaraid_sas_kill_hba(instance);
3729 				return;
3730 			}
3731 		}
3732 
3733 		if (cmd->sync_cmd == 1) {
3734 			if (cmd->scmd) {
3735 				dev_notice(&instance->pdev->dev, "unexpected"
3736 					"cmd attached to internal command!\n");
3737 			}
3738 			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3739 						"on the internal reset queue,"
3740 						"issue it again.\n", cmd);
3741 			cmd->cmd_status_drv = DCMD_INIT;
3742 			instance->instancet->fire_cmd(instance,
3743 							cmd->frame_phys_addr,
3744 							0, instance->reg_set);
3745 		} else if (cmd->scmd) {
3746 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3747 			"detected on the internal queue, issue again.\n",
3748 			cmd, cmd->scmd->cmnd[0]);
3749 
3750 			atomic_inc(&instance->fw_outstanding);
3751 			instance->instancet->fire_cmd(instance,
3752 					cmd->frame_phys_addr,
3753 					cmd->frame_count-1, instance->reg_set);
3754 		} else {
3755 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3756 				"internal reset defer list while re-issue!!\n",
3757 				cmd);
3758 		}
3759 	}
3760 
3761 	if (instance->aen_cmd) {
3762 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3763 		megasas_return_cmd(instance, instance->aen_cmd);
3764 
3765 		instance->aen_cmd = NULL;
3766 	}
3767 
3768 	/*
3769 	 * Initiate AEN (Asynchronous Event Notification)
3770 	 */
3771 	seq_num = instance->last_seq_num;
3772 	class_locale.members.reserved = 0;
3773 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3774 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3775 
3776 	megasas_register_aen(instance, seq_num, class_locale.word);
3777 }
3778 
3779 /**
 * megasas_internal_reset_defer_cmds -	Move pending cmds to the defer queue
 * @instance:				Adapter soft state
 *
 * Commands that are outstanding at internal reset time are moved to a
 * deferred queue. This queue is flushed after the internal reset sequence
 * completes successfully. If the internal reset does not complete in time,
 * the kernel reset handler flushes these commands.
 */
3788 static void
3789 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3790 {
3791 	struct megasas_cmd *cmd;
3792 	int i;
3793 	u16 max_cmd = instance->max_fw_cmds;
3794 	u32 defer_index;
3795 	unsigned long flags;
3796 
3797 	defer_index = 0;
3798 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3799 	for (i = 0; i < max_cmd; i++) {
3800 		cmd = instance->cmd_list[i];
3801 		if (cmd->sync_cmd == 1 || cmd->scmd) {
3802 			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3803 					"on the defer queue as internal\n",
3804 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3805 
3806 			if (!list_empty(&cmd->list)) {
3807 				dev_notice(&instance->pdev->dev, "ERROR while"
3808 					" moving this cmd:%p, %d %p, it was"
3809 					"discovered on some list?\n",
3810 					cmd, cmd->sync_cmd, cmd->scmd);
3811 
3812 				list_del_init(&cmd->list);
3813 			}
3814 			defer_index++;
3815 			list_add_tail(&cmd->list,
3816 				&instance->internal_reset_pending_q);
3817 		}
3818 	}
3819 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3820 }
3821 
3822 
3823 static void
3824 process_fw_state_change_wq(struct work_struct *work)
3825 {
3826 	struct megasas_instance *instance =
3827 		container_of(work, struct megasas_instance, work_init);
3828 	u32 wait;
3829 	unsigned long flags;
3830 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return;
3835 	}
3836 
3837 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3838 		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3839 					"state, restarting it...\n");
3840 
3841 		instance->instancet->disable_intr(instance);
3842 		atomic_set(&instance->fw_outstanding, 0);
3843 
3844 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3845 		instance->instancet->adp_reset(instance, instance->reg_set);
3846 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3847 
3848 		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3849 					"initiating next stage...\n");
3850 
3851 		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3852 					"state 2 starting...\n");
3853 
		/* wait for about 30 seconds before starting the second init */
3855 		for (wait = 0; wait < 30; wait++) {
3856 			msleep(1000);
3857 		}
3858 
3859 		if (megasas_transition_to_ready(instance, 1)) {
3860 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3861 
3862 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3863 			megaraid_sas_kill_hba(instance);
3864 			return ;
3865 		}
3866 
3867 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3868 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3869 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3870 			) {
3871 			*instance->consumer = *instance->producer;
3872 		} else {
3873 			*instance->consumer = 0;
3874 			*instance->producer = 0;
3875 		}
3876 
3877 		megasas_issue_init_mfi(instance);
3878 
3879 		spin_lock_irqsave(&instance->hba_lock, flags);
3880 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3881 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3882 		instance->instancet->enable_intr(instance);
3883 
3884 		megasas_issue_pending_cmds_again(instance);
3885 		instance->issuepend_done = 1;
3886 	}
3887 }
3888 
3889 /**
3890  * megasas_deplete_reply_queue -	Processes all completed commands
3891  * @instance:				Adapter soft state
3892  * @alt_status:				Alternate status to be returned to
3893  *					SCSI mid-layer instead of the status
3894  *					returned by the FW
3895  * Note: this must be called with hba lock held
3896  */
3897 static int
3898 megasas_deplete_reply_queue(struct megasas_instance *instance,
3899 					u8 alt_status)
3900 {
3901 	u32 mfiStatus;
3902 	u32 fw_state;
3903 
3904 	if ((mfiStatus = instance->instancet->check_reset(instance,
3905 					instance->reg_set)) == 1) {
3906 		return IRQ_HANDLED;
3907 	}
3908 
3909 	mfiStatus = instance->instancet->clear_intr(instance);
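	/*
	 * A zero interrupt status on the legacy (possibly shared) INTx line
	 * means this interrupt was not raised by our adapter, so report
	 * IRQ_NONE to the kernel.
	 */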
3910 	if (mfiStatus == 0) {
3911 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3912 		if (!instance->msix_vectors)
3913 			return IRQ_NONE;
3914 	}
3915 
3916 	instance->mfiStatus = mfiStatus;
3917 
3918 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3919 		fw_state = instance->instancet->read_fw_status_reg(
3920 				instance) & MFI_STATE_MASK;
3921 
3922 		if (fw_state != MFI_STATE_FAULT) {
3923 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3924 						fw_state);
3925 		}
3926 
3927 		if ((fw_state == MFI_STATE_FAULT) &&
3928 				(instance->disableOnlineCtrlReset == 0)) {
3929 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3930 
3931 			if ((instance->pdev->device ==
3932 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3933 				(instance->pdev->device ==
3934 					PCI_DEVICE_ID_DELL_PERC5) ||
3935 				(instance->pdev->device ==
3936 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3937 
3938 				*instance->consumer =
3939 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3940 			}
3941 
3942 
3943 			instance->instancet->disable_intr(instance);
3944 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3945 			instance->issuepend_done = 0;
3946 
3947 			atomic_set(&instance->fw_outstanding, 0);
3948 			megasas_internal_reset_defer_cmds(instance);
3949 
3950 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3951 					fw_state, atomic_read(&instance->adprecovery));
3952 
3953 			schedule_work(&instance->work_init);
3954 			return IRQ_HANDLED;
3955 
3956 		} else {
3957 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3958 				fw_state, instance->disableOnlineCtrlReset);
3959 		}
3960 	}
3961 
3962 	tasklet_schedule(&instance->isr_tasklet);
3963 	return IRQ_HANDLED;
3964 }
3965 /**
3966  * megasas_isr - isr entry point
3967  */
3968 static irqreturn_t megasas_isr(int irq, void *devp)
3969 {
3970 	struct megasas_irq_context *irq_context = devp;
3971 	struct megasas_instance *instance = irq_context->instance;
3972 	unsigned long flags;
3973 	irqreturn_t rc;
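
	/*
	 * While an adapter reset is in flight (fw_reset_no_pci_access is set
	 * around adp_reset in the recovery work), avoid touching the
	 * registers entirely and just claim the interrupt.
	 */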
3974 
3975 	if (atomic_read(&instance->fw_reset_no_pci_access))
3976 		return IRQ_HANDLED;
3977 
3978 	spin_lock_irqsave(&instance->hba_lock, flags);
3979 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3980 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3981 
3982 	return rc;
3983 }
3984 
3985 /**
3986  * megasas_transition_to_ready -	Move the FW to READY state
3987  * @instance:				Adapter soft state
3988  *
3989  * During the initialization, FW passes can potentially be in any one of
3990  * several possible states. If the FW in operational, waiting-for-handshake
3991  * states, driver must take steps to bring it to ready state. Otherwise, it
3992  * has to wait for the ready state.
3993  */
3994 int
3995 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3996 {
3997 	int i;
3998 	u8 max_wait;
3999 	u32 fw_state;
4000 	u32 abs_state, curr_abs_state;
4001 
4002 	abs_state = instance->instancet->read_fw_status_reg(instance);
4003 	fw_state = abs_state & MFI_STATE_MASK;
4004 
4005 	if (fw_state != MFI_STATE_READY)
4006 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4007 		       " state\n");
4008 
4009 	while (fw_state != MFI_STATE_READY) {
4010 
4011 		switch (fw_state) {
4012 
4013 		case MFI_STATE_FAULT:
4014 			dev_printk(KERN_ERR, &instance->pdev->dev,
4015 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4016 				   abs_state & MFI_STATE_FAULT_CODE,
4017 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4018 			if (ocr) {
4019 				max_wait = MEGASAS_RESET_WAIT_TIME;
4020 				break;
4021 			} else {
4022 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4023 				megasas_dump_reg_set(instance->reg_set);
4024 				return -ENODEV;
4025 			}
4026 
4027 		case MFI_STATE_WAIT_HANDSHAKE:
4028 			/*
4029 			 * Set the CLR bit in inbound doorbell
4030 			 */
4031 			if ((instance->pdev->device ==
4032 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4033 				(instance->pdev->device ==
4034 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4035 				(instance->adapter_type != MFI_SERIES))
4036 				writel(
4037 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4038 				  &instance->reg_set->doorbell);
4039 			else
4040 				writel(
4041 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4042 					&instance->reg_set->inbound_doorbell);
4043 
4044 			max_wait = MEGASAS_RESET_WAIT_TIME;
4045 			break;
4046 
4047 		case MFI_STATE_BOOT_MESSAGE_PENDING:
4048 			if ((instance->pdev->device ==
4049 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4050 				(instance->pdev->device ==
4051 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4052 				(instance->adapter_type != MFI_SERIES))
4053 				writel(MFI_INIT_HOTPLUG,
4054 				       &instance->reg_set->doorbell);
4055 			else
4056 				writel(MFI_INIT_HOTPLUG,
4057 					&instance->reg_set->inbound_doorbell);
4058 
4059 			max_wait = MEGASAS_RESET_WAIT_TIME;
4060 			break;
4061 
4062 		case MFI_STATE_OPERATIONAL:
4063 			/*
4064 			 * Bring it to READY state; assuming max wait 10 secs
4065 			 */
4066 			instance->instancet->disable_intr(instance);
4067 			if ((instance->pdev->device ==
4068 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4069 				(instance->pdev->device ==
4070 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4071 				(instance->adapter_type != MFI_SERIES)) {
4072 				writel(MFI_RESET_FLAGS,
4073 					&instance->reg_set->doorbell);
4074 
4075 				if (instance->adapter_type != MFI_SERIES) {
4076 					for (i = 0; i < (10 * 1000); i += 20) {
						if (megasas_readl(instance,
								  &instance->reg_set->doorbell) & 1)
4082 							msleep(20);
4083 						else
4084 							break;
4085 					}
4086 				}
4087 			} else
4088 				writel(MFI_RESET_FLAGS,
4089 					&instance->reg_set->inbound_doorbell);
4090 
4091 			max_wait = MEGASAS_RESET_WAIT_TIME;
4092 			break;
4093 
4094 		case MFI_STATE_UNDEFINED:
4095 			/*
4096 			 * This state should not last for more than 2 seconds
4097 			 */
4098 			max_wait = MEGASAS_RESET_WAIT_TIME;
4099 			break;
4100 
4101 		case MFI_STATE_BB_INIT:
4102 			max_wait = MEGASAS_RESET_WAIT_TIME;
4103 			break;
4104 
4105 		case MFI_STATE_FW_INIT:
4106 			max_wait = MEGASAS_RESET_WAIT_TIME;
4107 			break;
4108 
4109 		case MFI_STATE_FW_INIT_2:
4110 			max_wait = MEGASAS_RESET_WAIT_TIME;
4111 			break;
4112 
4113 		case MFI_STATE_DEVICE_SCAN:
4114 			max_wait = MEGASAS_RESET_WAIT_TIME;
4115 			break;
4116 
4117 		case MFI_STATE_FLUSH_CACHE:
4118 			max_wait = MEGASAS_RESET_WAIT_TIME;
4119 			break;
4120 
4121 		default:
4122 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4123 			       fw_state);
4124 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4125 			megasas_dump_reg_set(instance->reg_set);
4126 			return -ENODEV;
4127 		}
4128 
4129 		/*
4130 		 * The cur_state should not last for more than max_wait secs
4131 		 */
4132 		for (i = 0; i < max_wait * 50; i++) {
4133 			curr_abs_state = instance->instancet->
4134 				read_fw_status_reg(instance);
4135 
4136 			if (abs_state == curr_abs_state) {
4137 				msleep(20);
4138 			} else
4139 				break;
4140 		}
4141 
4142 		/*
4143 		 * Return error if fw_state hasn't changed after max_wait
4144 		 */
4145 		if (curr_abs_state == abs_state) {
4146 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4147 			       "in %d secs\n", fw_state, max_wait);
4148 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4149 			megasas_dump_reg_set(instance->reg_set);
4150 			return -ENODEV;
4151 		}
4152 
4153 		abs_state = curr_abs_state;
4154 		fw_state = curr_abs_state & MFI_STATE_MASK;
4155 	}
4156 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4157 
4158 	return 0;
4159 }
4160 
4161 /**
4162  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4163  * @instance:				Adapter soft state
4164  */
4165 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4166 {
4167 	int i;
4168 	u16 max_cmd = instance->max_mfi_cmds;
4169 	struct megasas_cmd *cmd;
4170 
4171 	if (!instance->frame_dma_pool)
4172 		return;
4173 
4174 	/*
4175 	 * Return all frames to pool
4176 	 */
4177 	for (i = 0; i < max_cmd; i++) {
4178 
4179 		cmd = instance->cmd_list[i];
4180 
4181 		if (cmd->frame)
4182 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4183 				      cmd->frame_phys_addr);
4184 
4185 		if (cmd->sense)
4186 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4187 				      cmd->sense_phys_addr);
4188 	}
4189 
4190 	/*
4191 	 * Now destroy the pool itself
4192 	 */
4193 	dma_pool_destroy(instance->frame_dma_pool);
4194 	dma_pool_destroy(instance->sense_dma_pool);
4195 
4196 	instance->frame_dma_pool = NULL;
4197 	instance->sense_dma_pool = NULL;
4198 }
4199 
4200 /**
4201  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4202  * @instance:			Adapter soft state
4203  *
4204  * Each command packet has an embedded DMA memory buffer that is used for
4205  * filling MFI frame and the SG list that immediately follows the frame. This
4206  * function creates those DMA memory buffers for each command packet by using
4207  * PCI pool facility.
4208  */
4209 static int megasas_create_frame_pool(struct megasas_instance *instance)
4210 {
4211 	int i;
4212 	u16 max_cmd;
4213 	u32 frame_count;
4214 	struct megasas_cmd *cmd;
4215 
4216 	max_cmd = instance->max_mfi_cmds;
4217 
4218 	/*
4219 	 * For MFI controllers.
4220 	 * max_num_sge = 60
4221 	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
4222 	 * Total 960 byte (15 MFI frame of 64 byte)
4223 	 *
4224 	 * Fusion adapter require only 3 extra frame.
4225 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4226 	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
4227 	 * Total 192 byte (3 MFI frame of 64 byte)
4228 	 */
4229 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4230 			(15 + 1) : (3 + 1);
4231 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4232 	/*
4233 	 * Use DMA pool facility provided by PCI layer
4234 	 */
4235 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4236 					&instance->pdev->dev,
4237 					instance->mfi_frame_size, 256, 0);
4238 
4239 	if (!instance->frame_dma_pool) {
4240 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4241 		return -ENOMEM;
4242 	}
4243 
4244 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4245 						   &instance->pdev->dev, 128,
4246 						   4, 0);
4247 
4248 	if (!instance->sense_dma_pool) {
4249 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4250 
4251 		dma_pool_destroy(instance->frame_dma_pool);
4252 		instance->frame_dma_pool = NULL;
4253 
4254 		return -ENOMEM;
4255 	}
4256 
4257 	/*
4258 	 * Allocate and attach a frame to each of the commands in cmd_list.
4259 	 * By making cmd->index as the context instead of the &cmd, we can
4260 	 * always use 32bit context regardless of the architecture
4261 	 */
4262 	for (i = 0; i < max_cmd; i++) {
4263 
4264 		cmd = instance->cmd_list[i];
4265 
4266 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4267 					    GFP_KERNEL, &cmd->frame_phys_addr);
4268 
4269 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4270 					    GFP_KERNEL, &cmd->sense_phys_addr);
4271 
4272 		/*
4273 		 * megasas_teardown_frame_pool() takes care of freeing
4274 		 * whatever has been allocated
4275 		 */
4276 		if (!cmd->frame || !cmd->sense) {
4277 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4278 			megasas_teardown_frame_pool(instance);
4279 			return -ENOMEM;
4280 		}
4281 
4282 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4283 		cmd->frame->io.pad_0 = 0;
4284 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4285 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4286 	}
4287 
4288 	return 0;
4289 }
4290 
4291 /**
4292  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4293  * @instance:		Adapter soft state
4294  */
4295 void megasas_free_cmds(struct megasas_instance *instance)
4296 {
4297 	int i;
4298 
4299 	/* First free the MFI frame pool */
4300 	megasas_teardown_frame_pool(instance);
4301 
4302 	/* Free all the commands in the cmd_list */
4303 	for (i = 0; i < instance->max_mfi_cmds; i++)
4304 
4305 		kfree(instance->cmd_list[i]);
4306 
4307 	/* Free the cmd_list buffer itself */
4308 	kfree(instance->cmd_list);
4309 	instance->cmd_list = NULL;
4310 
4311 	INIT_LIST_HEAD(&instance->cmd_pool);
4312 }
4313 
4314 /**
4315  * megasas_alloc_cmds -	Allocates the command packets
4316  * @instance:		Adapter soft state
4317  *
4318  * Each command that is issued to the FW, whether IO commands from the OS or
4319  * internal commands like IOCTLs, are wrapped in local data structure called
4320  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4321  * the FW.
4322  *
4323  * Each frame has a 32-bit field called context (tag). This context is used
4324  * to get back the megasas_cmd from the frame when a frame gets completed in
4325  * the ISR. Typically the address of the megasas_cmd itself would be used as
4326  * the context. But we wanted to keep the differences between 32 and 64 bit
4327  * systems to the mininum. We always use 32 bit integers for the context. In
4328  * this driver, the 32 bit values are the indices into an array cmd_list.
4329  * This array is used only to look up the megasas_cmd given the context. The
4330  * free commands themselves are maintained in a linked list called cmd_pool.
4331  */
4332 int megasas_alloc_cmds(struct megasas_instance *instance)
4333 {
4334 	int i;
4335 	int j;
4336 	u16 max_cmd;
4337 	struct megasas_cmd *cmd;
4338 
4339 	max_cmd = instance->max_mfi_cmds;
4340 
4341 	/*
4342 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4343 	 * Allocate the dynamic array first and then allocate individual
4344 	 * commands.
4345 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL);
4347 
4348 	if (!instance->cmd_list) {
4349 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4350 		return -ENOMEM;
4351 	}
4352 
	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
4354 
4355 	for (i = 0; i < max_cmd; i++) {
4356 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4357 						GFP_KERNEL);
4358 
4359 		if (!instance->cmd_list[i]) {
4360 
4361 			for (j = 0; j < i; j++)
4362 				kfree(instance->cmd_list[j]);
4363 
4364 			kfree(instance->cmd_list);
4365 			instance->cmd_list = NULL;
4366 
4367 			return -ENOMEM;
4368 		}
4369 	}
4370 
4371 	for (i = 0; i < max_cmd; i++) {
4372 		cmd = instance->cmd_list[i];
4373 		memset(cmd, 0, sizeof(struct megasas_cmd));
4374 		cmd->index = i;
4375 		cmd->scmd = NULL;
4376 		cmd->instance = instance;
4377 
4378 		list_add_tail(&cmd->list, &instance->cmd_pool);
4379 	}
4380 
4381 	/*
4382 	 * Create a frame pool and assign one frame to each cmd
4383 	 */
4384 	if (megasas_create_frame_pool(instance)) {
4385 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4386 		megasas_free_cmds(instance);
4387 		return -ENOMEM;
4388 	}
4389 
4390 	return 0;
4391 }
4392 
4393 /*
4394  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4395  * @instance:				Adapter soft state
4396  *
4397  * Return 0 for only Fusion adapter, if driver load/unload is not in progress
4398  * or FW is not under OCR.
4399  */
4400 inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4402 
4403 	if (instance->adapter_type == MFI_SERIES)
4404 		return KILL_ADAPTER;
4405 	else if (instance->unload ||
4406 			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4407 				 &instance->reset_flags))
4408 		return IGNORE_TIMEOUT;
4409 	else
4410 		return INITIATE_OCR;
4411 }
4412 
4413 static void
4414 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4415 {
4416 	int ret;
4417 	struct megasas_cmd *cmd;
4418 	struct megasas_dcmd_frame *dcmd;
4419 
4420 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4421 	u16 device_id = 0;
4422 
4423 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4424 	cmd = megasas_get_cmd(instance);
4425 
4426 	if (!cmd) {
4427 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4428 		return;
4429 	}
4430 
4431 	dcmd = &cmd->frame->dcmd;
4432 
4433 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4434 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4435 
4436 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4437 	dcmd->cmd = MFI_CMD_DCMD;
4438 	dcmd->cmd_status = 0xFF;
4439 	dcmd->sge_count = 1;
4440 	dcmd->flags = MFI_FRAME_DIR_READ;
4441 	dcmd->timeout = 0;
4442 	dcmd->pad_0 = 0;
4443 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4444 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4445 
4446 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4447 				 sizeof(struct MR_PD_INFO));
4448 
4449 	if ((instance->adapter_type != MFI_SERIES) &&
4450 	    !instance->mask_interrupts)
4451 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4452 	else
4453 		ret = megasas_issue_polled(instance, cmd);
4454 
4455 	switch (ret) {
4456 	case DCMD_SUCCESS:
4457 		mr_device_priv_data = sdev->hostdata;
4458 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4459 		mr_device_priv_data->interface_type =
4460 				instance->pd_info->state.ddf.pdType.intf;
4461 		break;
4462 
4463 	case DCMD_TIMEOUT:
4464 
4465 		switch (dcmd_timeout_ocr_possible(instance)) {
4466 		case INITIATE_OCR:
4467 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4468 			mutex_unlock(&instance->reset_mutex);
4469 			megasas_reset_fusion(instance->host,
4470 				MFI_IO_TIMEOUT_OCR);
4471 			mutex_lock(&instance->reset_mutex);
4472 			break;
4473 		case KILL_ADAPTER:
4474 			megaraid_sas_kill_hba(instance);
4475 			break;
4476 		case IGNORE_TIMEOUT:
4477 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4478 				__func__, __LINE__);
4479 			break;
4480 		}
4481 
4482 		break;
4483 	}
4484 
4485 	if (ret != DCMD_TIMEOUT)
4486 		megasas_return_cmd(instance, cmd);
4487 
4488 	return;
4489 }
4490 /*
4491  * megasas_get_pd_list_info -	Returns FW's pd_list structure
4492  * @instance:				Adapter soft state
4493  * @pd_list:				pd_list structure
4494  *
4495  * Issues an internal command (DCMD) to get the FW's controller PD
4496  * list structure.  This information is mainly used to find out SYSTEM
4497  * supported by the FW.
4498  */
4499 static int
4500 megasas_get_pd_list(struct megasas_instance *instance)
4501 {
4502 	int ret = 0, pd_index = 0;
4503 	struct megasas_cmd *cmd;
4504 	struct megasas_dcmd_frame *dcmd;
4505 	struct MR_PD_LIST *ci;
4506 	struct MR_PD_ADDRESS *pd_addr;
4507 
4508 	if (instance->pd_list_not_supported) {
4509 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4510 		"not supported by firmware\n");
4511 		return ret;
4512 	}
4513 
4514 	ci = instance->pd_list_buf;
4515 
4516 	cmd = megasas_get_cmd(instance);
4517 
4518 	if (!cmd) {
4519 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4520 		return -ENOMEM;
4521 	}
4522 
4523 	dcmd = &cmd->frame->dcmd;
4524 
4525 	memset(ci, 0, sizeof(*ci));
4526 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4527 
4528 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4529 	dcmd->mbox.b[1] = 0;
4530 	dcmd->cmd = MFI_CMD_DCMD;
4531 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4532 	dcmd->sge_count = 1;
4533 	dcmd->flags = MFI_FRAME_DIR_READ;
4534 	dcmd->timeout = 0;
4535 	dcmd->pad_0 = 0;
4536 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4537 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4538 
4539 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4540 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4541 
4542 	if ((instance->adapter_type != MFI_SERIES) &&
4543 	    !instance->mask_interrupts)
4544 		ret = megasas_issue_blocked_cmd(instance, cmd,
4545 			MFI_IO_TIMEOUT_SECS);
4546 	else
4547 		ret = megasas_issue_polled(instance, cmd);
4548 
4549 	switch (ret) {
4550 	case DCMD_FAILED:
4551 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4552 			"failed/not supported by firmware\n");
4553 
4554 		if (instance->adapter_type != MFI_SERIES)
4555 			megaraid_sas_kill_hba(instance);
4556 		else
4557 			instance->pd_list_not_supported = 1;
4558 		break;
4559 	case DCMD_TIMEOUT:
4560 
4561 		switch (dcmd_timeout_ocr_possible(instance)) {
4562 		case INITIATE_OCR:
4563 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4564 			/*
4565 			 * DCMD failed from AEN path.
4566 			 * AEN path already hold reset_mutex to avoid PCI access
4567 			 * while OCR is in progress.
4568 			 */
4569 			mutex_unlock(&instance->reset_mutex);
4570 			megasas_reset_fusion(instance->host,
4571 						MFI_IO_TIMEOUT_OCR);
4572 			mutex_lock(&instance->reset_mutex);
4573 			break;
4574 		case KILL_ADAPTER:
4575 			megaraid_sas_kill_hba(instance);
4576 			break;
4577 		case IGNORE_TIMEOUT:
4578 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4579 				__func__, __LINE__);
4580 			break;
4581 		}
4582 
4583 		break;
4584 
4585 	case DCMD_SUCCESS:
4586 		pd_addr = ci->addr;
4587 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4588 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4589 				 __func__, le32_to_cpu(ci->count));
4590 
4591 		if ((le32_to_cpu(ci->count) >
4592 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4593 			break;
4594 
4595 		memset(instance->local_pd_list, 0,
4596 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4597 
4598 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4599 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4600 					le16_to_cpu(pd_addr->deviceId);
4601 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4602 					pd_addr->scsiDevType;
4603 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4604 					MR_PD_STATE_SYSTEM;
4605 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4606 				dev_info(&instance->pdev->dev,
4607 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4608 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4609 					 pd_addr->scsiDevType);
4610 			pd_addr++;
4611 		}
4612 
4613 		memcpy(instance->pd_list, instance->local_pd_list,
4614 			sizeof(instance->pd_list));
4615 		break;
4616 
4617 	}
4618 
4619 	if (ret != DCMD_TIMEOUT)
4620 		megasas_return_cmd(instance, cmd);
4621 
4622 	return ret;
4623 }
4624 
4625 /*
4626  * megasas_get_ld_list_info -	Returns FW's ld_list structure
4627  * @instance:				Adapter soft state
4628  * @ld_list:				ld_list structure
4629  *
4630  * Issues an internal command (DCMD) to get the FW's controller PD
4631  * list structure.  This information is mainly used to find out SYSTEM
4632  * supported by the FW.
4633  */
4634 static int
4635 megasas_get_ld_list(struct megasas_instance *instance)
4636 {
4637 	int ret = 0, ld_index = 0, ids = 0;
4638 	struct megasas_cmd *cmd;
4639 	struct megasas_dcmd_frame *dcmd;
4640 	struct MR_LD_LIST *ci;
4641 	dma_addr_t ci_h = 0;
4642 	u32 ld_count;
4643 
4644 	ci = instance->ld_list_buf;
4645 	ci_h = instance->ld_list_buf_h;
4646 
4647 	cmd = megasas_get_cmd(instance);
4648 
4649 	if (!cmd) {
4650 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4651 		return -ENOMEM;
4652 	}
4653 
4654 	dcmd = &cmd->frame->dcmd;
4655 
4656 	memset(ci, 0, sizeof(*ci));
4657 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4658 
4659 	if (instance->supportmax256vd)
4660 		dcmd->mbox.b[0] = 1;
4661 	dcmd->cmd = MFI_CMD_DCMD;
4662 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4663 	dcmd->sge_count = 1;
4664 	dcmd->flags = MFI_FRAME_DIR_READ;
4665 	dcmd->timeout = 0;
4666 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4667 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4668 	dcmd->pad_0  = 0;
4669 
4670 	megasas_set_dma_settings(instance, dcmd, ci_h,
4671 				 sizeof(struct MR_LD_LIST));
4672 
4673 	if ((instance->adapter_type != MFI_SERIES) &&
4674 	    !instance->mask_interrupts)
4675 		ret = megasas_issue_blocked_cmd(instance, cmd,
4676 			MFI_IO_TIMEOUT_SECS);
4677 	else
4678 		ret = megasas_issue_polled(instance, cmd);
4679 
4680 	ld_count = le32_to_cpu(ci->ldCount);
4681 
4682 	switch (ret) {
4683 	case DCMD_FAILED:
4684 		megaraid_sas_kill_hba(instance);
4685 		break;
4686 	case DCMD_TIMEOUT:
4687 
4688 		switch (dcmd_timeout_ocr_possible(instance)) {
4689 		case INITIATE_OCR:
4690 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4691 			/*
4692 			 * DCMD failed from AEN path.
4693 			 * AEN path already hold reset_mutex to avoid PCI access
4694 			 * while OCR is in progress.
4695 			 */
4696 			mutex_unlock(&instance->reset_mutex);
4697 			megasas_reset_fusion(instance->host,
4698 						MFI_IO_TIMEOUT_OCR);
4699 			mutex_lock(&instance->reset_mutex);
4700 			break;
4701 		case KILL_ADAPTER:
4702 			megaraid_sas_kill_hba(instance);
4703 			break;
4704 		case IGNORE_TIMEOUT:
4705 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4706 				__func__, __LINE__);
4707 			break;
4708 		}
4709 
4710 		break;
4711 
4712 	case DCMD_SUCCESS:
4713 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4714 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4715 				 __func__, ld_count);
4716 
4717 		if (ld_count > instance->fw_supported_vd_count)
4718 			break;
4719 
4720 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4721 
4722 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4723 			if (ci->ldList[ld_index].state != 0) {
4724 				ids = ci->ldList[ld_index].ref.targetId;
4725 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4726 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4727 					dev_info(&instance->pdev->dev,
4728 						 "LD%d: targetID: 0x%03x\n",
4729 						 ld_index, ids);
4730 			}
4731 		}
4732 
4733 		break;
4734 	}
4735 
4736 	if (ret != DCMD_TIMEOUT)
4737 		megasas_return_cmd(instance, cmd);
4738 
4739 	return ret;
4740 }
4741 
4742 /**
4743  * megasas_ld_list_query -	Returns FW's ld_list structure
4744  * @instance:				Adapter soft state
4745  * @ld_list:				ld_list structure
4746  *
4747  * Issues an internal command (DCMD) to get the FW's controller PD
4748  * list structure.  This information is mainly used to find out SYSTEM
4749  * supported by the FW.
4750  */
4751 static int
4752 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4753 {
4754 	int ret = 0, ld_index = 0, ids = 0;
4755 	struct megasas_cmd *cmd;
4756 	struct megasas_dcmd_frame *dcmd;
4757 	struct MR_LD_TARGETID_LIST *ci;
4758 	dma_addr_t ci_h = 0;
4759 	u32 tgtid_count;
4760 
4761 	ci = instance->ld_targetid_list_buf;
4762 	ci_h = instance->ld_targetid_list_buf_h;
4763 
4764 	cmd = megasas_get_cmd(instance);
4765 
4766 	if (!cmd) {
4767 		dev_warn(&instance->pdev->dev,
4768 		         "megasas_ld_list_query: Failed to get cmd\n");
4769 		return -ENOMEM;
4770 	}
4771 
4772 	dcmd = &cmd->frame->dcmd;
4773 
4774 	memset(ci, 0, sizeof(*ci));
4775 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4776 
4777 	dcmd->mbox.b[0] = query_type;
4778 	if (instance->supportmax256vd)
4779 		dcmd->mbox.b[2] = 1;
4780 
4781 	dcmd->cmd = MFI_CMD_DCMD;
4782 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4783 	dcmd->sge_count = 1;
4784 	dcmd->flags = MFI_FRAME_DIR_READ;
4785 	dcmd->timeout = 0;
4786 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4787 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4788 	dcmd->pad_0  = 0;
4789 
4790 	megasas_set_dma_settings(instance, dcmd, ci_h,
4791 				 sizeof(struct MR_LD_TARGETID_LIST));
4792 
4793 	if ((instance->adapter_type != MFI_SERIES) &&
4794 	    !instance->mask_interrupts)
4795 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4796 	else
4797 		ret = megasas_issue_polled(instance, cmd);
4798 
4799 	switch (ret) {
4800 	case DCMD_FAILED:
4801 		dev_info(&instance->pdev->dev,
4802 			"DCMD not supported by firmware - %s %d\n",
4803 				__func__, __LINE__);
4804 		ret = megasas_get_ld_list(instance);
4805 		break;
4806 	case DCMD_TIMEOUT:
4807 		switch (dcmd_timeout_ocr_possible(instance)) {
4808 		case INITIATE_OCR:
4809 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4810 			/*
4811 			 * DCMD failed from AEN path.
4812 			 * AEN path already hold reset_mutex to avoid PCI access
4813 			 * while OCR is in progress.
4814 			 */
4815 			mutex_unlock(&instance->reset_mutex);
4816 			megasas_reset_fusion(instance->host,
4817 						MFI_IO_TIMEOUT_OCR);
4818 			mutex_lock(&instance->reset_mutex);
4819 			break;
4820 		case KILL_ADAPTER:
4821 			megaraid_sas_kill_hba(instance);
4822 			break;
4823 		case IGNORE_TIMEOUT:
4824 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4825 				__func__, __LINE__);
4826 			break;
4827 		}
4828 
4829 		break;
4830 	case DCMD_SUCCESS:
4831 		tgtid_count = le32_to_cpu(ci->count);
4832 
4833 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4834 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4835 				 __func__, tgtid_count);
4836 
4837 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4838 			break;
4839 
4840 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4841 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4842 			ids = ci->targetId[ld_index];
4843 			instance->ld_ids[ids] = ci->targetId[ld_index];
4844 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4845 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4846 					 ld_index, ci->targetId[ld_index]);
4847 		}
4848 
4849 		break;
4850 	}
4851 
4852 	if (ret != DCMD_TIMEOUT)
4853 		megasas_return_cmd(instance, cmd);
4854 
4855 	return ret;
4856 }
4857 
4858 /**
4859  * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4860  * dcmd.mbox              - reserved
4861  * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4862  * Desc:    This DCMD will return the combined device list
4863  * Status:  MFI_STAT_OK - List returned successfully
4864  *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4865  *                                 disabled
4866  * @instance:			Adapter soft state
4867  * @is_probe:			Driver probe check
4868  * Return:			0 if DCMD succeeded
4869  *				 non-zero if failed
4870  */
4871 static int
4872 megasas_host_device_list_query(struct megasas_instance *instance,
4873 			       bool is_probe)
4874 {
4875 	int ret, i, target_id;
4876 	struct megasas_cmd *cmd;
4877 	struct megasas_dcmd_frame *dcmd;
4878 	struct MR_HOST_DEVICE_LIST *ci;
4879 	u32 count;
4880 	dma_addr_t ci_h;
4881 
4882 	ci = instance->host_device_list_buf;
4883 	ci_h = instance->host_device_list_buf_h;
4884 
4885 	cmd = megasas_get_cmd(instance);
4886 
4887 	if (!cmd) {
4888 		dev_warn(&instance->pdev->dev,
4889 			 "%s: failed to get cmd\n",
4890 			 __func__);
4891 		return -ENOMEM;
4892 	}
4893 
4894 	dcmd = &cmd->frame->dcmd;
4895 
4896 	memset(ci, 0, sizeof(*ci));
4897 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4898 
4899 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
4900 	dcmd->cmd = MFI_CMD_DCMD;
4901 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4902 	dcmd->sge_count = 1;
4903 	dcmd->flags = MFI_FRAME_DIR_READ;
4904 	dcmd->timeout = 0;
4905 	dcmd->pad_0 = 0;
4906 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4907 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4908 
4909 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4910 
4911 	if (!instance->mask_interrupts) {
4912 		ret = megasas_issue_blocked_cmd(instance, cmd,
4913 						MFI_IO_TIMEOUT_SECS);
4914 	} else {
4915 		ret = megasas_issue_polled(instance, cmd);
4916 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4917 	}
4918 
4919 	switch (ret) {
4920 	case DCMD_SUCCESS:
4921 		/* Fill the internal pd_list and ld_ids array based on
4922 		 * targetIds returned by FW
4923 		 */
4924 		count = le32_to_cpu(ci->count);
4925 
4926 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4927 			break;
4928 
4929 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4930 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4931 				 __func__, count);
4932 
4933 		memset(instance->local_pd_list, 0,
4934 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4935 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4936 		for (i = 0; i < count; i++) {
4937 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4938 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4939 				instance->local_pd_list[target_id].tid = target_id;
4940 				instance->local_pd_list[target_id].driveType =
4941 						ci->host_device_list[i].scsi_type;
4942 				instance->local_pd_list[target_id].driveState =
4943 						MR_PD_STATE_SYSTEM;
4944 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4945 					dev_info(&instance->pdev->dev,
4946 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4947 						 i, target_id, ci->host_device_list[i].scsi_type);
4948 			} else {
4949 				instance->ld_ids[target_id] = target_id;
4950 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4951 					dev_info(&instance->pdev->dev,
4952 						 "Device %d: LD targetID: 0x%03x\n",
4953 						 i, target_id);
4954 			}
4955 		}
4956 
4957 		memcpy(instance->pd_list, instance->local_pd_list,
4958 		       sizeof(instance->pd_list));
4959 		break;
4960 
4961 	case DCMD_TIMEOUT:
4962 		switch (dcmd_timeout_ocr_possible(instance)) {
4963 		case INITIATE_OCR:
4964 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4965 			mutex_unlock(&instance->reset_mutex);
4966 			megasas_reset_fusion(instance->host,
4967 				MFI_IO_TIMEOUT_OCR);
4968 			mutex_lock(&instance->reset_mutex);
4969 			break;
4970 		case KILL_ADAPTER:
4971 			megaraid_sas_kill_hba(instance);
4972 			break;
4973 		case IGNORE_TIMEOUT:
4974 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4975 				 __func__, __LINE__);
4976 			break;
4977 		}
4978 		break;
4979 	case DCMD_FAILED:
4980 		dev_err(&instance->pdev->dev,
4981 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4982 			__func__);
4983 		break;
4984 	}
4985 
4986 	if (ret != DCMD_TIMEOUT)
4987 		megasas_return_cmd(instance, cmd);
4988 
4989 	return ret;
4990 }
4991 
4992 /*
4993  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4994  * instance			 : Controller's instance
4995 */
4996 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4997 {
4998 	struct fusion_context *fusion;
4999 	u32 ventura_map_sz = 0;
5000 
5001 	fusion = instance->ctrl_context;
	/* Nothing to do for MFI based controllers */
5003 	if (!fusion)
5004 		return;
5005 
5006 	instance->supportmax256vd =
5007 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
	/* Below is an additional check to address future FW enhancements */
5009 	if (instance->ctrl_info_buf->max_lds > 64)
5010 		instance->supportmax256vd = 1;
5011 
5012 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5013 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5014 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5015 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5016 	if (instance->supportmax256vd) {
5017 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5018 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5019 	} else {
5020 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5021 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5022 	}
5023 
5024 	dev_info(&instance->pdev->dev,
5025 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5026 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5027 		instance->ctrl_info_buf->max_lds);
5028 
5029 	if (instance->max_raid_mapsize) {
5030 		ventura_map_sz = instance->max_raid_mapsize *
5031 						MR_MIN_MAP_SIZE; /* 64k */
5032 		fusion->current_map_sz = ventura_map_sz;
5033 		fusion->max_map_sz = ventura_map_sz;
5034 	} else {
5035 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
5036 					(sizeof(struct MR_LD_SPAN_MAP) *
5037 					(instance->fw_supported_vd_count - 1));
5038 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5039 
5040 		fusion->max_map_sz =
5041 			max(fusion->old_map_sz, fusion->new_map_sz);
5042 
5043 		if (instance->supportmax256vd)
5044 			fusion->current_map_sz = fusion->new_map_sz;
5045 		else
5046 			fusion->current_map_sz = fusion->old_map_sz;
5047 	}
5048 	/* irrespective of FW raid maps, driver raid map is constant */
5049 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5050 }
5051 
5052 /*
5053  * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5054  * dcmd.hdr.length            - number of bytes to read
5055  * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5056  * Desc:			 Fill in snapdump properties
5057  * Status:			 MFI_STAT_OK- Command successful
5058  */
5059 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5060 {
5061 	int ret = 0;
5062 	struct megasas_cmd *cmd;
5063 	struct megasas_dcmd_frame *dcmd;
5064 	struct MR_SNAPDUMP_PROPERTIES *ci;
5065 	dma_addr_t ci_h = 0;
5066 
5067 	ci = instance->snapdump_prop;
5068 	ci_h = instance->snapdump_prop_h;
5069 
5070 	if (!ci)
5071 		return;
5072 
5073 	cmd = megasas_get_cmd(instance);
5074 
5075 	if (!cmd) {
5076 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5077 		return;
5078 	}
5079 
5080 	dcmd = &cmd->frame->dcmd;
5081 
5082 	memset(ci, 0, sizeof(*ci));
5083 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5084 
5085 	dcmd->cmd = MFI_CMD_DCMD;
5086 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5087 	dcmd->sge_count = 1;
5088 	dcmd->flags = MFI_FRAME_DIR_READ;
5089 	dcmd->timeout = 0;
5090 	dcmd->pad_0 = 0;
5091 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5092 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5093 
5094 	megasas_set_dma_settings(instance, dcmd, ci_h,
5095 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5096 
5097 	if (!instance->mask_interrupts) {
5098 		ret = megasas_issue_blocked_cmd(instance, cmd,
5099 						MFI_IO_TIMEOUT_SECS);
5100 	} else {
5101 		ret = megasas_issue_polled(instance, cmd);
5102 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5103 	}
5104 
5105 	switch (ret) {
5106 	case DCMD_SUCCESS:
5107 		instance->snapdump_wait_time =
5108 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5109 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5110 		break;
5111 
5112 	case DCMD_TIMEOUT:
5113 		switch (dcmd_timeout_ocr_possible(instance)) {
5114 		case INITIATE_OCR:
5115 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5116 			mutex_unlock(&instance->reset_mutex);
5117 			megasas_reset_fusion(instance->host,
5118 				MFI_IO_TIMEOUT_OCR);
5119 			mutex_lock(&instance->reset_mutex);
5120 			break;
5121 		case KILL_ADAPTER:
5122 			megaraid_sas_kill_hba(instance);
5123 			break;
5124 		case IGNORE_TIMEOUT:
5125 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5126 				__func__, __LINE__);
5127 			break;
5128 		}
5129 	}
5130 
5131 	if (ret != DCMD_TIMEOUT)
5132 		megasas_return_cmd(instance, cmd);
5133 }
5134 
5135 /**
5136  * megasas_get_controller_info -	Returns FW's controller structure
5137  * @instance:				Adapter soft state
5138  *
5139  * Issues an internal command (DCMD) to get the FW's controller structure.
5140  * This information is mainly used to find out the maximum IO transfer per
5141  * command supported by the FW.
5142  */
5143 int
5144 megasas_get_ctrl_info(struct megasas_instance *instance)
5145 {
5146 	int ret = 0;
5147 	struct megasas_cmd *cmd;
5148 	struct megasas_dcmd_frame *dcmd;
5149 	struct megasas_ctrl_info *ci;
5150 	dma_addr_t ci_h = 0;
5151 
5152 	ci = instance->ctrl_info_buf;
5153 	ci_h = instance->ctrl_info_buf_h;
5154 
5155 	cmd = megasas_get_cmd(instance);
5156 
5157 	if (!cmd) {
5158 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5159 		return -ENOMEM;
5160 	}
5161 
5162 	dcmd = &cmd->frame->dcmd;
5163 
5164 	memset(ci, 0, sizeof(*ci));
5165 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5166 
5167 	dcmd->cmd = MFI_CMD_DCMD;
5168 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5169 	dcmd->sge_count = 1;
5170 	dcmd->flags = MFI_FRAME_DIR_READ;
5171 	dcmd->timeout = 0;
5172 	dcmd->pad_0 = 0;
5173 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5174 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5175 	dcmd->mbox.b[0] = 1;
5176 
5177 	megasas_set_dma_settings(instance, dcmd, ci_h,
5178 				 sizeof(struct megasas_ctrl_info));
5179 
5180 	if ((instance->adapter_type != MFI_SERIES) &&
5181 	    !instance->mask_interrupts) {
5182 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5183 	} else {
5184 		ret = megasas_issue_polled(instance, cmd);
5185 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5186 	}
5187 
5188 	switch (ret) {
5189 	case DCMD_SUCCESS:
5190 		/* Save required controller information in
5191 		 * CPU endianness format.
5192 		 */
5193 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5194 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5195 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5196 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5197 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5198 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5199 
5200 		/* Update the latest Ext VD info.
5201 		 * From Init path, store current firmware details.
5202 		 * From OCR path, detect any firmware property changes
5203 		 * in case of firmware upgrade without system reboot.
5204 		 */
5205 		megasas_update_ext_vd_details(instance);
5206 		instance->support_seqnum_jbod_fp =
5207 			ci->adapterOperations3.useSeqNumJbodFP;
5208 		instance->support_morethan256jbod =
5209 			ci->adapter_operations4.support_pd_map_target_id;
5210 		instance->support_nvme_passthru =
5211 			ci->adapter_operations4.support_nvme_passthru;
5212 		instance->support_pci_lane_margining =
5213 			ci->adapter_operations5.support_pci_lane_margining;
5214 		instance->task_abort_tmo = ci->TaskAbortTO;
5215 		instance->max_reset_tmo = ci->MaxResetTO;
5216 
5217 		/* Check whether controller is iMR or MR */
5218 		instance->is_imr = (ci->memory_size ? 0 : 1);
5219 
5220 		instance->snapdump_wait_time =
5221 			(ci->properties.on_off_properties2.enable_snap_dump ?
5222 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5223 
5224 		instance->enable_fw_dev_list =
5225 			ci->properties.on_off_properties2.enable_fw_dev_list;
5226 
5227 		dev_info(&instance->pdev->dev,
5228 			"controller type\t: %s(%dMB)\n",
5229 			instance->is_imr ? "iMR" : "MR",
5230 			le16_to_cpu(ci->memory_size));
5231 
5232 		instance->disableOnlineCtrlReset =
5233 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5234 		instance->secure_jbod_support =
5235 			ci->adapterOperations3.supportSecurityonJBOD;
5236 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5237 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5238 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5239 			instance->secure_jbod_support ? "Yes" : "No");
5240 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5241 			 instance->support_nvme_passthru ? "Yes" : "No");
5242 		dev_info(&instance->pdev->dev,
5243 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5244 			 instance->task_abort_tmo, instance->max_reset_tmo);
5245 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5246 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5247 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5248 			 instance->support_pci_lane_margining ? "Yes" : "No");
5249 
5250 		break;
5251 
5252 	case DCMD_TIMEOUT:
5253 		switch (dcmd_timeout_ocr_possible(instance)) {
5254 		case INITIATE_OCR:
5255 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5256 			mutex_unlock(&instance->reset_mutex);
5257 			megasas_reset_fusion(instance->host,
5258 				MFI_IO_TIMEOUT_OCR);
5259 			mutex_lock(&instance->reset_mutex);
5260 			break;
5261 		case KILL_ADAPTER:
5262 			megaraid_sas_kill_hba(instance);
5263 			break;
5264 		case IGNORE_TIMEOUT:
5265 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5266 				__func__, __LINE__);
5267 			break;
5268 		}
5269 		break;
5270 	case DCMD_FAILED:
5271 		megaraid_sas_kill_hba(instance);
5272 		break;
5273 
5274 	}
5275 
5276 	if (ret != DCMD_TIMEOUT)
5277 		megasas_return_cmd(instance, cmd);
5278 
5279 	return ret;
5280 }
5281 
5282 /*
5283  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5284  *					to firmware
5285  * @instance:				Adapter soft state
5286  * @crash_buf_state:			tell FW to turn ON/OFF crash dump feature
5287  *					MR_CRASH_BUF_TURN_OFF = 0
5288  *					MR_CRASH_BUF_TURN_ON = 1
5289  *
5290  * Issues an internal command (DCMD) to set parameters for the crash dump
5291  * feature. The driver sends the address of the crash dump DMA buffer and sets
5292  * the mbox to tell FW that the driver supports the crash dump feature. This
5293  * DCMD is sent only if the crash dump feature is supported by the FW.
5294  *
5295  * Return: 0 on success, non-zero on failure.
5296  */
5297 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5298 	u8 crash_buf_state)
5299 {
5300 	int ret = 0;
5301 	struct megasas_cmd *cmd;
5302 	struct megasas_dcmd_frame *dcmd;
5303 
5304 	cmd = megasas_get_cmd(instance);
5305 
5306 	if (!cmd) {
5307 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5308 		return -ENOMEM;
5309 	}
5310 
5311 
5312 	dcmd = &cmd->frame->dcmd;
5313 
5314 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5315 	dcmd->mbox.b[0] = crash_buf_state;
5316 	dcmd->cmd = MFI_CMD_DCMD;
5317 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5318 	dcmd->sge_count = 1;
5319 	dcmd->flags = MFI_FRAME_DIR_NONE;
5320 	dcmd->timeout = 0;
5321 	dcmd->pad_0 = 0;
5322 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5323 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5324 
5325 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5326 				 CRASH_DMA_BUF_SIZE);
5327 
5328 	if ((instance->adapter_type != MFI_SERIES) &&
5329 	    !instance->mask_interrupts)
5330 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5331 	else
5332 		ret = megasas_issue_polled(instance, cmd);
5333 
5334 	if (ret == DCMD_TIMEOUT) {
5335 		switch (dcmd_timeout_ocr_possible(instance)) {
5336 		case INITIATE_OCR:
5337 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5338 			megasas_reset_fusion(instance->host,
5339 					MFI_IO_TIMEOUT_OCR);
5340 			break;
5341 		case KILL_ADAPTER:
5342 			megaraid_sas_kill_hba(instance);
5343 			break;
5344 		case IGNORE_TIMEOUT:
5345 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5346 				__func__, __LINE__);
5347 			break;
5348 		}
5349 	} else
5350 		megasas_return_cmd(instance, cmd);
5351 
5352 	return ret;
5353 }
5354 
5355 /**
5356  * megasas_issue_init_mfi -	Initializes the FW
5357  * @instance:		Adapter soft state
5358  *
5359  * Issues the INIT MFI cmd
5360  */
5361 static int
5362 megasas_issue_init_mfi(struct megasas_instance *instance)
5363 {
5364 	__le32 context;
5365 	struct megasas_cmd *cmd;
5366 	struct megasas_init_frame *init_frame;
5367 	struct megasas_init_queue_info *initq_info;
5368 	dma_addr_t init_frame_h;
5369 	dma_addr_t initq_info_h;
5370 
5371 	/*
5372 	 * Prepare an init frame. Note the init frame points to queue info
5373 	 * structure. Each frame has SGL allocated after first 64 bytes. For
5374 	 * this frame - since we don't need any SGL - we use SGL's space as
5375 	 * queue info structure
5376 	 *
5377 	 * We will not get a NULL command below. We just created the pool.
5378 	 */
5379 	cmd = megasas_get_cmd(instance);
5380 
5381 	init_frame = (struct megasas_init_frame *)cmd->frame;
5382 	initq_info = (struct megasas_init_queue_info *)
5383 		((unsigned long)init_frame + 64);
5384 
5385 	init_frame_h = cmd->frame_phys_addr;
5386 	initq_info_h = init_frame_h + 64;
5387 
5388 	context = init_frame->context;
5389 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5390 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5391 	init_frame->context = context;
5392 
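	/*
	 * The reply queue holds one entry more than the max commands the
	 * driver may send; pass its DMA address and the producer/consumer
	 * index addresses to FW.
	 */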
5393 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5394 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5395 
5396 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5397 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5398 
5399 	init_frame->cmd = MFI_CMD_INIT;
5400 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5401 	init_frame->queue_info_new_phys_addr_lo =
5402 		cpu_to_le32(lower_32_bits(initq_info_h));
5403 	init_frame->queue_info_new_phys_addr_hi =
5404 		cpu_to_le32(upper_32_bits(initq_info_h));
5405 
5406 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5407 
5408 	/*
5409 	 * disable the intr before firing the init frame to FW
5410 	 */
5411 	instance->instancet->disable_intr(instance);
5412 
5413 	/*
5414 	 * Issue the init frame in polled mode
5415 	 */
5416 
5417 	if (megasas_issue_polled(instance, cmd)) {
5418 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5419 		megasas_return_cmd(instance, cmd);
5420 		goto fail_fw_init;
5421 	}
5422 
5423 	megasas_return_cmd(instance, cmd);
5424 
5425 	return 0;
5426 
5427 fail_fw_init:
5428 	return -EINVAL;
5429 }
5430 
5431 static u32
5432 megasas_init_adapter_mfi(struct megasas_instance *instance)
5433 {
5434 	u32 context_sz;
5435 	u32 reply_q_sz;
5436 
5437 	/*
5438 	 * Get various operational parameters from status register
5439 	 */
5440 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5441 	/*
5442 	 * Reduce the max supported cmds by 1. This is to ensure that the
5443 	 * reply_q_sz (1 more than the max cmds that the driver may send)
5444 	 * does not exceed the max cmds that the FW can support.
5445 	 */
5446 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5447 	instance->max_mfi_cmds = instance->max_fw_cmds;
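	/* Bits 16-23 of the FW status register report the max SGEs per command */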
5448 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5449 					0x10;
5450 	/*
5451 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5452 	 * are reserved for IOCTL + driver's internal DCMDs.
5453 	 */
5454 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5455 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5456 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5457 			MEGASAS_SKINNY_INT_CMDS);
5458 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5459 	} else {
5460 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5461 			MEGASAS_INT_CMDS);
5462 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5463 	}
5464 
5465 	instance->cur_can_queue = instance->max_scsi_cmds;
5466 	/*
5467 	 * Create a pool of commands
5468 	 */
5469 	if (megasas_alloc_cmds(instance))
5470 		goto fail_alloc_cmds;
5471 
5472 	/*
5473 	 * Allocate memory for reply queue. Length of reply queue should
5474 	 * be _one_ more than the maximum commands handled by the firmware.
5475 	 *
5476 	 * Note: When FW completes commands, it places corresponding context
5477 	 * values in this circular reply queue. This circular queue is a fairly
5478 	 * typical producer-consumer queue. FW is the producer (of completed
5479 	 * commands) and the driver is the consumer.
5480 	 */
5481 	context_sz = sizeof(u32);
5482 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5483 
5484 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5485 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5486 
5487 	if (!instance->reply_queue) {
5488 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5489 		goto fail_reply_queue;
5490 	}
5491 
5492 	if (megasas_issue_init_mfi(instance))
5493 		goto fail_fw_init;
5494 
5495 	if (megasas_get_ctrl_info(instance)) {
5496 		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5497 			"Fail from %s %d\n", instance->unique_id,
5498 			__func__, __LINE__);
5499 		goto fail_fw_init;
5500 	}
5501 
5502 	instance->fw_support_ieee = 0;
5503 	instance->fw_support_ieee =
5504 		(instance->instancet->read_fw_status_reg(instance) &
5505 		0x04000000);
5506 
5507 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
5508 			instance->fw_support_ieee);
5509 
5510 	if (instance->fw_support_ieee)
5511 		instance->flag_ieee = 1;
5512 
5513 	return 0;
5514 
5515 fail_fw_init:
5516 
5517 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5518 			    instance->reply_queue, instance->reply_queue_h);
5519 fail_reply_queue:
5520 	megasas_free_cmds(instance);
5521 
5522 fail_alloc_cmds:
5523 	return 1;
5524 }
5525 
5526 static
5527 void megasas_setup_irq_poll(struct megasas_instance *instance)
5528 {
5529 	struct megasas_irq_context *irq_ctx;
5530 	u32 count, i;
5531 
5532 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5533 
5534 	/* Initialize IRQ poll */
5535 	for (i = 0; i < count; i++) {
5536 		irq_ctx = &instance->irq_context[i];
5537 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5538 		irq_ctx->irq_poll_scheduled = false;
5539 		irq_poll_init(&irq_ctx->irqpoll,
5540 			      instance->threshold_reply_count,
5541 			      megasas_irqpoll);
5542 	}
5543 }
5544 
5545 /**
5546  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5547  * @instance:				Adapter soft state
5548  *
5549  * Do not enable interrupts, only set up ISRs.
5550  *
5551  * Return 0 on success.
5552  */
5553 static int
5554 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5555 {
5556 	struct pci_dev *pdev;
5557 
5558 	pdev = instance->pdev;
5559 	instance->irq_context[0].instance = instance;
5560 	instance->irq_context[0].MSIxIndex = 0;
5561 	snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5562 		"megasas", instance->host->host_no);
5563 	if (request_irq(pci_irq_vector(pdev, 0),
5564 			instance->instancet->service_isr, IRQF_SHARED,
5565 			instance->irq_context->name, &instance->irq_context[0])) {
5566 		dev_err(&instance->pdev->dev,
5567 				"Failed to register IRQ from %s %d\n",
5568 				__func__, __LINE__);
5569 		return -1;
5570 	}
5571 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5572 	instance->low_latency_index_start = 0;
5573 	return 0;
5574 }
5575 
5576 /**
5577  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5578  * @instance:				Adapter soft state
5579  * @is_probe:				Driver probe check
5580  *
5581  * Do not enable interrupts, only set up ISRs.
5582  *
5583  * Return 0 on success.
5584  */
5585 static int
5586 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5587 {
5588 	int i, j;
5589 	struct pci_dev *pdev;
5590 
5591 	pdev = instance->pdev;
5592 
5593 	/* Try MSI-x */
5594 	for (i = 0; i < instance->msix_vectors; i++) {
5595 		instance->irq_context[i].instance = instance;
5596 		instance->irq_context[i].MSIxIndex = i;
5597 		snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5598 			"megasas", instance->host->host_no, i);
5599 		if (request_irq(pci_irq_vector(pdev, i),
5600 			instance->instancet->service_isr, 0, instance->irq_context[i].name,
5601 			&instance->irq_context[i])) {
5602 			dev_err(&instance->pdev->dev,
5603 				"Failed to register IRQ for vector %d.\n", i);
5604 			for (j = 0; j < i; j++)
5605 				free_irq(pci_irq_vector(pdev, j),
5606 					 &instance->irq_context[j]);
5607 			/* Retry irq registration for IO-APIC */
5608 			instance->msix_vectors = 0;
5609 			instance->msix_load_balance = false;
5610 			if (is_probe) {
5611 				pci_free_irq_vectors(instance->pdev);
5612 				return megasas_setup_irqs_ioapic(instance);
5613 			} else {
5614 				return -1;
5615 			}
5616 		}
5617 	}
5618 
5619 	return 0;
5620 }
5621 
5622 /**
5623  * megasas_destroy_irqs -		unregister interrupts.
5624  * @instance:				Adapter soft state
5625  * return:				void
5626  */
5627 static void
5628 megasas_destroy_irqs(struct megasas_instance *instance) {
5629 
5630 	int i;
5631 	int count;
5632 	struct megasas_irq_context *irq_ctx;
5633 
5634 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5635 	if (instance->adapter_type != MFI_SERIES) {
5636 		for (i = 0; i < count; i++) {
5637 			irq_ctx = &instance->irq_context[i];
5638 			irq_poll_disable(&irq_ctx->irqpoll);
5639 		}
5640 	}
5641 
5642 	if (instance->msix_vectors)
5643 		for (i = 0; i < instance->msix_vectors; i++) {
5644 			free_irq(pci_irq_vector(instance->pdev, i),
5645 				 &instance->irq_context[i]);
5646 		}
5647 	else
5648 		free_irq(pci_irq_vector(instance->pdev, 0),
5649 			 &instance->irq_context[0]);
5650 }
5651 
5652 /**
5653  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5654  * @instance:				Adapter soft state
5655  *
5656  * Allocate the PD sequence sync buffers and sync the JBOD map with FW,
5657  * enabling the sequence-number based JBOD fast path when supported.
5658  */
5659 void
5660 megasas_setup_jbod_map(struct megasas_instance *instance)
5661 {
5662 	int i;
5663 	struct fusion_context *fusion = instance->ctrl_context;
5664 	u32 pd_seq_map_sz;
5665 
5666 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5667 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5668 
5669 	instance->use_seqnum_jbod_fp =
5670 		instance->support_seqnum_jbod_fp;
5671 	if (reset_devices || !fusion ||
5672 		!instance->support_seqnum_jbod_fp) {
5673 		dev_info(&instance->pdev->dev,
5674 			"JBOD sequence map is disabled %s %d\n",
5675 			__func__, __LINE__);
5676 		instance->use_seqnum_jbod_fp = false;
5677 		return;
5678 	}
5679 
5680 	if (fusion->pd_seq_sync[0])
5681 		goto skip_alloc;
5682 
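	/* Allocate the PD sequence number sync buffers shared with FW over DMA */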
5683 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5684 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5685 			(&instance->pdev->dev, pd_seq_map_sz,
5686 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5687 		if (!fusion->pd_seq_sync[i]) {
5688 			dev_err(&instance->pdev->dev,
5689 				"Failed to allocate memory from %s %d\n",
5690 				__func__, __LINE__);
5691 			if (i == 1) {
5692 				dma_free_coherent(&instance->pdev->dev,
5693 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5694 					fusion->pd_seq_phys[0]);
5695 				fusion->pd_seq_sync[0] = NULL;
5696 			}
5697 			instance->use_seqnum_jbod_fp = false;
5698 			return;
5699 		}
5700 	}
5701 
5702 skip_alloc:
5703 	if (!megasas_sync_pd_seq_num(instance, false) &&
5704 		!megasas_sync_pd_seq_num(instance, true))
5705 		instance->use_seqnum_jbod_fp = true;
5706 	else
5707 		instance->use_seqnum_jbod_fp = false;
5708 }
5709 
5710 static void megasas_setup_reply_map(struct megasas_instance *instance)
5711 {
5712 	const struct cpumask *mask;
5713 	unsigned int queue, cpu, low_latency_index_start;
5714 
5715 	low_latency_index_start = instance->low_latency_index_start;
5716 
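	/* Map each CPU to the reply queue whose MSI-X affinity mask covers it */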
5717 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5718 		mask = pci_irq_get_affinity(instance->pdev, queue);
5719 		if (!mask)
5720 			goto fallback;
5721 
5722 		for_each_cpu(cpu, mask)
5723 			instance->reply_map[cpu] = queue;
5724 	}
5725 	return;
5726 
5727 fallback:
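	/* No affinity info available: spread CPUs across the IO queues round-robin */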
5728 	queue = low_latency_index_start;
5729 	for_each_possible_cpu(cpu) {
5730 		instance->reply_map[cpu] = queue;
5731 		if (queue == (instance->msix_vectors - 1))
5732 			queue = low_latency_index_start;
5733 		else
5734 			queue++;
5735 	}
5736 }
5737 
5738 /**
5739  * megasas_get_device_list -	Get the PD and LD device list from FW.
5740  * @instance:			Adapter soft state
5741  * @return:			Success or failure
5742  *
5743  * Issue DCMDs to Firmware to get the PD and LD list.
5744  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5745  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5746  */
5747 static
5748 int megasas_get_device_list(struct megasas_instance *instance)
5749 {
5750 	memset(instance->pd_list, 0,
5751 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5752 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5753 
5754 	if (instance->enable_fw_dev_list) {
5755 		if (megasas_host_device_list_query(instance, true))
5756 			return FAILED;
5757 	} else {
5758 		if (megasas_get_pd_list(instance) < 0) {
5759 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5760 			return FAILED;
5761 		}
5762 
5763 		if (megasas_ld_list_query(instance,
5764 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5765 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5766 			return FAILED;
5767 		}
5768 	}
5769 
5770 	return SUCCESS;
5771 }
5772 
5773 /**
5774  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5775  * @instance:					Adapter soft state
5776  * return:					void
5777  */
5778 static inline void
5779 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5780 {
5781 	int i;
5782 	int local_numa_node;
5783 
5784 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5785 		local_numa_node = dev_to_node(&instance->pdev->dev);
5786 
5787 		for (i = 0; i < instance->low_latency_index_start; i++)
5788 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5789 				cpumask_of_node(local_numa_node));
5790 	}
5791 }
5792 
5793 static int
5794 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5795 {
5796 	int i, irq_flags;
5797 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5798 	struct irq_affinity *descp = &desc;
5799 
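	/*
	 * The first low_latency_index_start vectors (high IOPS or reserved
	 * queues) are excluded from automatic affinity spreading via
	 * .pre_vectors.
	 */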
5800 	irq_flags = PCI_IRQ_MSIX;
5801 
5802 	if (instance->smp_affinity_enable)
5803 		irq_flags |= PCI_IRQ_AFFINITY;
5804 	else
5805 		descp = NULL;
5806 
5807 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5808 		instance->low_latency_index_start,
5809 		instance->msix_vectors, irq_flags, descp);
5810 
5811 	return i;
5812 }
5813 
5814 /**
5815  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5816  * @instance:			Adapter soft state
5817  * return:			void
5818  */
5819 static void
5820 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5821 {
5822 	int i;
5823 	unsigned int num_msix_req;
5824 
5825 	i = __megasas_alloc_irq_vectors(instance);
5826 
5827 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5828 	    (i != instance->msix_vectors)) {
5829 		if (instance->msix_vectors)
5830 			pci_free_irq_vectors(instance->pdev);
5831 		/* Disable Balanced IOPS mode and try realloc vectors */
5832 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5833 		instance->low_latency_index_start = 1;
5834 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5835 
5836 		instance->msix_vectors = min(num_msix_req,
5837 				instance->msix_vectors);
5838 
5839 		i = __megasas_alloc_irq_vectors(instance);
5840 
5841 	}
5842 
5843 	dev_info(&instance->pdev->dev,
5844 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5845 
5846 	if (i > 0)
5847 		instance->msix_vectors = i;
5848 	else
5849 		instance->msix_vectors = 0;
5850 
5851 	if (instance->smp_affinity_enable)
5852 		megasas_set_high_iops_queue_affinity_hint(instance);
5853 }
5854 
5855 /**
5856  * megasas_init_fw -	Initializes the FW
5857  * @instance:		Adapter soft state
5858  *
5859  * This is the main function for initializing firmware
5860  */
5861 
5862 static int megasas_init_fw(struct megasas_instance *instance)
5863 {
5864 	u32 max_sectors_1;
5865 	u32 max_sectors_2, tmp_sectors, msix_enable;
5866 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5867 	resource_size_t base_addr;
5868 	void *base_addr_phys;
5869 	struct megasas_ctrl_info *ctrl_info = NULL;
5870 	unsigned long bar_list;
5871 	int i, j, loop;
5872 	struct IOV_111 *iovPtr;
5873 	struct fusion_context *fusion;
5874 	bool intr_coalescing;
5875 	unsigned int num_msix_req;
5876 	u16 lnksta, speed;
5877 
5878 	fusion = instance->ctrl_context;
5879 
5880 	/* Find first memory bar */
5881 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5882 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5883 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5884 					 "megasas: LSI")) {
5885 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5886 		return -EBUSY;
5887 	}
5888 
5889 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5890 	instance->reg_set = ioremap(base_addr, 8192);
5891 
5892 	if (!instance->reg_set) {
5893 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5894 		goto fail_ioremap;
5895 	}
5896 
5897 	base_addr_phys = &base_addr;
5898 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5899 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5900 		   instance->bar, base_addr_phys, instance->reg_set);
5901 
5902 	if (instance->adapter_type != MFI_SERIES)
5903 		instance->instancet = &megasas_instance_template_fusion;
5904 	else {
5905 		switch (instance->pdev->device) {
5906 		case PCI_DEVICE_ID_LSI_SAS1078R:
5907 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5908 			instance->instancet = &megasas_instance_template_ppc;
5909 			break;
5910 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5911 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5912 			instance->instancet = &megasas_instance_template_gen2;
5913 			break;
5914 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5915 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5916 			instance->instancet = &megasas_instance_template_skinny;
5917 			break;
5918 		case PCI_DEVICE_ID_LSI_SAS1064R:
5919 		case PCI_DEVICE_ID_DELL_PERC5:
5920 		default:
5921 			instance->instancet = &megasas_instance_template_xscale;
5922 			instance->pd_list_not_supported = 1;
5923 			break;
5924 		}
5925 	}
5926 
5927 	if (megasas_transition_to_ready(instance, 0)) {
5928 		dev_info(&instance->pdev->dev,
5929 			 "Failed to transition controller to ready from %s!\n",
5930 			 __func__);
5931 		if (instance->adapter_type != MFI_SERIES) {
5932 			status_reg = instance->instancet->read_fw_status_reg(
5933 					instance);
5934 			if (status_reg & MFI_RESET_ADAPTER) {
5935 				if (megasas_adp_reset_wait_for_ready
5936 					(instance, true, 0) == FAILED)
5937 					goto fail_ready_state;
5938 			} else {
5939 				goto fail_ready_state;
5940 			}
5941 		} else {
5942 			atomic_set(&instance->fw_reset_no_pci_access, 1);
5943 			instance->instancet->adp_reset
5944 				(instance, instance->reg_set);
5945 			atomic_set(&instance->fw_reset_no_pci_access, 0);
5946 
5947 			/* wait for about 30 seconds before retrying */
5948 			ssleep(30);
5949 
5950 			if (megasas_transition_to_ready(instance, 0))
5951 				goto fail_ready_state;
5952 		}
5953 
5954 		dev_info(&instance->pdev->dev,
5955 			 "FW restarted successfully from %s!\n",
5956 			 __func__);
5957 	}
5958 
5959 	megasas_init_ctrl_params(instance);
5960 
5961 	if (megasas_set_dma_mask(instance))
5962 		goto fail_ready_state;
5963 
5964 	if (megasas_alloc_ctrl_mem(instance))
5965 		goto fail_alloc_dma_buf;
5966 
5967 	if (megasas_alloc_ctrl_dma_buffers(instance))
5968 		goto fail_alloc_dma_buf;
5969 
5970 	fusion = instance->ctrl_context;
5971 
5972 	if (instance->adapter_type >= VENTURA_SERIES) {
5973 		scratch_pad_2 =
5974 			megasas_readl(instance,
5975 				      &instance->reg_set->outbound_scratch_pad_2);
5976 		instance->max_raid_mapsize = ((scratch_pad_2 >>
5977 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5978 			MR_MAX_RAID_MAP_SIZE_MASK);
5979 	}
5980 
5981 	instance->enable_sdev_max_qd = enable_sdev_max_qd;
5982 
5983 	switch (instance->adapter_type) {
5984 	case VENTURA_SERIES:
5985 		fusion->pcie_bw_limitation = true;
5986 		break;
5987 	case AERO_SERIES:
5988 		fusion->r56_div_offload = true;
5989 		break;
5990 	default:
5991 		break;
5992 	}
5993 
5994 	/* Check if MSI-X is supported while in ready state */
5995 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5996 		       0x4000000) >> 0x1a;
5997 	if (msix_enable && !msix_disable) {
5998 
5999 		scratch_pad_1 = megasas_readl
6000 			(instance, &instance->reg_set->outbound_scratch_pad_1);
6001 		/* Check max MSI-X vectors */
6002 		if (fusion) {
6003 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
6004 				/* Thunderbolt Series*/
6005 				instance->msix_vectors = (scratch_pad_1
6006 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6007 			} else {
6008 				instance->msix_vectors = ((scratch_pad_1
6009 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6010 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6011 
6012 				/*
6013 				 * For Invader series, > 8 MSI-x vectors
6014 				 * supported by FW/HW implies combined
6015 				 * reply queue mode is enabled.
6016 				 * For Ventura series, > 16 MSI-x vectors
6017 				 * supported by FW/HW implies combined
6018 				 * reply queue mode is enabled.
6019 				 */
6020 				switch (instance->adapter_type) {
6021 				case INVADER_SERIES:
6022 					if (instance->msix_vectors > 8)
6023 						instance->msix_combined = true;
6024 					break;
6025 				case AERO_SERIES:
6026 				case VENTURA_SERIES:
6027 					if (instance->msix_vectors > 16)
6028 						instance->msix_combined = true;
6029 					break;
6030 				}
6031 
6032 				if (rdpq_enable)
6033 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6034 								1 : 0;
6035 
6036 				if (instance->adapter_type >= INVADER_SERIES &&
6037 				    !instance->msix_combined) {
6038 					instance->msix_load_balance = true;
6039 					instance->smp_affinity_enable = false;
6040 				}
6041 
6042 				/* Save 1-15 reply post index address to local memory
6043 				 * Index 0 is already saved from reg offset
6044 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6045 				 */
6046 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6047 					instance->reply_post_host_index_addr[loop] =
6048 						(u32 __iomem *)
6049 						((u8 __iomem *)instance->reg_set +
6050 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6051 						+ (loop * 0x10));
6052 				}
6053 			}
6054 
6055 			dev_info(&instance->pdev->dev,
6056 				 "firmware supports msix\t: (%d)",
6057 				 instance->msix_vectors);
6058 			if (msix_vectors)
6059 				instance->msix_vectors = min(msix_vectors,
6060 					instance->msix_vectors);
6061 		} else /* MFI adapters */
6062 			instance->msix_vectors = 1;
6063 
6064 
6065 		/*
6066 		 * For Aero (if some conditions are met), the driver will configure
6067 		 * a few additional reply queues with interrupt coalescing enabled.
6068 		 * These queues with interrupt coalescing enabled are called
6069 		 * High IOPS queues and the rest of the reply queues (based on the
6070 		 * number of logical CPUs) are termed Low latency queues.
6071 		 *
6072 		 * Total number of reply queues = High IOPS queues + Low latency queues
6073 		 *
6074 		 * For the rest of the fusion adapters, 1 additional reply queue is
6075 		 * reserved for management commands and the rest of the reply queues
6076 		 * (based on the number of logical CPUs) are used for IOs and
6077 		 * referenced as IO queues.
6078 		 * Total number of reply queues = 1 + IO queues
6079 		 *
6080 		 * MFI adapters support a single MSI-x vector, so a single reply
6081 		 * queue is used for both IO and management commands.
6082 		 */
6083 
6084 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6085 								true : false;
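		/*
		 * Balanced perf mode requires FW interrupt coalescing support,
		 * enough online CPUs and the full MSI-X vector complement.
		 */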
6086 		if (intr_coalescing &&
6087 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6088 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6089 			instance->perf_mode = MR_BALANCED_PERF_MODE;
6090 		else
6091 			instance->perf_mode = MR_LATENCY_PERF_MODE;
6092 
6093 
6094 		if (instance->adapter_type == AERO_SERIES) {
6095 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6096 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6097 
6098 			/*
6099 			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6100 			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6101 			 */
6102 			if (speed < 0x4) {
6103 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6104 				fusion->pcie_bw_limitation = true;
6105 			}
6106 
6107 			/*
6108 			 * Performance mode settings provided through the module parameter
6109 			 * perf_mode take effect only for:
6110 			 * 1. Aero family of adapters.
6111 			 * 2. When the user sets the module parameter perf_mode in the range 0-2.
6112 			 */
6113 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6114 				(perf_mode <= MR_LATENCY_PERF_MODE))
6115 				instance->perf_mode = perf_mode;
6116 			/*
6117 			 * If intr coalescing is not supported by controller FW, then IOPS
6118 			 * and Balanced modes are not feasible.
6119 			 */
6120 			if (!intr_coalescing)
6121 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6122 
6123 		}
6124 
6125 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6126 			instance->low_latency_index_start =
6127 				MR_HIGH_IOPS_QUEUE_COUNT;
6128 		else
6129 			instance->low_latency_index_start = 1;
6130 
6131 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6132 
6133 		instance->msix_vectors = min(num_msix_req,
6134 				instance->msix_vectors);
6135 
6136 		megasas_alloc_irq_vectors(instance);
6137 		if (!instance->msix_vectors)
6138 			instance->msix_load_balance = false;
6139 	}
6140 	/*
6141 	 * MSI-X host index 0 is common for all adapters.
6142 	 * It is used for all MPT based adapters.
6143 	 */
6144 	if (instance->msix_combined) {
6145 		instance->reply_post_host_index_addr[0] =
6146 				(u32 *)((u8 *)instance->reg_set +
6147 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6148 	} else {
6149 		instance->reply_post_host_index_addr[0] =
6150 			(u32 *)((u8 *)instance->reg_set +
6151 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6152 	}
6153 
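	/* Fall back to a single legacy (INTx) interrupt when MSI-X is not in use */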
6154 	if (!instance->msix_vectors) {
6155 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6156 		if (i < 0)
6157 			goto fail_init_adapter;
6158 	}
6159 
6160 	megasas_setup_reply_map(instance);
6161 
6162 	dev_info(&instance->pdev->dev,
6163 		"current msix/online cpus\t: (%d/%d)\n",
6164 		instance->msix_vectors, (unsigned int)num_online_cpus());
6165 	dev_info(&instance->pdev->dev,
6166 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6167 
6168 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6169 		(unsigned long)instance);
6170 
6171 	/*
6172 	 * Below are the default values for legacy firmware
6173 	 * (non-fusion based controllers).
6174 	 */
6175 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6176 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6177 	/* Get operational params, sge flags, send init cmd to controller */
6178 	if (instance->instancet->init_adapter(instance))
6179 		goto fail_init_adapter;
6180 
6181 	if (instance->adapter_type >= VENTURA_SERIES) {
6182 		scratch_pad_3 =
6183 			megasas_readl(instance,
6184 				      &instance->reg_set->outbound_scratch_pad_3);
6185 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6186 			MR_DEFAULT_NVME_PAGE_SHIFT)
6187 			instance->nvme_page_size =
6188 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6189 
6190 		dev_info(&instance->pdev->dev,
6191 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6192 	}
6193 
6194 	if (instance->msix_vectors ?
6195 		megasas_setup_irqs_msix(instance, 1) :
6196 		megasas_setup_irqs_ioapic(instance))
6197 		goto fail_init_adapter;
6198 
6199 	if (instance->adapter_type != MFI_SERIES)
6200 		megasas_setup_irq_poll(instance);
6201 
6202 	instance->instancet->enable_intr(instance);
6203 
6204 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6205 
6206 	megasas_setup_jbod_map(instance);
6207 
6208 	if (megasas_get_device_list(instance) != SUCCESS) {
6209 		dev_err(&instance->pdev->dev,
6210 			"%s: megasas_get_device_list failed\n",
6211 			__func__);
6212 		goto fail_get_ld_pd_list;
6213 	}
6214 
6215 	/* stream detection initialization */
6216 	if (instance->adapter_type >= VENTURA_SERIES) {
6217 		fusion->stream_detect_by_ld =
6218 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6219 				sizeof(struct LD_STREAM_DETECT *),
6220 				GFP_KERNEL);
6221 		if (!fusion->stream_detect_by_ld) {
6222 			dev_err(&instance->pdev->dev,
6223 				"unable to allocate stream detection for pool of LDs\n");
6224 			goto fail_get_ld_pd_list;
6225 		}
6226 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6227 			fusion->stream_detect_by_ld[i] =
6228 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6229 				GFP_KERNEL);
6230 			if (!fusion->stream_detect_by_ld[i]) {
6231 				dev_err(&instance->pdev->dev,
6232 					"unable to allocate stream detect by LD\n");
6233 				for (j = 0; j < i; ++j)
6234 					kfree(fusion->stream_detect_by_ld[j]);
6235 				kfree(fusion->stream_detect_by_ld);
6236 				fusion->stream_detect_by_ld = NULL;
6237 				goto fail_get_ld_pd_list;
6238 			}
6239 			fusion->stream_detect_by_ld[i]->mru_bit_map
6240 				= MR_STREAM_BITMAP;
6241 		}
6242 	}
6243 
6244 	/*
6245 	 * Compute the max allowed sectors per IO: The controller info has two
6246 	 * limits on max sectors. Driver should use the minimum of these two.
6247 	 *
6248 	 * 1 << stripe_sz_ops.min = max sectors per strip
6249 	 *
6250 	 * Note that older firmware (< FW ver 30) didn't report the information
6251 	 * needed to calculate max_sectors_1, so the number always ended up as zero.
6252 	 */
6253 	tmp_sectors = 0;
6254 	ctrl_info = instance->ctrl_info_buf;
6255 
6256 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6257 		le16_to_cpu(ctrl_info->max_strips_per_io);
6258 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6259 
6260 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6261 
6262 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6263 	instance->passive = ctrl_info->cluster.passive;
6264 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6265 	instance->UnevenSpanSupport =
6266 		ctrl_info->adapterOperations2.supportUnevenSpans;
6267 	if (instance->UnevenSpanSupport) {
6268 		struct fusion_context *fusion = instance->ctrl_context;
6269 		if (MR_ValidateMapInfo(instance, instance->map_id))
6270 			fusion->fast_path_io = 1;
6271 		else
6272 			fusion->fast_path_io = 0;
6273 
6274 	}
6275 	if (ctrl_info->host_interface.SRIOV) {
6276 		instance->requestorId = ctrl_info->iov.requestorId;
6277 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6278 			if (!ctrl_info->adapterOperations2.activePassive)
6279 			    instance->PlasmaFW111 = 1;
6280 
6281 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6282 			    instance->PlasmaFW111 ? "1.11" : "new");
6283 
6284 			if (instance->PlasmaFW111) {
6285 			    iovPtr = (struct IOV_111 *)
6286 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6287 			    instance->requestorId = iovPtr->requestorId;
6288 			}
6289 		}
6290 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6291 			instance->requestorId);
6292 	}
6293 
6294 	instance->crash_dump_fw_support =
6295 		ctrl_info->adapterOperations3.supportCrashDump;
6296 	instance->crash_dump_drv_support =
6297 		(instance->crash_dump_fw_support &&
6298 		instance->crash_dump_buf);
6299 	if (instance->crash_dump_drv_support)
6300 		megasas_set_crash_dump_params(instance,
6301 			MR_CRASH_BUF_TURN_OFF);
6302 
6303 	else {
6304 		if (instance->crash_dump_buf)
6305 			dma_free_coherent(&instance->pdev->dev,
6306 				CRASH_DMA_BUF_SIZE,
6307 				instance->crash_dump_buf,
6308 				instance->crash_dump_h);
6309 		instance->crash_dump_buf = NULL;
6310 	}
6311 
6312 	if (instance->snapdump_wait_time) {
6313 		megasas_get_snapdump_properties(instance);
6314 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6315 			 instance->snapdump_wait_time);
6316 	}
6317 
6318 	dev_info(&instance->pdev->dev,
6319 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6320 		le16_to_cpu(ctrl_info->pci.vendor_id),
6321 		le16_to_cpu(ctrl_info->pci.device_id),
6322 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6323 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6324 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6325 		instance->UnevenSpanSupport ? "yes" : "no");
6326 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6327 		instance->crash_dump_drv_support ? "yes" : "no");
6328 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6329 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6330 
6331 	instance->max_sectors_per_req = instance->max_num_sge *
6332 						SGE_BUFFER_SIZE / 512;
6333 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6334 		instance->max_sectors_per_req = tmp_sectors;
6335 
6336 	/* Check for valid throttlequeuedepth module parameter */
6337 	if (throttlequeuedepth &&
6338 			throttlequeuedepth <= instance->max_scsi_cmds)
6339 		instance->throttlequeuedepth = throttlequeuedepth;
6340 	else
6341 		instance->throttlequeuedepth =
6342 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6343 
6344 	if ((resetwaittime < 1) ||
6345 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6346 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6347 
6348 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6349 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6350 
6351 	/* Launch SR-IOV heartbeat timer */
6352 	if (instance->requestorId) {
6353 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6354 			megasas_start_timer(instance);
6355 		} else {
6356 			instance->skip_heartbeat_timer_del = 1;
6357 			goto fail_get_ld_pd_list;
6358 		}
6359 	}
6360 
6361 	/*
6362 	 * Create and start watchdog thread which will monitor
6363 	 * controller state every 1 sec and trigger OCR when
6364 	 * it enters fault state
6365 	 */
6366 	if (instance->adapter_type != MFI_SERIES)
6367 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6368 			goto fail_start_watchdog;
6369 
6370 	return 0;
6371 
6372 fail_start_watchdog:
6373 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6374 		del_timer_sync(&instance->sriov_heartbeat_timer);
6375 fail_get_ld_pd_list:
6376 	instance->instancet->disable_intr(instance);
6377 	megasas_destroy_irqs(instance);
6378 fail_init_adapter:
6379 	if (instance->msix_vectors)
6380 		pci_free_irq_vectors(instance->pdev);
6381 	instance->msix_vectors = 0;
6382 fail_alloc_dma_buf:
6383 	megasas_free_ctrl_dma_buffers(instance);
6384 	megasas_free_ctrl_mem(instance);
6385 fail_ready_state:
6386 	iounmap(instance->reg_set);
6387 
6388 fail_ioremap:
6389 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6390 
6391 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6392 		__func__, __LINE__);
6393 	return -EINVAL;
6394 }
6395 
6396 /**
6397  * megasas_release_mfi -	Reverses the FW initialization
6398  * @instance:			Adapter soft state
6399  */
6400 static void megasas_release_mfi(struct megasas_instance *instance)
6401 {
6402 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6403 
6404 	if (instance->reply_queue)
6405 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6406 			    instance->reply_queue, instance->reply_queue_h);
6407 
6408 	megasas_free_cmds(instance);
6409 
6410 	iounmap(instance->reg_set);
6411 
6412 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6413 }
6414 
6415 /**
6416  * megasas_get_seq_num -	Gets latest event sequence numbers
6417  * @instance:			Adapter soft state
6418  * @eli:			FW event log sequence numbers information
6419  *
6420  * FW maintains a log of all events in a non-volatile area. Upper layers would
6421  * usually find out the latest sequence number of the events, the seq number at
6422  * the boot etc. They would "read" all the events below the latest seq number
6423  * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6424  * number), they would subscribe to AEN (asynchronous event notification) and
6425  * wait for the events to happen.
6426  */
6427 static int
6428 megasas_get_seq_num(struct megasas_instance *instance,
6429 		    struct megasas_evt_log_info *eli)
6430 {
6431 	struct megasas_cmd *cmd;
6432 	struct megasas_dcmd_frame *dcmd;
6433 	struct megasas_evt_log_info *el_info;
6434 	dma_addr_t el_info_h = 0;
6435 	int ret;
6436 
6437 	cmd = megasas_get_cmd(instance);
6438 
6439 	if (!cmd) {
6440 		return -ENOMEM;
6441 	}
6442 
6443 	dcmd = &cmd->frame->dcmd;
6444 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6445 				     sizeof(struct megasas_evt_log_info),
6446 				     &el_info_h, GFP_KERNEL);
6447 	if (!el_info) {
6448 		megasas_return_cmd(instance, cmd);
6449 		return -ENOMEM;
6450 	}
6451 
6452 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6453 
6454 	dcmd->cmd = MFI_CMD_DCMD;
6455 	dcmd->cmd_status = 0x0;
6456 	dcmd->sge_count = 1;
6457 	dcmd->flags = MFI_FRAME_DIR_READ;
6458 	dcmd->timeout = 0;
6459 	dcmd->pad_0 = 0;
6460 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6461 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6462 
6463 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6464 				 sizeof(struct megasas_evt_log_info));
6465 
6466 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6467 	if (ret != DCMD_SUCCESS) {
6468 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6469 			__func__, __LINE__);
6470 		goto dcmd_failed;
6471 	}
6472 
6473 	/*
6474 	 * Copy the data back into the caller's buffer
6475 	 */
6476 	eli->newest_seq_num = el_info->newest_seq_num;
6477 	eli->oldest_seq_num = el_info->oldest_seq_num;
6478 	eli->clear_seq_num = el_info->clear_seq_num;
6479 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6480 	eli->boot_seq_num = el_info->boot_seq_num;
6481 
6482 dcmd_failed:
6483 	dma_free_coherent(&instance->pdev->dev,
6484 			sizeof(struct megasas_evt_log_info),
6485 			el_info, el_info_h);
6486 
6487 	megasas_return_cmd(instance, cmd);
6488 
6489 	return ret;
6490 }
6491 
6492 /**
6493  * megasas_register_aen -	Registers for asynchronous event notification
6494  * @instance:			Adapter soft state
6495  * @seq_num:			The starting sequence number
6496  * @class_locale:		Class of the event
6497  *
6498  * This function subscribes for AEN for events beyond the @seq_num. It requests
6499  * to be notified if and only if the event is of type @class_locale
6500  */
6501 static int
6502 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6503 		     u32 class_locale_word)
6504 {
6505 	int ret_val;
6506 	struct megasas_cmd *cmd;
6507 	struct megasas_dcmd_frame *dcmd;
6508 	union megasas_evt_class_locale curr_aen;
6509 	union megasas_evt_class_locale prev_aen;
6510 
6511 	/*
6512 	 * If there is an AEN pending already (aen_cmd), check if the
6513 	 * class_locale of that pending AEN is inclusive of the new
6514 	 * AEN request we currently have. If it is, then we don't have
6515 	 * to do anything. In other words, whichever events the current
6516 	 * AEN request is subscribing to, have already been subscribed
6517 	 * to.
6518 	 *
6519 	 * If the old_cmd is _not_ inclusive, then we have to abort
6520 	 * that command, form a class_locale that is superset of both
6521 	 * old and current and re-issue to the FW
6522 	 */
6523 
6524 	curr_aen.word = class_locale_word;
6525 
6526 	if (instance->aen_cmd) {
6527 
6528 		prev_aen.word =
6529 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6530 
6531 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6532 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6533 			dev_info(&instance->pdev->dev,
6534 				 "%s %d out of range class %d sent by application\n",
6535 				 __func__, __LINE__, curr_aen.members.class);
6536 			return 0;
6537 		}
6538 
6539 		/*
6540 		 * A class whose enum value is smaller is inclusive of all
6541 		 * higher values. If a PROGRESS (= -1) was previously
6542 		 * registered, then a new registration requests for higher
6543 		 * classes need not be sent to FW. They are automatically
6544 		 * included.
6545 		 *
6546 		 * Locale numbers don't have such a hierarchy. They are bitmap
6547 		 * values.
6548 		 */
6549 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6550 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6551 		      curr_aen.members.locale)) {
6552 			/*
6553 			 * Previously issued event registration includes
6554 			 * current request. Nothing to do.
6555 			 */
6556 			return 0;
6557 		} else {
6558 			curr_aen.members.locale |= prev_aen.members.locale;
6559 
6560 			if (prev_aen.members.class < curr_aen.members.class)
6561 				curr_aen.members.class = prev_aen.members.class;
6562 
6563 			instance->aen_cmd->abort_aen = 1;
6564 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6565 								  instance->
6566 								  aen_cmd, 30);
6567 
6568 			if (ret_val) {
6569 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6570 				       "previous AEN command\n");
6571 				return ret_val;
6572 			}
6573 		}
6574 	}
6575 
6576 	cmd = megasas_get_cmd(instance);
6577 
6578 	if (!cmd)
6579 		return -ENOMEM;
6580 
6581 	dcmd = &cmd->frame->dcmd;
6582 
6583 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6584 
6585 	/*
6586 	 * Prepare DCMD for aen registration
6587 	 */
6588 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6589 
6590 	dcmd->cmd = MFI_CMD_DCMD;
6591 	dcmd->cmd_status = 0x0;
6592 	dcmd->sge_count = 1;
6593 	dcmd->flags = MFI_FRAME_DIR_READ;
6594 	dcmd->timeout = 0;
6595 	dcmd->pad_0 = 0;
6596 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6597 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6598 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6599 	instance->last_seq_num = seq_num;
6600 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6601 
6602 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6603 				 sizeof(struct megasas_evt_detail));
6604 
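	/* An AEN command is already outstanding; do not issue another one */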
6605 	if (instance->aen_cmd != NULL) {
6606 		megasas_return_cmd(instance, cmd);
6607 		return 0;
6608 	}
6609 
6610 	/*
6611 	 * Store reference to the cmd used to register for AEN. When an
6612 	 * application wants us to register for AEN, we have to abort this
6613 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6614 	 */
6615 	instance->aen_cmd = cmd;
6616 
6617 	/*
6618 	 * Issue the aen registration frame
6619 	 */
6620 	instance->instancet->issue_dcmd(instance, cmd);
6621 
6622 	return 0;
6623 }
6624 
6625 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6626  *
6627  * This DCMD will fetch a few properties of the LD/system PD defined
6628  * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value.
6629  *
6630  * This DCMD is sent by the driver whenever a new target is added to the OS.
6631  *
6632  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6633  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6634  *                       0 = system PD, 1 = LD.
6635  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6636  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6637  *
6638  * @instance:		Adapter soft state
6639  * @sdev:		OS provided scsi device
6640  *
6641  * Returns 0 on success, non-zero on failure.
6642  */
6643 int
6644 megasas_get_target_prop(struct megasas_instance *instance,
6645 			struct scsi_device *sdev)
6646 {
6647 	int ret;
6648 	struct megasas_cmd *cmd;
6649 	struct megasas_dcmd_frame *dcmd;
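	/* The FW target ID is derived from the SCSI channel/id pair */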
6650 	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6651 			sdev->id;
6652 
6653 	cmd = megasas_get_cmd(instance);
6654 
6655 	if (!cmd) {
6656 		dev_err(&instance->pdev->dev,
6657 			"Failed to get cmd %s\n", __func__);
6658 		return -ENOMEM;
6659 	}
6660 
6661 	dcmd = &cmd->frame->dcmd;
6662 
6663 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6664 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6665 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6666 
6667 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6668 	dcmd->cmd = MFI_CMD_DCMD;
6669 	dcmd->cmd_status = 0xFF;
6670 	dcmd->sge_count = 1;
6671 	dcmd->flags = MFI_FRAME_DIR_READ;
6672 	dcmd->timeout = 0;
6673 	dcmd->pad_0 = 0;
6674 	dcmd->data_xfer_len =
6675 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6676 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6677 
6678 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6679 				 sizeof(struct MR_TARGET_PROPERTIES));
6680 
6681 	if ((instance->adapter_type != MFI_SERIES) &&
6682 	    !instance->mask_interrupts)
6683 		ret = megasas_issue_blocked_cmd(instance,
6684 						cmd, MFI_IO_TIMEOUT_SECS);
6685 	else
6686 		ret = megasas_issue_polled(instance, cmd);
6687 
6688 	switch (ret) {
6689 	case DCMD_TIMEOUT:
6690 		switch (dcmd_timeout_ocr_possible(instance)) {
6691 		case INITIATE_OCR:
6692 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6693 			mutex_unlock(&instance->reset_mutex);
6694 			megasas_reset_fusion(instance->host,
6695 					     MFI_IO_TIMEOUT_OCR);
6696 			mutex_lock(&instance->reset_mutex);
6697 			break;
6698 		case KILL_ADAPTER:
6699 			megaraid_sas_kill_hba(instance);
6700 			break;
6701 		case IGNORE_TIMEOUT:
6702 			dev_info(&instance->pdev->dev,
6703 				 "Ignore DCMD timeout: %s %d\n",
6704 				 __func__, __LINE__);
6705 			break;
6706 		}
6707 		break;
6708 
6709 	default:
6710 		megasas_return_cmd(instance, cmd);
6711 	}
6712 	if (ret != DCMD_SUCCESS)
6713 		dev_err(&instance->pdev->dev,
6714 			"return from %s %d return value %d\n",
6715 			__func__, __LINE__, ret);
6716 
6717 	return ret;
6718 }
6719 
6720 /**
6721  * megasas_start_aen -	Subscribes to AEN during driver load time
6722  * @instance:		Adapter soft state
6723  */
6724 static int megasas_start_aen(struct megasas_instance *instance)
6725 {
6726 	struct megasas_evt_log_info eli;
6727 	union megasas_evt_class_locale class_locale;
6728 
6729 	/*
6730 	 * Get the latest sequence number from FW
6731 	 */
6732 	memset(&eli, 0, sizeof(eli));
6733 
6734 	if (megasas_get_seq_num(instance, &eli))
6735 		return -1;
6736 
6737 	/*
6738 	 * Register AEN with FW for latest sequence number plus 1
6739 	 */
6740 	class_locale.members.reserved = 0;
6741 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6742 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6743 
6744 	return megasas_register_aen(instance,
6745 			le32_to_cpu(eli.newest_seq_num) + 1,
6746 			class_locale.word);
6747 }
6748 
6749 /**
6750  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6751  * @instance:		Adapter soft state
6752  */
6753 static int megasas_io_attach(struct megasas_instance *instance)
6754 {
6755 	struct Scsi_Host *host = instance->host;
6756 
6757 	/*
6758 	 * Export parameters required by SCSI mid-layer
6759 	 */
6760 	host->unique_id = instance->unique_id;
6761 	host->can_queue = instance->max_scsi_cmds;
6762 	host->this_id = instance->init_id;
6763 	host->sg_tablesize = instance->max_num_sge;
6764 
6765 	if (instance->fw_support_ieee)
6766 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6767 
6768 	/*
6769 	 * Check if the module parameter value for max_sectors can be used
6770 	 */
6771 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6772 		instance->max_sectors_per_req = max_sectors;
6773 	else {
6774 		if (max_sectors) {
6775 			if (((instance->pdev->device ==
6776 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6777 				(instance->pdev->device ==
6778 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6779 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6780 				instance->max_sectors_per_req = max_sectors;
6781 			} else {
6782 				dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
6783 					"and <= %d (or < 1MB for GEN2 controller)\n",
6784 					instance->max_sectors_per_req);
6785 			}
6786 		}
6787 	}
6788 
6789 	host->max_sectors = instance->max_sectors_per_req;
6790 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6791 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6792 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6793 	host->max_lun = MEGASAS_MAX_LUN;
6794 	host->max_cmd_len = 16;
6795 
6796 	/*
6797 	 * Notify the mid-layer about the new controller
6798 	 */
6799 	if (scsi_add_host(host, &instance->pdev->dev)) {
6800 		dev_err(&instance->pdev->dev,
6801 			"Failed to add host from %s %d\n",
6802 			__func__, __LINE__);
6803 		return -ENODEV;
6804 	}
6805 
6806 	return 0;
6807 }
6808 
6809 /**
6810  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6811  *
6812  * @instance:		Adapter soft state
6813  * Description:
6814  *
6815  * For Ventura, driver/FW will operate in 63bit DMA addresses.
6816  *
6817  * For Invader -
6818  *	By default, driver/FW will operate in 32 bit DMA addresses
6819  *	for consistent DMA mapping, but if the 32 bit consistent
6820  *	DMA mask fails, the driver will try a 63 bit consistent
6821  *	mask provided the FW is truly 63 bit DMA capable.
6822  *
6823  * For older controllers (Thunderbolt and MFI based adapters) -
6824  *	driver/FW will operate in 32 bit consistent DMA addresses.
6825  */
6826 static int
6827 megasas_set_dma_mask(struct megasas_instance *instance)
6828 {
6829 	u64 consistent_mask;
6830 	struct pci_dev *pdev;
6831 	u32 scratch_pad_1;
6832 
6833 	pdev = instance->pdev;
6834 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6835 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6836 
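	/*
	 * Try a 63 bit streaming DMA mask first; fall back to a 32 bit mask
	 * if the device or platform cannot handle it.
	 */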
6837 	if (IS_DMA64) {
6838 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6839 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6840 			goto fail_set_dma_mask;
6841 
6842 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6843 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6844 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6845 			/*
6846 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6847 			 * for FW capable of handling 64 bit DMA.
6848 			 */
6849 			scratch_pad_1 = megasas_readl
6850 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6851 
6852 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6853 				goto fail_set_dma_mask;
6854 			else if (dma_set_mask_and_coherent(&pdev->dev,
6855 							   DMA_BIT_MASK(63)))
6856 				goto fail_set_dma_mask;
6857 		}
6858 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6859 		goto fail_set_dma_mask;
6860 
6861 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6862 		instance->consistent_mask_64bit = false;
6863 	else
6864 		instance->consistent_mask_64bit = true;
6865 
6866 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6867 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6868 		 (instance->consistent_mask_64bit ? "63" : "32"));
6869 
6870 	return 0;
6871 
6872 fail_set_dma_mask:
6873 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6874 	return -1;
6875 
6876 }
6877 
6878 /*
6879  * megasas_set_adapter_type -	Set adapter type.
6880  *				Supported controllers can be divided in
6881  *				different categories-
6882  *					enum MR_ADAPTER_TYPE {
6883  *						MFI_SERIES = 1,
6884  *						THUNDERBOLT_SERIES = 2,
6885  *						INVADER_SERIES = 3,
6886  *						VENTURA_SERIES = 4,
6887  *						AERO_SERIES = 5,
6888  *					};
6889  * @instance:			Adapter soft state
6890  * return:			void
6891  */
6892 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6893 {
6894 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6895 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6896 		instance->adapter_type = MFI_SERIES;
6897 	} else {
6898 		switch (instance->pdev->device) {
6899 		case PCI_DEVICE_ID_LSI_AERO_10E1:
6900 		case PCI_DEVICE_ID_LSI_AERO_10E2:
6901 		case PCI_DEVICE_ID_LSI_AERO_10E5:
6902 		case PCI_DEVICE_ID_LSI_AERO_10E6:
6903 			instance->adapter_type = AERO_SERIES;
6904 			break;
6905 		case PCI_DEVICE_ID_LSI_VENTURA:
6906 		case PCI_DEVICE_ID_LSI_CRUSADER:
6907 		case PCI_DEVICE_ID_LSI_HARPOON:
6908 		case PCI_DEVICE_ID_LSI_TOMCAT:
6909 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6910 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6911 			instance->adapter_type = VENTURA_SERIES;
6912 			break;
6913 		case PCI_DEVICE_ID_LSI_FUSION:
6914 		case PCI_DEVICE_ID_LSI_PLASMA:
6915 			instance->adapter_type = THUNDERBOLT_SERIES;
6916 			break;
6917 		case PCI_DEVICE_ID_LSI_INVADER:
6918 		case PCI_DEVICE_ID_LSI_INTRUDER:
6919 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6920 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6921 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6922 		case PCI_DEVICE_ID_LSI_FURY:
6923 			instance->adapter_type = INVADER_SERIES;
6924 			break;
6925 		default: /* For all other supported controllers */
6926 			instance->adapter_type = MFI_SERIES;
6927 			break;
6928 		}
6929 	}
6930 }
6931 
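/**
 * megasas_alloc_mfi_ctrl_mem -	Allocate the producer/consumer index
 *				buffers used by MFI series adapters to
 *				track the reply queue
 * @instance:			Adapter soft state
 *
 * return:			0 for SUCCESS
 */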
6932 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6933 {
6934 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6935 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
6936 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6937 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6938 
6939 	if (!instance->producer || !instance->consumer) {
6940 		dev_err(&instance->pdev->dev,
6941 			"Failed to allocate memory for producer, consumer\n");
6942 		return -1;
6943 	}
6944 
6945 	*instance->producer = 0;
6946 	*instance->consumer = 0;
6947 	return 0;
6948 }
6949 
6950 /**
6951  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6952  *				structures which are not common across MFI
6953  *				adapters and fusion adapters.
6954  *				For MFI based adapters, allocate producer and
6955  *				consumer buffers. For fusion adapters, allocate
6956  *				memory for fusion context.
6957  * @instance:			Adapter soft state
6958  * return:			0 for SUCCESS
6959  */
6960 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6961 {
6962 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6963 				      GFP_KERNEL);
6964 	if (!instance->reply_map)
6965 		return -ENOMEM;
6966 
6967 	switch (instance->adapter_type) {
6968 	case MFI_SERIES:
6969 		if (megasas_alloc_mfi_ctrl_mem(instance))
6970 			goto fail;
6971 		break;
6972 	case AERO_SERIES:
6973 	case VENTURA_SERIES:
6974 	case THUNDERBOLT_SERIES:
6975 	case INVADER_SERIES:
6976 		if (megasas_alloc_fusion_context(instance))
6977 			goto fail;
6978 		break;
6979 	}
6980 
6981 	return 0;
6982  fail:
6983 	kfree(instance->reply_map);
6984 	instance->reply_map = NULL;
6985 	return -ENOMEM;
6986 }
6987 
6988 /*
6989  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6990  *				producer, consumer buffers for MFI adapters
6991  *
6992  * @instance -			Adapter soft instance
6993  *
6994  */
6995 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6996 {
6997 	kfree(instance->reply_map);
6998 	if (instance->adapter_type == MFI_SERIES) {
6999 		if (instance->producer)
7000 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7001 					    instance->producer,
7002 					    instance->producer_h);
7003 		if (instance->consumer)
7004 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7005 					    instance->consumer,
7006 					    instance->consumer_h);
7007 	} else {
7008 		megasas_free_fusion_context(instance);
7009 	}
7010 }
7011 
7012 /**
7013  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
7014  *					driver load time
7015  *
 * @instance:				Adapter soft instance
 * @return:				0 for SUCCESS
7018  */
7019 static inline
7020 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7021 {
7022 	struct pci_dev *pdev = instance->pdev;
7023 	struct fusion_context *fusion = instance->ctrl_context;
7024 
7025 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7026 			sizeof(struct megasas_evt_detail),
7027 			&instance->evt_detail_h, GFP_KERNEL);
7028 
7029 	if (!instance->evt_detail) {
7030 		dev_err(&instance->pdev->dev,
7031 			"Failed to allocate event detail buffer\n");
7032 		return -ENOMEM;
7033 	}
7034 
7035 	if (fusion) {
7036 		fusion->ioc_init_request =
7037 			dma_alloc_coherent(&pdev->dev,
7038 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7039 					   &fusion->ioc_init_request_phys,
7040 					   GFP_KERNEL);
7041 
7042 		if (!fusion->ioc_init_request) {
			dev_err(&pdev->dev,
				"Failed to allocate ioc init request buffer\n");
7045 			return -ENOMEM;
7046 		}
7047 
7048 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7049 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7050 				&instance->snapdump_prop_h, GFP_KERNEL);
7051 
7052 		if (!instance->snapdump_prop)
7053 			dev_err(&pdev->dev,
7054 				"Failed to allocate snapdump properties buffer\n");
7055 
7056 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7057 							HOST_DEVICE_LIST_SZ,
7058 							&instance->host_device_list_buf_h,
7059 							GFP_KERNEL);
7060 
7061 		if (!instance->host_device_list_buf) {
7062 			dev_err(&pdev->dev,
7063 				"Failed to allocate targetid list buffer\n");
7064 			return -ENOMEM;
7065 		}
7066 
7067 	}
7068 
7069 	instance->pd_list_buf =
7070 		dma_alloc_coherent(&pdev->dev,
7071 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7072 				     &instance->pd_list_buf_h, GFP_KERNEL);
7073 
7074 	if (!instance->pd_list_buf) {
7075 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7076 		return -ENOMEM;
7077 	}
7078 
7079 	instance->ctrl_info_buf =
7080 		dma_alloc_coherent(&pdev->dev,
7081 				     sizeof(struct megasas_ctrl_info),
7082 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7083 
7084 	if (!instance->ctrl_info_buf) {
7085 		dev_err(&pdev->dev,
7086 			"Failed to allocate controller info buffer\n");
7087 		return -ENOMEM;
7088 	}
7089 
7090 	instance->ld_list_buf =
7091 		dma_alloc_coherent(&pdev->dev,
7092 				     sizeof(struct MR_LD_LIST),
7093 				     &instance->ld_list_buf_h, GFP_KERNEL);
7094 
7095 	if (!instance->ld_list_buf) {
7096 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7097 		return -ENOMEM;
7098 	}
7099 
7100 	instance->ld_targetid_list_buf =
7101 		dma_alloc_coherent(&pdev->dev,
7102 				sizeof(struct MR_LD_TARGETID_LIST),
7103 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7104 
7105 	if (!instance->ld_targetid_list_buf) {
7106 		dev_err(&pdev->dev,
7107 			"Failed to allocate LD targetid list buffer\n");
7108 		return -ENOMEM;
7109 	}
7110 
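	/*
	 * The buffers below are optional: allocation failures are only
	 * logged, and in kdump (reset_devices) kernels they are skipped
	 * entirely to conserve memory.
	 */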
7111 	if (!reset_devices) {
7112 		instance->system_info_buf =
7113 			dma_alloc_coherent(&pdev->dev,
7114 					sizeof(struct MR_DRV_SYSTEM_INFO),
7115 					&instance->system_info_h, GFP_KERNEL);
7116 		instance->pd_info =
7117 			dma_alloc_coherent(&pdev->dev,
7118 					sizeof(struct MR_PD_INFO),
7119 					&instance->pd_info_h, GFP_KERNEL);
7120 		instance->tgt_prop =
7121 			dma_alloc_coherent(&pdev->dev,
7122 					sizeof(struct MR_TARGET_PROPERTIES),
7123 					&instance->tgt_prop_h, GFP_KERNEL);
7124 		instance->crash_dump_buf =
7125 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7126 					&instance->crash_dump_h, GFP_KERNEL);
7127 
7128 		if (!instance->system_info_buf)
7129 			dev_err(&instance->pdev->dev,
7130 				"Failed to allocate system info buffer\n");
7131 
7132 		if (!instance->pd_info)
7133 			dev_err(&instance->pdev->dev,
7134 				"Failed to allocate pd_info buffer\n");
7135 
7136 		if (!instance->tgt_prop)
7137 			dev_err(&instance->pdev->dev,
7138 				"Failed to allocate tgt_prop buffer\n");
7139 
7140 		if (!instance->crash_dump_buf)
7141 			dev_err(&instance->pdev->dev,
7142 				"Failed to allocate crash dump buffer\n");
7143 	}
7144 
7145 	return 0;
7146 }
7147 
7148 /*
7149  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7150  *					during driver load time
7151  *
 * @instance:				Adapter soft instance
7153  *
7154  */
7155 static inline
7156 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7157 {
7158 	struct pci_dev *pdev = instance->pdev;
7159 	struct fusion_context *fusion = instance->ctrl_context;
7160 
7161 	if (instance->evt_detail)
7162 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7163 				    instance->evt_detail,
7164 				    instance->evt_detail_h);
7165 
7166 	if (fusion && fusion->ioc_init_request)
7167 		dma_free_coherent(&pdev->dev,
7168 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7169 				  fusion->ioc_init_request,
7170 				  fusion->ioc_init_request_phys);
7171 
7172 	if (instance->pd_list_buf)
7173 		dma_free_coherent(&pdev->dev,
7174 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7175 				    instance->pd_list_buf,
7176 				    instance->pd_list_buf_h);
7177 
7178 	if (instance->ld_list_buf)
7179 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7180 				    instance->ld_list_buf,
7181 				    instance->ld_list_buf_h);
7182 
7183 	if (instance->ld_targetid_list_buf)
7184 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7185 				    instance->ld_targetid_list_buf,
7186 				    instance->ld_targetid_list_buf_h);
7187 
7188 	if (instance->ctrl_info_buf)
7189 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7190 				    instance->ctrl_info_buf,
7191 				    instance->ctrl_info_buf_h);
7192 
7193 	if (instance->system_info_buf)
7194 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7195 				    instance->system_info_buf,
7196 				    instance->system_info_h);
7197 
7198 	if (instance->pd_info)
7199 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7200 				    instance->pd_info, instance->pd_info_h);
7201 
7202 	if (instance->tgt_prop)
7203 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7204 				    instance->tgt_prop, instance->tgt_prop_h);
7205 
7206 	if (instance->crash_dump_buf)
7207 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7208 				    instance->crash_dump_buf,
7209 				    instance->crash_dump_h);
7210 
7211 	if (instance->snapdump_prop)
7212 		dma_free_coherent(&pdev->dev,
7213 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7214 				  instance->snapdump_prop,
7215 				  instance->snapdump_prop_h);
7216 
7217 	if (instance->host_device_list_buf)
7218 		dma_free_coherent(&pdev->dev,
7219 				  HOST_DEVICE_LIST_SZ,
7220 				  instance->host_device_list_buf,
7221 				  instance->host_device_list_buf_h);
7222 
7223 }
7224 
7225 /*
7226  * megasas_init_ctrl_params -		Initialize controller's instance
7227  *					parameters before FW init
7228  * @instance -				Adapter soft instance
7229  * @return -				void
7230  */
7231 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7232 {
7233 	instance->fw_crash_state = UNAVAILABLE;
7234 
7235 	megasas_poll_wait_aen = 0;
7236 	instance->issuepend_done = 1;
7237 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7238 
7239 	/*
7240 	 * Initialize locks and queues
7241 	 */
7242 	INIT_LIST_HEAD(&instance->cmd_pool);
7243 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7244 
7245 	atomic_set(&instance->fw_outstanding, 0);
7246 	atomic64_set(&instance->total_io_count, 0);
7247 
7248 	init_waitqueue_head(&instance->int_cmd_wait_q);
7249 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7250 
7251 	spin_lock_init(&instance->crashdump_lock);
7252 	spin_lock_init(&instance->mfi_pool_lock);
7253 	spin_lock_init(&instance->hba_lock);
7254 	spin_lock_init(&instance->stream_lock);
7255 	spin_lock_init(&instance->completion_lock);
7256 
7257 	mutex_init(&instance->reset_mutex);
7258 
7259 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7260 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7261 		instance->flag_ieee = 1;
7262 
7263 	megasas_dbg_lvl = 0;
7264 	instance->flag = 0;
7265 	instance->unload = 1;
7266 	instance->last_time = 0;
7267 	instance->disableOnlineCtrlReset = 1;
7268 	instance->UnevenSpanSupport = 0;
7269 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7270 	instance->msix_load_balance = false;
7271 
7272 	if (instance->adapter_type != MFI_SERIES)
7273 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7274 	else
7275 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7276 }
7277 
7278 /**
7279  * megasas_probe_one -	PCI hotplug entry point
7280  * @pdev:		PCI device structure
7281  * @id:			PCI ids of supported hotplugged adapter
7282  */
7283 static int megasas_probe_one(struct pci_dev *pdev,
7284 			     const struct pci_device_id *id)
7285 {
7286 	int rval, pos;
7287 	struct Scsi_Host *host;
7288 	struct megasas_instance *instance;
7289 	u16 control = 0;
7290 
7291 	switch (pdev->device) {
7292 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7293 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7294 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7295 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7296 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7297 		return 1;
7298 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7299 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7300 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7301 		break;
7302 	}
7303 
7304 	/* Reset MSI-X in the kdump kernel */
7305 	if (reset_devices) {
7306 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7307 		if (pos) {
7308 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7309 					     &control);
7310 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7311 				dev_info(&pdev->dev, "resetting MSI-X\n");
7312 				pci_write_config_word(pdev,
7313 						      pos + PCI_MSIX_FLAGS,
7314 						      control &
7315 						      ~PCI_MSIX_FLAGS_ENABLE);
7316 			}
7317 		}
7318 	}
7319 
7320 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7322 	 */
7323 	rval = pci_enable_device_mem(pdev);
7324 
	if (rval)
		return rval;
7328 
7329 	pci_set_master(pdev);
7330 
7331 	host = scsi_host_alloc(&megasas_template,
7332 			       sizeof(struct megasas_instance));
7333 
7334 	if (!host) {
7335 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7336 		goto fail_alloc_instance;
7337 	}
7338 
7339 	instance = (struct megasas_instance *)host->hostdata;
7340 	memset(instance, 0, sizeof(*instance));
7341 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7342 
7343 	/*
7344 	 * Initialize PCI related and misc parameters
7345 	 */
7346 	instance->pdev = pdev;
7347 	instance->host = host;
7348 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7349 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7350 
7351 	megasas_set_adapter_type(instance);
7352 
7353 	/*
7354 	 * Initialize MFI Firmware
7355 	 */
7356 	if (megasas_init_fw(instance))
7357 		goto fail_init_mfi;
7358 
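	/*
	 * SR-IOV VFs (requestorId != 0) need a DMA buffer to query LD/VF
	 * affiliation from firmware; allocation failure is logged but is
	 * not fatal to the probe.
	 */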
7359 	if (instance->requestorId) {
7360 		if (instance->PlasmaFW111) {
7361 			instance->vf_affiliation_111 =
7362 				dma_alloc_coherent(&pdev->dev,
7363 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7364 					&instance->vf_affiliation_111_h,
7365 					GFP_KERNEL);
7366 			if (!instance->vf_affiliation_111)
7367 				dev_warn(&pdev->dev, "Can't allocate "
7368 				       "memory for VF affiliation buffer\n");
7369 		} else {
7370 			instance->vf_affiliation =
7371 				dma_alloc_coherent(&pdev->dev,
7372 					(MAX_LOGICAL_DRIVES + 1) *
7373 					sizeof(struct MR_LD_VF_AFFILIATION),
7374 					&instance->vf_affiliation_h,
7375 					GFP_KERNEL);
7376 			if (!instance->vf_affiliation)
7377 				dev_warn(&pdev->dev, "Can't allocate "
7378 				       "memory for VF affiliation buffer\n");
7379 		}
7380 	}
7381 
7382 	/*
7383 	 * Store instance in PCI softstate
7384 	 */
7385 	pci_set_drvdata(pdev, instance);
7386 
7387 	/*
7388 	 * Add this controller to megasas_mgmt_info structure so that it
7389 	 * can be exported to management applications
7390 	 */
7391 	megasas_mgmt_info.count++;
7392 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7393 	megasas_mgmt_info.max_index++;
7394 
7395 	/*
7396 	 * Register with SCSI mid-layer
7397 	 */
7398 	if (megasas_io_attach(instance))
7399 		goto fail_io_attach;
7400 
7401 	instance->unload = 0;
7402 	/*
7403 	 * Trigger SCSI to scan our drives
7404 	 */
7405 	if (!instance->enable_fw_dev_list ||
7406 	    (instance->host_device_list_buf->count > 0))
7407 		scsi_scan_host(host);
7408 
7409 	/*
7410 	 * Initiate AEN (Asynchronous Event Notification)
7411 	 */
7412 	if (megasas_start_aen(instance)) {
7413 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7414 		goto fail_start_aen;
7415 	}
7416 
7417 	megasas_setup_debugfs(instance);
7418 
7419 	/* Get current SR-IOV LD/VF affiliation */
7420 	if (instance->requestorId)
7421 		megasas_get_ld_vf_affiliation(instance, 1);
7422 
7423 	return 0;
7424 
7425 fail_start_aen:
7426 fail_io_attach:
7427 	megasas_mgmt_info.count--;
7428 	megasas_mgmt_info.max_index--;
7429 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7430 
7431 	instance->instancet->disable_intr(instance);
7432 	megasas_destroy_irqs(instance);
7433 
7434 	if (instance->adapter_type != MFI_SERIES)
7435 		megasas_release_fusion(instance);
7436 	else
7437 		megasas_release_mfi(instance);
7438 	if (instance->msix_vectors)
7439 		pci_free_irq_vectors(instance->pdev);
7440 fail_init_mfi:
7441 	scsi_host_put(host);
7442 fail_alloc_instance:
7443 	pci_disable_device(pdev);
7444 
7445 	return -ENODEV;
7446 }
7447 
7448 /**
7449  * megasas_flush_cache -	Requests FW to flush all its caches
7450  * @instance:			Adapter soft state
7451  */
7452 static void megasas_flush_cache(struct megasas_instance *instance)
7453 {
7454 	struct megasas_cmd *cmd;
7455 	struct megasas_dcmd_frame *dcmd;
7456 
7457 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7458 		return;
7459 
7460 	cmd = megasas_get_cmd(instance);
7461 
7462 	if (!cmd)
7463 		return;
7464 
7465 	dcmd = &cmd->frame->dcmd;
7466 
7467 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7468 
7469 	dcmd->cmd = MFI_CMD_DCMD;
7470 	dcmd->cmd_status = 0x0;
7471 	dcmd->sge_count = 0;
7472 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7473 	dcmd->timeout = 0;
7474 	dcmd->pad_0 = 0;
7475 	dcmd->data_xfer_len = 0;
7476 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7477 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7478 
7479 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7480 			!= DCMD_SUCCESS) {
7481 		dev_err(&instance->pdev->dev,
7482 			"return from %s %d\n", __func__, __LINE__);
7483 		return;
7484 	}
7485 
7486 	megasas_return_cmd(instance, cmd);
7487 }
7488 
7489 /**
7490  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7491  * @instance:				Adapter soft state
7492  * @opcode:				Shutdown/Hibernate
7493  */
7494 static void megasas_shutdown_controller(struct megasas_instance *instance,
7495 					u32 opcode)
7496 {
7497 	struct megasas_cmd *cmd;
7498 	struct megasas_dcmd_frame *dcmd;
7499 
7500 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7501 		return;
7502 
7503 	cmd = megasas_get_cmd(instance);
7504 
7505 	if (!cmd)
7506 		return;
7507 
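	/*
	 * Abort any outstanding internal commands (AEN, RAID map update,
	 * JBOD sequence sync) before sending the shutdown/hibernate DCMD,
	 * so no driver-internal command is left pending in firmware.
	 */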
7508 	if (instance->aen_cmd)
7509 		megasas_issue_blocked_abort_cmd(instance,
7510 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7511 	if (instance->map_update_cmd)
7512 		megasas_issue_blocked_abort_cmd(instance,
7513 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7514 	if (instance->jbod_seq_cmd)
7515 		megasas_issue_blocked_abort_cmd(instance,
7516 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7517 
7518 	dcmd = &cmd->frame->dcmd;
7519 
7520 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7521 
7522 	dcmd->cmd = MFI_CMD_DCMD;
7523 	dcmd->cmd_status = 0x0;
7524 	dcmd->sge_count = 0;
7525 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7526 	dcmd->timeout = 0;
7527 	dcmd->pad_0 = 0;
7528 	dcmd->data_xfer_len = 0;
7529 	dcmd->opcode = cpu_to_le32(opcode);
7530 
7531 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7532 			!= DCMD_SUCCESS) {
7533 		dev_err(&instance->pdev->dev,
7534 			"return from %s %d\n", __func__, __LINE__);
7535 		return;
7536 	}
7537 
7538 	megasas_return_cmd(instance, cmd);
7539 }
7540 
7541 #ifdef CONFIG_PM
7542 /**
7543  * megasas_suspend -	driver suspend entry point
7544  * @pdev:		PCI device structure
7545  * @state:		PCI power state to suspend routine
7546  */
7547 static int
7548 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7549 {
7550 	struct megasas_instance *instance;
7551 
7552 	instance = pci_get_drvdata(pdev);
7553 
7554 	if (!instance)
7555 		return 0;
7556 
7557 	instance->unload = 1;
7558 
7559 	dev_info(&pdev->dev, "%s is called\n", __func__);
7560 
7561 	/* Shutdown SR-IOV heartbeat timer */
7562 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7563 		del_timer_sync(&instance->sriov_heartbeat_timer);
7564 
7565 	/* Stop the FW fault detection watchdog */
7566 	if (instance->adapter_type != MFI_SERIES)
7567 		megasas_fusion_stop_watchdog(instance);
7568 
7569 	megasas_flush_cache(instance);
7570 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7571 
7572 	/* cancel the delayed work if this work still in queue */
7573 	if (instance->ev != NULL) {
7574 		struct megasas_aen_event *ev = instance->ev;
7575 		cancel_delayed_work_sync(&ev->hotplug_work);
7576 		instance->ev = NULL;
7577 	}
7578 
7579 	tasklet_kill(&instance->isr_tasklet);
7580 
7581 	pci_set_drvdata(instance->pdev, instance);
7582 	instance->instancet->disable_intr(instance);
7583 
7584 	megasas_destroy_irqs(instance);
7585 
7586 	if (instance->msix_vectors)
7587 		pci_free_irq_vectors(instance->pdev);
7588 
7589 	pci_save_state(pdev);
7590 	pci_disable_device(pdev);
7591 
7592 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7593 
7594 	return 0;
7595 }
7596 
7597 /**
 * megasas_resume -	driver resume entry point
7599  * @pdev:               PCI device structure
7600  */
7601 static int
7602 megasas_resume(struct pci_dev *pdev)
7603 {
7604 	int rval;
7605 	struct Scsi_Host *host;
7606 	struct megasas_instance *instance;
7607 	u32 status_reg;
7608 
7609 	instance = pci_get_drvdata(pdev);
7610 
7611 	if (!instance)
7612 		return 0;
7613 
7614 	host = instance->host;
7615 	pci_set_power_state(pdev, PCI_D0);
7616 	pci_enable_wake(pdev, PCI_D0, 0);
7617 	pci_restore_state(pdev);
7618 
7619 	dev_info(&pdev->dev, "%s is called\n", __func__);
7620 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7622 	 */
7623 	rval = pci_enable_device_mem(pdev);
7624 
7625 	if (rval) {
7626 		dev_err(&pdev->dev, "Enable device failed\n");
7627 		return rval;
7628 	}
7629 
7630 	pci_set_master(pdev);
7631 
7632 	/*
7633 	 * We expect the FW state to be READY
7634 	 */
7635 
7636 	if (megasas_transition_to_ready(instance, 0)) {
7637 		dev_info(&instance->pdev->dev,
7638 			 "Failed to transition controller to ready from %s!\n",
7639 			 __func__);
7640 		if (instance->adapter_type != MFI_SERIES) {
7641 			status_reg =
7642 				instance->instancet->read_fw_status_reg(instance);
7643 			if (!(status_reg & MFI_RESET_ADAPTER) ||
7644 				((megasas_adp_reset_wait_for_ready
7645 				(instance, true, 0)) == FAILED))
7646 				goto fail_ready_state;
7647 		} else {
7648 			atomic_set(&instance->fw_reset_no_pci_access, 1);
7649 			instance->instancet->adp_reset
7650 				(instance, instance->reg_set);
7651 			atomic_set(&instance->fw_reset_no_pci_access, 0);
7652 
7653 			/* waiting for about 30 seconds before retry */
7654 			ssleep(30);
7655 
7656 			if (megasas_transition_to_ready(instance, 0))
7657 				goto fail_ready_state;
7658 		}
7659 
7660 		dev_info(&instance->pdev->dev,
7661 			 "FW restarted successfully from %s!\n",
7662 			 __func__);
7663 	}
7664 	if (megasas_set_dma_mask(instance))
7665 		goto fail_set_dma_mask;
7666 
7667 	/*
7668 	 * Initialize MFI Firmware
7669 	 */
7670 
7671 	atomic_set(&instance->fw_outstanding, 0);
7672 	atomic_set(&instance->ldio_outstanding, 0);
7673 
7674 	/* Now re-enable MSI-X */
7675 	if (instance->msix_vectors)
7676 		megasas_alloc_irq_vectors(instance);
7677 
7678 	if (!instance->msix_vectors) {
7679 		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7680 					     PCI_IRQ_LEGACY);
7681 		if (rval < 0)
7682 			goto fail_reenable_msix;
7683 	}
7684 
7685 	megasas_setup_reply_map(instance);
7686 
7687 	if (instance->adapter_type != MFI_SERIES) {
7688 		megasas_reset_reply_desc(instance);
7689 		if (megasas_ioc_init_fusion(instance)) {
7690 			megasas_free_cmds(instance);
7691 			megasas_free_cmds_fusion(instance);
7692 			goto fail_init_mfi;
7693 		}
7694 		if (!megasas_get_map_info(instance))
7695 			megasas_sync_map_info(instance);
7696 	} else {
7697 		*instance->producer = 0;
7698 		*instance->consumer = 0;
7699 		if (megasas_issue_init_mfi(instance))
7700 			goto fail_init_mfi;
7701 	}
7702 
7703 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7704 		goto fail_init_mfi;
7705 
7706 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7707 		     (unsigned long)instance);
7708 
7709 	if (instance->msix_vectors ?
7710 			megasas_setup_irqs_msix(instance, 0) :
7711 			megasas_setup_irqs_ioapic(instance))
7712 		goto fail_init_mfi;
7713 
7714 	if (instance->adapter_type != MFI_SERIES)
7715 		megasas_setup_irq_poll(instance);
7716 
7717 	/* Re-launch SR-IOV heartbeat timer */
7718 	if (instance->requestorId) {
7719 		if (!megasas_sriov_start_heartbeat(instance, 0))
7720 			megasas_start_timer(instance);
7721 		else {
7722 			instance->skip_heartbeat_timer_del = 1;
7723 			goto fail_init_mfi;
7724 		}
7725 	}
7726 
7727 	instance->instancet->enable_intr(instance);
7728 	megasas_setup_jbod_map(instance);
7729 	instance->unload = 0;
7730 
7731 	/*
7732 	 * Initiate AEN (Asynchronous Event Notification)
7733 	 */
7734 	if (megasas_start_aen(instance))
7735 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7736 
7737 	/* Re-launch FW fault watchdog */
7738 	if (instance->adapter_type != MFI_SERIES)
7739 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7740 			goto fail_start_watchdog;
7741 
7742 	return 0;
7743 
7744 fail_start_watchdog:
7745 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7746 		del_timer_sync(&instance->sriov_heartbeat_timer);
7747 fail_init_mfi:
7748 	megasas_free_ctrl_dma_buffers(instance);
7749 	megasas_free_ctrl_mem(instance);
7750 	scsi_host_put(host);
7751 
7752 fail_reenable_msix:
7753 fail_set_dma_mask:
7754 fail_ready_state:
7755 
7756 	pci_disable_device(pdev);
7757 
7758 	return -ENODEV;
7759 }
7760 #else
7761 #define megasas_suspend	NULL
7762 #define megasas_resume	NULL
7763 #endif
7764 
7765 static inline int
7766 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7767 {
7768 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7769 	int i;
7770 	u8 adp_state;
7771 
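	/*
	 * Poll adprecovery once a second, for up to twice the reset wait
	 * time, until the HBA becomes operational or hits a critical error.
	 */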
7772 	for (i = 0; i < wait_time; i++) {
7773 		adp_state = atomic_read(&instance->adprecovery);
7774 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7775 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7776 			break;
7777 
7778 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7779 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7780 
7781 		msleep(1000);
7782 	}
7783 
7784 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7785 		dev_info(&instance->pdev->dev,
7786 			 "%s HBA failed to become operational, adp_state %d\n",
7787 			 __func__, adp_state);
7788 		return 1;
7789 	}
7790 
7791 	return 0;
7792 }
7793 
7794 /**
7795  * megasas_detach_one -	PCI hot"un"plug entry point
7796  * @pdev:		PCI device structure
7797  */
7798 static void megasas_detach_one(struct pci_dev *pdev)
7799 {
7800 	int i;
7801 	struct Scsi_Host *host;
7802 	struct megasas_instance *instance;
7803 	struct fusion_context *fusion;
7804 	u32 pd_seq_map_sz;
7805 
7806 	instance = pci_get_drvdata(pdev);
7807 
7808 	if (!instance)
7809 		return;
7810 
7811 	host = instance->host;
7812 	fusion = instance->ctrl_context;
7813 
7814 	/* Shutdown SR-IOV heartbeat timer */
7815 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7816 		del_timer_sync(&instance->sriov_heartbeat_timer);
7817 
7818 	/* Stop the FW fault detection watchdog */
7819 	if (instance->adapter_type != MFI_SERIES)
7820 		megasas_fusion_stop_watchdog(instance);
7821 
7822 	if (instance->fw_crash_state != UNAVAILABLE)
7823 		megasas_free_host_crash_buffer(instance);
7824 	scsi_remove_host(instance->host);
7825 	instance->unload = 1;
7826 
7827 	if (megasas_wait_for_adapter_operational(instance))
7828 		goto skip_firing_dcmds;
7829 
7830 	megasas_flush_cache(instance);
7831 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7832 
7833 skip_firing_dcmds:
7834 	/* cancel the delayed work if this work still in queue*/
7835 	if (instance->ev != NULL) {
7836 		struct megasas_aen_event *ev = instance->ev;
7837 		cancel_delayed_work_sync(&ev->hotplug_work);
7838 		instance->ev = NULL;
7839 	}
7840 
7841 	/* cancel all wait events */
7842 	wake_up_all(&instance->int_cmd_wait_q);
7843 
7844 	tasklet_kill(&instance->isr_tasklet);
7845 
7846 	/*
7847 	 * Take the instance off the instance array. Note that we will not
7848 	 * decrement the max_index. We let this array be sparse array
7849 	 */
7850 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7851 		if (megasas_mgmt_info.instance[i] == instance) {
7852 			megasas_mgmt_info.count--;
7853 			megasas_mgmt_info.instance[i] = NULL;
7854 
7855 			break;
7856 		}
7857 	}
7858 
7859 	instance->instancet->disable_intr(instance);
7860 
7861 	megasas_destroy_irqs(instance);
7862 
7863 	if (instance->msix_vectors)
7864 		pci_free_irq_vectors(instance->pdev);
7865 
7866 	if (instance->adapter_type >= VENTURA_SERIES) {
7867 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7868 			kfree(fusion->stream_detect_by_ld[i]);
7869 		kfree(fusion->stream_detect_by_ld);
7870 		fusion->stream_detect_by_ld = NULL;
7871 	}
7872 
7873 
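	/*
	 * Fusion adapters: release the fusion frame pools and free the RAID
	 * map, driver map and PD sequence-sync DMA buffers.  MFI adapters
	 * only need their MFI frame pool released.
	 */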
7874 	if (instance->adapter_type != MFI_SERIES) {
7875 		megasas_release_fusion(instance);
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
				(sizeof(struct MR_PD_CFG_SEQ) *
				 (MAX_PHYSICAL_DEVICES - 1));
7879 		for (i = 0; i < 2 ; i++) {
7880 			if (fusion->ld_map[i])
7881 				dma_free_coherent(&instance->pdev->dev,
7882 						  fusion->max_map_sz,
7883 						  fusion->ld_map[i],
7884 						  fusion->ld_map_phys[i]);
7885 			if (fusion->ld_drv_map[i]) {
7886 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7887 					vfree(fusion->ld_drv_map[i]);
7888 				else
7889 					free_pages((ulong)fusion->ld_drv_map[i],
7890 						   fusion->drv_map_pages);
7891 			}
7892 
7893 			if (fusion->pd_seq_sync[i])
7894 				dma_free_coherent(&instance->pdev->dev,
7895 					pd_seq_map_sz,
7896 					fusion->pd_seq_sync[i],
7897 					fusion->pd_seq_phys[i]);
7898 		}
7899 	} else {
7900 		megasas_release_mfi(instance);
7901 	}
7902 
7903 	if (instance->vf_affiliation)
7904 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7905 				    sizeof(struct MR_LD_VF_AFFILIATION),
7906 				    instance->vf_affiliation,
7907 				    instance->vf_affiliation_h);
7908 
7909 	if (instance->vf_affiliation_111)
7910 		dma_free_coherent(&pdev->dev,
7911 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7912 				    instance->vf_affiliation_111,
7913 				    instance->vf_affiliation_111_h);
7914 
7915 	if (instance->hb_host_mem)
7916 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7917 				    instance->hb_host_mem,
7918 				    instance->hb_host_mem_h);
7919 
7920 	megasas_free_ctrl_dma_buffers(instance);
7921 
7922 	megasas_free_ctrl_mem(instance);
7923 
7924 	megasas_destroy_debugfs(instance);
7925 
7926 	scsi_host_put(host);
7927 
7928 	pci_disable_device(pdev);
7929 }
7930 
7931 /**
7932  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
7934  */
7935 static void megasas_shutdown(struct pci_dev *pdev)
7936 {
7937 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7938 
7939 	if (!instance)
7940 		return;
7941 
7942 	instance->unload = 1;
7943 
7944 	if (megasas_wait_for_adapter_operational(instance))
7945 		goto skip_firing_dcmds;
7946 
7947 	megasas_flush_cache(instance);
7948 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7949 
7950 skip_firing_dcmds:
7951 	instance->instancet->disable_intr(instance);
7952 	megasas_destroy_irqs(instance);
7953 
7954 	if (instance->msix_vectors)
7955 		pci_free_irq_vectors(instance->pdev);
7956 }
7957 
7958 /**
7959  * megasas_mgmt_open -	char node "open" entry point
7960  */
7961 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7962 {
7963 	/*
7964 	 * Allow only those users with admin rights
7965 	 */
7966 	if (!capable(CAP_SYS_ADMIN))
7967 		return -EACCES;
7968 
7969 	return 0;
7970 }
7971 
7972 /**
7973  * megasas_mgmt_fasync -	Async notifier registration from applications
7974  *
7975  * This function adds the calling process to a driver global queue. When an
7976  * event occurs, SIGIO will be sent to all processes in this queue.
7977  */
7978 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7979 {
7980 	int rc;
7981 
7982 	mutex_lock(&megasas_async_queue_mutex);
7983 
7984 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7985 
7986 	mutex_unlock(&megasas_async_queue_mutex);
7987 
7988 	if (rc >= 0) {
7989 		/* For sanity check when we get ioctl */
7990 		filep->private_data = filep;
7991 		return 0;
7992 	}
7993 
7994 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7995 
7996 	return rc;
7997 }
7998 
7999 /**
 * megasas_mgmt_poll -	char node "poll" entry point
 */
8002 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8003 {
8004 	__poll_t mask;
8005 	unsigned long flags;
8006 
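	/*
	 * Report the node readable when an AEN has fired since the last
	 * poll; the flag is consumed under poll_aen_lock.
	 */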
8007 	poll_wait(file, &megasas_poll_wait, wait);
8008 	spin_lock_irqsave(&poll_aen_lock, flags);
8009 	if (megasas_poll_wait_aen)
8010 		mask = (EPOLLIN | EPOLLRDNORM);
8011 	else
8012 		mask = 0;
8013 	megasas_poll_wait_aen = 0;
8014 	spin_unlock_irqrestore(&poll_aen_lock, flags);
8015 	return mask;
8016 }
8017 
8018 /*
8019  * megasas_set_crash_dump_params_ioctl:
8020  *		Send CRASH_DUMP_MODE DCMD to all controllers
8021  * @cmd:	MFI command frame
8022  */
8023 
8024 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8025 {
8026 	struct megasas_instance *local_instance;
8027 	int i, error = 0;
8028 	int crash_support;
8029 
8030 	crash_support = cmd->frame->dcmd.mbox.w[0];
8031 
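	/*
	 * Broadcast the requested crash dump mode to every controller that
	 * supports it; the status of the last attempted update is returned
	 * to the caller.
	 */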
8032 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8033 		local_instance = megasas_mgmt_info.instance[i];
8034 		if (local_instance && local_instance->crash_dump_drv_support) {
8035 			if ((atomic_read(&local_instance->adprecovery) ==
8036 				MEGASAS_HBA_OPERATIONAL) &&
8037 				!megasas_set_crash_dump_params(local_instance,
8038 					crash_support)) {
8039 				local_instance->crash_dump_app_support =
8040 					crash_support;
8041 				dev_info(&local_instance->pdev->dev,
8042 					"Application firmware crash "
8043 					"dump mode set success\n");
8044 				error = 0;
8045 			} else {
8046 				dev_info(&local_instance->pdev->dev,
8047 					"Application firmware crash "
8048 					"dump mode set failed\n");
8049 				error = -1;
8050 			}
8051 		}
8052 	}
8053 	return error;
8054 }
8055 
8056 /**
8057  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8058  * @instance:			Adapter soft state
 * @user_ioc:			User's ioctl packet (user space address)
 * @ioc:			Kernel copy of the ioctl packet
8060  */
8061 static int
8062 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8063 		      struct megasas_iocpacket __user * user_ioc,
8064 		      struct megasas_iocpacket *ioc)
8065 {
8066 	struct megasas_sge64 *kern_sge64 = NULL;
8067 	struct megasas_sge32 *kern_sge32 = NULL;
8068 	struct megasas_cmd *cmd;
8069 	void *kbuff_arr[MAX_IOCTL_SGE];
8070 	dma_addr_t buf_handle = 0;
8071 	int error = 0, i;
8072 	void *sense = NULL;
8073 	dma_addr_t sense_handle;
8074 	unsigned long *sense_ptr;
8075 	u32 opcode = 0;
8076 	int ret = DCMD_SUCCESS;
8077 
8078 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8079 
8080 	if (ioc->sge_count > MAX_IOCTL_SGE) {
8081 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
8082 		       ioc->sge_count, MAX_IOCTL_SGE);
8083 		return -EINVAL;
8084 	}
8085 
8086 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8087 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8088 	    !instance->support_nvme_passthru) ||
8089 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8090 	    !instance->support_pci_lane_margining)) {
8091 		dev_err(&instance->pdev->dev,
8092 			"Received invalid ioctl command 0x%x\n",
8093 			ioc->frame.hdr.cmd);
8094 		return -ENOTSUPP;
8095 	}
8096 
8097 	cmd = megasas_get_cmd(instance);
8098 	if (!cmd) {
8099 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8100 		return -ENOMEM;
8101 	}
8102 
8103 	/*
8104 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8105 	 * frames into our cmd's frames. cmd->frame's context will get
8106 	 * overwritten when we copy from user's frames. So set that value
8107 	 * alone separately
8108 	 */
8109 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8110 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8111 	cmd->frame->hdr.pad_0 = 0;
8112 
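	/*
	 * Ioctl frames always use the legacy MFI SGL format (no IEEE SGLs);
	 * 64 vs 32 bit SGL and sense addressing is selected to match the
	 * coherent DMA mask negotiated at init time.
	 */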
8113 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8114 
8115 	if (instance->consistent_mask_64bit)
8116 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8117 				       MFI_FRAME_SENSE64));
8118 	else
8119 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8120 					       MFI_FRAME_SENSE64));
8121 
8122 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8123 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8124 
8125 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8126 		mutex_lock(&instance->reset_mutex);
8127 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8128 			megasas_return_cmd(instance, cmd);
8129 			mutex_unlock(&instance->reset_mutex);
8130 			return -1;
8131 		}
8132 		mutex_unlock(&instance->reset_mutex);
8133 	}
8134 
8135 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8136 		error = megasas_set_crash_dump_params_ioctl(cmd);
8137 		megasas_return_cmd(instance, cmd);
8138 		return error;
8139 	}
8140 
8141 	/*
8142 	 * The management interface between applications and the fw uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes
	 * etc. are accomplished through different kinds of MFI frames. The
8145 	 * driver needs to care only about substituting user buffers with
8146 	 * kernel buffers in SGLs. The location of SGL is embedded in the
8147 	 * struct iocpacket itself.
8148 	 */
8149 	if (instance->consistent_mask_64bit)
8150 		kern_sge64 = (struct megasas_sge64 *)
8151 			((unsigned long)cmd->frame + ioc->sgl_off);
8152 	else
8153 		kern_sge32 = (struct megasas_sge32 *)
8154 			((unsigned long)cmd->frame + ioc->sgl_off);
8155 
8156 	/*
8157 	 * For each user buffer, create a mirror buffer and copy in
8158 	 */
8159 	for (i = 0; i < ioc->sge_count; i++) {
8160 		if (!ioc->sgl[i].iov_len)
8161 			continue;
8162 
8163 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8164 						    ioc->sgl[i].iov_len,
8165 						    &buf_handle, GFP_KERNEL);
8166 		if (!kbuff_arr[i]) {
8167 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8168 			       "kernel SGL buffer for IOCTL\n");
8169 			error = -ENOMEM;
8170 			goto out;
8171 		}
8172 
8173 		/*
8174 		 * We don't change the dma_coherent_mask, so
8175 		 * dma_alloc_coherent only returns 32bit addresses
8176 		 */
8177 		if (instance->consistent_mask_64bit) {
8178 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8179 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8180 		} else {
8181 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8182 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8183 		}
8184 
8185 		/*
8186 		 * We created a kernel buffer corresponding to the
8187 		 * user buffer. Now copy in from the user buffer
8188 		 */
8189 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8190 				   (u32) (ioc->sgl[i].iov_len))) {
8191 			error = -EFAULT;
8192 			goto out;
8193 		}
8194 	}
8195 
8196 	if (ioc->sense_len) {
8197 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8198 					     &sense_handle, GFP_KERNEL);
8199 		if (!sense) {
8200 			error = -ENOMEM;
8201 			goto out;
8202 		}
8203 
8204 		sense_ptr =
8205 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8206 		if (instance->consistent_mask_64bit)
8207 			*sense_ptr = cpu_to_le64(sense_handle);
8208 		else
8209 			*sense_ptr = cpu_to_le32(sense_handle);
8210 	}
8211 
8212 	/*
8213 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8214 	 * cmd to the SCSI mid-layer
8215 	 */
8216 	cmd->sync_cmd = 1;
8217 
8218 	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8219 	switch (ret) {
8220 	case DCMD_INIT:
8221 	case DCMD_BUSY:
8222 		cmd->sync_cmd = 0;
8223 		dev_err(&instance->pdev->dev,
8224 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8225 			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8226 			 cmd->cmd_status_drv);
		error = -EBUSY;
		goto out;
8229 	}
8230 
8231 	cmd->sync_cmd = 0;
8232 
8233 	if (instance->unload == 1) {
8234 		dev_info(&instance->pdev->dev, "Driver unload is in progress "
8235 			"don't submit data to application\n");
8236 		goto out;
8237 	}
8238 	/*
8239 	 * copy out the kernel buffers to user buffers
8240 	 */
8241 	for (i = 0; i < ioc->sge_count; i++) {
8242 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8243 				 ioc->sgl[i].iov_len)) {
8244 			error = -EFAULT;
8245 			goto out;
8246 		}
8247 	}
8248 
8249 	/*
8250 	 * copy out the sense
8251 	 */
8252 	if (ioc->sense_len) {
8253 		/*
8254 		 * sense_ptr points to the location that has the user
8255 		 * sense buffer address
8256 		 */
8257 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8258 				ioc->sense_off);
8259 
8260 		if (copy_to_user((void __user *)((unsigned long)
8261 				 get_unaligned((unsigned long *)sense_ptr)),
8262 				 sense, ioc->sense_len)) {
8263 			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8264 					"sense data\n");
8265 			error = -EFAULT;
8266 			goto out;
8267 		}
8268 	}
8269 
8270 	/*
8271 	 * copy the status codes returned by the fw
8272 	 */
8273 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8274 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8275 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8276 		error = -EFAULT;
8277 	}
8278 
8279 out:
8280 	if (sense) {
8281 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8282 				    sense, sense_handle);
8283 	}
8284 
8285 	for (i = 0; i < ioc->sge_count; i++) {
8286 		if (kbuff_arr[i]) {
8287 			if (instance->consistent_mask_64bit)
8288 				dma_free_coherent(&instance->pdev->dev,
8289 					le32_to_cpu(kern_sge64[i].length),
8290 					kbuff_arr[i],
8291 					le64_to_cpu(kern_sge64[i].phys_addr));
8292 			else
8293 				dma_free_coherent(&instance->pdev->dev,
8294 					le32_to_cpu(kern_sge32[i].length),
8295 					kbuff_arr[i],
8296 					le32_to_cpu(kern_sge32[i].phys_addr));
8297 			kbuff_arr[i] = NULL;
8298 		}
8299 	}
8300 
8301 	megasas_return_cmd(instance, cmd);
8302 	return error;
8303 }
8304 
8305 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8306 {
8307 	struct megasas_iocpacket __user *user_ioc =
8308 	    (struct megasas_iocpacket __user *)arg;
8309 	struct megasas_iocpacket *ioc;
8310 	struct megasas_instance *instance;
8311 	int error;
8312 
8313 	ioc = memdup_user(user_ioc, sizeof(*ioc));
8314 	if (IS_ERR(ioc))
8315 		return PTR_ERR(ioc);
8316 
8317 	instance = megasas_lookup_instance(ioc->host_no);
8318 	if (!instance) {
8319 		error = -ENODEV;
8320 		goto out_kfree_ioc;
8321 	}
8322 
8323 	/* Block ioctls in VF mode */
8324 	if (instance->requestorId && !allow_vf_ioctls) {
8325 		error = -ENODEV;
8326 		goto out_kfree_ioc;
8327 	}
8328 
8329 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8330 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8331 		error = -ENODEV;
8332 		goto out_kfree_ioc;
8333 	}
8334 
8335 	if (instance->unload == 1) {
8336 		error = -ENODEV;
8337 		goto out_kfree_ioc;
8338 	}
8339 
8340 	if (down_interruptible(&instance->ioctl_sem)) {
8341 		error = -ERESTARTSYS;
8342 		goto out_kfree_ioc;
8343 	}
8344 
8345 	if  (megasas_wait_for_adapter_operational(instance)) {
8346 		error = -ENODEV;
8347 		goto out_up;
8348 	}
8349 
8350 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8351 out_up:
8352 	up(&instance->ioctl_sem);
8353 
8354 out_kfree_ioc:
8355 	kfree(ioc);
8356 	return error;
8357 }
8358 
8359 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8360 {
8361 	struct megasas_instance *instance;
8362 	struct megasas_aen aen;
8363 	int error;
8364 
8365 	if (file->private_data != file) {
8366 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8367 		       "called first\n");
8368 		return -EINVAL;
8369 	}
8370 
8371 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8372 		return -EFAULT;
8373 
8374 	instance = megasas_lookup_instance(aen.host_no);
8375 
8376 	if (!instance)
8377 		return -ENODEV;
8378 
8379 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8380 		return -ENODEV;
8381 	}
8382 
8383 	if (instance->unload == 1) {
8384 		return -ENODEV;
8385 	}
8386 
8387 	if  (megasas_wait_for_adapter_operational(instance))
8388 		return -ENODEV;
8389 
8390 	mutex_lock(&instance->reset_mutex);
8391 	error = megasas_register_aen(instance, aen.seq_num,
8392 				     aen.class_locale_word);
8393 	mutex_unlock(&instance->reset_mutex);
8394 	return error;
8395 }
8396 
8397 /**
8398  * megasas_mgmt_ioctl -	char node ioctl entry point
8399  */
8400 static long
8401 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8402 {
8403 	switch (cmd) {
8404 	case MEGASAS_IOC_FIRMWARE:
8405 		return megasas_mgmt_ioctl_fw(file, arg);
8406 
8407 	case MEGASAS_IOC_GET_AEN:
8408 		return megasas_mgmt_ioctl_aen(file, arg);
8409 	}
8410 
8411 	return -ENOTTY;
8412 }
8413 
8414 #ifdef CONFIG_COMPAT
8415 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8416 {
8417 	struct compat_megasas_iocpacket __user *cioc =
8418 	    (struct compat_megasas_iocpacket __user *)arg;
8419 	struct megasas_iocpacket __user *ioc =
8420 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8421 	int i;
8422 	int error = 0;
8423 	compat_uptr_t ptr;
8424 	u32 local_sense_off;
8425 	u32 local_sense_len;
8426 	u32 user_sense_off;
8427 
8428 	if (clear_user(ioc, sizeof(*ioc)))
8429 		return -EFAULT;
8430 
8431 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8432 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8433 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8434 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8435 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8436 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8437 		return -EFAULT;
8438 
8439 	/*
8440 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8441 	 * sense_len is not null, so prepare the 64bit value under
8442 	 * the same condition.
8443 	 */
8444 	if (get_user(local_sense_off, &ioc->sense_off) ||
8445 		get_user(local_sense_len, &ioc->sense_len) ||
8446 		get_user(user_sense_off, &cioc->sense_off))
8447 		return -EFAULT;
8448 
8449 	if (local_sense_off != user_sense_off)
8450 		return -EINVAL;
8451 
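	/*
	 * The sense buffer pointer embedded in the frame is a 32-bit compat
	 * pointer; widen it while copying into the native iocpacket so the
	 * 64-bit ioctl path can use it directly.
	 */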
8452 	if (local_sense_len) {
8453 		void __user **sense_ioc_ptr =
8454 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8455 		compat_uptr_t *sense_cioc_ptr =
8456 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8457 		if (get_user(ptr, sense_cioc_ptr) ||
8458 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8459 			return -EFAULT;
8460 	}
8461 
8462 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8463 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8464 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8465 		    copy_in_user(&ioc->sgl[i].iov_len,
8466 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8467 			return -EFAULT;
8468 	}
8469 
8470 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8471 
8472 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8473 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8474 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8475 		return -EFAULT;
8476 	}
8477 	return error;
8478 }
8479 
8480 static long
8481 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8482 			  unsigned long arg)
8483 {
8484 	switch (cmd) {
8485 	case MEGASAS_IOC_FIRMWARE32:
8486 		return megasas_mgmt_compat_ioctl_fw(file, arg);
8487 	case MEGASAS_IOC_GET_AEN:
8488 		return megasas_mgmt_ioctl_aen(file, arg);
8489 	}
8490 
8491 	return -ENOTTY;
8492 }
8493 #endif
8494 
8495 /*
8496  * File operations structure for management interface
8497  */
8498 static const struct file_operations megasas_mgmt_fops = {
8499 	.owner = THIS_MODULE,
8500 	.open = megasas_mgmt_open,
8501 	.fasync = megasas_mgmt_fasync,
8502 	.unlocked_ioctl = megasas_mgmt_ioctl,
8503 	.poll = megasas_mgmt_poll,
8504 #ifdef CONFIG_COMPAT
8505 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8506 #endif
8507 	.llseek = noop_llseek,
8508 };
8509 
8510 /*
8511  * PCI hotplug support registration structure
8512  */
8513 static struct pci_driver megasas_pci_driver = {
8514 
8515 	.name = "megaraid_sas",
8516 	.id_table = megasas_pci_table,
8517 	.probe = megasas_probe_one,
8518 	.remove = megasas_detach_one,
8519 	.suspend = megasas_suspend,
8520 	.resume = megasas_resume,
8521 	.shutdown = megasas_shutdown,
8522 };
8523 
8524 /*
8525  * Sysfs driver attributes
8526  */
8527 static ssize_t version_show(struct device_driver *dd, char *buf)
8528 {
8529 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8530 			MEGASAS_VERSION);
8531 }
8532 static DRIVER_ATTR_RO(version);
8533 
8534 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8535 {
8536 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8537 		MEGASAS_RELDATE);
8538 }
8539 static DRIVER_ATTR_RO(release_date);
8540 
8541 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8542 {
8543 	return sprintf(buf, "%u\n", support_poll_for_event);
8544 }
8545 static DRIVER_ATTR_RO(support_poll_for_event);
8546 
8547 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8548 {
8549 	return sprintf(buf, "%u\n", support_device_change);
8550 }
8551 static DRIVER_ATTR_RO(support_device_change);
8552 
8553 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8554 {
8555 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8556 }
8557 
8558 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8559 			     size_t count)
8560 {
8561 	int retval = count;
8562 
8563 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8564 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8565 		retval = -EINVAL;
8566 	}
8567 	return retval;
8568 }
8569 static DRIVER_ATTR_RW(dbg_lvl);
8570 
8571 static ssize_t
8572 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8573 {
8574 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8575 }
8576 
8577 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8578 
8579 static ssize_t
8580 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8581 {
8582 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8583 }
8584 
8585 static DRIVER_ATTR_RO(support_pci_lane_margining);
8586 
8587 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8588 {
8589 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8590 	scsi_remove_device(sdev);
8591 	scsi_device_put(sdev);
8592 }
8593 
8594 /**
8595  * megasas_update_device_list -	Update the PD and LD device list from FW
8596  *				after an AEN event notification
8597  * @instance:			Adapter soft state
8598  * @event_type:			Indicates type of event (PD or LD event)
8599  *
8600  * @return:			Success or failure
8601  *
8602  * Issue DCMDs to Firmware to update the internal device list in driver.
8603  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8604  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8605  */
8606 static
8607 int megasas_update_device_list(struct megasas_instance *instance,
8608 			       int event_type)
8609 {
8610 	int dcmd_ret = DCMD_SUCCESS;
8611 
8612 	if (instance->enable_fw_dev_list) {
8613 		dcmd_ret = megasas_host_device_list_query(instance, false);
8614 		if (dcmd_ret != DCMD_SUCCESS)
8615 			goto out;
8616 	} else {
8617 		if (event_type & SCAN_PD_CHANNEL) {
8618 			dcmd_ret = megasas_get_pd_list(instance);
8619 
8620 			if (dcmd_ret != DCMD_SUCCESS)
8621 				goto out;
8622 		}
8623 
8624 		if (event_type & SCAN_VD_CHANNEL) {
8625 			if (!instance->requestorId ||
8626 			    (instance->requestorId &&
8627 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8628 				dcmd_ret = megasas_ld_list_query(instance,
8629 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8630 				if (dcmd_ret != DCMD_SUCCESS)
8631 					goto out;
8632 			}
8633 		}
8634 	}
8635 
8636 out:
8637 	return dcmd_ret;
8638 }
8639 
8640 /**
8641  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8642  *				after an AEN event notification
8643  * @instance:			Adapter soft state
8644  * @scan_type:			Indicates type of devices (PD/LD) to add
8645  * @return			void
8646  */
8647 static
8648 void megasas_add_remove_devices(struct megasas_instance *instance,
8649 				int scan_type)
8650 {
8651 	int i, j;
8652 	u16 pd_index = 0;
8653 	u16 ld_index = 0;
8654 	u16 channel = 0, id = 0;
8655 	struct Scsi_Host *host;
8656 	struct scsi_device *sdev1;
8657 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8658 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8659 
8660 	host = instance->host;
8661 
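	/*
	 * When FW exposes the consolidated host device list, derive the SCSI
	 * channel/ID for each entry from its target ID: system PDs map onto
	 * the PD channels, logical drives onto the VD channels above them.
	 */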
8662 	if (instance->enable_fw_dev_list) {
8663 		targetid_list = instance->host_device_list_buf;
8664 		for (i = 0; i < targetid_list->count; i++) {
8665 			targetid_entry = &targetid_list->host_device_list[i];
8666 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8667 				channel = le16_to_cpu(targetid_entry->target_id) /
8668 						MEGASAS_MAX_DEV_PER_CHANNEL;
8669 				id = le16_to_cpu(targetid_entry->target_id) %
8670 						MEGASAS_MAX_DEV_PER_CHANNEL;
8671 			} else {
8672 				channel = MEGASAS_MAX_PD_CHANNELS +
8673 					  (le16_to_cpu(targetid_entry->target_id) /
8674 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8675 				id = le16_to_cpu(targetid_entry->target_id) %
8676 						MEGASAS_MAX_DEV_PER_CHANNEL;
8677 			}
8678 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8679 			if (!sdev1) {
8680 				scsi_add_device(host, channel, id, 0);
8681 			} else {
8682 				scsi_device_put(sdev1);
8683 			}
8684 		}
8685 	}
8686 
8687 	if (scan_type & SCAN_PD_CHANNEL) {
8688 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8689 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8690 				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8691 				sdev1 = scsi_device_lookup(host, i, j, 0);
8692 				if (instance->pd_list[pd_index].driveState ==
8693 							MR_PD_STATE_SYSTEM) {
8694 					if (!sdev1)
8695 						scsi_add_device(host, i, j, 0);
8696 					else
8697 						scsi_device_put(sdev1);
8698 				} else {
8699 					if (sdev1)
8700 						megasas_remove_scsi_device(sdev1);
8701 				}
8702 			}
8703 		}
8704 	}
8705 
	if (scan_type & SCAN_VD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host,
						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}
}

static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	int event_type = 0;
	u32 seq_num;
	int error;
	u8  dcmd_ret = DCMD_SUCCESS;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}

	/* Don't run the event workqueue thread if OCR is running */
	mutex_lock(&instance->reset_mutex);

	instance->ev = NULL;
	if (instance->evt_detail) {
		megasas_decode_evt(instance);

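		/*
		 * Map the decoded AEN event code to the channel(s) that need
		 * to be rescanned (physical, virtual, or both).
		 */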
		switch (le32_to_cpu(instance->evt_detail->code)) {

		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			event_type = SCAN_PD_CHANNEL;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
		case MR_EVT_LD_CREATED:
			event_type = SCAN_VD_CHANNEL;
			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
				instance->host->host_no);
			break;

		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			if (dcmd_ret == DCMD_SUCCESS &&
			    instance->snapdump_wait_time) {
				megasas_get_snapdump_properties(instance);
				dev_info(&instance->pdev->dev,
					 "Snap dump wait time\t: %d\n",
					 instance->snapdump_wait_time);
			}
			break;
		default:
			event_type = 0;
			break;
		}
	} else {
		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
		mutex_unlock(&instance->reset_mutex);
		kfree(ev);
		return;
	}

	if (event_type)
		dcmd_ret = megasas_update_device_list(instance, event_type);

	mutex_unlock(&instance->reset_mutex);

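	/*
	 * Reconcile the SCSI mid-layer with the refreshed device list; this
	 * runs only after dropping the reset mutex.
	 */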
	if (event_type && dcmd_ret == DCMD_SUCCESS)
		megasas_add_remove_devices(instance, event_type);

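	/*
	 * On success, continue from the event just handled; on failure, fall
	 * back to the last sequence number that was successfully registered.
	 */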
	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

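	/* An AEN command is already outstanding; no need to re-register. */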
	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
					class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * When booted in a kdump kernel, minimize the memory footprint by
	 * disabling a few features
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;
	support_pci_lane_margining = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to register chrdev\n");
		return rval;
	}

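	/* register_chrdev(0, ...) returned a dynamically assigned major number */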
	megasas_mgmt_majorno = rval;

	megasas_init_debugfs();

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2 (CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

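	/*
	 * Expose driver attributes (version, release date and feature flags)
	 * as sysfs driver attributes; unwind in reverse order on any failure.
	 */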
	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				&driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;

	rval = driver_create_file(&megasas_pci_driver.driver,
				&driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_pci_lane_margining);
	if (rval)
		goto err_dcf_support_pci_lane_margining;

	return rval;

err_dcf_support_pci_lane_margining:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			&driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
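	/* Tear down the sysfs attributes and registrations created in megasas_init() */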
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			&driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			&driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);