1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
49 
50 /*
51  * Number of sectors per IO command
52  * Will be set in megasas_init_mfi if user does not provide
53  */
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 	"Maximum number of sectors per IO command");
58 
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
62 
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
66 
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
70 
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
75 
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180");
79 
80 int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "Enable/disable SMP affinity feature. Default: enable(1)");
83 
84 int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queues in chunks to support large queue depths. Default: enable(1)");
87 
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
91 
92 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "SCSI command timeout (10-90s). Default: 90s. See megasas_reset_timer.");
95 
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
101 		"1 - iops: High iops queues are not allocated &\n\t\t"
102 		"interrupt coalescing is enabled on all queues\n\t\t"
103 		"2 - latency: High iops queues are not allocated &\n\t\t"
104 		"interrupt coalescing is disabled on all queues\n\t\t"
105 		"default mode is 'balanced'"
106 		);
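
/*
 * Example (illustrative): select the latency-oriented profile on an Aero
 * adapter at load time with "modprobe megaraid_sas perf_mode=2".
 */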
107 
108 int event_log_level = MFI_EVT_CLASS_CRITICAL;
109 module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level. Range: -2 (CLASS_DEBUG) to 4 (CLASS_DEAD). Default: 2 (CLASS_CRITICAL)");
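
/*
 * Because this parameter is registered with mode 0644 it can also be changed
 * at runtime, e.g. (illustrative)
 * "echo -2 > /sys/module/megaraid_sas/parameters/event_log_level" to log all
 * event classes.
 */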
111 
112 unsigned int enable_sdev_max_qd;
113 module_param(enable_sdev_max_qd, int, 0444);
114 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
115 
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(MEGASAS_VERSION);
118 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
119 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
120 
121 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
122 static int megasas_get_pd_list(struct megasas_instance *instance);
123 static int megasas_ld_list_query(struct megasas_instance *instance,
124 				 u8 query_type);
125 static int megasas_issue_init_mfi(struct megasas_instance *instance);
126 static int megasas_register_aen(struct megasas_instance *instance,
127 				u32 seq_num, u32 class_locale_word);
128 static void megasas_get_pd_info(struct megasas_instance *instance,
129 				struct scsi_device *sdev);
130 
131 /*
132  * PCI ID table for all supported controllers
133  */
134 static struct pci_device_id megasas_pci_table[] = {
135 
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
137 	/* xscale IOP */
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
139 	/* ppc IOP */
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
141 	/* ppc IOP */
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
143 	/* gen2*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
145 	/* gen2*/
146 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
147 	/* skinny*/
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
149 	/* skinny*/
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
151 	/* xscale IOP, vega */
152 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
153 	/* xscale IOP */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
155 	/* Fusion */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
157 	/* Plasma */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
159 	/* Invader */
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
161 	/* Fury */
162 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
163 	/* Intruder */
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
165 	/* Intruder 24 port*/
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
168 	/* VENTURA */
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
170 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
172 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
173 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
175 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
176 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
177 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
178 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
179 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
180 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
181 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
182 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
183 	{}
184 };
185 
186 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
187 
188 static int megasas_mgmt_majorno;
189 struct megasas_mgmt_info megasas_mgmt_info;
190 static struct fasync_struct *megasas_async_queue;
191 static DEFINE_MUTEX(megasas_async_queue_mutex);
192 
193 static int megasas_poll_wait_aen;
194 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
195 static u32 support_poll_for_event;
196 u32 megasas_dbg_lvl;
197 static u32 support_device_change;
198 static bool support_nvme_encapsulation;
199 static bool support_pci_lane_margining;
200 
201 /* define lock for aen poll */
202 spinlock_t poll_aen_lock;
203 
204 extern struct dentry *megasas_debugfs_root;
205 extern void megasas_init_debugfs(void);
206 extern void megasas_exit_debugfs(void);
207 extern void megasas_setup_debugfs(struct megasas_instance *instance);
208 extern void megasas_destroy_debugfs(struct megasas_instance *instance);
209 
210 void
211 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
212 		     u8 alt_status);
213 static u32
214 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
215 static int
216 megasas_adp_reset_gen2(struct megasas_instance *instance,
217 		       struct megasas_register_set __iomem *reg_set);
218 static irqreturn_t megasas_isr(int irq, void *devp);
219 static u32
220 megasas_init_adapter_mfi(struct megasas_instance *instance);
221 u32
222 megasas_build_and_issue_cmd(struct megasas_instance *instance,
223 			    struct scsi_cmnd *scmd);
224 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
225 int
226 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
227 	int seconds);
228 void megasas_fusion_ocr_wq(struct work_struct *work);
229 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
230 					 int initial);
231 static int
232 megasas_set_dma_mask(struct megasas_instance *instance);
233 static int
234 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
235 static inline void
236 megasas_free_ctrl_mem(struct megasas_instance *instance);
237 static inline int
238 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
239 static inline void
240 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
241 static inline void
242 megasas_init_ctrl_params(struct megasas_instance *instance);
243 
244 u32 megasas_readl(struct megasas_instance *instance,
245 		  const volatile void __iomem *addr)
246 {
247 	u32 i = 0, ret_val;
248 	/*
	 * Due to a HW erratum in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient, and subsequent reads return a valid
	 * value. As a workaround, the driver retries the readl up to
	 * three times until a non-zero value is read.
254 	 */
255 	if (instance->adapter_type == AERO_SERIES) {
256 		do {
257 			ret_val = readl(addr);
258 			i++;
259 		} while (ret_val == 0 && i < 3);
260 		return ret_val;
261 	} else {
262 		return readl(addr);
263 	}
264 }
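
/*
 * Illustrative use (a sketch, not code from this file): firmware state reads
 * on Aero adapters go through this wrapper so the retry is applied
 * transparently, e.g.
 *
 *	fw_state = megasas_readl(instance,
 *			&instance->reg_set->outbound_scratch_pad_0) &
 *			MFI_STATE_MASK;
 */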
265 
266 /**
267  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
268  * @instance:			Adapter soft state
269  * @dcmd:			DCMD frame inside MFI command
270  * @dma_addr:			DMA address of buffer to be passed to FW
271  * @dma_len:			Length of DMA buffer to be passed to FW
272  * @return:			void
273  */
274 void megasas_set_dma_settings(struct megasas_instance *instance,
275 			      struct megasas_dcmd_frame *dcmd,
276 			      dma_addr_t dma_addr, u32 dma_len)
277 {
278 	if (instance->consistent_mask_64bit) {
279 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
280 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
281 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
282 
283 	} else {
284 		dcmd->sgl.sge32[0].phys_addr =
285 				cpu_to_le32(lower_32_bits(dma_addr));
286 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
287 		dcmd->flags = cpu_to_le16(dcmd->flags);
288 	}
289 }
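
/*
 * Illustrative caller pattern (a sketch; ci_h here stands for the DMA handle
 * of a response buffer): the caller fills in the opcode and lets this helper
 * pick 32- vs 64-bit SGE formatting based on the negotiated consistent DMA
 * mask, e.g.
 *
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
 *	megasas_set_dma_settings(instance, dcmd, ci_h,
 *				 sizeof(struct megasas_ctrl_info));
 */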
290 
291 static void
292 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
293 {
294 	instance->instancet->fire_cmd(instance,
295 		cmd->frame_phys_addr, 0, instance->reg_set);
296 	return;
297 }
298 
299 /**
300  * megasas_get_cmd -	Get a command from the free pool
301  * @instance:		Adapter soft state
302  *
 * Returns a free command from the pool, or NULL if the pool is empty.
304  */
305 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
306 						  *instance)
307 {
308 	unsigned long flags;
309 	struct megasas_cmd *cmd = NULL;
310 
311 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
312 
313 	if (!list_empty(&instance->cmd_pool)) {
314 		cmd = list_entry((&instance->cmd_pool)->next,
315 				 struct megasas_cmd, list);
316 		list_del_init(&cmd->list);
317 	} else {
318 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
319 	}
320 
321 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
322 	return cmd;
323 }
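
/*
 * Typical pairing for internal MFI commands (illustrative sketch): every
 * successful megasas_get_cmd() is balanced by megasas_return_cmd() once the
 * frame has completed or could not be issued:
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	(build the MFI frame, issue it, wait for completion)
 *	megasas_return_cmd(instance, cmd);
 */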
324 
325 /**
326  * megasas_return_cmd -	Return a cmd to free command pool
327  * @instance:		Adapter soft state
328  * @cmd:		Command packet to be returned to free command pool
329  */
330 void
331 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
332 {
333 	unsigned long flags;
334 	u32 blk_tags;
335 	struct megasas_cmd_fusion *cmd_fusion;
336 	struct fusion_context *fusion = instance->ctrl_context;
337 
	/* This flag is used only for fusion adapters.
	 * Polled-mode DCMDs wait for an interrupt and are not returned
	 * to the pool here.
	 */
341 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
342 		return;
343 
344 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
345 
346 	if (fusion) {
347 		blk_tags = instance->max_scsi_cmds + cmd->index;
348 		cmd_fusion = fusion->cmd_list[blk_tags];
349 		megasas_return_cmd_fusion(instance, cmd_fusion);
350 	}
351 	cmd->scmd = NULL;
352 	cmd->frame_count = 0;
353 	cmd->flags = 0;
354 	memset(cmd->frame, 0, instance->mfi_frame_size);
355 	cmd->frame->io.context = cpu_to_le32(cmd->index);
356 	if (!fusion && reset_devices)
357 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
358 	list_add(&cmd->list, (&instance->cmd_pool)->next);
359 
360 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
361 
362 }
363 
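/*
 * FW event timestamps with the top byte set to 0xff are relative to
 * controller boot (low 24 bits = seconds since boot); e.g. 0xff00003c is
 * rendered as "boot + 60s". Any other value is printed as an absolute
 * seconds count.
 */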
364 static const char *
365 format_timestamp(uint32_t timestamp)
366 {
367 	static char buffer[32];
368 
369 	if ((timestamp & 0xff000000) == 0xff000000)
370 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
371 		0x00ffffff);
372 	else
373 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
374 	return buffer;
375 }
376 
377 static const char *
378 format_class(int8_t class)
379 {
380 	static char buffer[6];
381 
382 	switch (class) {
383 	case MFI_EVT_CLASS_DEBUG:
384 		return "debug";
385 	case MFI_EVT_CLASS_PROGRESS:
386 		return "progress";
387 	case MFI_EVT_CLASS_INFO:
388 		return "info";
389 	case MFI_EVT_CLASS_WARNING:
390 		return "WARN";
391 	case MFI_EVT_CLASS_CRITICAL:
392 		return "CRIT";
393 	case MFI_EVT_CLASS_FATAL:
394 		return "FATAL";
395 	case MFI_EVT_CLASS_DEAD:
396 		return "DEAD";
397 	default:
398 		snprintf(buffer, sizeof(buffer), "%d", class);
399 		return buffer;
400 	}
401 }
402 
/**
 * megasas_decode_evt -	Decode FW AEN event and print critical events
 * @instance:			Adapter soft state
 */
408 static void
409 megasas_decode_evt(struct megasas_instance *instance)
410 {
411 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
412 	union megasas_evt_class_locale class_locale;
413 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
414 
415 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
416 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
417 		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
418 		event_log_level = MFI_EVT_CLASS_CRITICAL;
419 	}
420 
421 	if (class_locale.members.class >= event_log_level)
422 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
423 			le32_to_cpu(evt_detail->seq_num),
424 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
425 			(class_locale.members.locale),
426 			format_class(class_locale.members.class),
427 			evt_detail->description);
428 }
429 
430 /**
431 *	The following functions are defined for xscale
432 *	(deviceid : 1064R, PERC5) controllers
433 */
434 
435 /**
436  * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:			Adapter soft state
438  */
439 static inline void
440 megasas_enable_intr_xscale(struct megasas_instance *instance)
441 {
442 	struct megasas_register_set __iomem *regs;
443 
444 	regs = instance->reg_set;
445 	writel(0, &(regs)->outbound_intr_mask);
446 
447 	/* Dummy readl to force pci flush */
448 	readl(&regs->outbound_intr_mask);
449 }
450 
451 /**
 * megasas_disable_intr_xscale -	Disables interrupt
 * @instance:			Adapter soft state
454  */
455 static inline void
456 megasas_disable_intr_xscale(struct megasas_instance *instance)
457 {
458 	struct megasas_register_set __iomem *regs;
459 	u32 mask = 0x1f;
460 
461 	regs = instance->reg_set;
462 	writel(mask, &regs->outbound_intr_mask);
463 	/* Dummy readl to force pci flush */
464 	readl(&regs->outbound_intr_mask);
465 }
466 
467 /**
468  * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance:			Adapter soft state
470  */
471 static u32
472 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
473 {
474 	return readl(&instance->reg_set->outbound_msg_0);
475 }
476 /**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @instance:				Adapter soft state
479  */
480 static int
481 megasas_clear_intr_xscale(struct megasas_instance *instance)
482 {
483 	u32 status;
484 	u32 mfiStatus = 0;
485 	struct megasas_register_set __iomem *regs;
486 	regs = instance->reg_set;
487 
488 	/*
489 	 * Check if it is our interrupt
490 	 */
491 	status = readl(&regs->outbound_intr_status);
492 
493 	if (status & MFI_OB_INTR_STATUS_MASK)
494 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
495 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
496 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
497 
498 	/*
499 	 * Clear the interrupt by writing back the same value
500 	 */
501 	if (mfiStatus)
502 		writel(status, &regs->outbound_intr_status);
503 
504 	/* Dummy readl to force pci flush */
505 	readl(&regs->outbound_intr_status);
506 
507 	return mfiStatus;
508 }
509 
510 /**
511  * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance :		Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
513  * @frame_count :		Number of frames for the command
514  * @regs :			MFI register set
515  */
516 static inline void
517 megasas_fire_cmd_xscale(struct megasas_instance *instance,
518 		dma_addr_t frame_phys_addr,
519 		u32 frame_count,
520 		struct megasas_register_set __iomem *regs)
521 {
522 	unsigned long flags;
523 
524 	spin_lock_irqsave(&instance->hba_lock, flags);
525 	writel((frame_phys_addr >> 3)|(frame_count),
526 	       &(regs)->inbound_queue_port);
527 	spin_unlock_irqrestore(&instance->hba_lock, flags);
528 }
529 
530 /**
531  * megasas_adp_reset_xscale -  For controller reset
 * @instance:                          Adapter soft state
 * @regs:                              MFI register set
533  */
534 static int
535 megasas_adp_reset_xscale(struct megasas_instance *instance,
536 	struct megasas_register_set __iomem *regs)
537 {
538 	u32 i;
539 	u32 pcidata;
540 
541 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
542 
543 	for (i = 0; i < 3; i++)
544 		msleep(1000); /* sleep for 3 secs */
545 	pcidata  = 0;
546 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
547 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
548 	if (pcidata & 0x2) {
549 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
550 		pcidata &= ~0x2;
551 		pci_write_config_dword(instance->pdev,
552 				MFI_1068_PCSR_OFFSET, pcidata);
553 
554 		for (i = 0; i < 2; i++)
555 			msleep(1000); /* need to wait 2 secs again */
556 
557 		pcidata  = 0;
558 		pci_read_config_dword(instance->pdev,
559 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
560 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
561 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
562 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
563 			pcidata = 0;
564 			pci_write_config_dword(instance->pdev,
565 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
566 		}
567 	}
568 	return 0;
569 }
570 
571 /**
572  * megasas_check_reset_xscale -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
574  */
575 static int
576 megasas_check_reset_xscale(struct megasas_instance *instance,
577 		struct megasas_register_set __iomem *regs)
578 {
579 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
580 	    (le32_to_cpu(*instance->consumer) ==
581 		MEGASAS_ADPRESET_INPROG_SIGN))
582 		return 1;
583 	return 0;
584 }
585 
586 static struct megasas_instance_template megasas_instance_template_xscale = {
587 
588 	.fire_cmd = megasas_fire_cmd_xscale,
589 	.enable_intr = megasas_enable_intr_xscale,
590 	.disable_intr = megasas_disable_intr_xscale,
591 	.clear_intr = megasas_clear_intr_xscale,
592 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
593 	.adp_reset = megasas_adp_reset_xscale,
594 	.check_reset = megasas_check_reset_xscale,
595 	.service_isr = megasas_isr,
596 	.tasklet = megasas_complete_cmd_dpc,
597 	.init_adapter = megasas_init_adapter_mfi,
598 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
599 	.issue_dcmd = megasas_issue_dcmd,
600 };
601 
602 /**
603 *	This is the end of set of functions & definitions specific
604 *	to xscale (deviceid : 1064R, PERC5) controllers
605 */
606 
607 /**
608 *	The following functions are defined for ppc (deviceid : 0x60)
609 *	controllers
610 */
611 
612 /**
613  * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:			Adapter soft state
615  */
616 static inline void
617 megasas_enable_intr_ppc(struct megasas_instance *instance)
618 {
619 	struct megasas_register_set __iomem *regs;
620 
621 	regs = instance->reg_set;
622 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
623 
624 	writel(~0x80000000, &(regs)->outbound_intr_mask);
625 
626 	/* Dummy readl to force pci flush */
627 	readl(&regs->outbound_intr_mask);
628 }
629 
630 /**
631  * megasas_disable_intr_ppc -	Disable interrupt
 * @instance:			Adapter soft state
633  */
634 static inline void
635 megasas_disable_intr_ppc(struct megasas_instance *instance)
636 {
637 	struct megasas_register_set __iomem *regs;
638 	u32 mask = 0xFFFFFFFF;
639 
640 	regs = instance->reg_set;
641 	writel(mask, &regs->outbound_intr_mask);
642 	/* Dummy readl to force pci flush */
643 	readl(&regs->outbound_intr_mask);
644 }
645 
646 /**
647  * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance:			Adapter soft state
649  */
650 static u32
651 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
652 {
653 	return readl(&instance->reg_set->outbound_scratch_pad_0);
654 }
655 
656 /**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @instance:				Adapter soft state
659  */
660 static int
661 megasas_clear_intr_ppc(struct megasas_instance *instance)
662 {
663 	u32 status, mfiStatus = 0;
664 	struct megasas_register_set __iomem *regs;
665 	regs = instance->reg_set;
666 
667 	/*
668 	 * Check if it is our interrupt
669 	 */
670 	status = readl(&regs->outbound_intr_status);
671 
672 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
673 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
674 
675 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
676 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
677 
678 	/*
679 	 * Clear the interrupt by writing back the same value
680 	 */
681 	writel(status, &regs->outbound_doorbell_clear);
682 
683 	/* Dummy readl to force pci flush */
684 	readl(&regs->outbound_doorbell_clear);
685 
686 	return mfiStatus;
687 }
688 
689 /**
690  * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance :		Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
692  * @frame_count :		Number of frames for the command
693  * @regs :			MFI register set
694  */
695 static inline void
696 megasas_fire_cmd_ppc(struct megasas_instance *instance,
697 		dma_addr_t frame_phys_addr,
698 		u32 frame_count,
699 		struct megasas_register_set __iomem *regs)
700 {
701 	unsigned long flags;
702 
703 	spin_lock_irqsave(&instance->hba_lock, flags);
704 	writel((frame_phys_addr | (frame_count<<1))|1,
705 			&(regs)->inbound_queue_port);
706 	spin_unlock_irqrestore(&instance->hba_lock, flags);
707 }
708 
709 /**
710  * megasas_check_reset_ppc -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
712  */
713 static int
714 megasas_check_reset_ppc(struct megasas_instance *instance,
715 			struct megasas_register_set __iomem *regs)
716 {
717 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
718 		return 1;
719 
720 	return 0;
721 }
722 
723 static struct megasas_instance_template megasas_instance_template_ppc = {
724 
725 	.fire_cmd = megasas_fire_cmd_ppc,
726 	.enable_intr = megasas_enable_intr_ppc,
727 	.disable_intr = megasas_disable_intr_ppc,
728 	.clear_intr = megasas_clear_intr_ppc,
729 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
730 	.adp_reset = megasas_adp_reset_xscale,
731 	.check_reset = megasas_check_reset_ppc,
732 	.service_isr = megasas_isr,
733 	.tasklet = megasas_complete_cmd_dpc,
734 	.init_adapter = megasas_init_adapter_mfi,
735 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
736 	.issue_dcmd = megasas_issue_dcmd,
737 };
738 
739 /**
740  * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:			Adapter soft state
742  */
743 static inline void
744 megasas_enable_intr_skinny(struct megasas_instance *instance)
745 {
746 	struct megasas_register_set __iomem *regs;
747 
748 	regs = instance->reg_set;
749 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
750 
751 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
752 
753 	/* Dummy readl to force pci flush */
754 	readl(&regs->outbound_intr_mask);
755 }
756 
757 /**
758  * megasas_disable_intr_skinny -	Disables interrupt
 * @instance:			Adapter soft state
760  */
761 static inline void
762 megasas_disable_intr_skinny(struct megasas_instance *instance)
763 {
764 	struct megasas_register_set __iomem *regs;
765 	u32 mask = 0xFFFFFFFF;
766 
767 	regs = instance->reg_set;
768 	writel(mask, &regs->outbound_intr_mask);
769 	/* Dummy readl to force pci flush */
770 	readl(&regs->outbound_intr_mask);
771 }
772 
773 /**
774  * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance:			Adapter soft state
776  */
777 static u32
778 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
779 {
780 	return readl(&instance->reg_set->outbound_scratch_pad_0);
781 }
782 
783 /**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @instance:				Adapter soft state
786  */
787 static int
788 megasas_clear_intr_skinny(struct megasas_instance *instance)
789 {
790 	u32 status;
791 	u32 mfiStatus = 0;
792 	struct megasas_register_set __iomem *regs;
793 	regs = instance->reg_set;
794 
795 	/*
796 	 * Check if it is our interrupt
797 	 */
798 	status = readl(&regs->outbound_intr_status);
799 
800 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
801 		return 0;
802 	}
803 
804 	/*
805 	 * Check if it is our interrupt
806 	 */
807 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
808 	    MFI_STATE_FAULT) {
809 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
810 	} else
811 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
812 
813 	/*
814 	 * Clear the interrupt by writing back the same value
815 	 */
816 	writel(status, &regs->outbound_intr_status);
817 
818 	/*
819 	 * dummy read to flush PCI
820 	 */
821 	readl(&regs->outbound_intr_status);
822 
823 	return mfiStatus;
824 }
825 
826 /**
827  * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance :		Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
829  * @frame_count :		Number of frames for the command
830  * @regs :			MFI register set
831  */
832 static inline void
833 megasas_fire_cmd_skinny(struct megasas_instance *instance,
834 			dma_addr_t frame_phys_addr,
835 			u32 frame_count,
836 			struct megasas_register_set __iomem *regs)
837 {
838 	unsigned long flags;
839 
840 	spin_lock_irqsave(&instance->hba_lock, flags);
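	/*
	 * The 64-bit frame address is posted as two 32-bit writes: the upper
	 * half goes to the high queue port, then the lower half is written
	 * together with the frame count (shifted left by one) and bit 0 set
	 * to post the command.
	 */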
841 	writel(upper_32_bits(frame_phys_addr),
842 	       &(regs)->inbound_high_queue_port);
843 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
844 	       &(regs)->inbound_low_queue_port);
845 	spin_unlock_irqrestore(&instance->hba_lock, flags);
846 }
847 
848 /**
849  * megasas_check_reset_skinny -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
851  */
852 static int
853 megasas_check_reset_skinny(struct megasas_instance *instance,
854 				struct megasas_register_set __iomem *regs)
855 {
856 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
857 		return 1;
858 
859 	return 0;
860 }
861 
862 static struct megasas_instance_template megasas_instance_template_skinny = {
863 
864 	.fire_cmd = megasas_fire_cmd_skinny,
865 	.enable_intr = megasas_enable_intr_skinny,
866 	.disable_intr = megasas_disable_intr_skinny,
867 	.clear_intr = megasas_clear_intr_skinny,
868 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
869 	.adp_reset = megasas_adp_reset_gen2,
870 	.check_reset = megasas_check_reset_skinny,
871 	.service_isr = megasas_isr,
872 	.tasklet = megasas_complete_cmd_dpc,
873 	.init_adapter = megasas_init_adapter_mfi,
874 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
875 	.issue_dcmd = megasas_issue_dcmd,
876 };
877 
878 
879 /**
880 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
881 *	controllers
882 */
883 
884 /**
885  * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:                  Adapter soft state
887  */
888 static inline void
889 megasas_enable_intr_gen2(struct megasas_instance *instance)
890 {
891 	struct megasas_register_set __iomem *regs;
892 
893 	regs = instance->reg_set;
894 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
895 
896 	/* write ~0x00000005 (4 & 1) to the intr mask*/
897 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
898 
899 	/* Dummy readl to force pci flush */
900 	readl(&regs->outbound_intr_mask);
901 }
902 
903 /**
904  * megasas_disable_intr_gen2 - Disables interrupt
 * @instance:                  Adapter soft state
906  */
907 static inline void
908 megasas_disable_intr_gen2(struct megasas_instance *instance)
909 {
910 	struct megasas_register_set __iomem *regs;
911 	u32 mask = 0xFFFFFFFF;
912 
913 	regs = instance->reg_set;
914 	writel(mask, &regs->outbound_intr_mask);
915 	/* Dummy readl to force pci flush */
916 	readl(&regs->outbound_intr_mask);
917 }
918 
919 /**
920  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance:                  Adapter soft state
922  */
923 static u32
924 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
925 {
926 	return readl(&instance->reg_set->outbound_scratch_pad_0);
927 }
928 
929 /**
 * megasas_clear_intr_gen2 -            Check & clear interrupt
 * @instance:                           Adapter soft state
932  */
933 static int
934 megasas_clear_intr_gen2(struct megasas_instance *instance)
935 {
936 	u32 status;
937 	u32 mfiStatus = 0;
938 	struct megasas_register_set __iomem *regs;
939 	regs = instance->reg_set;
940 
941 	/*
942 	 * Check if it is our interrupt
943 	 */
944 	status = readl(&regs->outbound_intr_status);
945 
946 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
947 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
948 	}
949 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
950 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
951 	}
952 
953 	/*
954 	 * Clear the interrupt by writing back the same value
955 	 */
956 	if (mfiStatus)
957 		writel(status, &regs->outbound_doorbell_clear);
958 
959 	/* Dummy readl to force pci flush */
960 	readl(&regs->outbound_intr_status);
961 
962 	return mfiStatus;
963 }
964 /**
965  * megasas_fire_cmd_gen2 -     Sends command to the FW
 * @instance :                 Adapter soft state
 * @frame_phys_addr :          Physical address of cmd
967  * @frame_count :              Number of frames for the command
968  * @regs :                     MFI register set
969  */
970 static inline void
971 megasas_fire_cmd_gen2(struct megasas_instance *instance,
972 			dma_addr_t frame_phys_addr,
973 			u32 frame_count,
974 			struct megasas_register_set __iomem *regs)
975 {
976 	unsigned long flags;
977 
978 	spin_lock_irqsave(&instance->hba_lock, flags);
979 	writel((frame_phys_addr | (frame_count<<1))|1,
980 			&(regs)->inbound_queue_port);
981 	spin_unlock_irqrestore(&instance->hba_lock, flags);
982 }
983 
984 /**
985  * megasas_adp_reset_gen2 -	For controller reset
 * @instance:				Adapter soft state
 * @reg_set:				MFI register set
987  */
988 static int
989 megasas_adp_reset_gen2(struct megasas_instance *instance,
990 			struct megasas_register_set __iomem *reg_set)
991 {
992 	u32 retry = 0 ;
993 	u32 HostDiag;
994 	u32 __iomem *seq_offset = &reg_set->seq_offset;
995 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
996 
997 	if (instance->instancet == &megasas_instance_template_skinny) {
998 		seq_offset = &reg_set->fusion_seq_offset;
999 		hostdiag_offset = &reg_set->fusion_host_diag;
1000 	}
1001 
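	/*
	 * Write the key sequence to the write-sequence register; this is
	 * assumed to unlock write access to the host diagnostic register
	 * before DIAG_RESET_ADAPTER is set below.
	 */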
1002 	writel(0, seq_offset);
1003 	writel(4, seq_offset);
1004 	writel(0xb, seq_offset);
1005 	writel(2, seq_offset);
1006 	writel(7, seq_offset);
1007 	writel(0xd, seq_offset);
1008 
1009 	msleep(1000);
1010 
1011 	HostDiag = (u32)readl(hostdiag_offset);
1012 
1013 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1014 		msleep(100);
1015 		HostDiag = (u32)readl(hostdiag_offset);
1016 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1017 					retry, HostDiag);
1018 
1019 		if (retry++ >= 100)
1020 			return 1;
1021 
1022 	}
1023 
1024 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1025 
1026 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1027 
1028 	ssleep(10);
1029 
1030 	HostDiag = (u32)readl(hostdiag_offset);
1031 	while (HostDiag & DIAG_RESET_ADAPTER) {
1032 		msleep(100);
1033 		HostDiag = (u32)readl(hostdiag_offset);
1034 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1035 				retry, HostDiag);
1036 
1037 		if (retry++ >= 1000)
1038 			return 1;
1039 
1040 	}
1041 	return 0;
1042 }
1043 
1044 /**
1045  * megasas_check_reset_gen2 -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
1047  */
1048 static int
1049 megasas_check_reset_gen2(struct megasas_instance *instance,
1050 		struct megasas_register_set __iomem *regs)
1051 {
1052 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1053 		return 1;
1054 
1055 	return 0;
1056 }
1057 
1058 static struct megasas_instance_template megasas_instance_template_gen2 = {
1059 
1060 	.fire_cmd = megasas_fire_cmd_gen2,
1061 	.enable_intr = megasas_enable_intr_gen2,
1062 	.disable_intr = megasas_disable_intr_gen2,
1063 	.clear_intr = megasas_clear_intr_gen2,
1064 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1065 	.adp_reset = megasas_adp_reset_gen2,
1066 	.check_reset = megasas_check_reset_gen2,
1067 	.service_isr = megasas_isr,
1068 	.tasklet = megasas_complete_cmd_dpc,
1069 	.init_adapter = megasas_init_adapter_mfi,
1070 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1071 	.issue_dcmd = megasas_issue_dcmd,
1072 };
1073 
1074 /**
1075 *	This is the end of set of functions & definitions
1076 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1077 */
1078 
1079 /*
1080  * Template added for TB (Fusion)
1081  */
1082 extern struct megasas_instance_template megasas_instance_template_fusion;
1083 
1084 /**
1085  * megasas_issue_polled -	Issues a polling command
1086  * @instance:			Adapter soft state
1087  * @cmd:			Command packet to be issued
1088  *
1089  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1090  */
1091 int
1092 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1093 {
1094 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1095 
1096 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1097 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1098 
1099 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1100 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1101 			__func__, __LINE__);
1102 		return DCMD_NOT_FIRED;
1103 	}
1104 
1105 	instance->instancet->issue_dcmd(instance, cmd);
1106 
1107 	return wait_and_poll(instance, cmd, instance->requestorId ?
1108 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1109 }
1110 
1111 /**
1112  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1113  * @instance:			Adapter soft state
1114  * @cmd:			Command to be issued
1115  * @timeout:			Timeout in seconds
1116  *
1117  * This function waits on an event for the command to be returned from ISR.
 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1119  * Used to issue ioctl commands.
1120  */
1121 int
1122 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1123 			  struct megasas_cmd *cmd, int timeout)
1124 {
1125 	int ret = 0;
1126 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1127 
1128 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1129 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1130 			__func__, __LINE__);
1131 		return DCMD_NOT_FIRED;
1132 	}
1133 
1134 	instance->instancet->issue_dcmd(instance, cmd);
1135 
1136 	if (timeout) {
1137 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1138 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1139 		if (!ret) {
1140 			dev_err(&instance->pdev->dev,
1141 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1142 				cmd->frame->dcmd.opcode, __func__);
1143 			return DCMD_TIMEOUT;
1144 		}
1145 	} else
1146 		wait_event(instance->int_cmd_wait_q,
1147 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1148 
1149 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1150 		DCMD_SUCCESS : DCMD_FAILED;
1151 }
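
/*
 * Illustrative caller pattern (a sketch, not taken verbatim from this file):
 * internal DCMDs are issued synchronously and a timeout is treated as a
 * reason to escalate, e.g.
 *
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	if (ret == DCMD_TIMEOUT)
 *		(escalate: trigger controller reset or fail the DCMD)
 */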
1152 
1153 /**
1154  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1155  * @instance:				Adapter soft state
1156  * @cmd_to_abort:			Previously issued cmd to be aborted
1157  * @timeout:				Timeout in seconds
1158  *
 * MFI firmware can abort a previously issued AEN command (automatic event
1160  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1161  * cmd and waits for return status.
1162  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1163  */
1164 static int
1165 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1166 				struct megasas_cmd *cmd_to_abort, int timeout)
1167 {
1168 	struct megasas_cmd *cmd;
1169 	struct megasas_abort_frame *abort_fr;
1170 	int ret = 0;
1171 	u32 opcode;
1172 
1173 	cmd = megasas_get_cmd(instance);
1174 
1175 	if (!cmd)
1176 		return -1;
1177 
1178 	abort_fr = &cmd->frame->abort;
1179 
1180 	/*
1181 	 * Prepare and issue the abort frame
1182 	 */
1183 	abort_fr->cmd = MFI_CMD_ABORT;
1184 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1185 	abort_fr->flags = cpu_to_le16(0);
1186 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1187 	abort_fr->abort_mfi_phys_addr_lo =
1188 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1189 	abort_fr->abort_mfi_phys_addr_hi =
1190 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1191 
1192 	cmd->sync_cmd = 1;
1193 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1194 
1195 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1196 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1197 			__func__, __LINE__);
1198 		return DCMD_NOT_FIRED;
1199 	}
1200 
1201 	instance->instancet->issue_dcmd(instance, cmd);
1202 
1203 	if (timeout) {
1204 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1205 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1206 		if (!ret) {
1207 			opcode = cmd_to_abort->frame->dcmd.opcode;
1208 			dev_err(&instance->pdev->dev,
1209 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1210 				opcode,  __func__);
1211 			return DCMD_TIMEOUT;
1212 		}
1213 	} else
1214 		wait_event(instance->abort_cmd_wait_q,
1215 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1216 
1217 	cmd->sync_cmd = 0;
1218 
1219 	megasas_return_cmd(instance, cmd);
1220 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1221 		DCMD_SUCCESS : DCMD_FAILED;
1222 }
1223 
1224 /**
1225  * megasas_make_sgl32 -	Prepares 32-bit SGL
1226  * @instance:		Adapter soft state
1227  * @scp:		SCSI command from the mid-layer
1228  * @mfi_sgl:		SGL to be filled in
1229  *
1230  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1232  */
1233 static int
1234 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1235 		   union megasas_sgl *mfi_sgl)
1236 {
1237 	int i;
1238 	int sge_count;
1239 	struct scatterlist *os_sgl;
1240 
1241 	sge_count = scsi_dma_map(scp);
1242 	BUG_ON(sge_count < 0);
1243 
1244 	if (sge_count) {
1245 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1246 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1247 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1248 		}
1249 	}
1250 	return sge_count;
1251 }
1252 
1253 /**
1254  * megasas_make_sgl64 -	Prepares 64-bit SGL
1255  * @instance:		Adapter soft state
1256  * @scp:		SCSI command from the mid-layer
1257  * @mfi_sgl:		SGL to be filled in
1258  *
1259  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1261  */
1262 static int
1263 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1264 		   union megasas_sgl *mfi_sgl)
1265 {
1266 	int i;
1267 	int sge_count;
1268 	struct scatterlist *os_sgl;
1269 
1270 	sge_count = scsi_dma_map(scp);
1271 	BUG_ON(sge_count < 0);
1272 
1273 	if (sge_count) {
1274 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1275 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1276 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1277 		}
1278 	}
1279 	return sge_count;
1280 }
1281 
1282 /**
1283  * megasas_make_sgl_skinny - Prepares IEEE SGL
1284  * @instance:           Adapter soft state
1285  * @scp:                SCSI command from the mid-layer
1286  * @mfi_sgl:            SGL to be filled in
1287  *
1288  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1290  */
1291 static int
1292 megasas_make_sgl_skinny(struct megasas_instance *instance,
1293 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1294 {
1295 	int i;
1296 	int sge_count;
1297 	struct scatterlist *os_sgl;
1298 
1299 	sge_count = scsi_dma_map(scp);
1300 
1301 	if (sge_count) {
1302 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1303 			mfi_sgl->sge_skinny[i].length =
1304 				cpu_to_le32(sg_dma_len(os_sgl));
1305 			mfi_sgl->sge_skinny[i].phys_addr =
1306 				cpu_to_le64(sg_dma_address(os_sgl));
1307 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1308 		}
1309 	}
1310 	return sge_count;
1311 }
1312 
/**
 * megasas_get_frame_count - Computes the number of frames
 * @instance		: Adapter soft state
 * @sge_count		: number of sg elements
 * @frame_type		: type of frame - io or pthru frame
 *
 * Returns the number of frames required for the given number of SGEs (sge_count)
1319  */
1320 
1321 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1322 			u8 sge_count, u8 frame_type)
1323 {
1324 	int num_cnt;
1325 	int sge_bytes;
1326 	u32 sge_sz;
1327 	u32 frame_count = 0;
1328 
1329 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1330 	    sizeof(struct megasas_sge32);
1331 
1332 	if (instance->flag_ieee) {
1333 		sge_sz = sizeof(struct megasas_sge_skinny);
1334 	}
1335 
1336 	/*
1337 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1338 	 * 3 SGEs for 32-bit SGLs for ldio &
1339 	 * 1 SGEs for 64-bit SGLs and
1340 	 * 2 SGEs for 32-bit SGLs for pthru frame
1341 	 */
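	/*
	 * Worked example (assuming the packed 12-byte struct megasas_sge64):
	 * a 64-bit pthru SGL with 5 SGEs keeps one SGE in the main frame, so
	 * num_cnt = 4 and sge_bytes = 48, which fits in one extra 64-byte
	 * frame; the function then returns 1 + 1 = 2 frames.
	 */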
1342 	if (unlikely(frame_type == PTHRU_FRAME)) {
1343 		if (instance->flag_ieee == 1) {
1344 			num_cnt = sge_count - 1;
1345 		} else if (IS_DMA64)
1346 			num_cnt = sge_count - 1;
1347 		else
1348 			num_cnt = sge_count - 2;
1349 	} else {
1350 		if (instance->flag_ieee == 1) {
1351 			num_cnt = sge_count - 1;
1352 		} else if (IS_DMA64)
1353 			num_cnt = sge_count - 2;
1354 		else
1355 			num_cnt = sge_count - 3;
1356 	}
1357 
1358 	if (num_cnt > 0) {
1359 		sge_bytes = sge_sz * num_cnt;
1360 
1361 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1362 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1363 	}
1364 	/* Main frame */
1365 	frame_count += 1;
1366 
1367 	if (frame_count > 7)
1368 		frame_count = 8;
1369 	return frame_count;
1370 }
1371 
1372 /**
1373  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1374  * @instance:		Adapter soft state
1375  * @scp:		SCSI command
1376  * @cmd:		Command to be prepared in
1377  *
 * This function prepares CDB commands. These are typically pass-through
1379  * commands to the devices.
1380  */
1381 static int
1382 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1383 		   struct megasas_cmd *cmd)
1384 {
1385 	u32 is_logical;
1386 	u32 device_id;
1387 	u16 flags = 0;
1388 	struct megasas_pthru_frame *pthru;
1389 
1390 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1391 	device_id = MEGASAS_DEV_INDEX(scp);
1392 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1393 
1394 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1395 		flags = MFI_FRAME_DIR_WRITE;
1396 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1397 		flags = MFI_FRAME_DIR_READ;
1398 	else if (scp->sc_data_direction == DMA_NONE)
1399 		flags = MFI_FRAME_DIR_NONE;
1400 
1401 	if (instance->flag_ieee == 1) {
1402 		flags |= MFI_FRAME_IEEE;
1403 	}
1404 
1405 	/*
1406 	 * Prepare the DCDB frame
1407 	 */
1408 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1409 	pthru->cmd_status = 0x0;
1410 	pthru->scsi_status = 0x0;
1411 	pthru->target_id = device_id;
1412 	pthru->lun = scp->device->lun;
1413 	pthru->cdb_len = scp->cmd_len;
1414 	pthru->timeout = 0;
1415 	pthru->pad_0 = 0;
1416 	pthru->flags = cpu_to_le16(flags);
1417 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1418 
1419 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1420 
1421 	/*
1422 	 * If the command is for the tape device, set the
1423 	 * pthru timeout to the os layer timeout value.
1424 	 */
1425 	if (scp->device->type == TYPE_TAPE) {
1426 		if ((scp->request->timeout / HZ) > 0xFFFF)
1427 			pthru->timeout = cpu_to_le16(0xFFFF);
1428 		else
1429 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1430 	}
1431 
1432 	/*
1433 	 * Construct SGL
1434 	 */
1435 	if (instance->flag_ieee == 1) {
1436 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1437 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1438 						      &pthru->sgl);
1439 	} else if (IS_DMA64) {
1440 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1441 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1442 						      &pthru->sgl);
1443 	} else
1444 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1445 						      &pthru->sgl);
1446 
1447 	if (pthru->sge_count > instance->max_num_sge) {
1448 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1449 			pthru->sge_count);
1450 		return 0;
1451 	}
1452 
1453 	/*
1454 	 * Sense info specific
1455 	 */
1456 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1457 	pthru->sense_buf_phys_addr_hi =
1458 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1459 	pthru->sense_buf_phys_addr_lo =
1460 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1461 
1462 	/*
1463 	 * Compute the total number of frames this command consumes. FW uses
1464 	 * this number to pull sufficient number of frames from host memory.
1465 	 */
1466 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1467 							PTHRU_FRAME);
1468 
1469 	return cmd->frame_count;
1470 }
1471 
1472 /**
1473  * megasas_build_ldio -	Prepares IOs to logical devices
1474  * @instance:		Adapter soft state
1475  * @scp:		SCSI command
1476  * @cmd:		Command to be prepared
1477  *
1478  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1479  */
1480 static int
1481 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1482 		   struct megasas_cmd *cmd)
1483 {
1484 	u32 device_id;
1485 	u8 sc = scp->cmnd[0];
1486 	u16 flags = 0;
1487 	struct megasas_io_frame *ldio;
1488 
1489 	device_id = MEGASAS_DEV_INDEX(scp);
1490 	ldio = (struct megasas_io_frame *)cmd->frame;
1491 
1492 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1493 		flags = MFI_FRAME_DIR_WRITE;
1494 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1495 		flags = MFI_FRAME_DIR_READ;
1496 
1497 	if (instance->flag_ieee == 1) {
1498 		flags |= MFI_FRAME_IEEE;
1499 	}
1500 
1501 	/*
1502 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1503 	 */
1504 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1505 	ldio->cmd_status = 0x0;
1506 	ldio->scsi_status = 0x0;
1507 	ldio->target_id = device_id;
1508 	ldio->timeout = 0;
1509 	ldio->reserved_0 = 0;
1510 	ldio->pad_0 = 0;
1511 	ldio->flags = cpu_to_le16(flags);
1512 	ldio->start_lba_hi = 0;
1513 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1514 
1515 	/*
1516 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1517 	 */
1518 	if (scp->cmd_len == 6) {
1519 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1520 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1521 						 ((u32) scp->cmnd[2] << 8) |
1522 						 (u32) scp->cmnd[3]);
1523 
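		/* A 6-byte CDB carries only a 21-bit LBA, hence the mask. */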
1524 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1525 	}
1526 
1527 	/*
1528 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1529 	 */
1530 	else if (scp->cmd_len == 10) {
1531 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1532 					      ((u32) scp->cmnd[7] << 8));
1533 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1534 						 ((u32) scp->cmnd[3] << 16) |
1535 						 ((u32) scp->cmnd[4] << 8) |
1536 						 (u32) scp->cmnd[5]);
1537 	}
1538 
1539 	/*
1540 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1541 	 */
1542 	else if (scp->cmd_len == 12) {
1543 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1544 					      ((u32) scp->cmnd[7] << 16) |
1545 					      ((u32) scp->cmnd[8] << 8) |
1546 					      (u32) scp->cmnd[9]);
1547 
1548 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1549 						 ((u32) scp->cmnd[3] << 16) |
1550 						 ((u32) scp->cmnd[4] << 8) |
1551 						 (u32) scp->cmnd[5]);
1552 	}
1553 
1554 	/*
1555 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1556 	 */
1557 	else if (scp->cmd_len == 16) {
1558 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1559 					      ((u32) scp->cmnd[11] << 16) |
1560 					      ((u32) scp->cmnd[12] << 8) |
1561 					      (u32) scp->cmnd[13]);
1562 
1563 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1564 						 ((u32) scp->cmnd[7] << 16) |
1565 						 ((u32) scp->cmnd[8] << 8) |
1566 						 (u32) scp->cmnd[9]);
1567 
1568 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1569 						 ((u32) scp->cmnd[3] << 16) |
1570 						 ((u32) scp->cmnd[4] << 8) |
1571 						 (u32) scp->cmnd[5]);
1572 
1573 	}
1574 
1575 	/*
1576 	 * Construct SGL
1577 	 */
1578 	if (instance->flag_ieee) {
1579 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1580 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1581 					      &ldio->sgl);
1582 	} else if (IS_DMA64) {
1583 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1584 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1585 	} else
1586 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1587 
1588 	if (ldio->sge_count > instance->max_num_sge) {
1589 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1590 			ldio->sge_count);
1591 		return 0;
1592 	}
1593 
1594 	/*
1595 	 * Sense info specific
1596 	 */
1597 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1598 	ldio->sense_buf_phys_addr_hi = 0;
1599 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1600 
1601 	/*
1602 	 * Compute the total number of frames this command consumes. FW uses
1603 	 * this number to pull sufficient number of frames from host memory.
1604 	 */
1605 	cmd->frame_count = megasas_get_frame_count(instance,
1606 			ldio->sge_count, IO_FRAME);
1607 
1608 	return cmd->frame_count;
1609 }
1610 
1611 /**
1612  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1613  *				and whether it's RW or non RW
 * @cmd:			SCSI command
1615  *
1616  */
1617 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1618 {
1619 	int ret;
1620 
1621 	switch (cmd->cmnd[0]) {
1622 	case READ_10:
1623 	case WRITE_10:
1624 	case READ_12:
1625 	case WRITE_12:
1626 	case READ_6:
1627 	case WRITE_6:
1628 	case READ_16:
1629 	case WRITE_16:
1630 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1631 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1632 		break;
1633 	default:
1634 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1635 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1636 	}
1637 	return ret;
1638 }
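
/*
 * For example, a READ_10 to a logical drive classifies as READ_WRITE_LDIO,
 * while an INQUIRY to a system PD falls through to NON_READ_WRITE_SYSPDIO.
 */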
1639 
/**
1641  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1642  *					in FW
1643  * @instance:				Adapter soft state
1644  */
1645 static inline void
1646 megasas_dump_pending_frames(struct megasas_instance *instance)
1647 {
1648 	struct megasas_cmd *cmd;
1649 	int i,n;
1650 	union megasas_sgl *mfi_sgl;
1651 	struct megasas_io_frame *ldio;
1652 	struct megasas_pthru_frame *pthru;
1653 	u32 sgcount;
1654 	u16 max_cmd = instance->max_fw_cmds;
1655 
1656 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1657 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1658 	if (IS_DMA64)
1659 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1660 	else
1661 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1662 
1663 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1664 	for (i = 0; i < max_cmd; i++) {
1665 		cmd = instance->cmd_list[i];
1666 		if (!cmd->scmd)
1667 			continue;
1668 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1669 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1670 			ldio = (struct megasas_io_frame *)cmd->frame;
1671 			mfi_sgl = &ldio->sgl;
1672 			sgcount = ldio->sge_count;
1673 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1674 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1675 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1676 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1677 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1678 		} else {
1679 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1680 			mfi_sgl = &pthru->sgl;
1681 			sgcount = pthru->sge_count;
1682 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1683 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1684 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1685 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1686 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1687 		}
1688 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1689 			for (n = 0; n < sgcount; n++) {
1690 				if (IS_DMA64)
1691 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1692 						le32_to_cpu(mfi_sgl->sge64[n].length),
1693 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1694 				else
1695 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1696 						le32_to_cpu(mfi_sgl->sge32[n].length),
1697 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1698 			}
1699 		}
1700 	} /*for max_cmd*/
1701 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1702 	for (i = 0; i < max_cmd; i++) {
1703 
1704 		cmd = instance->cmd_list[i];
1705 
1706 		if (cmd->sync_cmd == 1)
1707 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1708 	}
1709 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1710 }
1711 
1712 u32
1713 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1714 			    struct scsi_cmnd *scmd)
1715 {
1716 	struct megasas_cmd *cmd;
1717 	u32 frame_count;
1718 
1719 	cmd = megasas_get_cmd(instance);
1720 	if (!cmd)
1721 		return SCSI_MLQUEUE_HOST_BUSY;
1722 
1723 	/*
1724 	 * Logical drive command
1725 	 */
1726 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1727 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1728 	else
1729 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1730 
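	/*
	 * A zero frame count means the frame could not be built (e.g. the
	 * scatter-gather list could not be mapped); return the command and
	 * let the mid-layer retry.
	 */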
1731 	if (!frame_count)
1732 		goto out_return_cmd;
1733 
1734 	cmd->scmd = scmd;
1735 	scmd->SCp.ptr = (char *)cmd;
1736 
1737 	/*
1738 	 * Issue the command to the FW
1739 	 */
1740 	atomic_inc(&instance->fw_outstanding);
1741 
1742 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1743 				cmd->frame_count-1, instance->reg_set);
1744 
1745 	return 0;
1746 out_return_cmd:
1747 	megasas_return_cmd(instance, cmd);
1748 	return SCSI_MLQUEUE_HOST_BUSY;
1749 }
1750 
1751 
1752 /**
1753  * megasas_queue_command -	Queue entry point
1754  * @scmd:			SCSI command to be queued
1755  * @done:			Callback entry point
1756  */
1757 static int
1758 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1759 {
1760 	struct megasas_instance *instance;
1761 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1762 
1763 	instance = (struct megasas_instance *)
1764 	    scmd->device->host->hostdata;
1765 
1766 	if (instance->unload == 1) {
1767 		scmd->result = DID_NO_CONNECT << 16;
1768 		scmd->scsi_done(scmd);
1769 		return 0;
1770 	}
1771 
1772 	if (instance->issuepend_done == 0)
1773 		return SCSI_MLQUEUE_HOST_BUSY;
1774 
1775 
1776 	/* Check for an mpio path and adjust behavior */
1777 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1778 		if (megasas_check_mpio_paths(instance, scmd) ==
1779 		    (DID_REQUEUE << 16)) {
1780 			return SCSI_MLQUEUE_HOST_BUSY;
1781 		} else {
1782 			scmd->result = DID_NO_CONNECT << 16;
1783 			scmd->scsi_done(scmd);
1784 			return 0;
1785 		}
1786 	}
1787 
1788 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1789 		scmd->result = DID_NO_CONNECT << 16;
1790 		scmd->scsi_done(scmd);
1791 		return 0;
1792 	}
1793 
1794 	mr_device_priv_data = scmd->device->hostdata;
1795 	if (!mr_device_priv_data) {
1796 		scmd->result = DID_NO_CONNECT << 16;
1797 		scmd->scsi_done(scmd);
1798 		return 0;
1799 	}
1800 
1801 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1802 		return SCSI_MLQUEUE_HOST_BUSY;
1803 
1804 	if (mr_device_priv_data->tm_busy)
1805 		return SCSI_MLQUEUE_DEVICE_BUSY;
1806 
1807 
1808 	scmd->result = 0;
1809 
1810 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1811 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1812 		scmd->device->lun)) {
1813 		scmd->result = DID_BAD_TARGET << 16;
1814 		goto out_done;
1815 	}
1816 
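	/*
	 * Complete SYNCHRONIZE_CACHE for logical drives in the driver when
	 * the firmware does not advertise cache flush support.
	 */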
1817 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1818 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1819 	    (!instance->fw_sync_cache_support)) {
1820 		scmd->result = DID_OK << 16;
1821 		goto out_done;
1822 	}
1823 
1824 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1825 
1826  out_done:
1827 	scmd->scsi_done(scmd);
1828 	return 0;
1829 }
1830 
1831 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1832 {
1833 	int i;
1834 
1835 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1836 
1837 		if ((megasas_mgmt_info.instance[i]) &&
1838 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1839 			return megasas_mgmt_info.instance[i];
1840 	}
1841 
1842 	return NULL;
1843 }
1844 
1845 /*
1846 * megasas_set_dynamic_target_properties -
1847 * Device property set by driver may not be static and it is required to be
1848 * updated after OCR
1849 *
1850 * set tm_capable.
1851 * set dma alignment (only for eedp protection enable vd).
1852 *
1853 * @sdev: OS provided scsi device
1854 *
1855 * Returns void
1856 */
1857 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1858 					   bool is_target_prop)
1859 {
1860 	u16 pd_index = 0, ld;
1861 	u32 device_id;
1862 	struct megasas_instance *instance;
1863 	struct fusion_context *fusion;
1864 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1865 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1866 	struct MR_LD_RAID *raid;
1867 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1868 
1869 	instance = megasas_lookup_instance(sdev->host->host_no);
1870 	fusion = instance->ctrl_context;
1871 	mr_device_priv_data = sdev->hostdata;
1872 
1873 	if (!fusion || !mr_device_priv_data)
1874 		return;
1875 
1876 	if (MEGASAS_IS_LOGICAL(sdev)) {
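		/*
		 * Translate the OS channel/target pair into the firmware LD
		 * target id and look it up in the currently active drive map.
		 */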
1877 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1878 					+ sdev->id;
1879 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1880 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1881 		if (ld >= instance->fw_supported_vd_count)
1882 			return;
1883 		raid = MR_LdRaidGet(ld, local_map_ptr);
1884 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1887 
1888 		mr_device_priv_data->is_tm_capable =
1889 			raid->capability.tmCapable;
1890 	} else if (instance->use_seqnum_jbod_fp) {
1891 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1892 			sdev->id;
1893 		pd_sync = (void *)fusion->pd_seq_sync
1894 				[(instance->pd_seq_map_id - 1) & 1];
1895 		mr_device_priv_data->is_tm_capable =
1896 			pd_sync->seq[pd_index].capability.tmCapable;
1897 	}
1898 
1899 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1900 		/*
1901 		 * If FW provides a target reset timeout value, driver will use
1902 		 * it. If not set, fallback to default values.
1903 		 */
1904 		mr_device_priv_data->target_reset_tmo =
1905 			min_t(u8, instance->max_reset_tmo,
1906 			      instance->tgt_prop->reset_tmo);
1907 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1908 	} else {
1909 		mr_device_priv_data->target_reset_tmo =
1910 						MEGASAS_DEFAULT_TM_TIMEOUT;
1911 		mr_device_priv_data->task_abort_tmo =
1912 						MEGASAS_DEFAULT_TM_TIMEOUT;
1913 	}
1914 }
1915 
1916 /*
1917  * megasas_set_nvme_device_properties -
1918  * set nomerges=2
1919  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1920  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1921  *
1922  * MR firmware provides value in KB. Caller of this function converts
1923  * kb into bytes.
1924  *
1925  * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1926  * MR firmware provides value 128 as (32 * 4K) = 128K.
1927  *
1928  * @sdev:				scsi device
1929  * @max_io_size:				maximum io transfer size
1930  *
1931  */
1932 static inline void
1933 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1934 {
1935 	struct megasas_instance *instance;
1936 	u32 mr_nvme_pg_size;
1937 
1938 	instance = (struct megasas_instance *)sdev->host->hostdata;
1939 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1940 				MR_DEFAULT_NVME_PAGE_SIZE);
1941 
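	/* max_io_size is in bytes; the block layer limit is in 512-byte sectors */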
1942 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1943 
1944 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1945 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1946 }
1947 
1948 /*
1949  * megasas_set_fw_assisted_qd -
1950  * set device queue depth to can_queue
1951  * set device queue depth to fw assisted qd
1952  *
1953  * @sdev:				scsi device
1954  * @is_target_prop			true, if fw provided target properties.
1955  */
1956 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1957 						 bool is_target_prop)
1958 {
1959 	u8 interface_type;
1960 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1961 	u32 tgt_device_qd;
1962 	struct megasas_instance *instance;
1963 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1964 
1965 	instance = megasas_lookup_instance(sdev->host->host_no);
1966 	mr_device_priv_data = sdev->hostdata;
1967 	interface_type  = mr_device_priv_data->interface_type;
1968 
1969 	switch (interface_type) {
1970 	case SAS_PD:
1971 		device_qd = MEGASAS_SAS_QD;
1972 		break;
1973 	case SATA_PD:
1974 		device_qd = MEGASAS_SATA_QD;
1975 		break;
1976 	case NVME_PD:
1977 		device_qd = MEGASAS_NVME_QD;
1978 		break;
1979 	}
1980 
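	/*
	 * A firmware provided per-target queue depth overrides the interface
	 * default as long as it does not exceed the host can_queue.
	 */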
1981 	if (is_target_prop) {
1982 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1983 		if (tgt_device_qd &&
1984 		    (tgt_device_qd <= instance->host->can_queue))
1985 			device_qd = tgt_device_qd;
1986 	}
1987 
1988 	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
1989 		device_qd = instance->host->can_queue;
1990 
1991 	scsi_change_queue_depth(sdev, device_qd);
1992 }
1993 
1994 /*
1995  * megasas_set_static_target_properties -
1996  * Device property set by driver are static and it is not required to be
1997  * updated after OCR.
1998  *
1999  * set io timeout
2000  * set device queue depth
2001  * set nvme device properties. see - megasas_set_nvme_device_properties
2002  *
2003  * @sdev:				scsi device
2004  * @is_target_prop			true, if fw provided target properties.
2005  */
2006 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2007 						 bool is_target_prop)
2008 {
2009 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2010 	struct megasas_instance *instance;
2011 
2012 	instance = megasas_lookup_instance(sdev->host->host_no);
2013 
2014 	/*
2015 	 * The RAID firmware may require extended timeouts.
2016 	 */
2017 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2018 
	/*
	 * max_io_size_kb will be set to non zero for
	 * nvme based vd and syspd.
	 */
2022 	if (is_target_prop)
2023 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2024 
2025 	if (instance->nvme_page_size && max_io_size_kb)
2026 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2027 
2028 	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2029 }
2030 
2031 
2032 static int megasas_slave_configure(struct scsi_device *sdev)
2033 {
2034 	u16 pd_index = 0;
2035 	struct megasas_instance *instance;
2036 	int ret_target_prop = DCMD_FAILED;
2037 	bool is_target_prop = false;
2038 
2039 	instance = megasas_lookup_instance(sdev->host->host_no);
2040 	if (instance->pd_list_not_supported) {
2041 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2042 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2043 				sdev->id;
2044 			if (instance->pd_list[pd_index].driveState !=
2045 				MR_PD_STATE_SYSTEM)
2046 				return -ENXIO;
2047 		}
2048 	}
2049 
2050 	mutex_lock(&instance->reset_mutex);
2051 	/* Send DCMD to Firmware and cache the information */
2052 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2053 		megasas_get_pd_info(instance, sdev);
2054 
	/*
	 * Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
	 */
2058 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2059 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2060 
2061 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2062 	megasas_set_static_target_properties(sdev, is_target_prop);
2063 
2064 	/* This sdev property may change post OCR */
2065 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2066 
2067 	mutex_unlock(&instance->reset_mutex);
2068 
2069 	return 0;
2070 }
2071 
2072 static int megasas_slave_alloc(struct scsi_device *sdev)
2073 {
2074 	u16 pd_index = 0;
2075 	struct megasas_instance *instance ;
2076 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2077 
2078 	instance = megasas_lookup_instance(sdev->host->host_no);
2079 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2080 		/*
2081 		 * Open the OS scan to the SYSTEM PD
2082 		 */
2083 		pd_index =
2084 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2085 			sdev->id;
2086 		if ((instance->pd_list_not_supported ||
2087 			instance->pd_list[pd_index].driveState ==
2088 			MR_PD_STATE_SYSTEM)) {
2089 			goto scan_target;
2090 		}
2091 		return -ENXIO;
2092 	}
2093 
2094 scan_target:
2095 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2096 					GFP_KERNEL);
2097 	if (!mr_device_priv_data)
2098 		return -ENOMEM;
2099 	sdev->hostdata = mr_device_priv_data;
2100 
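	/* Seed the per-device r1_ldio_hint with the adapter-wide default */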
2101 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2102 		   instance->r1_ldio_hint_default);
2103 	return 0;
2104 }
2105 
2106 static void megasas_slave_destroy(struct scsi_device *sdev)
2107 {
2108 	kfree(sdev->hostdata);
2109 	sdev->hostdata = NULL;
2110 }
2111 
2112 /*
2113 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2114 *                                       kill adapter
2115 * @instance:				Adapter soft state
2116 *
2117 */
2118 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2119 {
2120 	int i;
2121 	struct megasas_cmd *cmd_mfi;
2122 	struct megasas_cmd_fusion *cmd_fusion;
2123 	struct fusion_context *fusion = instance->ctrl_context;
2124 
2125 	/* Find all outstanding ioctls */
2126 	if (fusion) {
2127 		for (i = 0; i < instance->max_fw_cmds; i++) {
2128 			cmd_fusion = fusion->cmd_list[i];
2129 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2130 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2131 				if (cmd_mfi->sync_cmd &&
2132 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2133 					cmd_mfi->frame->hdr.cmd_status =
2134 							MFI_STAT_WRONG_STATE;
2135 					megasas_complete_cmd(instance,
2136 							     cmd_mfi, DID_OK);
2137 				}
2138 			}
2139 		}
2140 	} else {
2141 		for (i = 0; i < instance->max_fw_cmds; i++) {
2142 			cmd_mfi = instance->cmd_list[i];
2143 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2144 				MFI_CMD_ABORT)
2145 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2146 		}
2147 	}
2148 }
2149 
2150 
2151 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2152 {
2153 	/* Set critical error to block I/O & ioctls in case caller didn't */
2154 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2155 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2156 	msleep(1000);
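	/*
	 * Skinny and Fusion-class controllers take MFI_STOP_ADP through the
	 * doorbell register, older MFI adapters through inbound_doorbell; an
	 * SR-IOV VF does not stop the shared adapter.
	 */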
2157 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2158 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2159 		(instance->adapter_type != MFI_SERIES)) {
2160 		if (!instance->requestorId) {
2161 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2162 			/* Flush */
2163 			readl(&instance->reg_set->doorbell);
2164 		}
2165 		if (instance->requestorId && instance->peerIsPresent)
2166 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2167 	} else {
2168 		writel(MFI_STOP_ADP,
2169 			&instance->reg_set->inbound_doorbell);
2170 	}
2171 	/* Complete outstanding ioctls when adapter is killed */
2172 	megasas_complete_outstanding_ioctls(instance);
2173 }
2174 
2175  /**
2176   * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2177   *					restored to max value
2178   * @instance:			Adapter soft state
2179   *
2180   */
2181 void
2182 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2183 {
2184 	unsigned long flags;
2185 
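	/*
	 * Restore the saved can_queue once at least five seconds have passed
	 * since throttling began and outstanding commands have drained below
	 * the throttle queue depth.
	 */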
2186 	if (instance->flag & MEGASAS_FW_BUSY
2187 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2188 	    && atomic_read(&instance->fw_outstanding) <
2189 	    instance->throttlequeuedepth + 1) {
2190 
2191 		spin_lock_irqsave(instance->host->host_lock, flags);
2192 		instance->flag &= ~MEGASAS_FW_BUSY;
2193 
2194 		instance->host->can_queue = instance->cur_can_queue;
2195 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2196 	}
2197 }
2198 
2199 /**
2200  * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
2201  * @instance_addr:			Address of adapter soft state
2202  *
2203  * Tasklet to complete cmds
2204  */
2205 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2206 {
2207 	u32 producer;
2208 	u32 consumer;
2209 	u32 context;
2210 	struct megasas_cmd *cmd;
2211 	struct megasas_instance *instance =
2212 				(struct megasas_instance *)instance_addr;
2213 	unsigned long flags;
2214 
	/* If we have already declared adapter dead, do not complete cmds */
2216 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2217 		return;
2218 
2219 	spin_lock_irqsave(&instance->completion_lock, flags);
2220 
2221 	producer = le32_to_cpu(*instance->producer);
2222 	consumer = le32_to_cpu(*instance->consumer);
2223 
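	/*
	 * Walk the reply ring from consumer to producer; each entry holds the
	 * context (index into cmd_list) of a command completed by the FW.
	 */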
2224 	while (consumer != producer) {
2225 		context = le32_to_cpu(instance->reply_queue[consumer]);
2226 		if (context >= instance->max_fw_cmds) {
2227 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2228 				context);
2229 			BUG();
2230 		}
2231 
2232 		cmd = instance->cmd_list[context];
2233 
2234 		megasas_complete_cmd(instance, cmd, DID_OK);
2235 
2236 		consumer++;
		if (consumer == (instance->max_fw_cmds + 1))
			consumer = 0;
2240 	}
2241 
2242 	*instance->consumer = cpu_to_le32(producer);
2243 
2244 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2245 
2246 	/*
2247 	 * Check if we can restore can_queue
2248 	 */
2249 	megasas_check_and_restore_queue_depth(instance);
2250 }
2251 
2252 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2253 
2254 /**
2255  * megasas_start_timer - Initializes sriov heartbeat timer object
2256  * @instance:		Adapter soft state
2257  *
2258  */
2259 void megasas_start_timer(struct megasas_instance *instance)
2260 {
2261 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2262 
2263 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2264 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2265 	add_timer(timer);
2266 }
2267 
2268 static void
2269 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2270 
2271 static void
2272 process_fw_state_change_wq(struct work_struct *work);
2273 
2274 static void megasas_do_ocr(struct megasas_instance *instance)
2275 {
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2279 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2280 	}
2281 	instance->instancet->disable_intr(instance);
2282 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2283 	instance->issuepend_done = 0;
2284 
2285 	atomic_set(&instance->fw_outstanding, 0);
2286 	megasas_internal_reset_defer_cmds(instance);
2287 	process_fw_state_change_wq(&instance->work_init);
2288 }
2289 
2290 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2291 					    int initial)
2292 {
2293 	struct megasas_cmd *cmd;
2294 	struct megasas_dcmd_frame *dcmd;
2295 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2296 	dma_addr_t new_affiliation_111_h;
2297 	int ld, retval = 0;
2298 	u8 thisVf;
2299 
2300 	cmd = megasas_get_cmd(instance);
2301 
2302 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
		       "Failed to get cmd for scsi%d\n",
		       instance->host->host_no);
2306 		return -ENOMEM;
2307 	}
2308 
2309 	dcmd = &cmd->frame->dcmd;
2310 
2311 	if (!instance->vf_affiliation_111) {
2312 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2313 		       "affiliation for scsi%d\n", instance->host->host_no);
2314 		megasas_return_cmd(instance, cmd);
2315 		return -ENOMEM;
2316 	}
2317 
	if (initial) {
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
	} else {
2322 		new_affiliation_111 =
2323 			dma_alloc_coherent(&instance->pdev->dev,
2324 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2325 					   &new_affiliation_111_h, GFP_KERNEL);
2326 		if (!new_affiliation_111) {
2327 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2328 			       "memory for new affiliation for scsi%d\n",
2329 			       instance->host->host_no);
2330 			megasas_return_cmd(instance, cmd);
2331 			return -ENOMEM;
2332 		}
2333 	}
2334 
2335 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2336 
2337 	dcmd->cmd = MFI_CMD_DCMD;
2338 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2339 	dcmd->sge_count = 1;
2340 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2341 	dcmd->timeout = 0;
2342 	dcmd->pad_0 = 0;
2343 	dcmd->data_xfer_len =
2344 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2345 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2346 
2347 	if (initial)
2348 		dcmd->sgl.sge32[0].phys_addr =
2349 			cpu_to_le32(instance->vf_affiliation_111_h);
2350 	else
2351 		dcmd->sgl.sge32[0].phys_addr =
2352 			cpu_to_le32(new_affiliation_111_h);
2353 
2354 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2355 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2356 
2357 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2358 	       "scsi%d\n", instance->host->host_no);
2359 
2360 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2361 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2362 		       " failed with status 0x%x for scsi%d\n",
2363 		       dcmd->cmd_status, instance->host->host_no);
2364 		retval = 1; /* Do a scan if we couldn't get affiliation */
2365 		goto out;
2366 	}
2367 
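	/*
	 * On a refresh, compare the new per-LD policy for this VF against the
	 * cached affiliation; any difference means the host must be rescanned.
	 */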
2368 	if (!initial) {
2369 		thisVf = new_affiliation_111->thisVf;
2370 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2371 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2372 			    new_affiliation_111->map[ld].policy[thisVf]) {
2373 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2374 				       "Got new LD/VF affiliation for scsi%d\n",
2375 				       instance->host->host_no);
2376 				memcpy(instance->vf_affiliation_111,
2377 				       new_affiliation_111,
2378 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2379 				retval = 1;
2380 				goto out;
2381 			}
2382 	}
2383 out:
2384 	if (new_affiliation_111) {
2385 		dma_free_coherent(&instance->pdev->dev,
2386 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2387 				    new_affiliation_111,
2388 				    new_affiliation_111_h);
2389 	}
2390 
2391 	megasas_return_cmd(instance, cmd);
2392 
2393 	return retval;
2394 }
2395 
2396 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2397 					    int initial)
2398 {
2399 	struct megasas_cmd *cmd;
2400 	struct megasas_dcmd_frame *dcmd;
2401 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2402 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2403 	dma_addr_t new_affiliation_h;
2404 	int i, j, retval = 0, found = 0, doscan = 0;
2405 	u8 thisVf;
2406 
2407 	cmd = megasas_get_cmd(instance);
2408 
2409 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2411 		       "Failed to get cmd for scsi%d\n",
2412 		       instance->host->host_no);
2413 		return -ENOMEM;
2414 	}
2415 
2416 	dcmd = &cmd->frame->dcmd;
2417 
2418 	if (!instance->vf_affiliation) {
2419 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2420 		       "affiliation for scsi%d\n", instance->host->host_no);
2421 		megasas_return_cmd(instance, cmd);
2422 		return -ENOMEM;
2423 	}
2424 
	if (initial) {
		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
		       sizeof(struct MR_LD_VF_AFFILIATION));
	} else {
2429 		new_affiliation =
2430 			dma_alloc_coherent(&instance->pdev->dev,
2431 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2432 					   &new_affiliation_h, GFP_KERNEL);
2433 		if (!new_affiliation) {
2434 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2435 			       "memory for new affiliation for scsi%d\n",
2436 			       instance->host->host_no);
2437 			megasas_return_cmd(instance, cmd);
2438 			return -ENOMEM;
2439 		}
2440 	}
2441 
2442 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2443 
2444 	dcmd->cmd = MFI_CMD_DCMD;
2445 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2446 	dcmd->sge_count = 1;
2447 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2448 	dcmd->timeout = 0;
2449 	dcmd->pad_0 = 0;
2450 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2451 		sizeof(struct MR_LD_VF_AFFILIATION));
2452 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2453 
2454 	if (initial)
2455 		dcmd->sgl.sge32[0].phys_addr =
2456 			cpu_to_le32(instance->vf_affiliation_h);
2457 	else
2458 		dcmd->sgl.sge32[0].phys_addr =
2459 			cpu_to_le32(new_affiliation_h);
2460 
2461 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2462 		sizeof(struct MR_LD_VF_AFFILIATION));
2463 
2464 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2465 	       "scsi%d\n", instance->host->host_no);
2466 
2467 
2468 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2469 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2470 		       " failed with status 0x%x for scsi%d\n",
2471 		       dcmd->cmd_status, instance->host->host_no);
2472 		retval = 1; /* Do a scan if we couldn't get affiliation */
2473 		goto out;
2474 	}
2475 
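	/*
	 * On a refresh, walk both the new and the cached maps in each
	 * direction; a policy change, or an LD appearing or disappearing for
	 * this VF, sets doscan and triggers a rescan.
	 */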
2476 	if (!initial) {
2477 		if (!new_affiliation->ldCount) {
2478 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2479 			       "affiliation for passive path for scsi%d\n",
2480 			       instance->host->host_no);
2481 			retval = 1;
2482 			goto out;
2483 		}
2484 		newmap = new_affiliation->map;
2485 		savedmap = instance->vf_affiliation->map;
2486 		thisVf = new_affiliation->thisVf;
2487 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2488 			found = 0;
2489 			for (j = 0; j < instance->vf_affiliation->ldCount;
2490 			     j++) {
2491 				if (newmap->ref.targetId ==
2492 				    savedmap->ref.targetId) {
2493 					found = 1;
2494 					if (newmap->policy[thisVf] !=
2495 					    savedmap->policy[thisVf]) {
2496 						doscan = 1;
2497 						goto out;
2498 					}
2499 				}
2500 				savedmap = (struct MR_LD_VF_MAP *)
2501 					((unsigned char *)savedmap +
2502 					 savedmap->size);
2503 			}
2504 			if (!found && newmap->policy[thisVf] !=
2505 			    MR_LD_ACCESS_HIDDEN) {
2506 				doscan = 1;
2507 				goto out;
2508 			}
2509 			newmap = (struct MR_LD_VF_MAP *)
2510 				((unsigned char *)newmap + newmap->size);
2511 		}
2512 
2513 		newmap = new_affiliation->map;
2514 		savedmap = instance->vf_affiliation->map;
2515 
2516 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2517 			found = 0;
2518 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2519 				if (savedmap->ref.targetId ==
2520 				    newmap->ref.targetId) {
2521 					found = 1;
2522 					if (savedmap->policy[thisVf] !=
2523 					    newmap->policy[thisVf]) {
2524 						doscan = 1;
2525 						goto out;
2526 					}
2527 				}
2528 				newmap = (struct MR_LD_VF_MAP *)
2529 					((unsigned char *)newmap +
2530 					 newmap->size);
2531 			}
2532 			if (!found && savedmap->policy[thisVf] !=
2533 			    MR_LD_ACCESS_HIDDEN) {
2534 				doscan = 1;
2535 				goto out;
2536 			}
2537 			savedmap = (struct MR_LD_VF_MAP *)
2538 				((unsigned char *)savedmap +
2539 				 savedmap->size);
2540 		}
2541 	}
2542 out:
2543 	if (doscan) {
2544 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2545 		       "affiliation for scsi%d\n", instance->host->host_no);
2546 		memcpy(instance->vf_affiliation, new_affiliation,
2547 		       new_affiliation->size);
2548 		retval = 1;
2549 	}
2550 
2551 	if (new_affiliation)
2552 		dma_free_coherent(&instance->pdev->dev,
2553 				    (MAX_LOGICAL_DRIVES + 1) *
2554 				    sizeof(struct MR_LD_VF_AFFILIATION),
2555 				    new_affiliation, new_affiliation_h);
2556 	megasas_return_cmd(instance, cmd);
2557 
2558 	return retval;
2559 }
2560 
2561 /* This function will get the current SR-IOV LD/VF affiliation */
2562 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2563 	int initial)
2564 {
2565 	int retval;
2566 
2567 	if (instance->PlasmaFW111)
2568 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2569 	else
2570 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2571 	return retval;
2572 }
2573 
2574 /* This function will tell FW to start the SR-IOV heartbeat */
2575 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2576 					 int initial)
2577 {
2578 	struct megasas_cmd *cmd;
2579 	struct megasas_dcmd_frame *dcmd;
2580 	int retval = 0;
2581 
2582 	cmd = megasas_get_cmd(instance);
2583 
2584 	if (!cmd) {
2585 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2586 		       "Failed to get cmd for scsi%d\n",
2587 		       instance->host->host_no);
2588 		return -ENOMEM;
2589 	}
2590 
2591 	dcmd = &cmd->frame->dcmd;
2592 
2593 	if (initial) {
2594 		instance->hb_host_mem =
2595 			dma_alloc_coherent(&instance->pdev->dev,
2596 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2597 					   &instance->hb_host_mem_h,
2598 					   GFP_KERNEL);
2599 		if (!instance->hb_host_mem) {
2600 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2601 			       " memory for heartbeat host memory for scsi%d\n",
2602 			       instance->host->host_no);
2603 			retval = -ENOMEM;
2604 			goto out;
2605 		}
2606 	}
2607 
2608 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2609 
2610 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2611 	dcmd->cmd = MFI_CMD_DCMD;
2612 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2613 	dcmd->sge_count = 1;
2614 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2615 	dcmd->timeout = 0;
2616 	dcmd->pad_0 = 0;
2617 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2618 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2619 
2620 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2621 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2622 
2623 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2624 	       instance->host->host_no);
2625 
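	/*
	 * Fusion-class adapters with interrupts enabled can wait for the DCMD
	 * to complete; otherwise fall back to polled mode.
	 */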
2626 	if ((instance->adapter_type != MFI_SERIES) &&
2627 	    !instance->mask_interrupts)
2628 		retval = megasas_issue_blocked_cmd(instance, cmd,
2629 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2630 	else
2631 		retval = megasas_issue_polled(instance, cmd);
2632 
2633 	if (retval) {
2634 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2635 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2636 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2637 			"timed out" : "failed", instance->host->host_no);
2638 		retval = 1;
2639 	}
2640 
2641 out:
2642 	megasas_return_cmd(instance, cmd);
2643 
2644 	return retval;
2645 }
2646 
2647 /* Handler for SR-IOV heartbeat */
2648 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2649 {
2650 	struct megasas_instance *instance =
2651 		from_timer(instance, t, sriov_heartbeat_timer);
2652 
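	/*
	 * If the FW advanced its counter since the last check, acknowledge it
	 * and re-arm the timer; a stalled counter means the heartbeat failed
	 * and recovery work is scheduled.
	 */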
2653 	if (instance->hb_host_mem->HB.fwCounter !=
2654 	    instance->hb_host_mem->HB.driverCounter) {
2655 		instance->hb_host_mem->HB.driverCounter =
2656 			instance->hb_host_mem->HB.fwCounter;
2657 		mod_timer(&instance->sriov_heartbeat_timer,
2658 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2659 	} else {
2660 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2661 		       "completed for scsi%d\n", instance->host->host_no);
2662 		schedule_work(&instance->work_init);
2663 	}
2664 }
2665 
2666 /**
2667  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2668  * @instance:				Adapter soft state
2669  *
2670  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2671  * complete all its outstanding commands. Returns error if one or more IOs
2672  * are pending after this time period. It also marks the controller dead.
2673  */
2674 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2675 {
2676 	int i, sl, outstanding;
2677 	u32 reset_index;
2678 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2679 	unsigned long flags;
2680 	struct list_head clist_local;
2681 	struct megasas_cmd *reset_cmd;
2682 	u32 fw_state;
2683 
2684 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2685 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2686 		__func__, __LINE__);
2687 		return FAILED;
2688 	}
2689 
2690 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2691 
2692 		INIT_LIST_HEAD(&clist_local);
2693 		spin_lock_irqsave(&instance->hba_lock, flags);
2694 		list_splice_init(&instance->internal_reset_pending_q,
2695 				&clist_local);
2696 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2697 
2698 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2699 		for (i = 0; i < wait_time; i++) {
2700 			msleep(1000);
2701 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2702 				break;
2703 		}
2704 
2705 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2706 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2707 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2708 			return FAILED;
2709 		}
2710 
2711 		reset_index = 0;
2712 		while (!list_empty(&clist_local)) {
2713 			reset_cmd = list_entry((&clist_local)->next,
2714 						struct megasas_cmd, list);
2715 			list_del_init(&reset_cmd->list);
2716 			if (reset_cmd->scmd) {
2717 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2718 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2719 					reset_index, reset_cmd,
2720 					reset_cmd->scmd->cmnd[0]);
2721 
2722 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2723 				megasas_return_cmd(instance, reset_cmd);
2724 			} else if (reset_cmd->sync_cmd) {
				dev_notice(&instance->pdev->dev,
						"%p synch cmds reset queue\n",
						reset_cmd);
2728 
2729 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2730 				instance->instancet->fire_cmd(instance,
2731 						reset_cmd->frame_phys_addr,
2732 						0, instance->reg_set);
2733 			} else {
				dev_notice(&instance->pdev->dev,
					"%p unexpected cmds lst\n",
					reset_cmd);
2737 			}
2738 			reset_index++;
2739 		}
2740 
2741 		return SUCCESS;
2742 	}
2743 
2744 	for (i = 0; i < resetwaittime; i++) {
2745 		outstanding = atomic_read(&instance->fw_outstanding);
2746 
2747 		if (!outstanding)
2748 			break;
2749 
2750 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
			       "commands to complete\n", i, outstanding);
			/*
			 * Call cmd completion routine. Cmd is to be
			 * completed directly without depending on isr.
			 */
2757 			megasas_complete_cmd_dpc((unsigned long)instance);
2758 		}
2759 
2760 		msleep(1000);
2761 	}
2762 
2763 	i = 0;
2764 	outstanding = atomic_read(&instance->fw_outstanding);
2765 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2766 
2767 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2768 		goto no_outstanding;
2769 
2770 	if (instance->disableOnlineCtrlReset)
2771 		goto kill_hba_and_failed;
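	/*
	 * Try up to three OCRs: if the FW is faulted or commands are still
	 * outstanding, reset the adapter and re-check; kill the HBA after the
	 * third failed attempt.
	 */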
2772 	do {
2773 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2774 			dev_info(&instance->pdev->dev,
2775 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2776 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2777 			if (i == 3)
2778 				goto kill_hba_and_failed;
2779 			megasas_do_ocr(instance);
2780 
2781 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2782 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2783 				__func__, __LINE__);
2784 				return FAILED;
2785 			}
2786 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2787 				__func__, __LINE__);
2788 
2789 			for (sl = 0; sl < 10; sl++)
2790 				msleep(500);
2791 
2792 			outstanding = atomic_read(&instance->fw_outstanding);
2793 
2794 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2795 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2796 				goto no_outstanding;
2797 		}
2798 		i++;
2799 	} while (i <= 3);
2800 
2801 no_outstanding:
2802 
2803 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2804 		__func__, __LINE__);
2805 	return SUCCESS;
2806 
2807 kill_hba_and_failed:
2808 
2809 	/* Reset not supported, kill adapter */
	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
		" disableOnlineCtrlReset %d fw_outstanding %d\n",
2812 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2813 		atomic_read(&instance->fw_outstanding));
2814 	megasas_dump_pending_frames(instance);
2815 	megaraid_sas_kill_hba(instance);
2816 
2817 	return FAILED;
2818 }
2819 
2820 /**
2821  * megasas_generic_reset -	Generic reset routine
2822  * @scmd:			Mid-layer SCSI command
2823  *
2824  * This routine implements a generic reset handler for device, bus and host
2825  * reset requests. Device, bus and host specific reset handlers can use this
2826  * function after they do their specific tasks.
2827  */
2828 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2829 {
2830 	int ret_val;
2831 	struct megasas_instance *instance;
2832 
2833 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2834 
2835 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2836 		 scmd->cmnd[0], scmd->retries);
2837 
2838 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2839 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2840 		return FAILED;
2841 	}
2842 
2843 	ret_val = megasas_wait_for_outstanding(instance);
2844 	if (ret_val == SUCCESS)
2845 		dev_notice(&instance->pdev->dev, "reset successful\n");
2846 	else
2847 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2848 
2849 	return ret_val;
2850 }
2851 
2852 /**
2853  * megasas_reset_timer - quiesce the adapter if required
2854  * @scmd:		scsi cmnd
2855  *
2856  * Sets the FW busy flag and reduces the host->can_queue if the
2857  * cmd has not been completed within the timeout period.
2858  */
2859 static enum
2860 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2861 {
2862 	struct megasas_instance *instance;
2863 	unsigned long flags;
2864 
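	/*
	 * Let the SCSI error handler take over once the command has been
	 * outstanding for twice the configured scmd_timeout.
	 */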
2865 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2866 				(scmd_timeout * 2) * HZ)) {
2867 		return BLK_EH_DONE;
2868 	}
2869 
2870 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* Command timed out; assume FW is busy and throttle IO */
2873 		spin_lock_irqsave(instance->host->host_lock, flags);
2874 
2875 		instance->host->can_queue = instance->throttlequeuedepth;
2876 		instance->last_time = jiffies;
2877 		instance->flag |= MEGASAS_FW_BUSY;
2878 
2879 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2880 	}
2881 	return BLK_EH_RESET_TIMER;
2882 }
2883 
2884 /**
2885  * megasas_dump -	This function will print hexdump of provided buffer.
2886  * @buf:		Buffer to be dumped
2887  * @sz:		Size in bytes
2888  * @format:		Different formats of dumping e.g. format=n will
2889  *			cause only 'n' 32 bit words to be dumped in a single
2890  *			line.
2891  */
2892 inline void
2893 megasas_dump(void *buf, int sz, int format)
2894 {
2895 	int i;
2896 	__le32 *buf_loc = (__le32 *)buf;
2897 
2898 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2899 		if ((i % format) == 0) {
2900 			if (i != 0)
2901 				printk(KERN_CONT "\n");
2902 			printk(KERN_CONT "%08x: ", (i * 4));
2903 		}
2904 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2905 	}
2906 	printk(KERN_CONT "\n");
2907 }
2908 
2909 /**
2910  * megasas_dump_reg_set -	This function will print hexdump of register set
2911  * @buf:			Buffer to be dumped
2912  * @sz:				Size in bytes
2913  * @format:			Different formats of dumping e.g. format=n will
2914  *				cause only 'n' 32 bit words to be dumped in a
2915  *				single line.
2916  */
2917 inline void
2918 megasas_dump_reg_set(void __iomem *reg_set)
2919 {
2920 	unsigned int i, sz = 256;
2921 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2922 
2923 	for (i = 0; i < (sz / sizeof(u32)); i++)
2924 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2925 }
2926 
2927 /**
2928  * megasas_dump_fusion_io -	This function will print key details
2929  *				of SCSI IO
2930  * @scmd:			SCSI command pointer of SCSI IO
2931  */
2932 void
2933 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2934 {
2935 	struct megasas_cmd_fusion *cmd;
2936 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2937 	struct megasas_instance *instance;
2938 
2939 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2940 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2941 
2942 	scmd_printk(KERN_INFO, scmd,
2943 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2944 		    scmd, scmd->retries, scmd->allowed);
2945 	scsi_print_command(scmd);
2946 
2947 	if (cmd) {
2948 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2949 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2950 		scmd_printk(KERN_INFO, scmd,
2951 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
2952 			    req_desc->SCSIIO.RequestFlags,
2953 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2954 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2955 
2956 		printk(KERN_INFO "IO request frame:\n");
2957 		megasas_dump(cmd->io_request,
2958 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2959 		printk(KERN_INFO "Chain frame:\n");
2960 		megasas_dump(cmd->sg_frame,
2961 			     instance->max_chain_frame_sz, 8);
2962 	}
2963 
2964 }
2965 
2966 /*
2967  * megasas_dump_sys_regs - This function will dump system registers through
2968  *			    sysfs.
2969  * @reg_set:		    Pointer to System register set.
2970  * @buf:		    Buffer to which output is to be written.
2971  * @return:		    Number of bytes written to buffer.
2972  */
2973 static inline ssize_t
2974 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2975 {
2976 	unsigned int i, sz = 256;
2977 	int bytes_wrote = 0;
2978 	char *loc = (char *)buf;
2979 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2980 
2981 	for (i = 0; i < sz / sizeof(u32); i++) {
		bytes_wrote += snprintf(loc + bytes_wrote,
					PAGE_SIZE - bytes_wrote,
					"%08x: %08x\n", (i * 4),
					readl(&reg[i]));
2985 	}
2986 	return bytes_wrote;
2987 }
2988 
2989 /**
2990  * megasas_reset_bus_host -	Bus & host reset handler entry point
2991  */
2992 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2993 {
2994 	int ret;
2995 	struct megasas_instance *instance;
2996 
2997 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2998 
2999 	scmd_printk(KERN_INFO, scmd,
3000 		"OCR is requested due to IO timeout!!\n");
3001 
3002 	scmd_printk(KERN_INFO, scmd,
3003 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3004 		scmd->device->host->shost_state,
3005 		scsi_host_busy(scmd->device->host),
3006 		atomic_read(&instance->fw_outstanding));
3007 	/*
3008 	 * First wait for all commands to complete
3009 	 */
3010 	if (instance->adapter_type == MFI_SERIES) {
3011 		ret = megasas_generic_reset(scmd);
3012 	} else {
3013 		megasas_dump_fusion_io(scmd);
3014 		ret = megasas_reset_fusion(scmd->device->host,
3015 				SCSIIO_TIMEOUT_OCR);
3016 	}
3017 
3018 	return ret;
3019 }
3020 
3021 /**
3022  * megasas_task_abort - Issues task abort request to firmware
3023  *			(supported only for fusion adapters)
3024  * @scmd:		SCSI command pointer
3025  */
3026 static int megasas_task_abort(struct scsi_cmnd *scmd)
3027 {
3028 	int ret;
3029 	struct megasas_instance *instance;
3030 
3031 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3032 
3033 	if (instance->adapter_type != MFI_SERIES)
3034 		ret = megasas_task_abort_fusion(scmd);
3035 	else {
3036 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3037 		ret = FAILED;
3038 	}
3039 
3040 	return ret;
3041 }
3042 
3043 /**
3044  * megasas_reset_target:  Issues target reset request to firmware
3045  *                        (supported only for fusion adapters)
3046  * @scmd:                 SCSI command pointer
3047  */
3048 static int megasas_reset_target(struct scsi_cmnd *scmd)
3049 {
3050 	int ret;
3051 	struct megasas_instance *instance;
3052 
3053 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3054 
3055 	if (instance->adapter_type != MFI_SERIES)
3056 		ret = megasas_reset_target_fusion(scmd);
3057 	else {
3058 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3059 		ret = FAILED;
3060 	}
3061 
3062 	return ret;
3063 }
3064 
3065 /**
3066  * megasas_bios_param - Returns disk geometry for a disk
3067  * @sdev:		device handle
3068  * @bdev:		block device
3069  * @capacity:		drive capacity
3070  * @geom:		geometry parameters
3071  */
3072 static int
3073 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3074 		 sector_t capacity, int geom[])
3075 {
3076 	int heads;
3077 	int sectors;
3078 	sector_t cylinders;
3079 	unsigned long tmp;
3080 
3081 	/* Default heads (64) & sectors (32) */
3082 	heads = 64;
3083 	sectors = 32;
3084 
3085 	tmp = heads * sectors;
3086 	cylinders = capacity;
3087 
3088 	sector_div(cylinders, tmp);
3089 
3090 	/*
3091 	 * Handle extended translation size for logical drives > 1Gb
3092 	 */
3093 
3094 	if (capacity >= 0x200000) {
3095 		heads = 255;
3096 		sectors = 63;
3097 		tmp = heads*sectors;
3098 		cylinders = capacity;
3099 		sector_div(cylinders, tmp);
3100 	}
3101 
3102 	geom[0] = heads;
3103 	geom[1] = sectors;
3104 	geom[2] = cylinders;
3105 
3106 	return 0;
3107 }
3108 
3109 static void megasas_aen_polling(struct work_struct *work);
3110 
3111 /**
3112  * megasas_service_aen -	Processes an event notification
3113  * @instance:			Adapter soft state
3114  * @cmd:			AEN command completed by the ISR
3115  *
3116  * For AEN, driver sends a command down to FW that is held by the FW till an
3117  * event occurs. When an event of interest occurs, FW completes the command
3118  * that it was previously holding.
3119  *
 * This routine sends a SIGIO signal to processes that have registered with the
3121  * driver for AEN.
3122  */
3123 static void
3124 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3125 {
3126 	unsigned long flags;
3127 
3128 	/*
3129 	 * Don't signal app if it is just an aborted previously registered aen
3130 	 */
3131 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3132 		spin_lock_irqsave(&poll_aen_lock, flags);
3133 		megasas_poll_wait_aen = 1;
3134 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3135 		wake_up(&megasas_poll_wait);
3136 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3137 	}
3138 	else
3139 		cmd->abort_aen = 0;
3140 
3141 	instance->aen_cmd = NULL;
3142 
3143 	megasas_return_cmd(instance, cmd);
3144 
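	/*
	 * Kick off delayed hotplug processing for the event unless the driver
	 * is unloading or pending command replay is still in progress.
	 */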
3145 	if ((instance->unload == 0) &&
3146 		((instance->issuepend_done == 1))) {
3147 		struct megasas_aen_event *ev;
3148 
3149 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3150 		if (!ev) {
3151 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3152 		} else {
3153 			ev->instance = instance;
3154 			instance->ev = ev;
3155 			INIT_DELAYED_WORK(&ev->hotplug_work,
3156 					  megasas_aen_polling);
3157 			schedule_delayed_work(&ev->hotplug_work, 0);
3158 		}
3159 	}
3160 }
3161 
3162 static ssize_t
3163 fw_crash_buffer_store(struct device *cdev,
3164 	struct device_attribute *attr, const char *buf, size_t count)
3165 {
3166 	struct Scsi_Host *shost = class_to_shost(cdev);
3167 	struct megasas_instance *instance =
3168 		(struct megasas_instance *) shost->hostdata;
3169 	int val = 0;
3170 	unsigned long flags;
3171 
3172 	if (kstrtoint(buf, 0, &val) != 0)
3173 		return -EINVAL;
3174 
3175 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3176 	instance->fw_crash_buffer_offset = val;
3177 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3178 	return strlen(buf);
3179 }
3180 
3181 static ssize_t
3182 fw_crash_buffer_show(struct device *cdev,
3183 	struct device_attribute *attr, char *buf)
3184 {
3185 	struct Scsi_Host *shost = class_to_shost(cdev);
3186 	struct megasas_instance *instance =
3187 		(struct megasas_instance *) shost->hostdata;
3188 	u32 size;
3189 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3190 	unsigned long chunk_left_bytes;
3191 	unsigned long src_addr;
3192 	unsigned long flags;
3193 	u32 buff_offset;
3194 
3195 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3196 	buff_offset = instance->fw_crash_buffer_offset;
3197 	if (!instance->crash_dump_buf &&
3198 		!((instance->fw_crash_state == AVAILABLE) ||
3199 		(instance->fw_crash_state == COPYING))) {
3200 		dev_err(&instance->pdev->dev,
3201 			"Firmware crash dump is not available\n");
3202 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3203 		return -EINVAL;
3204 	}
3205 
3206 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3207 		dev_err(&instance->pdev->dev,
3208 			"Firmware crash dump offset is out of range\n");
3209 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3210 		return 0;
3211 	}
3212 
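	/*
	 * Clamp the copy to the end of the current DMA chunk and to at most
	 * PAGE_SIZE - 1 bytes per read.
	 */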
3213 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3214 	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3215 	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3216 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3217 
3218 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3219 		(buff_offset % dmachunk);
3220 	memcpy(buf, (void *)src_addr, size);
3221 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3222 
3223 	return size;
3224 }
3225 
3226 static ssize_t
3227 fw_crash_buffer_size_show(struct device *cdev,
3228 	struct device_attribute *attr, char *buf)
3229 {
3230 	struct Scsi_Host *shost = class_to_shost(cdev);
3231 	struct megasas_instance *instance =
3232 		(struct megasas_instance *) shost->hostdata;
3233 
3234 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3235 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3236 }
3237 
3238 static ssize_t
3239 fw_crash_state_store(struct device *cdev,
3240 	struct device_attribute *attr, const char *buf, size_t count)
3241 {
3242 	struct Scsi_Host *shost = class_to_shost(cdev);
3243 	struct megasas_instance *instance =
3244 		(struct megasas_instance *) shost->hostdata;
3245 	int val = 0;
3246 	unsigned long flags;
3247 
3248 	if (kstrtoint(buf, 0, &val) != 0)
3249 		return -EINVAL;
3250 
	if (val <= AVAILABLE || val > COPY_ERROR) {
3252 		dev_err(&instance->pdev->dev, "application updates invalid "
3253 			"firmware crash state\n");
3254 		return -EINVAL;
3255 	}
3256 
3257 	instance->fw_crash_state = val;
3258 
3259 	if ((val == COPIED) || (val == COPY_ERROR)) {
3260 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3261 		megasas_free_host_crash_buffer(instance);
3262 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3263 		if (val == COPY_ERROR)
3264 			dev_info(&instance->pdev->dev, "application failed to "
3265 				"copy Firmware crash dump\n");
3266 		else
3267 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3268 				"copied successfully\n");
3269 	}
3270 	return strlen(buf);
3271 }
3272 
3273 static ssize_t
3274 fw_crash_state_show(struct device *cdev,
3275 	struct device_attribute *attr, char *buf)
3276 {
3277 	struct Scsi_Host *shost = class_to_shost(cdev);
3278 	struct megasas_instance *instance =
3279 		(struct megasas_instance *) shost->hostdata;
3280 
3281 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3282 }
3283 
3284 static ssize_t
3285 page_size_show(struct device *cdev,
3286 	struct device_attribute *attr, char *buf)
3287 {
3288 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3289 }
3290 
3291 static ssize_t
3292 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3293 	char *buf)
3294 {
3295 	struct Scsi_Host *shost = class_to_shost(cdev);
3296 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3297 
3298 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3299 }
3300 
3301 static ssize_t
3302 fw_cmds_outstanding_show(struct device *cdev,
3303 				 struct device_attribute *attr, char *buf)
3304 {
3305 	struct Scsi_Host *shost = class_to_shost(cdev);
3306 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3307 
3308 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3309 }
3310 
3311 static ssize_t
3312 enable_sdev_max_qd_show(struct device *cdev,
3313 	struct device_attribute *attr, char *buf)
3314 {
3315 	struct Scsi_Host *shost = class_to_shost(cdev);
3316 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3317 
3318 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3319 }
3320 
3321 static ssize_t
3322 enable_sdev_max_qd_store(struct device *cdev,
3323 	struct device_attribute *attr, const char *buf, size_t count)
3324 {
3325 	struct Scsi_Host *shost = class_to_shost(cdev);
3326 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3327 	u32 val = 0;
3328 	bool is_target_prop;
3329 	int ret_target_prop = DCMD_FAILED;
3330 	struct scsi_device *sdev;
3331 
3332 	if (kstrtou32(buf, 0, &val) != 0) {
3333 		pr_err("megasas: could not set enable_sdev_max_qd\n");
3334 		return -EINVAL;
3335 	}
3336 
3337 	mutex_lock(&instance->reset_mutex);
3338 	if (val)
3339 		instance->enable_sdev_max_qd = true;
3340 	else
3341 		instance->enable_sdev_max_qd = false;
3342 
3343 	shost_for_each_device(sdev, shost) {
3344 		ret_target_prop = megasas_get_target_prop(instance, sdev);
3345 		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3346 		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3347 	}
3348 	mutex_unlock(&instance->reset_mutex);
3349 
3350 	return strlen(buf);
3351 }
3352 
3353 static ssize_t
3354 dump_system_regs_show(struct device *cdev,
3355 			       struct device_attribute *attr, char *buf)
3356 {
3357 	struct Scsi_Host *shost = class_to_shost(cdev);
3358 	struct megasas_instance *instance =
3359 			(struct megasas_instance *)shost->hostdata;
3360 
3361 	return megasas_dump_sys_regs(instance->reg_set, buf);
3362 }
3363 
3364 static ssize_t
3365 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3366 			  char *buf)
3367 {
3368 	struct Scsi_Host *shost = class_to_shost(cdev);
3369 	struct megasas_instance *instance =
3370 			(struct megasas_instance *)shost->hostdata;
3371 
3372 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3373 			(unsigned long)instance->map_id);
3374 }
3375 
3376 static DEVICE_ATTR_RW(fw_crash_buffer);
3377 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3378 static DEVICE_ATTR_RW(fw_crash_state);
3379 static DEVICE_ATTR_RO(page_size);
3380 static DEVICE_ATTR_RO(ldio_outstanding);
3381 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3382 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3383 static DEVICE_ATTR_RO(dump_system_regs);
3384 static DEVICE_ATTR_RO(raid_map_id);
3385 
3386 static struct device_attribute *megaraid_host_attrs[] = {
3387 	&dev_attr_fw_crash_buffer_size,
3388 	&dev_attr_fw_crash_buffer,
3389 	&dev_attr_fw_crash_state,
3390 	&dev_attr_page_size,
3391 	&dev_attr_ldio_outstanding,
3392 	&dev_attr_fw_cmds_outstanding,
3393 	&dev_attr_enable_sdev_max_qd,
3394 	&dev_attr_dump_system_regs,
3395 	&dev_attr_raid_map_id,
3396 	NULL,
3397 };
3398 
3399 /*
 * SCSI host template for the megaraid_sas driver
3401  */
3402 static struct scsi_host_template megasas_template = {
3403 
3404 	.module = THIS_MODULE,
3405 	.name = "Avago SAS based MegaRAID driver",
3406 	.proc_name = "megaraid_sas",
3407 	.slave_configure = megasas_slave_configure,
3408 	.slave_alloc = megasas_slave_alloc,
3409 	.slave_destroy = megasas_slave_destroy,
3410 	.queuecommand = megasas_queue_command,
3411 	.eh_target_reset_handler = megasas_reset_target,
3412 	.eh_abort_handler = megasas_task_abort,
3413 	.eh_host_reset_handler = megasas_reset_bus_host,
3414 	.eh_timed_out = megasas_reset_timer,
3415 	.shost_attrs = megaraid_host_attrs,
3416 	.bios_param = megasas_bios_param,
3417 	.change_queue_depth = scsi_change_queue_depth,
3418 	.max_segment_size = 0xffffffff,
3419 	.no_write_same = 1,
3420 };
3421 
3422 /**
3423  * megasas_complete_int_cmd -	Completes an internal command
3424  * @instance:			Adapter soft state
3425  * @cmd:			Command to be completed
3426  *
3427  * The megasas_issue_blocked_cmd() function waits for a command to complete
3428  * after it issues a command. This function wakes up that waiting routine by
3429  * calling wake_up() on the wait queue.
3430  */
3431 static void
3432 megasas_complete_int_cmd(struct megasas_instance *instance,
3433 			 struct megasas_cmd *cmd)
3434 {
3435 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3436 	wake_up(&instance->int_cmd_wait_q);
3437 }
3438 
3439 /**
3440  * megasas_complete_abort -	Completes aborting a command
3441  * @instance:			Adapter soft state
3442  * @cmd:			Cmd that was issued to abort another cmd
3443  *
3444  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3445  * after it issues an abort on a previously issued command. This function
3446  * wakes up all functions waiting on the same wait queue.
3447  */
3448 static void
3449 megasas_complete_abort(struct megasas_instance *instance,
3450 		       struct megasas_cmd *cmd)
3451 {
3452 	if (cmd->sync_cmd) {
3453 		cmd->sync_cmd = 0;
3454 		cmd->cmd_status_drv = 0;
3455 		wake_up(&instance->abort_cmd_wait_q);
3456 	}
3457 }
3458 
3459 /**
3460  * megasas_complete_cmd -	Completes a command
3461  * @instance:			Adapter soft state
3462  * @cmd:			Command to be completed
3463  * @alt_status:			If non-zero, use this value as status to
3464  *				SCSI mid-layer instead of the value returned
3465  *				by the FW. This should be used if caller wants
3466  *				an alternate status (as in the case of aborted
3467  *				commands)
3468  */
3469 void
3470 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3471 		     u8 alt_status)
3472 {
3473 	int exception = 0;
3474 	struct megasas_header *hdr = &cmd->frame->hdr;
3475 	unsigned long flags;
3476 	struct fusion_context *fusion = instance->ctrl_context;
3477 	u32 opcode, status;
3478 
3479 	/* flag for the retry reset */
3480 	cmd->retry_for_fw_reset = 0;
3481 
3482 	if (cmd->scmd)
3483 		cmd->scmd->SCp.ptr = NULL;
3484 
3485 	switch (hdr->cmd) {
3486 	case MFI_CMD_INVALID:
		/*
		 * Some older 1068 controller FW may keep a pending
		 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		 * when booting the kdump kernel. Ignore this command to
		 * prevent a kernel panic on shutdown of the kdump kernel.
		 */
3491 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3492 		       "completed\n");
3493 		dev_warn(&instance->pdev->dev, "If you have a controller "
3494 		       "other than PERC5, please upgrade your firmware\n");
3495 		break;
3496 	case MFI_CMD_PD_SCSI_IO:
3497 	case MFI_CMD_LD_SCSI_IO:
3498 
3499 		/*
3500 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3501 		 * issued either through an IO path or an IOCTL path. If it
3502 		 * was via IOCTL, we will send it to internal completion.
3503 		 */
3504 		if (cmd->sync_cmd) {
3505 			cmd->sync_cmd = 0;
3506 			megasas_complete_int_cmd(instance, cmd);
3507 			break;
3508 		}
3509 		/* fall through */
3510 
3511 	case MFI_CMD_LD_READ:
3512 	case MFI_CMD_LD_WRITE:
3513 
3514 		if (alt_status) {
3515 			cmd->scmd->result = alt_status << 16;
3516 			exception = 1;
3517 		}
3518 
3519 		if (exception) {
3520 
3521 			atomic_dec(&instance->fw_outstanding);
3522 
3523 			scsi_dma_unmap(cmd->scmd);
3524 			cmd->scmd->scsi_done(cmd->scmd);
3525 			megasas_return_cmd(instance, cmd);
3526 
3527 			break;
3528 		}
3529 
3530 		switch (hdr->cmd_status) {
3531 
3532 		case MFI_STAT_OK:
3533 			cmd->scmd->result = DID_OK << 16;
3534 			break;
3535 
3536 		case MFI_STAT_SCSI_IO_FAILED:
3537 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3538 			cmd->scmd->result =
3539 			    (DID_ERROR << 16) | hdr->scsi_status;
3540 			break;
3541 
3542 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3543 
3544 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3545 
3546 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3547 				memset(cmd->scmd->sense_buffer, 0,
3548 				       SCSI_SENSE_BUFFERSIZE);
3549 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3550 				       hdr->sense_len);
3551 
3552 				cmd->scmd->result |= DRIVER_SENSE << 24;
3553 			}
3554 
3555 			break;
3556 
3557 		case MFI_STAT_LD_OFFLINE:
3558 		case MFI_STAT_DEVICE_NOT_FOUND:
3559 			cmd->scmd->result = DID_BAD_TARGET << 16;
3560 			break;
3561 
3562 		default:
3563 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3564 			       hdr->cmd_status);
3565 			cmd->scmd->result = DID_ERROR << 16;
3566 			break;
3567 		}
3568 
3569 		atomic_dec(&instance->fw_outstanding);
3570 
3571 		scsi_dma_unmap(cmd->scmd);
3572 		cmd->scmd->scsi_done(cmd->scmd);
3573 		megasas_return_cmd(instance, cmd);
3574 
3575 		break;
3576 
3577 	case MFI_CMD_SMP:
3578 	case MFI_CMD_STP:
3579 	case MFI_CMD_NVME:
3580 	case MFI_CMD_TOOLBOX:
3581 		megasas_complete_int_cmd(instance, cmd);
3582 		break;
3583 
3584 	case MFI_CMD_DCMD:
3585 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3586 		/* Check for LD map update */
3587 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3588 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3589 			fusion->fast_path_io = 0;
3590 			spin_lock_irqsave(instance->host->host_lock, flags);
3591 			status = cmd->frame->hdr.cmd_status;
3592 			instance->map_update_cmd = NULL;
3593 			if (status != MFI_STAT_OK) {
3594 				if (status != MFI_STAT_NOT_FOUND)
3595 					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
3596 					       cmd->frame->hdr.cmd_status);
3597 				else {
3598 					megasas_return_cmd(instance, cmd);
3599 					spin_unlock_irqrestore(
3600 						instance->host->host_lock,
3601 						flags);
3602 					break;
3603 				}
3604 			}
3605 
3606 			megasas_return_cmd(instance, cmd);
3607 
3608 			/*
3609 			 * Set fast path IO to ZERO.
3610 			 * Validate Map will set proper value.
3611 			 * Meanwhile all IOs will go as LD IO.
3612 			 */
3613 			if (status == MFI_STAT_OK &&
3614 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3615 				instance->map_id++;
3616 				fusion->fast_path_io = 1;
3617 			} else {
3618 				fusion->fast_path_io = 0;
3619 			}
3620 
3621 			megasas_sync_map_info(instance);
3622 			spin_unlock_irqrestore(instance->host->host_lock,
3623 					       flags);
3624 			break;
3625 		}
3626 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3627 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3628 			spin_lock_irqsave(&poll_aen_lock, flags);
3629 			megasas_poll_wait_aen = 0;
3630 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3631 		}
3632 
3633 		/* FW has an updated PD sequence */
3634 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3635 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3636 
3637 			spin_lock_irqsave(instance->host->host_lock, flags);
3638 			status = cmd->frame->hdr.cmd_status;
3639 			instance->jbod_seq_cmd = NULL;
3640 			megasas_return_cmd(instance, cmd);
3641 
3642 			if (status == MFI_STAT_OK) {
3643 				instance->pd_seq_map_id++;
3644 				/* Re-register a pd sync seq num cmd */
3645 				if (megasas_sync_pd_seq_num(instance, true))
3646 					instance->use_seqnum_jbod_fp = false;
3647 			} else
3648 				instance->use_seqnum_jbod_fp = false;
3649 
3650 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3651 			break;
3652 		}
3653 
3654 		/*
3655 		 * See if got an event notification
3656 		 */
3657 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3658 			megasas_service_aen(instance, cmd);
3659 		else
3660 			megasas_complete_int_cmd(instance, cmd);
3661 
3662 		break;
3663 
3664 	case MFI_CMD_ABORT:
3665 		/*
3666 		 * Cmd issued to abort another cmd returned
3667 		 */
3668 		megasas_complete_abort(instance, cmd);
3669 		break;
3670 
3671 	default:
3672 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3673 		       hdr->cmd);
3674 		megasas_complete_int_cmd(instance, cmd);
3675 		break;
3676 	}
3677 }
3678 
3679 /**
3680  * megasas_issue_pending_cmds_again -	issue all pending cmds
3681  *					in FW again because of the fw reset
3682  * @instance:				Adapter soft state
3683  */
3684 static inline void
3685 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3686 {
3687 	struct megasas_cmd *cmd;
3688 	struct list_head clist_local;
3689 	union megasas_evt_class_locale class_locale;
3690 	unsigned long flags;
3691 	u32 seq_num;
3692 
3693 	INIT_LIST_HEAD(&clist_local);
3694 	spin_lock_irqsave(&instance->hba_lock, flags);
3695 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3696 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3697 
3698 	while (!list_empty(&clist_local)) {
3699 		cmd = list_entry((&clist_local)->next,
3700 					struct megasas_cmd, list);
3701 		list_del_init(&cmd->list);
3702 
3703 		if (cmd->sync_cmd || cmd->scmd) {
3704 			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3705 				"detected to be pending while HBA reset\n",
3706 					cmd, cmd->scmd, cmd->sync_cmd);
3707 
3708 			cmd->retry_for_fw_reset++;
3709 
3710 			if (cmd->retry_for_fw_reset == 3) {
3711 				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3712 					"was tried multiple times during reset."
3713 					"Shutting down the HBA\n",
3714 					cmd, cmd->scmd, cmd->sync_cmd);
3715 				instance->instancet->disable_intr(instance);
3716 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3717 				megaraid_sas_kill_hba(instance);
3718 				return;
3719 			}
3720 		}
3721 
3722 		if (cmd->sync_cmd == 1) {
3723 			if (cmd->scmd) {
3724 				dev_notice(&instance->pdev->dev, "unexpected"
3725 					"cmd attached to internal command!\n");
3726 			}
3727 			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3728 						"on the internal reset queue,"
3729 						"issue it again.\n", cmd);
3730 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3731 			instance->instancet->fire_cmd(instance,
3732 							cmd->frame_phys_addr,
3733 							0, instance->reg_set);
3734 		} else if (cmd->scmd) {
3735 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3736 			"detected on the internal queue, issue again.\n",
3737 			cmd, cmd->scmd->cmnd[0]);
3738 
3739 			atomic_inc(&instance->fw_outstanding);
3740 			instance->instancet->fire_cmd(instance,
3741 					cmd->frame_phys_addr,
3742 					cmd->frame_count-1, instance->reg_set);
3743 		} else {
3744 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3745 				"internal reset defer list while re-issue!!\n",
3746 				cmd);
3747 		}
3748 	}
3749 
3750 	if (instance->aen_cmd) {
3751 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3752 		megasas_return_cmd(instance, instance->aen_cmd);
3753 
3754 		instance->aen_cmd = NULL;
3755 	}
3756 
3757 	/*
3758 	 * Initiate AEN (Asynchronous Event Notification)
3759 	 */
3760 	seq_num = instance->last_seq_num;
3761 	class_locale.members.reserved = 0;
3762 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3763 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3764 
3765 	megasas_register_aen(instance, seq_num, class_locale.word);
3766 }
3767 
3768 /**
3769  * Move the internal reset pending commands to a deferred queue.
3770  *
3771  * We move the commands pending at internal reset time to a
3772  * pending queue. This queue would be flushed after successful
3773  * completion of the internal reset sequence. if the internal reset
3774  * did not complete in time, the kernel reset handler would flush
3775  * these commands.
3776  **/
3777 static void
3778 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3779 {
3780 	struct megasas_cmd *cmd;
3781 	int i;
3782 	u16 max_cmd = instance->max_fw_cmds;
3783 	u32 defer_index;
3784 	unsigned long flags;
3785 
3786 	defer_index = 0;
3787 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3788 	for (i = 0; i < max_cmd; i++) {
3789 		cmd = instance->cmd_list[i];
3790 		if (cmd->sync_cmd == 1 || cmd->scmd) {
3791 			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3792 					"on the defer queue as internal\n",
3793 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3794 
3795 			if (!list_empty(&cmd->list)) {
3796 				dev_notice(&instance->pdev->dev, "ERROR while"
3797 					" moving this cmd:%p, %d %p, it was"
3798 					"discovered on some list?\n",
3799 					cmd, cmd->sync_cmd, cmd->scmd);
3800 
3801 				list_del_init(&cmd->list);
3802 			}
3803 			defer_index++;
3804 			list_add_tail(&cmd->list,
3805 				&instance->internal_reset_pending_q);
3806 		}
3807 	}
3808 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3809 }
3810 
3811 
3812 static void
3813 process_fw_state_change_wq(struct work_struct *work)
3814 {
3815 	struct megasas_instance *instance =
3816 		container_of(work, struct megasas_instance, work_init);
3817 	u32 wait;
3818 	unsigned long flags;
3819 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return;
	}
3825 
3826 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3827 		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3828 					"state, restarting it...\n");
3829 
3830 		instance->instancet->disable_intr(instance);
3831 		atomic_set(&instance->fw_outstanding, 0);
3832 
3833 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3834 		instance->instancet->adp_reset(instance, instance->reg_set);
3835 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3836 
3837 		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3838 					"initiating next stage...\n");
3839 
3840 		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3841 					"state 2 starting...\n");
3842 
		/* wait for about 30 seconds before starting the second init */
		for (wait = 0; wait < 30; wait++)
			msleep(1000);
3847 
3848 		if (megasas_transition_to_ready(instance, 1)) {
3849 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3850 
3851 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3852 			megaraid_sas_kill_hba(instance);
			return;
3854 		}
3855 
3856 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3857 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3858 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3859 			) {
3860 			*instance->consumer = *instance->producer;
3861 		} else {
3862 			*instance->consumer = 0;
3863 			*instance->producer = 0;
3864 		}
3865 
3866 		megasas_issue_init_mfi(instance);
3867 
3868 		spin_lock_irqsave(&instance->hba_lock, flags);
3869 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3870 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3871 		instance->instancet->enable_intr(instance);
3872 
3873 		megasas_issue_pending_cmds_again(instance);
3874 		instance->issuepend_done = 1;
3875 	}
3876 }
3877 
3878 /**
3879  * megasas_deplete_reply_queue -	Processes all completed commands
3880  * @instance:				Adapter soft state
3881  * @alt_status:				Alternate status to be returned to
3882  *					SCSI mid-layer instead of the status
3883  *					returned by the FW
3884  * Note: this must be called with hba lock held
3885  */
3886 static int
3887 megasas_deplete_reply_queue(struct megasas_instance *instance,
3888 					u8 alt_status)
3889 {
3890 	u32 mfiStatus;
3891 	u32 fw_state;
3892 
3893 	if ((mfiStatus = instance->instancet->check_reset(instance,
3894 					instance->reg_set)) == 1) {
3895 		return IRQ_HANDLED;
3896 	}
3897 
3898 	mfiStatus = instance->instancet->clear_intr(instance);
3899 	if (mfiStatus == 0) {
3900 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3901 		if (!instance->msix_vectors)
3902 			return IRQ_NONE;
3903 	}
3904 
3905 	instance->mfiStatus = mfiStatus;
3906 
3907 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3908 		fw_state = instance->instancet->read_fw_status_reg(
3909 				instance) & MFI_STATE_MASK;
3910 
3911 		if (fw_state != MFI_STATE_FAULT) {
3912 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3913 						fw_state);
3914 		}
3915 
3916 		if ((fw_state == MFI_STATE_FAULT) &&
3917 				(instance->disableOnlineCtrlReset == 0)) {
3918 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3919 
3920 			if ((instance->pdev->device ==
3921 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3922 				(instance->pdev->device ==
3923 					PCI_DEVICE_ID_DELL_PERC5) ||
3924 				(instance->pdev->device ==
3925 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3926 
3927 				*instance->consumer =
3928 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3929 			}
3930 
3931 
3932 			instance->instancet->disable_intr(instance);
3933 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3934 			instance->issuepend_done = 0;
3935 
3936 			atomic_set(&instance->fw_outstanding, 0);
3937 			megasas_internal_reset_defer_cmds(instance);
3938 
3939 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3940 					fw_state, atomic_read(&instance->adprecovery));
3941 
3942 			schedule_work(&instance->work_init);
3943 			return IRQ_HANDLED;
3944 
3945 		} else {
3946 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3947 				fw_state, instance->disableOnlineCtrlReset);
3948 		}
3949 	}
3950 
3951 	tasklet_schedule(&instance->isr_tasklet);
3952 	return IRQ_HANDLED;
3953 }
3954 /**
3955  * megasas_isr - isr entry point
3956  */
3957 static irqreturn_t megasas_isr(int irq, void *devp)
3958 {
3959 	struct megasas_irq_context *irq_context = devp;
3960 	struct megasas_instance *instance = irq_context->instance;
3961 	unsigned long flags;
3962 	irqreturn_t rc;
3963 
3964 	if (atomic_read(&instance->fw_reset_no_pci_access))
3965 		return IRQ_HANDLED;
3966 
3967 	spin_lock_irqsave(&instance->hba_lock, flags);
3968 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3969 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3970 
3971 	return rc;
3972 }
3973 
3974 /**
3975  * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Set when called from a reset/OCR context,
 *					in which case a faulted FW is waited on
 *					instead of failing immediately
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise, it
 * has to wait for the FW to reach the ready state.
3982  */
3983 int
3984 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3985 {
3986 	int i;
3987 	u8 max_wait;
3988 	u32 fw_state;
3989 	u32 abs_state, curr_abs_state;
3990 
3991 	abs_state = instance->instancet->read_fw_status_reg(instance);
3992 	fw_state = abs_state & MFI_STATE_MASK;
3993 
3994 	if (fw_state != MFI_STATE_READY)
3995 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3996 		       " state\n");
3997 
3998 	while (fw_state != MFI_STATE_READY) {
3999 
4000 		switch (fw_state) {
4001 
4002 		case MFI_STATE_FAULT:
4003 			dev_printk(KERN_ERR, &instance->pdev->dev,
4004 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4005 				   abs_state & MFI_STATE_FAULT_CODE,
4006 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4007 			if (ocr) {
4008 				max_wait = MEGASAS_RESET_WAIT_TIME;
4009 				break;
4010 			} else {
4011 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4012 				megasas_dump_reg_set(instance->reg_set);
4013 				return -ENODEV;
4014 			}
4015 
4016 		case MFI_STATE_WAIT_HANDSHAKE:
4017 			/*
4018 			 * Set the CLR bit in inbound doorbell
4019 			 */
4020 			if ((instance->pdev->device ==
4021 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4022 				(instance->pdev->device ==
4023 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4024 				(instance->adapter_type != MFI_SERIES))
4025 				writel(
4026 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4027 				  &instance->reg_set->doorbell);
4028 			else
4029 				writel(
4030 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4031 					&instance->reg_set->inbound_doorbell);
4032 
4033 			max_wait = MEGASAS_RESET_WAIT_TIME;
4034 			break;
4035 
4036 		case MFI_STATE_BOOT_MESSAGE_PENDING:
4037 			if ((instance->pdev->device ==
4038 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4039 				(instance->pdev->device ==
4040 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4041 				(instance->adapter_type != MFI_SERIES))
4042 				writel(MFI_INIT_HOTPLUG,
4043 				       &instance->reg_set->doorbell);
4044 			else
4045 				writel(MFI_INIT_HOTPLUG,
4046 					&instance->reg_set->inbound_doorbell);
4047 
4048 			max_wait = MEGASAS_RESET_WAIT_TIME;
4049 			break;
4050 
4051 		case MFI_STATE_OPERATIONAL:
4052 			/*
4053 			 * Bring it to READY state; assuming max wait 10 secs
4054 			 */
4055 			instance->instancet->disable_intr(instance);
4056 			if ((instance->pdev->device ==
4057 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4058 				(instance->pdev->device ==
4059 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4060 				(instance->adapter_type != MFI_SERIES)) {
4061 				writel(MFI_RESET_FLAGS,
4062 					&instance->reg_set->doorbell);
4063 
4064 				if (instance->adapter_type != MFI_SERIES) {
4065 					for (i = 0; i < (10 * 1000); i += 20) {
4066 						if (megasas_readl(
4067 							    instance,
4068 							    &instance->
4069 							    reg_set->
4070 							    doorbell) & 1)
4071 							msleep(20);
4072 						else
4073 							break;
4074 					}
4075 				}
4076 			} else
4077 				writel(MFI_RESET_FLAGS,
4078 					&instance->reg_set->inbound_doorbell);
4079 
4080 			max_wait = MEGASAS_RESET_WAIT_TIME;
4081 			break;
4082 
4083 		case MFI_STATE_UNDEFINED:
4084 			/*
4085 			 * This state should not last for more than 2 seconds
4086 			 */
4087 			max_wait = MEGASAS_RESET_WAIT_TIME;
4088 			break;
4089 
4090 		case MFI_STATE_BB_INIT:
4091 			max_wait = MEGASAS_RESET_WAIT_TIME;
4092 			break;
4093 
4094 		case MFI_STATE_FW_INIT:
4095 			max_wait = MEGASAS_RESET_WAIT_TIME;
4096 			break;
4097 
4098 		case MFI_STATE_FW_INIT_2:
4099 			max_wait = MEGASAS_RESET_WAIT_TIME;
4100 			break;
4101 
4102 		case MFI_STATE_DEVICE_SCAN:
4103 			max_wait = MEGASAS_RESET_WAIT_TIME;
4104 			break;
4105 
4106 		case MFI_STATE_FLUSH_CACHE:
4107 			max_wait = MEGASAS_RESET_WAIT_TIME;
4108 			break;
4109 
4110 		default:
4111 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4112 			       fw_state);
4113 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4114 			megasas_dump_reg_set(instance->reg_set);
4115 			return -ENODEV;
4116 		}
4117 
4118 		/*
4119 		 * The cur_state should not last for more than max_wait secs
4120 		 */
4121 		for (i = 0; i < max_wait * 50; i++) {
4122 			curr_abs_state = instance->instancet->
4123 				read_fw_status_reg(instance);
4124 
			if (abs_state == curr_abs_state)
				msleep(20);
			else
				break;
4129 		}
4130 
4131 		/*
4132 		 * Return error if fw_state hasn't changed after max_wait
4133 		 */
4134 		if (curr_abs_state == abs_state) {
4135 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4136 			       "in %d secs\n", fw_state, max_wait);
4137 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4138 			megasas_dump_reg_set(instance->reg_set);
4139 			return -ENODEV;
4140 		}
4141 
4142 		abs_state = curr_abs_state;
4143 		fw_state = curr_abs_state & MFI_STATE_MASK;
4144 	}
4145 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4146 
4147 	return 0;
4148 }
4149 
4150 /**
4151  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4152  * @instance:				Adapter soft state
4153  */
4154 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4155 {
4156 	int i;
4157 	u16 max_cmd = instance->max_mfi_cmds;
4158 	struct megasas_cmd *cmd;
4159 
4160 	if (!instance->frame_dma_pool)
4161 		return;
4162 
4163 	/*
4164 	 * Return all frames to pool
4165 	 */
4166 	for (i = 0; i < max_cmd; i++) {
4167 
4168 		cmd = instance->cmd_list[i];
4169 
4170 		if (cmd->frame)
4171 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4172 				      cmd->frame_phys_addr);
4173 
4174 		if (cmd->sense)
4175 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4176 				      cmd->sense_phys_addr);
4177 	}
4178 
4179 	/*
4180 	 * Now destroy the pool itself
4181 	 */
4182 	dma_pool_destroy(instance->frame_dma_pool);
4183 	dma_pool_destroy(instance->sense_dma_pool);
4184 
4185 	instance->frame_dma_pool = NULL;
4186 	instance->sense_dma_pool = NULL;
4187 }
4188 
4189 /**
4190  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4191  * @instance:			Adapter soft state
4192  *
 * Each command packet has an embedded DMA memory buffer that is used to hold
 * the MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet using
 * the DMA pool facility.
4197  */
4198 static int megasas_create_frame_pool(struct megasas_instance *instance)
4199 {
4200 	int i;
4201 	u16 max_cmd;
4202 	u32 frame_count;
4203 	struct megasas_cmd *cmd;
4204 
4205 	max_cmd = instance->max_mfi_cmds;
4206 
4207 	/*
4208 	 * For MFI controllers.
4209 	 * max_num_sge = 60
4210 	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
4211 	 * Total 960 byte (15 MFI frame of 64 byte)
4212 	 *
4213 	 * Fusion adapter require only 3 extra frame.
4214 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4215 	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
4216 	 * Total 192 byte (3 MFI frame of 64 byte)
4217 	 */
4218 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4219 			(15 + 1) : (3 + 1);
4220 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
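
	/*
	 * Worked example (illustrative): with MEGAMFI_FRAME_SIZE of 64 bytes
	 * (per the frame sizes quoted above), each MFI-series command gets
	 * 16 * 64 = 1024 bytes and each Fusion command gets 4 * 64 = 256 bytes
	 * of frame memory, including the base frame itself.
	 */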
4221 	/*
4222 	 * Use DMA pool facility provided by PCI layer
4223 	 */
4224 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4225 					&instance->pdev->dev,
4226 					instance->mfi_frame_size, 256, 0);
4227 
4228 	if (!instance->frame_dma_pool) {
4229 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4230 		return -ENOMEM;
4231 	}
4232 
4233 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4234 						   &instance->pdev->dev, 128,
4235 						   4, 0);
4236 
4237 	if (!instance->sense_dma_pool) {
4238 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4239 
4240 		dma_pool_destroy(instance->frame_dma_pool);
4241 		instance->frame_dma_pool = NULL;
4242 
4243 		return -ENOMEM;
4244 	}
4245 
4246 	/*
4247 	 * Allocate and attach a frame to each of the commands in cmd_list.
4248 	 * By making cmd->index as the context instead of the &cmd, we can
4249 	 * always use 32bit context regardless of the architecture
4250 	 */
4251 	for (i = 0; i < max_cmd; i++) {
4252 
4253 		cmd = instance->cmd_list[i];
4254 
4255 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4256 					    GFP_KERNEL, &cmd->frame_phys_addr);
4257 
4258 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4259 					    GFP_KERNEL, &cmd->sense_phys_addr);
4260 
4261 		/*
4262 		 * megasas_teardown_frame_pool() takes care of freeing
4263 		 * whatever has been allocated
4264 		 */
4265 		if (!cmd->frame || !cmd->sense) {
4266 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4267 			megasas_teardown_frame_pool(instance);
4268 			return -ENOMEM;
4269 		}
4270 
4271 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4272 		cmd->frame->io.pad_0 = 0;
4273 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4274 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4275 	}
4276 
4277 	return 0;
4278 }
4279 
4280 /**
4281  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4282  * @instance:		Adapter soft state
4283  */
4284 void megasas_free_cmds(struct megasas_instance *instance)
4285 {
4286 	int i;
4287 
4288 	/* First free the MFI frame pool */
4289 	megasas_teardown_frame_pool(instance);
4290 
4291 	/* Free all the commands in the cmd_list */
4292 	for (i = 0; i < instance->max_mfi_cmds; i++)
4293 
4294 		kfree(instance->cmd_list[i]);
4295 
4296 	/* Free the cmd_list buffer itself */
4297 	kfree(instance->cmd_list);
4298 	instance->cmd_list = NULL;
4299 
4300 	INIT_LIST_HEAD(&instance->cmd_pool);
4301 }
4302 
4303 /**
4304  * megasas_alloc_cmds -	Allocates the command packets
4305  * @instance:		Adapter soft state
4306  *
 * Each command that is issued to the FW, whether an IO command from the OS
 * or an internal command like an IOCTL, is wrapped in a local data structure
 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
 * actually issued to the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum. We always use 32 bit integers for the context. In
4317  * this driver, the 32 bit values are the indices into an array cmd_list.
4318  * This array is used only to look up the megasas_cmd given the context. The
4319  * free commands themselves are maintained in a linked list called cmd_pool.
4320  */
4321 int megasas_alloc_cmds(struct megasas_instance *instance)
4322 {
4323 	int i;
4324 	int j;
4325 	u16 max_cmd;
4326 	struct megasas_cmd *cmd;
4327 
4328 	max_cmd = instance->max_mfi_cmds;
4329 
4330 	/*
4331 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4332 	 * Allocate the dynamic array first and then allocate individual
4333 	 * commands.
4334 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);
4336 
4337 	if (!instance->cmd_list) {
4338 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4339 		return -ENOMEM;
4340 	}
4341 
4344 	for (i = 0; i < max_cmd; i++) {
4345 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4346 						GFP_KERNEL);
4347 
4348 		if (!instance->cmd_list[i]) {
4349 
4350 			for (j = 0; j < i; j++)
4351 				kfree(instance->cmd_list[j]);
4352 
4353 			kfree(instance->cmd_list);
4354 			instance->cmd_list = NULL;
4355 
4356 			return -ENOMEM;
4357 		}
4358 	}
4359 
4360 	for (i = 0; i < max_cmd; i++) {
4361 		cmd = instance->cmd_list[i];
4362 		memset(cmd, 0, sizeof(struct megasas_cmd));
4363 		cmd->index = i;
4364 		cmd->scmd = NULL;
4365 		cmd->instance = instance;
4366 
4367 		list_add_tail(&cmd->list, &instance->cmd_pool);
4368 	}
4369 
4370 	/*
4371 	 * Create a frame pool and assign one frame to each cmd
4372 	 */
4373 	if (megasas_create_frame_pool(instance)) {
4374 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4375 		megasas_free_cmds(instance);
4376 		return -ENOMEM;
4377 	}
4378 
4379 	return 0;
4380 }
4381 
4382 /*
4383  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4384  * @instance:				Adapter soft state
4385  *
4386  * Return 0 for only Fusion adapter, if driver load/unload is not in progress
4387  * or FW is not under OCR.
4388  */
4389 inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4392 	if (instance->adapter_type == MFI_SERIES)
4393 		return KILL_ADAPTER;
4394 	else if (instance->unload ||
4395 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4396 		return IGNORE_TIMEOUT;
4397 	else
4398 		return INITIATE_OCR;
4399 }
4400 
4401 static void
4402 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4403 {
4404 	int ret;
4405 	struct megasas_cmd *cmd;
4406 	struct megasas_dcmd_frame *dcmd;
4407 
4408 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4409 	u16 device_id = 0;
4410 
4411 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4412 	cmd = megasas_get_cmd(instance);
4413 
4414 	if (!cmd) {
4415 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4416 		return;
4417 	}
4418 
4419 	dcmd = &cmd->frame->dcmd;
4420 
4421 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4422 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4423 
4424 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4425 	dcmd->cmd = MFI_CMD_DCMD;
4426 	dcmd->cmd_status = 0xFF;
4427 	dcmd->sge_count = 1;
4428 	dcmd->flags = MFI_FRAME_DIR_READ;
4429 	dcmd->timeout = 0;
4430 	dcmd->pad_0 = 0;
4431 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4432 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4433 
4434 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4435 				 sizeof(struct MR_PD_INFO));
4436 
4437 	if ((instance->adapter_type != MFI_SERIES) &&
4438 	    !instance->mask_interrupts)
4439 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4440 	else
4441 		ret = megasas_issue_polled(instance, cmd);
4442 
4443 	switch (ret) {
4444 	case DCMD_SUCCESS:
4445 		mr_device_priv_data = sdev->hostdata;
4446 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4447 		mr_device_priv_data->interface_type =
4448 				instance->pd_info->state.ddf.pdType.intf;
4449 		break;
4450 
4451 	case DCMD_TIMEOUT:
4452 
4453 		switch (dcmd_timeout_ocr_possible(instance)) {
4454 		case INITIATE_OCR:
4455 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4456 			mutex_unlock(&instance->reset_mutex);
4457 			megasas_reset_fusion(instance->host,
4458 				MFI_IO_TIMEOUT_OCR);
4459 			mutex_lock(&instance->reset_mutex);
4460 			break;
4461 		case KILL_ADAPTER:
4462 			megaraid_sas_kill_hba(instance);
4463 			break;
4464 		case IGNORE_TIMEOUT:
4465 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4466 				__func__, __LINE__);
4467 			break;
4468 		}
4469 
4470 		break;
4471 	}
4472 
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);
4477 }
4478 /*
4479  * megasas_get_pd_list_info -	Returns FW's pd_list structure
4480  * @instance:				Adapter soft state
4481  * @pd_list:				pd_list structure
4482  *
4483  * Issues an internal command (DCMD) to get the FW's controller PD
4484  * list structure.  This information is mainly used to find out SYSTEM
4485  * supported by the FW.
4486  */
4487 static int
4488 megasas_get_pd_list(struct megasas_instance *instance)
4489 {
4490 	int ret = 0, pd_index = 0;
4491 	struct megasas_cmd *cmd;
4492 	struct megasas_dcmd_frame *dcmd;
4493 	struct MR_PD_LIST *ci;
4494 	struct MR_PD_ADDRESS *pd_addr;
4495 
4496 	if (instance->pd_list_not_supported) {
4497 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4498 		"not supported by firmware\n");
4499 		return ret;
4500 	}
4501 
4502 	ci = instance->pd_list_buf;
4503 
4504 	cmd = megasas_get_cmd(instance);
4505 
4506 	if (!cmd) {
4507 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4508 		return -ENOMEM;
4509 	}
4510 
4511 	dcmd = &cmd->frame->dcmd;
4512 
4513 	memset(ci, 0, sizeof(*ci));
4514 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4515 
4516 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4517 	dcmd->mbox.b[1] = 0;
4518 	dcmd->cmd = MFI_CMD_DCMD;
4519 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4520 	dcmd->sge_count = 1;
4521 	dcmd->flags = MFI_FRAME_DIR_READ;
4522 	dcmd->timeout = 0;
4523 	dcmd->pad_0 = 0;
4524 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4525 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4526 
4527 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4528 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4529 
4530 	if ((instance->adapter_type != MFI_SERIES) &&
4531 	    !instance->mask_interrupts)
4532 		ret = megasas_issue_blocked_cmd(instance, cmd,
4533 			MFI_IO_TIMEOUT_SECS);
4534 	else
4535 		ret = megasas_issue_polled(instance, cmd);
4536 
4537 	switch (ret) {
4538 	case DCMD_FAILED:
4539 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4540 			"failed/not supported by firmware\n");
4541 
4542 		if (instance->adapter_type != MFI_SERIES)
4543 			megaraid_sas_kill_hba(instance);
4544 		else
4545 			instance->pd_list_not_supported = 1;
4546 		break;
4547 	case DCMD_TIMEOUT:
4548 
4549 		switch (dcmd_timeout_ocr_possible(instance)) {
4550 		case INITIATE_OCR:
4551 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4552 			/*
4553 			 * DCMD failed from AEN path.
			 * AEN path already holds reset_mutex to avoid PCI access
4555 			 * while OCR is in progress.
4556 			 */
4557 			mutex_unlock(&instance->reset_mutex);
4558 			megasas_reset_fusion(instance->host,
4559 						MFI_IO_TIMEOUT_OCR);
4560 			mutex_lock(&instance->reset_mutex);
4561 			break;
4562 		case KILL_ADAPTER:
4563 			megaraid_sas_kill_hba(instance);
4564 			break;
4565 		case IGNORE_TIMEOUT:
4566 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4567 				__func__, __LINE__);
4568 			break;
4569 		}
4570 
4571 		break;
4572 
4573 	case DCMD_SUCCESS:
4574 		pd_addr = ci->addr;
4575 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4576 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4577 				 __func__, le32_to_cpu(ci->count));
4578 
4579 		if ((le32_to_cpu(ci->count) >
4580 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4581 			break;
4582 
4583 		memset(instance->local_pd_list, 0,
4584 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4585 
4586 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4587 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4588 					le16_to_cpu(pd_addr->deviceId);
4589 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4590 					pd_addr->scsiDevType;
4591 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4592 					MR_PD_STATE_SYSTEM;
4593 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4594 				dev_info(&instance->pdev->dev,
4595 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4596 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4597 					 pd_addr->scsiDevType);
4598 			pd_addr++;
4599 		}
4600 
4601 		memcpy(instance->pd_list, instance->local_pd_list,
4602 			sizeof(instance->pd_list));
4603 		break;
4604 
4605 	}
4606 
4607 	if (ret != DCMD_TIMEOUT)
4608 		megasas_return_cmd(instance, cmd);
4609 
4610 	return ret;
4611 }
4612 
4613 /*
4614  * megasas_get_ld_list_info -	Returns FW's ld_list structure
4615  * @instance:				Adapter soft state
4616  * @ld_list:				ld_list structure
4617  *
4618  * Issues an internal command (DCMD) to get the FW's controller PD
4619  * list structure.  This information is mainly used to find out SYSTEM
4620  * supported by the FW.
4621  */
4622 static int
4623 megasas_get_ld_list(struct megasas_instance *instance)
4624 {
4625 	int ret = 0, ld_index = 0, ids = 0;
4626 	struct megasas_cmd *cmd;
4627 	struct megasas_dcmd_frame *dcmd;
4628 	struct MR_LD_LIST *ci;
4629 	dma_addr_t ci_h = 0;
4630 	u32 ld_count;
4631 
4632 	ci = instance->ld_list_buf;
4633 	ci_h = instance->ld_list_buf_h;
4634 
4635 	cmd = megasas_get_cmd(instance);
4636 
4637 	if (!cmd) {
4638 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4639 		return -ENOMEM;
4640 	}
4641 
4642 	dcmd = &cmd->frame->dcmd;
4643 
4644 	memset(ci, 0, sizeof(*ci));
4645 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4646 
4647 	if (instance->supportmax256vd)
4648 		dcmd->mbox.b[0] = 1;
4649 	dcmd->cmd = MFI_CMD_DCMD;
4650 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4651 	dcmd->sge_count = 1;
4652 	dcmd->flags = MFI_FRAME_DIR_READ;
4653 	dcmd->timeout = 0;
4654 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4655 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4656 	dcmd->pad_0  = 0;
4657 
4658 	megasas_set_dma_settings(instance, dcmd, ci_h,
4659 				 sizeof(struct MR_LD_LIST));
4660 
4661 	if ((instance->adapter_type != MFI_SERIES) &&
4662 	    !instance->mask_interrupts)
4663 		ret = megasas_issue_blocked_cmd(instance, cmd,
4664 			MFI_IO_TIMEOUT_SECS);
4665 	else
4666 		ret = megasas_issue_polled(instance, cmd);
4667 
4668 	ld_count = le32_to_cpu(ci->ldCount);
4669 
4670 	switch (ret) {
4671 	case DCMD_FAILED:
4672 		megaraid_sas_kill_hba(instance);
4673 		break;
4674 	case DCMD_TIMEOUT:
4675 
4676 		switch (dcmd_timeout_ocr_possible(instance)) {
4677 		case INITIATE_OCR:
4678 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4679 			/*
4680 			 * DCMD failed from AEN path.
			 * AEN path already holds reset_mutex to avoid PCI access
4682 			 * while OCR is in progress.
4683 			 */
4684 			mutex_unlock(&instance->reset_mutex);
4685 			megasas_reset_fusion(instance->host,
4686 						MFI_IO_TIMEOUT_OCR);
4687 			mutex_lock(&instance->reset_mutex);
4688 			break;
4689 		case KILL_ADAPTER:
4690 			megaraid_sas_kill_hba(instance);
4691 			break;
4692 		case IGNORE_TIMEOUT:
4693 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4694 				__func__, __LINE__);
4695 			break;
4696 		}
4697 
4698 		break;
4699 
4700 	case DCMD_SUCCESS:
4701 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4702 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4703 				 __func__, ld_count);
4704 
4705 		if (ld_count > instance->fw_supported_vd_count)
4706 			break;
4707 
4708 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4709 
4710 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4711 			if (ci->ldList[ld_index].state != 0) {
4712 				ids = ci->ldList[ld_index].ref.targetId;
4713 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4714 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4715 					dev_info(&instance->pdev->dev,
4716 						 "LD%d: targetID: 0x%03x\n",
4717 						 ld_index, ids);
4718 			}
4719 		}
4720 
4721 		break;
4722 	}
4723 
4724 	if (ret != DCMD_TIMEOUT)
4725 		megasas_return_cmd(instance, cmd);
4726 
4727 	return ret;
4728 }
4729 
4730 /**
 * megasas_ld_list_query -	Returns FW's LD target ID list
 * @instance:				Adapter soft state
 * @query_type:				LD query type passed to the FW in the mbox
 *
 * Issues an internal command (DCMD) to get the FW's LD target ID list.
 * This information is mainly used to find out the logical drives (LDs)
 * exported by the FW.
4738  */
4739 static int
4740 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4741 {
4742 	int ret = 0, ld_index = 0, ids = 0;
4743 	struct megasas_cmd *cmd;
4744 	struct megasas_dcmd_frame *dcmd;
4745 	struct MR_LD_TARGETID_LIST *ci;
4746 	dma_addr_t ci_h = 0;
4747 	u32 tgtid_count;
4748 
4749 	ci = instance->ld_targetid_list_buf;
4750 	ci_h = instance->ld_targetid_list_buf_h;
4751 
4752 	cmd = megasas_get_cmd(instance);
4753 
4754 	if (!cmd) {
4755 		dev_warn(&instance->pdev->dev,
4756 		         "megasas_ld_list_query: Failed to get cmd\n");
4757 		return -ENOMEM;
4758 	}
4759 
4760 	dcmd = &cmd->frame->dcmd;
4761 
4762 	memset(ci, 0, sizeof(*ci));
4763 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4764 
4765 	dcmd->mbox.b[0] = query_type;
4766 	if (instance->supportmax256vd)
4767 		dcmd->mbox.b[2] = 1;
4768 
4769 	dcmd->cmd = MFI_CMD_DCMD;
4770 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4771 	dcmd->sge_count = 1;
4772 	dcmd->flags = MFI_FRAME_DIR_READ;
4773 	dcmd->timeout = 0;
4774 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4775 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4776 	dcmd->pad_0  = 0;
4777 
4778 	megasas_set_dma_settings(instance, dcmd, ci_h,
4779 				 sizeof(struct MR_LD_TARGETID_LIST));
4780 
4781 	if ((instance->adapter_type != MFI_SERIES) &&
4782 	    !instance->mask_interrupts)
4783 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4784 	else
4785 		ret = megasas_issue_polled(instance, cmd);
4786 
4787 	switch (ret) {
4788 	case DCMD_FAILED:
4789 		dev_info(&instance->pdev->dev,
4790 			"DCMD not supported by firmware - %s %d\n",
4791 				__func__, __LINE__);
4792 		ret = megasas_get_ld_list(instance);
4793 		break;
4794 	case DCMD_TIMEOUT:
4795 		switch (dcmd_timeout_ocr_possible(instance)) {
4796 		case INITIATE_OCR:
4797 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4798 			/*
4799 			 * DCMD failed from AEN path.
			 * AEN path already holds reset_mutex to avoid PCI access
4801 			 * while OCR is in progress.
4802 			 */
4803 			mutex_unlock(&instance->reset_mutex);
4804 			megasas_reset_fusion(instance->host,
4805 						MFI_IO_TIMEOUT_OCR);
4806 			mutex_lock(&instance->reset_mutex);
4807 			break;
4808 		case KILL_ADAPTER:
4809 			megaraid_sas_kill_hba(instance);
4810 			break;
4811 		case IGNORE_TIMEOUT:
4812 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4813 				__func__, __LINE__);
4814 			break;
4815 		}
4816 
4817 		break;
4818 	case DCMD_SUCCESS:
4819 		tgtid_count = le32_to_cpu(ci->count);
4820 
4821 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4822 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4823 				 __func__, tgtid_count);
4824 
4825 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4826 			break;
4827 
4828 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4829 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4830 			ids = ci->targetId[ld_index];
4831 			instance->ld_ids[ids] = ci->targetId[ld_index];
4832 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4833 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4834 					 ld_index, ci->targetId[ld_index]);
4835 		}
4836 
4837 		break;
4838 	}
4839 
4840 	if (ret != DCMD_TIMEOUT)
4841 		megasas_return_cmd(instance, cmd);
4842 
4843 	return ret;
4844 }
4845 
4846 /**
4847  * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4848  * dcmd.mbox              - reserved
4849  * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4850  * Desc:    This DCMD will return the combined device list
4851  * Status:  MFI_STAT_OK - List returned successfully
4852  *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4853  *                                 disabled
4854  * @instance:			Adapter soft state
4855  * @is_probe:			Driver probe check
4856  * Return:			0 if DCMD succeeded
4857  *				 non-zero if failed
4858  */
4859 static int
4860 megasas_host_device_list_query(struct megasas_instance *instance,
4861 			       bool is_probe)
4862 {
4863 	int ret, i, target_id;
4864 	struct megasas_cmd *cmd;
4865 	struct megasas_dcmd_frame *dcmd;
4866 	struct MR_HOST_DEVICE_LIST *ci;
4867 	u32 count;
4868 	dma_addr_t ci_h;
4869 
4870 	ci = instance->host_device_list_buf;
4871 	ci_h = instance->host_device_list_buf_h;
4872 
4873 	cmd = megasas_get_cmd(instance);
4874 
4875 	if (!cmd) {
4876 		dev_warn(&instance->pdev->dev,
4877 			 "%s: failed to get cmd\n",
4878 			 __func__);
4879 		return -ENOMEM;
4880 	}
4881 
4882 	dcmd = &cmd->frame->dcmd;
4883 
4884 	memset(ci, 0, sizeof(*ci));
4885 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4886 
4887 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
4888 	dcmd->cmd = MFI_CMD_DCMD;
4889 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4890 	dcmd->sge_count = 1;
4891 	dcmd->flags = MFI_FRAME_DIR_READ;
4892 	dcmd->timeout = 0;
4893 	dcmd->pad_0 = 0;
4894 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4895 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4896 
4897 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4898 
4899 	if (!instance->mask_interrupts) {
4900 		ret = megasas_issue_blocked_cmd(instance, cmd,
4901 						MFI_IO_TIMEOUT_SECS);
4902 	} else {
4903 		ret = megasas_issue_polled(instance, cmd);
4904 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4905 	}
4906 
4907 	switch (ret) {
4908 	case DCMD_SUCCESS:
4909 		/* Fill the internal pd_list and ld_ids array based on
4910 		 * targetIds returned by FW
4911 		 */
4912 		count = le32_to_cpu(ci->count);
4913 
4914 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4915 			break;
4916 
4917 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4918 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4919 				 __func__, count);
4920 
4921 		memset(instance->local_pd_list, 0,
4922 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4923 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4924 		for (i = 0; i < count; i++) {
4925 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4926 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4927 				instance->local_pd_list[target_id].tid = target_id;
4928 				instance->local_pd_list[target_id].driveType =
4929 						ci->host_device_list[i].scsi_type;
4930 				instance->local_pd_list[target_id].driveState =
4931 						MR_PD_STATE_SYSTEM;
4932 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4933 					dev_info(&instance->pdev->dev,
4934 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4935 						 i, target_id, ci->host_device_list[i].scsi_type);
4936 			} else {
4937 				instance->ld_ids[target_id] = target_id;
4938 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4939 					dev_info(&instance->pdev->dev,
4940 						 "Device %d: LD targetID: 0x%03x\n",
4941 						 i, target_id);
4942 			}
4943 		}
4944 
4945 		memcpy(instance->pd_list, instance->local_pd_list,
4946 		       sizeof(instance->pd_list));
4947 		break;
4948 
4949 	case DCMD_TIMEOUT:
4950 		switch (dcmd_timeout_ocr_possible(instance)) {
4951 		case INITIATE_OCR:
4952 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4953 			mutex_unlock(&instance->reset_mutex);
4954 			megasas_reset_fusion(instance->host,
4955 				MFI_IO_TIMEOUT_OCR);
4956 			mutex_lock(&instance->reset_mutex);
4957 			break;
4958 		case KILL_ADAPTER:
4959 			megaraid_sas_kill_hba(instance);
4960 			break;
4961 		case IGNORE_TIMEOUT:
4962 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4963 				 __func__, __LINE__);
4964 			break;
4965 		}
4966 		break;
4967 	case DCMD_FAILED:
4968 		dev_err(&instance->pdev->dev,
4969 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4970 			__func__);
4971 		break;
4972 	}
4973 
4974 	if (ret != DCMD_TIMEOUT)
4975 		megasas_return_cmd(instance, cmd);
4976 
4977 	return ret;
4978 }
4979 
4980 /*
4981  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4982  * instance			 : Controller's instance
4983 */
4984 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4985 {
4986 	struct fusion_context *fusion;
4987 	u32 ventura_map_sz = 0;
4988 
4989 	fusion = instance->ctrl_context;
	/* MFI based controllers have no fusion context; nothing to update */
4991 	if (!fusion)
4992 		return;
4993 
4994 	instance->supportmax256vd =
4995 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4996 	/* Below is additional check to address future FW enhancement */
4997 	if (instance->ctrl_info_buf->max_lds > 64)
4998 		instance->supportmax256vd = 1;
4999 
5000 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5001 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5002 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5003 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5004 	if (instance->supportmax256vd) {
5005 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5006 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5007 	} else {
5008 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5009 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5010 	}
5011 
5012 	dev_info(&instance->pdev->dev,
5013 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5014 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5015 		instance->ctrl_info_buf->max_lds);
5016 
5017 	if (instance->max_raid_mapsize) {
5018 		ventura_map_sz = instance->max_raid_mapsize *
5019 						MR_MIN_MAP_SIZE; /* 64k */
5020 		fusion->current_map_sz = ventura_map_sz;
5021 		fusion->max_map_sz = ventura_map_sz;
5022 	} else {
5023 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
5024 					(sizeof(struct MR_LD_SPAN_MAP) *
5025 					(instance->fw_supported_vd_count - 1));
5026 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5027 
5028 		fusion->max_map_sz =
5029 			max(fusion->old_map_sz, fusion->new_map_sz);
5030 
5031 		if (instance->supportmax256vd)
5032 			fusion->current_map_sz = fusion->new_map_sz;
5033 		else
5034 			fusion->current_map_sz = fusion->old_map_sz;
5035 	}
5036 	/* irrespective of FW raid maps, driver raid map is constant */
5037 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5038 }
5039 
5040 /*
5041  * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5042  * dcmd.hdr.length            - number of bytes to read
5043  * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5044  * Desc:			 Fill in snapdump properties
5045  * Status:			 MFI_STAT_OK- Command successful
5046  */
5047 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5048 {
5049 	int ret = 0;
5050 	struct megasas_cmd *cmd;
5051 	struct megasas_dcmd_frame *dcmd;
5052 	struct MR_SNAPDUMP_PROPERTIES *ci;
5053 	dma_addr_t ci_h = 0;
5054 
5055 	ci = instance->snapdump_prop;
5056 	ci_h = instance->snapdump_prop_h;
5057 
5058 	if (!ci)
5059 		return;
5060 
5061 	cmd = megasas_get_cmd(instance);
5062 
5063 	if (!cmd) {
5064 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5065 		return;
5066 	}
5067 
5068 	dcmd = &cmd->frame->dcmd;
5069 
5070 	memset(ci, 0, sizeof(*ci));
5071 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5072 
5073 	dcmd->cmd = MFI_CMD_DCMD;
5074 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5075 	dcmd->sge_count = 1;
5076 	dcmd->flags = MFI_FRAME_DIR_READ;
5077 	dcmd->timeout = 0;
5078 	dcmd->pad_0 = 0;
5079 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5080 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5081 
5082 	megasas_set_dma_settings(instance, dcmd, ci_h,
5083 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5084 
5085 	if (!instance->mask_interrupts) {
5086 		ret = megasas_issue_blocked_cmd(instance, cmd,
5087 						MFI_IO_TIMEOUT_SECS);
5088 	} else {
5089 		ret = megasas_issue_polled(instance, cmd);
5090 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5091 	}
5092 
5093 	switch (ret) {
5094 	case DCMD_SUCCESS:
5095 		instance->snapdump_wait_time =
5096 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5097 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5098 		break;
5099 
5100 	case DCMD_TIMEOUT:
5101 		switch (dcmd_timeout_ocr_possible(instance)) {
5102 		case INITIATE_OCR:
5103 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5104 			mutex_unlock(&instance->reset_mutex);
5105 			megasas_reset_fusion(instance->host,
5106 				MFI_IO_TIMEOUT_OCR);
5107 			mutex_lock(&instance->reset_mutex);
5108 			break;
5109 		case KILL_ADAPTER:
5110 			megaraid_sas_kill_hba(instance);
5111 			break;
5112 		case IGNORE_TIMEOUT:
5113 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5114 				__func__, __LINE__);
5115 			break;
5116 		}
5117 	}
5118 
5119 	if (ret != DCMD_TIMEOUT)
5120 		megasas_return_cmd(instance, cmd);
5121 }
5122 
5123 /**
 * megasas_get_ctrl_info -	Returns FW's controller structure
5125  * @instance:				Adapter soft state
5126  *
5127  * Issues an internal command (DCMD) to get the FW's controller structure.
5128  * This information is mainly used to find out the maximum IO transfer per
5129  * command supported by the FW.
5130  */
5131 int
5132 megasas_get_ctrl_info(struct megasas_instance *instance)
5133 {
5134 	int ret = 0;
5135 	struct megasas_cmd *cmd;
5136 	struct megasas_dcmd_frame *dcmd;
5137 	struct megasas_ctrl_info *ci;
5138 	dma_addr_t ci_h = 0;
5139 
5140 	ci = instance->ctrl_info_buf;
5141 	ci_h = instance->ctrl_info_buf_h;
5142 
5143 	cmd = megasas_get_cmd(instance);
5144 
5145 	if (!cmd) {
5146 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5147 		return -ENOMEM;
5148 	}
5149 
5150 	dcmd = &cmd->frame->dcmd;
5151 
5152 	memset(ci, 0, sizeof(*ci));
5153 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5154 
5155 	dcmd->cmd = MFI_CMD_DCMD;
5156 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5157 	dcmd->sge_count = 1;
5158 	dcmd->flags = MFI_FRAME_DIR_READ;
5159 	dcmd->timeout = 0;
5160 	dcmd->pad_0 = 0;
5161 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5162 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5163 	dcmd->mbox.b[0] = 1;
5164 
5165 	megasas_set_dma_settings(instance, dcmd, ci_h,
5166 				 sizeof(struct megasas_ctrl_info));
5167 
5168 	if ((instance->adapter_type != MFI_SERIES) &&
5169 	    !instance->mask_interrupts) {
5170 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5171 	} else {
5172 		ret = megasas_issue_polled(instance, cmd);
5173 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5174 	}
5175 
5176 	switch (ret) {
5177 	case DCMD_SUCCESS:
5178 		/* Save required controller information in
5179 		 * CPU endianness format.
5180 		 */
5181 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5182 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5183 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5184 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5185 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5186 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5187 
		/* Update the latest Ext VD info.
		 * From the init path, store current firmware details.
		 * From the OCR path, detect any firmware property changes,
		 * in case of a firmware upgrade without a system reboot.
		 */
5193 		megasas_update_ext_vd_details(instance);
5194 		instance->support_seqnum_jbod_fp =
5195 			ci->adapterOperations3.useSeqNumJbodFP;
5196 		instance->support_morethan256jbod =
5197 			ci->adapter_operations4.support_pd_map_target_id;
5198 		instance->support_nvme_passthru =
5199 			ci->adapter_operations4.support_nvme_passthru;
5200 		instance->support_pci_lane_margining =
5201 			ci->adapter_operations5.support_pci_lane_margining;
5202 		instance->task_abort_tmo = ci->TaskAbortTO;
5203 		instance->max_reset_tmo = ci->MaxResetTO;
5204 
		/* Check whether controller is iMR or MR */
5206 		instance->is_imr = (ci->memory_size ? 0 : 1);
5207 
5208 		instance->snapdump_wait_time =
5209 			(ci->properties.on_off_properties2.enable_snap_dump ?
5210 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5211 
5212 		instance->enable_fw_dev_list =
5213 			ci->properties.on_off_properties2.enable_fw_dev_list;
5214 
5215 		dev_info(&instance->pdev->dev,
5216 			"controller type\t: %s(%dMB)\n",
5217 			instance->is_imr ? "iMR" : "MR",
5218 			le16_to_cpu(ci->memory_size));
5219 
5220 		instance->disableOnlineCtrlReset =
5221 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5222 		instance->secure_jbod_support =
5223 			ci->adapterOperations3.supportSecurityonJBOD;
5224 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5225 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5226 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5227 			instance->secure_jbod_support ? "Yes" : "No");
5228 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5229 			 instance->support_nvme_passthru ? "Yes" : "No");
5230 		dev_info(&instance->pdev->dev,
5231 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5232 			 instance->task_abort_tmo, instance->max_reset_tmo);
5233 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5234 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5235 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5236 			 instance->support_pci_lane_margining ? "Yes" : "No");
5237 
5238 		break;
5239 
5240 	case DCMD_TIMEOUT:
5241 		switch (dcmd_timeout_ocr_possible(instance)) {
5242 		case INITIATE_OCR:
5243 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5244 			mutex_unlock(&instance->reset_mutex);
5245 			megasas_reset_fusion(instance->host,
5246 				MFI_IO_TIMEOUT_OCR);
5247 			mutex_lock(&instance->reset_mutex);
5248 			break;
5249 		case KILL_ADAPTER:
5250 			megaraid_sas_kill_hba(instance);
5251 			break;
5252 		case IGNORE_TIMEOUT:
5253 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5254 				__func__, __LINE__);
5255 			break;
5256 		}
5257 		break;
5258 	case DCMD_FAILED:
5259 		megaraid_sas_kill_hba(instance);
5260 		break;
5261 
5262 	}
5263 
5264 	if (ret != DCMD_TIMEOUT)
5265 		megasas_return_cmd(instance, cmd);
5266 
5267 	return ret;
5268 }
5269 
/**
 * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
 *					to firmware
 * @instance:				Adapter soft state
 * @crash_buf_state:			tell FW to turn ON/OFF crash dump feature
 *					MR_CRASH_BUF_TURN_OFF = 0
 *					MR_CRASH_BUF_TURN_ON = 1
 *
 * Issues an internal command (DCMD) to set parameters for the crash dump
 * feature. The driver sends the address of the crash dump DMA buffer and sets
 * the mbox to tell FW that the driver supports crash dump. This DCMD is sent
 * only if the crash dump feature is supported by the FW.
 *
 * Returns 0 on success, non-zero on failure.
 */
5285 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5286 	u8 crash_buf_state)
5287 {
5288 	int ret = 0;
5289 	struct megasas_cmd *cmd;
5290 	struct megasas_dcmd_frame *dcmd;
5291 
5292 	cmd = megasas_get_cmd(instance);
5293 
5294 	if (!cmd) {
5295 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5296 		return -ENOMEM;
5297 	}
5298 
5299 
5300 	dcmd = &cmd->frame->dcmd;
5301 
5302 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5303 	dcmd->mbox.b[0] = crash_buf_state;
5304 	dcmd->cmd = MFI_CMD_DCMD;
5305 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5306 	dcmd->sge_count = 1;
5307 	dcmd->flags = MFI_FRAME_DIR_NONE;
5308 	dcmd->timeout = 0;
5309 	dcmd->pad_0 = 0;
5310 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5311 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5312 
5313 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5314 				 CRASH_DMA_BUF_SIZE);
5315 
5316 	if ((instance->adapter_type != MFI_SERIES) &&
5317 	    !instance->mask_interrupts)
5318 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5319 	else
5320 		ret = megasas_issue_polled(instance, cmd);
5321 
5322 	if (ret == DCMD_TIMEOUT) {
5323 		switch (dcmd_timeout_ocr_possible(instance)) {
5324 		case INITIATE_OCR:
5325 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5326 			megasas_reset_fusion(instance->host,
5327 					MFI_IO_TIMEOUT_OCR);
5328 			break;
5329 		case KILL_ADAPTER:
5330 			megaraid_sas_kill_hba(instance);
5331 			break;
5332 		case IGNORE_TIMEOUT:
5333 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5334 				__func__, __LINE__);
5335 			break;
5336 		}
5337 	} else
5338 		megasas_return_cmd(instance, cmd);
5339 
5340 	return ret;
5341 }
5342 
5343 /**
5344  * megasas_issue_init_mfi -	Initializes the FW
5345  * @instance:		Adapter soft state
5346  *
5347  * Issues the INIT MFI cmd
5348  */
5349 static int
5350 megasas_issue_init_mfi(struct megasas_instance *instance)
5351 {
5352 	__le32 context;
5353 	struct megasas_cmd *cmd;
5354 	struct megasas_init_frame *init_frame;
5355 	struct megasas_init_queue_info *initq_info;
5356 	dma_addr_t init_frame_h;
5357 	dma_addr_t initq_info_h;
5358 
5359 	/*
	 * Prepare an init frame. Note the init frame points to queue info
5361 	 * structure. Each frame has SGL allocated after first 64 bytes. For
5362 	 * this frame - since we don't need any SGL - we use SGL's space as
5363 	 * queue info structure
5364 	 *
5365 	 * We will not get a NULL command below. We just created the pool.
5366 	 */
5367 	cmd = megasas_get_cmd(instance);
5368 
5369 	init_frame = (struct megasas_init_frame *)cmd->frame;
5370 	initq_info = (struct megasas_init_queue_info *)
5371 		((unsigned long)init_frame + 64);
5372 
5373 	init_frame_h = cmd->frame_phys_addr;
5374 	initq_info_h = init_frame_h + 64;
5375 
5376 	context = init_frame->context;
5377 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5378 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5379 	init_frame->context = context;
5380 
5381 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5382 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5383 
5384 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5385 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5386 
5387 	init_frame->cmd = MFI_CMD_INIT;
5388 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5389 	init_frame->queue_info_new_phys_addr_lo =
5390 		cpu_to_le32(lower_32_bits(initq_info_h));
5391 	init_frame->queue_info_new_phys_addr_hi =
5392 		cpu_to_le32(upper_32_bits(initq_info_h));
5393 
5394 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5395 
5396 	/*
5397 	 * disable the intr before firing the init frame to FW
5398 	 */
5399 	instance->instancet->disable_intr(instance);
5400 
5401 	/*
5402 	 * Issue the init frame in polled mode
5403 	 */
5404 
5405 	if (megasas_issue_polled(instance, cmd)) {
5406 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5407 		megasas_return_cmd(instance, cmd);
5408 		goto fail_fw_init;
5409 	}
5410 
5411 	megasas_return_cmd(instance, cmd);
5412 
5413 	return 0;
5414 
5415 fail_fw_init:
5416 	return -EINVAL;
5417 }
5418 
5419 static u32
5420 megasas_init_adapter_mfi(struct megasas_instance *instance)
5421 {
5422 	u32 context_sz;
5423 	u32 reply_q_sz;
5424 
5425 	/*
5426 	 * Get various operational parameters from status register
5427 	 */
5428 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5429 	/*
5430 	 * Reduce the max supported cmds by 1. This is to ensure that the
5431 	 * reply_q_sz (1 more than the max cmd that driver may send)
5432 	 * does not exceed max cmds that the FW can support
5433 	 */
5434 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5435 	instance->max_mfi_cmds = instance->max_fw_cmds;
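	/* Bits 16-23 of the FW status register report the maximum number of
	 * scatter-gather elements supported per command. */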
5436 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5437 					0x10;
5438 	/*
5439 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5440 	 * are reserved for IOCTL + driver's internal DCMDs.
5441 	 */
5442 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5443 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5444 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5445 			MEGASAS_SKINNY_INT_CMDS);
5446 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5447 	} else {
5448 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5449 			MEGASAS_INT_CMDS);
5450 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5451 	}
5452 
5453 	instance->cur_can_queue = instance->max_scsi_cmds;
5454 	/*
5455 	 * Create a pool of commands
5456 	 */
5457 	if (megasas_alloc_cmds(instance))
5458 		goto fail_alloc_cmds;
5459 
5460 	/*
5461 	 * Allocate memory for reply queue. Length of reply queue should
5462 	 * be _one_ more than the maximum commands handled by the firmware.
5463 	 *
	 * Note: When FW completes commands, it places corresponding context
5465 	 * values in this circular reply queue. This circular queue is a fairly
5466 	 * typical producer-consumer queue. FW is the producer (of completed
5467 	 * commands) and the driver is the consumer.
5468 	 */
5469 	context_sz = sizeof(u32);
5470 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5471 
5472 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5473 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5474 
5475 	if (!instance->reply_queue) {
5476 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5477 		goto fail_reply_queue;
5478 	}
5479 
5480 	if (megasas_issue_init_mfi(instance))
5481 		goto fail_fw_init;
5482 
5483 	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
			"Fail from %s %d\n", instance->unique_id,
			__func__, __LINE__);
5487 		goto fail_fw_init;
5488 	}
5489 
5490 	instance->fw_support_ieee = 0;
5491 	instance->fw_support_ieee =
5492 		(instance->instancet->read_fw_status_reg(instance) &
5493 		0x04000000);
5494 
	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
			instance->fw_support_ieee);
5497 
5498 	if (instance->fw_support_ieee)
5499 		instance->flag_ieee = 1;
5500 
5501 	return 0;
5502 
5503 fail_fw_init:
5504 
5505 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5506 			    instance->reply_queue, instance->reply_queue_h);
5507 fail_reply_queue:
5508 	megasas_free_cmds(instance);
5509 
5510 fail_alloc_cmds:
5511 	return 1;
5512 }
5513 
5514 static
5515 void megasas_setup_irq_poll(struct megasas_instance *instance)
5516 {
5517 	struct megasas_irq_context *irq_ctx;
5518 	u32 count, i;
5519 
5520 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
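	/* There is always at least one IRQ context, even in legacy INTx mode. */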
5521 
5522 	/* Initialize IRQ poll */
5523 	for (i = 0; i < count; i++) {
5524 		irq_ctx = &instance->irq_context[i];
5525 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5526 		irq_ctx->irq_poll_scheduled = false;
5527 		irq_poll_init(&irq_ctx->irqpoll,
5528 			      instance->threshold_reply_count,
5529 			      megasas_irqpoll);
5530 	}
5531 }
5532 
/**
5534  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5535  * @instance:				Adapter soft state
5536  *
5537  * Do not enable interrupt, only setup ISRs.
5538  *
5539  * Return 0 on success.
5540  */
5541 static int
5542 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5543 {
5544 	struct pci_dev *pdev;
5545 
5546 	pdev = instance->pdev;
5547 	instance->irq_context[0].instance = instance;
5548 	instance->irq_context[0].MSIxIndex = 0;
5549 	if (request_irq(pci_irq_vector(pdev, 0),
5550 			instance->instancet->service_isr, IRQF_SHARED,
5551 			"megasas", &instance->irq_context[0])) {
5552 		dev_err(&instance->pdev->dev,
5553 				"Failed to register IRQ from %s %d\n",
5554 				__func__, __LINE__);
5555 		return -1;
5556 	}
5557 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5558 	instance->low_latency_index_start = 0;
5559 	return 0;
5560 }
5561 
5562 /**
5563  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5564  * @instance:				Adapter soft state
5565  * @is_probe:				Driver probe check
5566  *
5567  * Do not enable interrupt, only setup ISRs.
5568  *
5569  * Return 0 on success.
5570  */
5571 static int
5572 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5573 {
5574 	int i, j;
5575 	struct pci_dev *pdev;
5576 
5577 	pdev = instance->pdev;
5578 
5579 	/* Try MSI-x */
5580 	for (i = 0; i < instance->msix_vectors; i++) {
5581 		instance->irq_context[i].instance = instance;
5582 		instance->irq_context[i].MSIxIndex = i;
5583 		if (request_irq(pci_irq_vector(pdev, i),
5584 			instance->instancet->service_isr, 0, "megasas",
5585 			&instance->irq_context[i])) {
5586 			dev_err(&instance->pdev->dev,
5587 				"Failed to register IRQ for vector %d.\n", i);
5588 			for (j = 0; j < i; j++)
5589 				free_irq(pci_irq_vector(pdev, j),
5590 					 &instance->irq_context[j]);
5591 			/* Retry irq register for IO_APIC*/
5592 			instance->msix_vectors = 0;
5593 			instance->msix_load_balance = false;
5594 			if (is_probe) {
5595 				pci_free_irq_vectors(instance->pdev);
5596 				return megasas_setup_irqs_ioapic(instance);
5597 			} else {
5598 				return -1;
5599 			}
5600 		}
5601 	}
5602 
5603 	return 0;
5604 }
5605 
/**
 * megasas_destroy_irqs -		unregister interrupts.
 * @instance:				Adapter soft state
 * return:				void
 */
static void
megasas_destroy_irqs(struct megasas_instance *instance)
{
5614 	int i;
5615 	int count;
5616 	struct megasas_irq_context *irq_ctx;
5617 
5618 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5619 	if (instance->adapter_type != MFI_SERIES) {
5620 		for (i = 0; i < count; i++) {
5621 			irq_ctx = &instance->irq_context[i];
5622 			irq_poll_disable(&irq_ctx->irqpoll);
5623 		}
5624 	}
5625 
5626 	if (instance->msix_vectors)
5627 		for (i = 0; i < instance->msix_vectors; i++) {
5628 			free_irq(pci_irq_vector(instance->pdev, i),
5629 				 &instance->irq_context[i]);
5630 		}
5631 	else
5632 		free_irq(pci_irq_vector(instance->pdev, 0),
5633 			 &instance->irq_context[0]);
5634 }
5635 
/**
 * megasas_setup_jbod_map -	setup JBOD map for FP seq_number.
 * @instance:				Adapter soft state
 */
5643 void
5644 megasas_setup_jbod_map(struct megasas_instance *instance)
5645 {
5646 	int i;
5647 	struct fusion_context *fusion = instance->ctrl_context;
5648 	u32 pd_seq_map_sz;
5649 
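	/* MR_PD_CFG_SEQ_NUM_SYNC already embeds the first MR_PD_CFG_SEQ entry,
	 * so only MAX_PHYSICAL_DEVICES - 1 additional entries are added here. */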
5650 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5651 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5652 
5653 	instance->use_seqnum_jbod_fp =
5654 		instance->support_seqnum_jbod_fp;
5655 	if (reset_devices || !fusion ||
5656 		!instance->support_seqnum_jbod_fp) {
5657 		dev_info(&instance->pdev->dev,
5658 			"JBOD sequence map is disabled %s %d\n",
5659 			__func__, __LINE__);
5660 		instance->use_seqnum_jbod_fp = false;
5661 		return;
5662 	}
5663 
5664 	if (fusion->pd_seq_sync[0])
5665 		goto skip_alloc;
5666 
5667 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5668 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5669 			(&instance->pdev->dev, pd_seq_map_sz,
5670 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5671 		if (!fusion->pd_seq_sync[i]) {
5672 			dev_err(&instance->pdev->dev,
5673 				"Failed to allocate memory from %s %d\n",
5674 				__func__, __LINE__);
5675 			if (i == 1) {
5676 				dma_free_coherent(&instance->pdev->dev,
5677 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5678 					fusion->pd_seq_phys[0]);
5679 				fusion->pd_seq_sync[0] = NULL;
5680 			}
5681 			instance->use_seqnum_jbod_fp = false;
5682 			return;
5683 		}
5684 	}
5685 
5686 skip_alloc:
5687 	if (!megasas_sync_pd_seq_num(instance, false) &&
5688 		!megasas_sync_pd_seq_num(instance, true))
5689 		instance->use_seqnum_jbod_fp = true;
5690 	else
5691 		instance->use_seqnum_jbod_fp = false;
5692 }
5693 
5694 static void megasas_setup_reply_map(struct megasas_instance *instance)
5695 {
5696 	const struct cpumask *mask;
5697 	unsigned int queue, cpu, low_latency_index_start;
5698 
5699 	low_latency_index_start = instance->low_latency_index_start;
5700 
5701 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5702 		mask = pci_irq_get_affinity(instance->pdev, queue);
5703 		if (!mask)
5704 			goto fallback;
5705 
5706 		for_each_cpu(cpu, mask)
5707 			instance->reply_map[cpu] = queue;
5708 	}
5709 	return;
5710 
5711 fallback:
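	/* No affinity information available: spread all possible CPUs
	 * round-robin across the reply queues, starting at
	 * low_latency_index_start. */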
5712 	queue = low_latency_index_start;
5713 	for_each_possible_cpu(cpu) {
5714 		instance->reply_map[cpu] = queue;
5715 		if (queue == (instance->msix_vectors - 1))
5716 			queue = low_latency_index_start;
5717 		else
5718 			queue++;
5719 	}
5720 }
5721 
5722 /**
5723  * megasas_get_device_list -	Get the PD and LD device list from FW.
5724  * @instance:			Adapter soft state
5725  * @return:			Success or failure
5726  *
5727  * Issue DCMDs to Firmware to get the PD and LD list.
5728  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5729  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5730  */
5731 static
5732 int megasas_get_device_list(struct megasas_instance *instance)
5733 {
5734 	memset(instance->pd_list, 0,
5735 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
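	/* 0xff marks an LD id slot as unused. */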
5736 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5737 
5738 	if (instance->enable_fw_dev_list) {
5739 		if (megasas_host_device_list_query(instance, true))
5740 			return FAILED;
5741 	} else {
5742 		if (megasas_get_pd_list(instance) < 0) {
5743 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5744 			return FAILED;
5745 		}
5746 
5747 		if (megasas_ld_list_query(instance,
5748 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5749 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5750 			return FAILED;
5751 		}
5752 	}
5753 
5754 	return SUCCESS;
5755 }
5756 
5757 /**
5758  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5759  * @instance:					Adapter soft state
5760  * return:					void
5761  */
5762 static inline void
5763 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5764 {
5765 	int i;
5766 	int local_numa_node;
5767 
5768 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5769 		local_numa_node = dev_to_node(&instance->pdev->dev);
5770 
5771 		for (i = 0; i < instance->low_latency_index_start; i++)
5772 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5773 				cpumask_of_node(local_numa_node));
5774 	}
5775 }
5776 
5777 static int
5778 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5779 {
5780 	int i, irq_flags;
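	/* Vectors below low_latency_index_start (high IOPS/management vectors)
	 * are excluded from automatic affinity spreading via .pre_vectors. */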
5781 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5782 	struct irq_affinity *descp = &desc;
5783 
5784 	irq_flags = PCI_IRQ_MSIX;
5785 
5786 	if (instance->smp_affinity_enable)
5787 		irq_flags |= PCI_IRQ_AFFINITY;
5788 	else
5789 		descp = NULL;
5790 
5791 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5792 		instance->low_latency_index_start,
5793 		instance->msix_vectors, irq_flags, descp);
5794 
5795 	return i;
5796 }
5797 
5798 /**
5799  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5800  * @instance:			Adapter soft state
5801  * return:			void
5802  */
5803 static void
5804 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5805 {
5806 	int i;
5807 	unsigned int num_msix_req;
5808 
5809 	i = __megasas_alloc_irq_vectors(instance);
5810 
5811 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5812 	    (i != instance->msix_vectors)) {
5813 		if (instance->msix_vectors)
5814 			pci_free_irq_vectors(instance->pdev);
5815 		/* Disable Balanced IOPS mode and try realloc vectors */
5816 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5817 		instance->low_latency_index_start = 1;
5818 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5819 
5820 		instance->msix_vectors = min(num_msix_req,
5821 				instance->msix_vectors);
5822 
5823 		i = __megasas_alloc_irq_vectors(instance);
5824 
5825 	}
5826 
5827 	dev_info(&instance->pdev->dev,
5828 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5829 
5830 	if (i > 0)
5831 		instance->msix_vectors = i;
5832 	else
5833 		instance->msix_vectors = 0;
5834 
5835 	if (instance->smp_affinity_enable)
5836 		megasas_set_high_iops_queue_affinity_hint(instance);
5837 }
5838 
5839 /**
5840  * megasas_init_fw -	Initializes the FW
5841  * @instance:		Adapter soft state
5842  *
5843  * This is the main function for initializing firmware
5844  */
5845 
5846 static int megasas_init_fw(struct megasas_instance *instance)
5847 {
5848 	u32 max_sectors_1;
5849 	u32 max_sectors_2, tmp_sectors, msix_enable;
5850 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5851 	resource_size_t base_addr;
5852 	void *base_addr_phys;
5853 	struct megasas_ctrl_info *ctrl_info = NULL;
5854 	unsigned long bar_list;
5855 	int i, j, loop;
5856 	struct IOV_111 *iovPtr;
5857 	struct fusion_context *fusion;
5858 	bool intr_coalescing;
5859 	unsigned int num_msix_req;
5860 	u16 lnksta, speed;
5861 
5862 	fusion = instance->ctrl_context;
5863 
5864 	/* Find first memory bar */
5865 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5866 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5867 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5868 					 "megasas: LSI")) {
5869 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5870 		return -EBUSY;
5871 	}
5872 
5873 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5874 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5875 
5876 	if (!instance->reg_set) {
5877 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5878 		goto fail_ioremap;
5879 	}
5880 
5881 	base_addr_phys = &base_addr;
5882 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5883 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5884 		   instance->bar, base_addr_phys, instance->reg_set);
5885 
5886 	if (instance->adapter_type != MFI_SERIES)
5887 		instance->instancet = &megasas_instance_template_fusion;
5888 	else {
5889 		switch (instance->pdev->device) {
5890 		case PCI_DEVICE_ID_LSI_SAS1078R:
5891 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5892 			instance->instancet = &megasas_instance_template_ppc;
5893 			break;
5894 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5895 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5896 			instance->instancet = &megasas_instance_template_gen2;
5897 			break;
5898 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5899 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5900 			instance->instancet = &megasas_instance_template_skinny;
5901 			break;
5902 		case PCI_DEVICE_ID_LSI_SAS1064R:
5903 		case PCI_DEVICE_ID_DELL_PERC5:
5904 		default:
5905 			instance->instancet = &megasas_instance_template_xscale;
5906 			instance->pd_list_not_supported = 1;
5907 			break;
5908 		}
5909 	}
5910 
5911 	if (megasas_transition_to_ready(instance, 0)) {
5912 		dev_info(&instance->pdev->dev,
5913 			 "Failed to transition controller to ready from %s!\n",
5914 			 __func__);
5915 		if (instance->adapter_type != MFI_SERIES) {
5916 			status_reg = instance->instancet->read_fw_status_reg(
5917 					instance);
5918 			if (status_reg & MFI_RESET_ADAPTER) {
5919 				if (megasas_adp_reset_wait_for_ready
5920 					(instance, true, 0) == FAILED)
5921 					goto fail_ready_state;
5922 			} else {
5923 				goto fail_ready_state;
5924 			}
5925 		} else {
5926 			atomic_set(&instance->fw_reset_no_pci_access, 1);
5927 			instance->instancet->adp_reset
5928 				(instance, instance->reg_set);
5929 			atomic_set(&instance->fw_reset_no_pci_access, 0);
5930 
			/* wait for about 30 seconds before retry */
5932 			ssleep(30);
5933 
5934 			if (megasas_transition_to_ready(instance, 0))
5935 				goto fail_ready_state;
5936 		}
5937 
5938 		dev_info(&instance->pdev->dev,
5939 			 "FW restarted successfully from %s!\n",
5940 			 __func__);
5941 	}
5942 
5943 	megasas_init_ctrl_params(instance);
5944 
5945 	if (megasas_set_dma_mask(instance))
5946 		goto fail_ready_state;
5947 
5948 	if (megasas_alloc_ctrl_mem(instance))
5949 		goto fail_alloc_dma_buf;
5950 
5951 	if (megasas_alloc_ctrl_dma_buffers(instance))
5952 		goto fail_alloc_dma_buf;
5953 
5954 	fusion = instance->ctrl_context;
5955 
5956 	if (instance->adapter_type >= VENTURA_SERIES) {
5957 		scratch_pad_2 =
5958 			megasas_readl(instance,
5959 				      &instance->reg_set->outbound_scratch_pad_2);
5960 		instance->max_raid_mapsize = ((scratch_pad_2 >>
5961 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5962 			MR_MAX_RAID_MAP_SIZE_MASK);
5963 	}
5964 
5965 	instance->enable_sdev_max_qd = enable_sdev_max_qd;
5966 
5967 	switch (instance->adapter_type) {
5968 	case VENTURA_SERIES:
5969 		fusion->pcie_bw_limitation = true;
5970 		break;
5971 	case AERO_SERIES:
5972 		fusion->r56_div_offload = true;
5973 		break;
5974 	default:
5975 		break;
5976 	}
5977 
5978 	/* Check if MSI-X is supported while in ready state */
5979 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5980 		       0x4000000) >> 0x1a;
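	/* Bit 26 of the FW status register advertises MSI-X capability. */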
5981 	if (msix_enable && !msix_disable) {
5982 
5983 		scratch_pad_1 = megasas_readl
5984 			(instance, &instance->reg_set->outbound_scratch_pad_1);
5985 		/* Check max MSI-X vectors */
5986 		if (fusion) {
5987 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5988 				/* Thunderbolt Series*/
5989 				instance->msix_vectors = (scratch_pad_1
5990 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5991 			} else {
5992 				instance->msix_vectors = ((scratch_pad_1
5993 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5994 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5995 
5996 				/*
5997 				 * For Invader series, > 8 MSI-x vectors
5998 				 * supported by FW/HW implies combined
5999 				 * reply queue mode is enabled.
6000 				 * For Ventura series, > 16 MSI-x vectors
6001 				 * supported by FW/HW implies combined
6002 				 * reply queue mode is enabled.
6003 				 */
6004 				switch (instance->adapter_type) {
6005 				case INVADER_SERIES:
6006 					if (instance->msix_vectors > 8)
6007 						instance->msix_combined = true;
6008 					break;
6009 				case AERO_SERIES:
6010 				case VENTURA_SERIES:
6011 					if (instance->msix_vectors > 16)
6012 						instance->msix_combined = true;
6013 					break;
6014 				}
6015 
6016 				if (rdpq_enable)
6017 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6018 								1 : 0;
6019 
6020 				if (instance->adapter_type >= INVADER_SERIES &&
6021 				    !instance->msix_combined) {
6022 					instance->msix_load_balance = true;
6023 					instance->smp_affinity_enable = false;
6024 				}
6025 
6026 				/* Save 1-15 reply post index address to local memory
6027 				 * Index 0 is already saved from reg offset
6028 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6029 				 */
6030 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6031 					instance->reply_post_host_index_addr[loop] =
6032 						(u32 __iomem *)
6033 						((u8 __iomem *)instance->reg_set +
6034 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6035 						+ (loop * 0x10));
6036 				}
6037 			}
6038 
6039 			dev_info(&instance->pdev->dev,
				 "firmware supports msix\t: (%d)\n",
6041 				 instance->msix_vectors);
6042 			if (msix_vectors)
6043 				instance->msix_vectors = min(msix_vectors,
6044 					instance->msix_vectors);
6045 		} else /* MFI adapters */
6046 			instance->msix_vectors = 1;
6047 
6048 
6049 		/*
6050 		 * For Aero (if some conditions are met), driver will configure a
6051 		 * few additional reply queues with interrupt coalescing enabled.
6052 		 * These queues with interrupt coalescing enabled are called
6053 		 * High IOPS queues and rest of reply queues (based on number of
6054 		 * logical CPUs) are termed as Low latency queues.
6055 		 *
6056 		 * Total Number of reply queues = High IOPS queues + low latency queues
6057 		 *
6058 		 * For rest of fusion adapters, 1 additional reply queue will be
6059 		 * reserved for management commands, rest of reply queues
6060 		 * (based on number of logical CPUs) will be used for IOs and
6061 		 * referenced as IO queues.
6062 		 * Total Number of reply queues = 1 + IO queues
6063 		 *
6064 		 * MFI adapters supports single MSI-x so single reply queue
6065 		 * will be used for IO and management commands.
6066 		 */
6067 
6068 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6069 								true : false;
6070 		if (intr_coalescing &&
6071 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6072 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6073 			instance->perf_mode = MR_BALANCED_PERF_MODE;
6074 		else
6075 			instance->perf_mode = MR_LATENCY_PERF_MODE;
6076 
6077 
6078 		if (instance->adapter_type == AERO_SERIES) {
6079 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6080 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6081 
6082 			/*
6083 			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6084 			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6085 			 */
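			/* PCI_EXP_LNKSTA_CLS value 0x4 corresponds to 16 GT/s. */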
6086 			if (speed < 0x4) {
6087 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6088 				fusion->pcie_bw_limitation = true;
6089 			}
6090 
6091 			/*
6092 			 * Performance mode settings provided through module parameter-perf_mode will
			 * take effect only for:
6094 			 * 1. Aero family of adapters.
6095 			 * 2. When user sets module parameter- perf_mode in range of 0-2.
6096 			 */
6097 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6098 				(perf_mode <= MR_LATENCY_PERF_MODE))
6099 				instance->perf_mode = perf_mode;
6100 			/*
6101 			 * If intr coalescing is not supported by controller FW, then IOPS
6102 			 * and Balanced modes are not feasible.
6103 			 */
6104 			if (!intr_coalescing)
6105 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6106 
6107 		}
6108 
6109 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6110 			instance->low_latency_index_start =
6111 				MR_HIGH_IOPS_QUEUE_COUNT;
6112 		else
6113 			instance->low_latency_index_start = 1;
6114 
6115 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6116 
6117 		instance->msix_vectors = min(num_msix_req,
6118 				instance->msix_vectors);
6119 
6120 		megasas_alloc_irq_vectors(instance);
6121 		if (!instance->msix_vectors)
6122 			instance->msix_load_balance = false;
6123 	}
6124 	/*
	 * MSI-X host index 0 is common for all adapters.
6126 	 * It is used for all MPT based Adapters.
6127 	 */
6128 	if (instance->msix_combined) {
6129 		instance->reply_post_host_index_addr[0] =
6130 				(u32 *)((u8 *)instance->reg_set +
6131 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6132 	} else {
6133 		instance->reply_post_host_index_addr[0] =
6134 			(u32 *)((u8 *)instance->reg_set +
6135 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6136 	}
6137 
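	/* MSI-X is unavailable or disabled: fall back to a single legacy INTx vector. */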
6138 	if (!instance->msix_vectors) {
6139 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6140 		if (i < 0)
6141 			goto fail_init_adapter;
6142 	}
6143 
6144 	megasas_setup_reply_map(instance);
6145 
6146 	dev_info(&instance->pdev->dev,
6147 		"current msix/online cpus\t: (%d/%d)\n",
6148 		instance->msix_vectors, (unsigned int)num_online_cpus());
6149 	dev_info(&instance->pdev->dev,
6150 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6151 
6152 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6153 		(unsigned long)instance);
6154 
6155 	/*
	 * Below are the default values for legacy firmware
	 * (non-fusion based controllers).
6158 	 */
6159 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6160 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6161 	/* Get operational params, sge flags, send init cmd to controller */
6162 	if (instance->instancet->init_adapter(instance))
6163 		goto fail_init_adapter;
6164 
6165 	if (instance->adapter_type >= VENTURA_SERIES) {
6166 		scratch_pad_3 =
6167 			megasas_readl(instance,
6168 				      &instance->reg_set->outbound_scratch_pad_3);
6169 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6170 			MR_DEFAULT_NVME_PAGE_SHIFT)
6171 			instance->nvme_page_size =
6172 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6173 
6174 		dev_info(&instance->pdev->dev,
6175 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6176 	}
6177 
6178 	if (instance->msix_vectors ?
6179 		megasas_setup_irqs_msix(instance, 1) :
6180 		megasas_setup_irqs_ioapic(instance))
6181 		goto fail_init_adapter;
6182 
6183 	if (instance->adapter_type != MFI_SERIES)
6184 		megasas_setup_irq_poll(instance);
6185 
6186 	instance->instancet->enable_intr(instance);
6187 
6188 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6189 
6190 	megasas_setup_jbod_map(instance);
6191 
6192 	if (megasas_get_device_list(instance) != SUCCESS) {
6193 		dev_err(&instance->pdev->dev,
6194 			"%s: megasas_get_device_list failed\n",
6195 			__func__);
6196 		goto fail_get_ld_pd_list;
6197 	}
6198 
6199 	/* stream detection initialization */
6200 	if (instance->adapter_type >= VENTURA_SERIES) {
6201 		fusion->stream_detect_by_ld =
6202 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6203 				sizeof(struct LD_STREAM_DETECT *),
6204 				GFP_KERNEL);
6205 		if (!fusion->stream_detect_by_ld) {
6206 			dev_err(&instance->pdev->dev,
6207 				"unable to allocate stream detection for pool of LDs\n");
6208 			goto fail_get_ld_pd_list;
6209 		}
6210 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6211 			fusion->stream_detect_by_ld[i] =
6212 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6213 				GFP_KERNEL);
6214 			if (!fusion->stream_detect_by_ld[i]) {
6215 				dev_err(&instance->pdev->dev,
					"unable to allocate stream detect by LD\n");
6217 				for (j = 0; j < i; ++j)
6218 					kfree(fusion->stream_detect_by_ld[j]);
6219 				kfree(fusion->stream_detect_by_ld);
6220 				fusion->stream_detect_by_ld = NULL;
6221 				goto fail_get_ld_pd_list;
6222 			}
6223 			fusion->stream_detect_by_ld[i]->mru_bit_map
6224 				= MR_STREAM_BITMAP;
6225 		}
6226 	}
6227 
6228 	/*
6229 	 * Compute the max allowed sectors per IO: The controller info has two
6230 	 * limits on max sectors. Driver should use the minimum of these two.
6231 	 *
6232 	 * 1 << stripe_sz_ops.min = max sectors per strip
6233 	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so the number always ended up as zero.
6236 	 */
6237 	tmp_sectors = 0;
6238 	ctrl_info = instance->ctrl_info_buf;
6239 
6240 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6241 		le16_to_cpu(ctrl_info->max_strips_per_io);
6242 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6243 
6244 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6245 
6246 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6247 	instance->passive = ctrl_info->cluster.passive;
6248 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6249 	instance->UnevenSpanSupport =
6250 		ctrl_info->adapterOperations2.supportUnevenSpans;
6251 	if (instance->UnevenSpanSupport) {
6252 		struct fusion_context *fusion = instance->ctrl_context;
6253 		if (MR_ValidateMapInfo(instance, instance->map_id))
6254 			fusion->fast_path_io = 1;
6255 		else
6256 			fusion->fast_path_io = 0;
6257 
6258 	}
6259 	if (ctrl_info->host_interface.SRIOV) {
6260 		instance->requestorId = ctrl_info->iov.requestorId;
6261 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6262 			if (!ctrl_info->adapterOperations2.activePassive)
6263 			    instance->PlasmaFW111 = 1;
6264 
6265 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6266 			    instance->PlasmaFW111 ? "1.11" : "new");
6267 
6268 			if (instance->PlasmaFW111) {
6269 			    iovPtr = (struct IOV_111 *)
6270 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6271 			    instance->requestorId = iovPtr->requestorId;
6272 			}
6273 		}
6274 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6275 			instance->requestorId);
6276 	}
6277 
6278 	instance->crash_dump_fw_support =
6279 		ctrl_info->adapterOperations3.supportCrashDump;
6280 	instance->crash_dump_drv_support =
6281 		(instance->crash_dump_fw_support &&
6282 		instance->crash_dump_buf);
6283 	if (instance->crash_dump_drv_support)
6284 		megasas_set_crash_dump_params(instance,
6285 			MR_CRASH_BUF_TURN_OFF);
6286 
6287 	else {
6288 		if (instance->crash_dump_buf)
6289 			dma_free_coherent(&instance->pdev->dev,
6290 				CRASH_DMA_BUF_SIZE,
6291 				instance->crash_dump_buf,
6292 				instance->crash_dump_h);
6293 		instance->crash_dump_buf = NULL;
6294 	}
6295 
6296 	if (instance->snapdump_wait_time) {
6297 		megasas_get_snapdump_properties(instance);
6298 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6299 			 instance->snapdump_wait_time);
6300 	}
6301 
6302 	dev_info(&instance->pdev->dev,
6303 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6304 		le16_to_cpu(ctrl_info->pci.vendor_id),
6305 		le16_to_cpu(ctrl_info->pci.device_id),
6306 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6307 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6308 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6309 		instance->UnevenSpanSupport ? "yes" : "no");
6310 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6311 		instance->crash_dump_drv_support ? "yes" : "no");
6312 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6313 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6314 
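	/* Each SGE maps up to SGE_BUFFER_SIZE bytes, so the SGE count bounds the
	 * transfer size; divide by the 512-byte sector size to get sectors. */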
6315 	instance->max_sectors_per_req = instance->max_num_sge *
6316 						SGE_BUFFER_SIZE / 512;
6317 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6318 		instance->max_sectors_per_req = tmp_sectors;
6319 
6320 	/* Check for valid throttlequeuedepth module parameter */
6321 	if (throttlequeuedepth &&
6322 			throttlequeuedepth <= instance->max_scsi_cmds)
6323 		instance->throttlequeuedepth = throttlequeuedepth;
6324 	else
6325 		instance->throttlequeuedepth =
6326 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6327 
6328 	if ((resetwaittime < 1) ||
6329 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6330 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6331 
6332 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6333 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6334 
6335 	/* Launch SR-IOV heartbeat timer */
6336 	if (instance->requestorId) {
6337 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6338 			megasas_start_timer(instance);
6339 		} else {
6340 			instance->skip_heartbeat_timer_del = 1;
6341 			goto fail_get_ld_pd_list;
6342 		}
6343 	}
6344 
6345 	/*
6346 	 * Create and start watchdog thread which will monitor
6347 	 * controller state every 1 sec and trigger OCR when
6348 	 * it enters fault state
6349 	 */
6350 	if (instance->adapter_type != MFI_SERIES)
6351 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6352 			goto fail_start_watchdog;
6353 
6354 	return 0;
6355 
6356 fail_start_watchdog:
6357 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6358 		del_timer_sync(&instance->sriov_heartbeat_timer);
6359 fail_get_ld_pd_list:
6360 	instance->instancet->disable_intr(instance);
6361 	megasas_destroy_irqs(instance);
6362 fail_init_adapter:
6363 	if (instance->msix_vectors)
6364 		pci_free_irq_vectors(instance->pdev);
6365 	instance->msix_vectors = 0;
6366 fail_alloc_dma_buf:
6367 	megasas_free_ctrl_dma_buffers(instance);
6368 	megasas_free_ctrl_mem(instance);
6369 fail_ready_state:
6370 	iounmap(instance->reg_set);
6371 
6372 fail_ioremap:
6373 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6374 
6375 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6376 		__func__, __LINE__);
6377 	return -EINVAL;
6378 }
6379 
6380 /**
6381  * megasas_release_mfi -	Reverses the FW initialization
6382  * @instance:			Adapter soft state
6383  */
6384 static void megasas_release_mfi(struct megasas_instance *instance)
6385 {
6386 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6387 
6388 	if (instance->reply_queue)
6389 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6390 			    instance->reply_queue, instance->reply_queue_h);
6391 
6392 	megasas_free_cmds(instance);
6393 
6394 	iounmap(instance->reg_set);
6395 
6396 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6397 }
6398 
6399 /**
6400  * megasas_get_seq_num -	Gets latest event sequence numbers
6401  * @instance:			Adapter soft state
6402  * @eli:			FW event log sequence numbers information
6403  *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the seq number at
 * boot etc. They would "read" all the events below the latest seq number
 * by issuing a direct fw cmd (DCMD). For future events (beyond the latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
 * wait for the events to happen.
6410  */
6411 static int
6412 megasas_get_seq_num(struct megasas_instance *instance,
6413 		    struct megasas_evt_log_info *eli)
6414 {
6415 	struct megasas_cmd *cmd;
6416 	struct megasas_dcmd_frame *dcmd;
6417 	struct megasas_evt_log_info *el_info;
6418 	dma_addr_t el_info_h = 0;
6419 	int ret;
6420 
6421 	cmd = megasas_get_cmd(instance);
6422 
6423 	if (!cmd) {
6424 		return -ENOMEM;
6425 	}
6426 
6427 	dcmd = &cmd->frame->dcmd;
6428 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6429 				     sizeof(struct megasas_evt_log_info),
6430 				     &el_info_h, GFP_KERNEL);
6431 	if (!el_info) {
6432 		megasas_return_cmd(instance, cmd);
6433 		return -ENOMEM;
6434 	}
6435 
6436 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6437 
6438 	dcmd->cmd = MFI_CMD_DCMD;
6439 	dcmd->cmd_status = 0x0;
6440 	dcmd->sge_count = 1;
6441 	dcmd->flags = MFI_FRAME_DIR_READ;
6442 	dcmd->timeout = 0;
6443 	dcmd->pad_0 = 0;
6444 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6445 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6446 
6447 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6448 				 sizeof(struct megasas_evt_log_info));
6449 
6450 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6451 	if (ret != DCMD_SUCCESS) {
6452 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6453 			__func__, __LINE__);
6454 		goto dcmd_failed;
6455 	}
6456 
6457 	/*
	 * Copy the data back into the caller's buffer
6459 	 */
6460 	eli->newest_seq_num = el_info->newest_seq_num;
6461 	eli->oldest_seq_num = el_info->oldest_seq_num;
6462 	eli->clear_seq_num = el_info->clear_seq_num;
6463 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6464 	eli->boot_seq_num = el_info->boot_seq_num;
6465 
6466 dcmd_failed:
6467 	dma_free_coherent(&instance->pdev->dev,
6468 			sizeof(struct megasas_evt_log_info),
6469 			el_info, el_info_h);
6470 
6471 	megasas_return_cmd(instance, cmd);
6472 
6473 	return ret;
6474 }
6475 
6476 /**
6477  * megasas_register_aen -	Registers for asynchronous event notification
6478  * @instance:			Adapter soft state
6479  * @seq_num:			The starting sequence number
 * @class_locale_word:		Class/locale of the event
 *
 * This function subscribes for AEN for events beyond the @seq_num. It requests
 * to be notified if and only if the event is of type @class_locale_word.
 */
6485 static int
6486 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6487 		     u32 class_locale_word)
6488 {
6489 	int ret_val;
6490 	struct megasas_cmd *cmd;
6491 	struct megasas_dcmd_frame *dcmd;
6492 	union megasas_evt_class_locale curr_aen;
6493 	union megasas_evt_class_locale prev_aen;
6494 
6495 	/*
	 * If there is an AEN pending already (aen_cmd), check if the
6497 	 * class_locale of that pending AEN is inclusive of the new
6498 	 * AEN request we currently have. If it is, then we don't have
6499 	 * to do anything. In other words, whichever events the current
6500 	 * AEN request is subscribing to, have already been subscribed
6501 	 * to.
6502 	 *
6503 	 * If the old_cmd is _not_ inclusive, then we have to abort
6504 	 * that command, form a class_locale that is superset of both
6505 	 * old and current and re-issue to the FW
6506 	 */
6507 
6508 	curr_aen.word = class_locale_word;
6509 
6510 	if (instance->aen_cmd) {
6511 
6512 		prev_aen.word =
6513 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6514 
6515 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6516 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6517 			dev_info(&instance->pdev->dev,
				 "%s %d out of range class %d sent by application\n",
6519 				 __func__, __LINE__, curr_aen.members.class);
6520 			return 0;
6521 		}
6522 
6523 		/*
6524 		 * A class whose enum value is smaller is inclusive of all
6525 		 * higher values. If a PROGRESS (= -1) was previously
6526 		 * registered, then a new registration requests for higher
6527 		 * classes need not be sent to FW. They are automatically
6528 		 * included.
6529 		 *
6530 		 * Locale numbers don't have such hierarchy. They are bitmap
6531 		 * values
6532 		 */
6533 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6534 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6535 		      curr_aen.members.locale)) {
6536 			/*
6537 			 * Previously issued event registration includes
6538 			 * current request. Nothing to do.
6539 			 */
6540 			return 0;
6541 		} else {
6542 			curr_aen.members.locale |= prev_aen.members.locale;
6543 
6544 			if (prev_aen.members.class < curr_aen.members.class)
6545 				curr_aen.members.class = prev_aen.members.class;
6546 
6547 			instance->aen_cmd->abort_aen = 1;
6548 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6549 								  instance->
6550 								  aen_cmd, 30);
6551 
6552 			if (ret_val) {
6553 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6554 				       "previous AEN command\n");
6555 				return ret_val;
6556 			}
6557 		}
6558 	}
6559 
6560 	cmd = megasas_get_cmd(instance);
6561 
6562 	if (!cmd)
6563 		return -ENOMEM;
6564 
6565 	dcmd = &cmd->frame->dcmd;
6566 
6567 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6568 
6569 	/*
6570 	 * Prepare DCMD for aen registration
6571 	 */
6572 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6573 
6574 	dcmd->cmd = MFI_CMD_DCMD;
6575 	dcmd->cmd_status = 0x0;
6576 	dcmd->sge_count = 1;
6577 	dcmd->flags = MFI_FRAME_DIR_READ;
6578 	dcmd->timeout = 0;
6579 	dcmd->pad_0 = 0;
6580 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6581 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6582 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6583 	instance->last_seq_num = seq_num;
6584 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6585 
6586 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6587 				 sizeof(struct megasas_evt_detail));
6588 
6589 	if (instance->aen_cmd != NULL) {
6590 		megasas_return_cmd(instance, cmd);
6591 		return 0;
6592 	}
6593 
6594 	/*
6595 	 * Store reference to the cmd used to register for AEN. When an
6596 	 * application wants us to register for AEN, we have to abort this
6597 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6598 	 */
6599 	instance->aen_cmd = cmd;
6600 
6601 	/*
6602 	 * Issue the aen registration frame
6603 	 */
6604 	instance->instancet->issue_dcmd(instance, cmd);
6605 
6606 	return 0;
6607 }
6608 
/* megasas_get_target_prop - Send DCMD with below details to firmware.
 *
 * This DCMD will fetch a few properties of the LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
6615  *
6616  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6617  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6618  *                       0 = system PD, 1 = LD.
6619  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6620  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6621  *
6622  * @instance:		Adapter soft state
6623  * @sdev:		OS provided scsi device
6624  *
 * Returns 0 on success, non-zero on failure.
6626  */
6627 int
6628 megasas_get_target_prop(struct megasas_instance *instance,
6629 			struct scsi_device *sdev)
6630 {
6631 	int ret;
6632 	struct megasas_cmd *cmd;
6633 	struct megasas_dcmd_frame *dcmd;
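	/* Fold the SCSI channel/id pair into the firmware target ID (channels
	 * are paired for PDs and LDs, hence the modulo 2). */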
6634 	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6635 			sdev->id;
6636 
6637 	cmd = megasas_get_cmd(instance);
6638 
6639 	if (!cmd) {
6640 		dev_err(&instance->pdev->dev,
6641 			"Failed to get cmd %s\n", __func__);
6642 		return -ENOMEM;
6643 	}
6644 
6645 	dcmd = &cmd->frame->dcmd;
6646 
6647 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6648 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6649 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6650 
6651 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6652 	dcmd->cmd = MFI_CMD_DCMD;
6653 	dcmd->cmd_status = 0xFF;
6654 	dcmd->sge_count = 1;
6655 	dcmd->flags = MFI_FRAME_DIR_READ;
6656 	dcmd->timeout = 0;
6657 	dcmd->pad_0 = 0;
6658 	dcmd->data_xfer_len =
6659 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6660 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6661 
6662 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6663 				 sizeof(struct MR_TARGET_PROPERTIES));
6664 
6665 	if ((instance->adapter_type != MFI_SERIES) &&
6666 	    !instance->mask_interrupts)
6667 		ret = megasas_issue_blocked_cmd(instance,
6668 						cmd, MFI_IO_TIMEOUT_SECS);
6669 	else
6670 		ret = megasas_issue_polled(instance, cmd);
6671 
6672 	switch (ret) {
6673 	case DCMD_TIMEOUT:
6674 		switch (dcmd_timeout_ocr_possible(instance)) {
6675 		case INITIATE_OCR:
6676 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6677 			mutex_unlock(&instance->reset_mutex);
6678 			megasas_reset_fusion(instance->host,
6679 					     MFI_IO_TIMEOUT_OCR);
6680 			mutex_lock(&instance->reset_mutex);
6681 			break;
6682 		case KILL_ADAPTER:
6683 			megaraid_sas_kill_hba(instance);
6684 			break;
6685 		case IGNORE_TIMEOUT:
6686 			dev_info(&instance->pdev->dev,
6687 				 "Ignore DCMD timeout: %s %d\n",
6688 				 __func__, __LINE__);
6689 			break;
6690 		}
6691 		break;
6692 
6693 	default:
6694 		megasas_return_cmd(instance, cmd);
6695 	}
6696 	if (ret != DCMD_SUCCESS)
6697 		dev_err(&instance->pdev->dev,
6698 			"return from %s %d return value %d\n",
6699 			__func__, __LINE__, ret);
6700 
6701 	return ret;
6702 }
6703 
6704 /**
6705  * megasas_start_aen -	Subscribes to AEN during driver load time
6706  * @instance:		Adapter soft state
6707  */
6708 static int megasas_start_aen(struct megasas_instance *instance)
6709 {
6710 	struct megasas_evt_log_info eli;
6711 	union megasas_evt_class_locale class_locale;
6712 
6713 	/*
6714 	 * Get the latest sequence number from FW
6715 	 */
6716 	memset(&eli, 0, sizeof(eli));
6717 
6718 	if (megasas_get_seq_num(instance, &eli))
6719 		return -1;
6720 
6721 	/*
6722 	 * Register AEN with FW for latest sequence number plus 1
6723 	 */
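	/* MR_EVT_CLASS_DEBUG is the most inclusive event class and
	 * MR_EVT_LOCALE_ALL covers every locale bit, so all events are reported. */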
6724 	class_locale.members.reserved = 0;
6725 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6726 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6727 
6728 	return megasas_register_aen(instance,
6729 			le32_to_cpu(eli.newest_seq_num) + 1,
6730 			class_locale.word);
6731 }
6732 
6733 /**
6734  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6735  * @instance:		Adapter soft state
6736  */
6737 static int megasas_io_attach(struct megasas_instance *instance)
6738 {
6739 	struct Scsi_Host *host = instance->host;
6740 
6741 	/*
6742 	 * Export parameters required by SCSI mid-layer
6743 	 */
6744 	host->unique_id = instance->unique_id;
6745 	host->can_queue = instance->max_scsi_cmds;
6746 	host->this_id = instance->init_id;
6747 	host->sg_tablesize = instance->max_num_sge;
6748 
6749 	if (instance->fw_support_ieee)
6750 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6751 
6752 	/*
6753 	 * Check if the module parameter value for max_sectors can be used
6754 	 */
6755 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6756 		instance->max_sectors_per_req = max_sectors;
6757 	else {
6758 		if (max_sectors) {
6759 			if (((instance->pdev->device ==
6760 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6761 				(instance->pdev->device ==
6762 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6763 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6764 				instance->max_sectors_per_req = max_sectors;
6765 			} else {
				dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
					"and <= %d (or < 1MB for GEN2 controller)\n",
					instance->max_sectors_per_req);
6769 			}
6770 		}
6771 	}
6772 
6773 	host->max_sectors = instance->max_sectors_per_req;
6774 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6775 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6776 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6777 	host->max_lun = MEGASAS_MAX_LUN;
6778 	host->max_cmd_len = 16;
6779 
6780 	/*
6781 	 * Notify the mid-layer about the new controller
6782 	 */
6783 	if (scsi_add_host(host, &instance->pdev->dev)) {
6784 		dev_err(&instance->pdev->dev,
6785 			"Failed to add host from %s %d\n",
6786 			__func__, __LINE__);
6787 		return -ENODEV;
6788 	}
6789 
6790 	return 0;
6791 }
6792 
6793 /**
6794  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6795  *
6796  * @instance:		Adapter soft state
6797  * Description:
6798  *
6799  * For Ventura, driver/FW will operate in 63bit DMA addresses.
6800  *
 * For Invader -
 *	By default, driver/FW will operate in 32-bit DMA addresses
 *	for consistent DMA mapping, but if the 32-bit consistent
 *	DMA mask fails, the driver will retry with a 63-bit consistent
 *	mask, provided the FW is truly 63-bit DMA capable.
6806  *
6807  * For older controllers(Thunderbolt and MFI based adapters)-
6808  *	driver/FW will operate in 32 bit consistent DMA addresses.
6809  */
6810 static int
6811 megasas_set_dma_mask(struct megasas_instance *instance)
6812 {
6813 	u64 consistent_mask;
6814 	struct pci_dev *pdev;
6815 	u32 scratch_pad_1;
6816 
6817 	pdev = instance->pdev;
6818 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6819 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6820 
6821 	if (IS_DMA64) {
6822 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6823 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6824 			goto fail_set_dma_mask;
6825 
6826 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6827 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6828 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6829 			/*
6830 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6831 			 * for FW capable of handling 64 bit DMA.
6832 			 */
6833 			scratch_pad_1 = megasas_readl
6834 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6835 
6836 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6837 				goto fail_set_dma_mask;
6838 			else if (dma_set_mask_and_coherent(&pdev->dev,
6839 							   DMA_BIT_MASK(63)))
6840 				goto fail_set_dma_mask;
6841 		}
6842 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6843 		goto fail_set_dma_mask;
6844 
6845 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6846 		instance->consistent_mask_64bit = false;
6847 	else
6848 		instance->consistent_mask_64bit = true;
6849 
6850 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6851 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6852 		 (instance->consistent_mask_64bit ? "63" : "32"));
6853 
6854 	return 0;
6855 
6856 fail_set_dma_mask:
6857 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6858 	return -1;
6859 
6860 }
6861 
6862 /*
6863  * megasas_set_adapter_type -	Set adapter type.
6864  *				Supported controllers can be divided in
6865  *				different categories-
6866  *					enum MR_ADAPTER_TYPE {
6867  *						MFI_SERIES = 1,
6868  *						THUNDERBOLT_SERIES = 2,
6869  *						INVADER_SERIES = 3,
6870  *						VENTURA_SERIES = 4,
6871  *						AERO_SERIES = 5,
6872  *					};
6873  * @instance:			Adapter soft state
6874  * return:			void
6875  */
6876 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6877 {
6878 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6879 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6880 		instance->adapter_type = MFI_SERIES;
6881 	} else {
6882 		switch (instance->pdev->device) {
6883 		case PCI_DEVICE_ID_LSI_AERO_10E1:
6884 		case PCI_DEVICE_ID_LSI_AERO_10E2:
6885 		case PCI_DEVICE_ID_LSI_AERO_10E5:
6886 		case PCI_DEVICE_ID_LSI_AERO_10E6:
6887 			instance->adapter_type = AERO_SERIES;
6888 			break;
6889 		case PCI_DEVICE_ID_LSI_VENTURA:
6890 		case PCI_DEVICE_ID_LSI_CRUSADER:
6891 		case PCI_DEVICE_ID_LSI_HARPOON:
6892 		case PCI_DEVICE_ID_LSI_TOMCAT:
6893 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6894 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6895 			instance->adapter_type = VENTURA_SERIES;
6896 			break;
6897 		case PCI_DEVICE_ID_LSI_FUSION:
6898 		case PCI_DEVICE_ID_LSI_PLASMA:
6899 			instance->adapter_type = THUNDERBOLT_SERIES;
6900 			break;
6901 		case PCI_DEVICE_ID_LSI_INVADER:
6902 		case PCI_DEVICE_ID_LSI_INTRUDER:
6903 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6904 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6905 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6906 		case PCI_DEVICE_ID_LSI_FURY:
6907 			instance->adapter_type = INVADER_SERIES;
6908 			break;
6909 		default: /* For all other supported controllers */
6910 			instance->adapter_type = MFI_SERIES;
6911 			break;
6912 		}
6913 	}
6914 }
6915 
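/**
 * megasas_alloc_mfi_ctrl_mem -	Allocate the DMA-coherent producer and
 *				consumer indices used by the MFI reply queue
 * @instance:			Adapter soft state
 *
 * Return: 0 on success, -1 if either DMA allocation fails.
 */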
6916 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6917 {
6918 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6919 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
6920 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6921 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6922 
6923 	if (!instance->producer || !instance->consumer) {
6924 		dev_err(&instance->pdev->dev,
6925 			"Failed to allocate memory for producer, consumer\n");
6926 		return -1;
6927 	}
6928 
6929 	*instance->producer = 0;
6930 	*instance->consumer = 0;
6931 	return 0;
6932 }
6933 
6934 /**
6935  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6936  *				structures which are not common across MFI
6937  *				adapters and fusion adapters.
6938  *				For MFI based adapters, allocate producer and
6939  *				consumer buffers. For fusion adapters, allocate
6940  *				memory for fusion context.
6941  * @instance:			Adapter soft state
6942  * return:			0 for SUCCESS
6943  */
6944 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6945 {
6946 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6947 				      GFP_KERNEL);
6948 	if (!instance->reply_map)
6949 		return -ENOMEM;
6950 
6951 	switch (instance->adapter_type) {
6952 	case MFI_SERIES:
6953 		if (megasas_alloc_mfi_ctrl_mem(instance))
6954 			goto fail;
6955 		break;
6956 	case AERO_SERIES:
6957 	case VENTURA_SERIES:
6958 	case THUNDERBOLT_SERIES:
6959 	case INVADER_SERIES:
6960 		if (megasas_alloc_fusion_context(instance))
6961 			goto fail;
6962 		break;
6963 	}
6964 
6965 	return 0;
6966  fail:
6967 	kfree(instance->reply_map);
6968 	instance->reply_map = NULL;
6969 	return -ENOMEM;
6970 }
6971 
6972 /*
6973  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6974  *				producer, consumer buffers for MFI adapters
6975  *
6976  * @instance -			Adapter soft instance
6977  *
6978  */
6979 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6980 {
6981 	kfree(instance->reply_map);
6982 	if (instance->adapter_type == MFI_SERIES) {
6983 		if (instance->producer)
6984 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6985 					    instance->producer,
6986 					    instance->producer_h);
6987 		if (instance->consumer)
6988 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6989 					    instance->consumer,
6990 					    instance->consumer_h);
6991 	} else {
6992 		megasas_free_fusion_context(instance);
6993 	}
6994 }
6995 
6996 /**
6997  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6998  *					driver load time
6999  *
 * @instance:				Adapter soft instance
 * @return:				0 for SUCCESS
7002  */
7003 static inline
7004 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7005 {
7006 	struct pci_dev *pdev = instance->pdev;
7007 	struct fusion_context *fusion = instance->ctrl_context;
7008 
7009 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7010 			sizeof(struct megasas_evt_detail),
7011 			&instance->evt_detail_h, GFP_KERNEL);
7012 
7013 	if (!instance->evt_detail) {
7014 		dev_err(&instance->pdev->dev,
7015 			"Failed to allocate event detail buffer\n");
7016 		return -ENOMEM;
7017 	}
7018 
7019 	if (fusion) {
7020 		fusion->ioc_init_request =
7021 			dma_alloc_coherent(&pdev->dev,
7022 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7023 					   &fusion->ioc_init_request_phys,
7024 					   GFP_KERNEL);
7025 
7026 		if (!fusion->ioc_init_request) {
			dev_err(&pdev->dev,
				"Failed to allocate ioc init request buffer\n");
7029 			return -ENOMEM;
7030 		}
7031 
7032 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7033 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7034 				&instance->snapdump_prop_h, GFP_KERNEL);
7035 
7036 		if (!instance->snapdump_prop)
7037 			dev_err(&pdev->dev,
7038 				"Failed to allocate snapdump properties buffer\n");
7039 
7040 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7041 							HOST_DEVICE_LIST_SZ,
7042 							&instance->host_device_list_buf_h,
7043 							GFP_KERNEL);
7044 
7045 		if (!instance->host_device_list_buf) {
7046 			dev_err(&pdev->dev,
7047 				"Failed to allocate targetid list buffer\n");
7048 			return -ENOMEM;
7049 		}
7050 
7051 	}
7052 
7053 	instance->pd_list_buf =
7054 		dma_alloc_coherent(&pdev->dev,
7055 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7056 				     &instance->pd_list_buf_h, GFP_KERNEL);
7057 
7058 	if (!instance->pd_list_buf) {
7059 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7060 		return -ENOMEM;
7061 	}
7062 
7063 	instance->ctrl_info_buf =
7064 		dma_alloc_coherent(&pdev->dev,
7065 				     sizeof(struct megasas_ctrl_info),
7066 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7067 
7068 	if (!instance->ctrl_info_buf) {
7069 		dev_err(&pdev->dev,
7070 			"Failed to allocate controller info buffer\n");
7071 		return -ENOMEM;
7072 	}
7073 
7074 	instance->ld_list_buf =
7075 		dma_alloc_coherent(&pdev->dev,
7076 				     sizeof(struct MR_LD_LIST),
7077 				     &instance->ld_list_buf_h, GFP_KERNEL);
7078 
7079 	if (!instance->ld_list_buf) {
7080 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7081 		return -ENOMEM;
7082 	}
7083 
7084 	instance->ld_targetid_list_buf =
7085 		dma_alloc_coherent(&pdev->dev,
7086 				sizeof(struct MR_LD_TARGETID_LIST),
7087 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7088 
7089 	if (!instance->ld_targetid_list_buf) {
7090 		dev_err(&pdev->dev,
7091 			"Failed to allocate LD targetid list buffer\n");
7092 		return -ENOMEM;
7093 	}
7094 
7095 	if (!reset_devices) {
7096 		instance->system_info_buf =
7097 			dma_alloc_coherent(&pdev->dev,
7098 					sizeof(struct MR_DRV_SYSTEM_INFO),
7099 					&instance->system_info_h, GFP_KERNEL);
7100 		instance->pd_info =
7101 			dma_alloc_coherent(&pdev->dev,
7102 					sizeof(struct MR_PD_INFO),
7103 					&instance->pd_info_h, GFP_KERNEL);
7104 		instance->tgt_prop =
7105 			dma_alloc_coherent(&pdev->dev,
7106 					sizeof(struct MR_TARGET_PROPERTIES),
7107 					&instance->tgt_prop_h, GFP_KERNEL);
7108 		instance->crash_dump_buf =
7109 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7110 					&instance->crash_dump_h, GFP_KERNEL);
7111 
7112 		if (!instance->system_info_buf)
7113 			dev_err(&instance->pdev->dev,
7114 				"Failed to allocate system info buffer\n");
7115 
7116 		if (!instance->pd_info)
7117 			dev_err(&instance->pdev->dev,
7118 				"Failed to allocate pd_info buffer\n");
7119 
7120 		if (!instance->tgt_prop)
7121 			dev_err(&instance->pdev->dev,
7122 				"Failed to allocate tgt_prop buffer\n");
7123 
7124 		if (!instance->crash_dump_buf)
7125 			dev_err(&instance->pdev->dev,
7126 				"Failed to allocate crash dump buffer\n");
7127 	}
7128 
7129 	return 0;
7130 }
7131 
7132 /*
7133  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7134  *					during driver load time
7135  *
7136  * @instance-				Adapter soft instance
7137  *
7138  */
7139 static inline
7140 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7141 {
7142 	struct pci_dev *pdev = instance->pdev;
7143 	struct fusion_context *fusion = instance->ctrl_context;
7144 
7145 	if (instance->evt_detail)
7146 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7147 				    instance->evt_detail,
7148 				    instance->evt_detail_h);
7149 
7150 	if (fusion && fusion->ioc_init_request)
7151 		dma_free_coherent(&pdev->dev,
7152 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7153 				  fusion->ioc_init_request,
7154 				  fusion->ioc_init_request_phys);
7155 
7156 	if (instance->pd_list_buf)
7157 		dma_free_coherent(&pdev->dev,
7158 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7159 				    instance->pd_list_buf,
7160 				    instance->pd_list_buf_h);
7161 
7162 	if (instance->ld_list_buf)
7163 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7164 				    instance->ld_list_buf,
7165 				    instance->ld_list_buf_h);
7166 
7167 	if (instance->ld_targetid_list_buf)
7168 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7169 				    instance->ld_targetid_list_buf,
7170 				    instance->ld_targetid_list_buf_h);
7171 
7172 	if (instance->ctrl_info_buf)
7173 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7174 				    instance->ctrl_info_buf,
7175 				    instance->ctrl_info_buf_h);
7176 
7177 	if (instance->system_info_buf)
7178 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7179 				    instance->system_info_buf,
7180 				    instance->system_info_h);
7181 
7182 	if (instance->pd_info)
7183 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7184 				    instance->pd_info, instance->pd_info_h);
7185 
7186 	if (instance->tgt_prop)
7187 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7188 				    instance->tgt_prop, instance->tgt_prop_h);
7189 
7190 	if (instance->crash_dump_buf)
7191 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7192 				    instance->crash_dump_buf,
7193 				    instance->crash_dump_h);
7194 
7195 	if (instance->snapdump_prop)
7196 		dma_free_coherent(&pdev->dev,
7197 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7198 				  instance->snapdump_prop,
7199 				  instance->snapdump_prop_h);
7200 
7201 	if (instance->host_device_list_buf)
7202 		dma_free_coherent(&pdev->dev,
7203 				  HOST_DEVICE_LIST_SZ,
7204 				  instance->host_device_list_buf,
7205 				  instance->host_device_list_buf_h);
7206 
7207 }
7208 
7209 /*
7210  * megasas_init_ctrl_params -		Initialize controller's instance
7211  *					parameters before FW init
7212  * @instance -				Adapter soft instance
7213  * @return -				void
7214  */
7215 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7216 {
7217 	instance->fw_crash_state = UNAVAILABLE;
7218 
7219 	megasas_poll_wait_aen = 0;
7220 	instance->issuepend_done = 1;
7221 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7222 
7223 	/*
7224 	 * Initialize locks and queues
7225 	 */
7226 	INIT_LIST_HEAD(&instance->cmd_pool);
7227 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7228 
7229 	atomic_set(&instance->fw_outstanding, 0);
7230 	atomic64_set(&instance->total_io_count, 0);
7231 
7232 	init_waitqueue_head(&instance->int_cmd_wait_q);
7233 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7234 
7235 	spin_lock_init(&instance->crashdump_lock);
7236 	spin_lock_init(&instance->mfi_pool_lock);
7237 	spin_lock_init(&instance->hba_lock);
7238 	spin_lock_init(&instance->stream_lock);
7239 	spin_lock_init(&instance->completion_lock);
7240 
7241 	mutex_init(&instance->reset_mutex);
7242 
7243 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7244 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7245 		instance->flag_ieee = 1;
7246 
7247 	megasas_dbg_lvl = 0;
7248 	instance->flag = 0;
7249 	instance->unload = 1;
7250 	instance->last_time = 0;
7251 	instance->disableOnlineCtrlReset = 1;
7252 	instance->UnevenSpanSupport = 0;
7253 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7254 	instance->msix_load_balance = false;
7255 
7256 	if (instance->adapter_type != MFI_SERIES)
7257 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7258 	else
7259 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7260 }
7261 
7262 /**
7263  * megasas_probe_one -	PCI hotplug entry point
7264  * @pdev:		PCI device structure
7265  * @id:			PCI ids of supported hotplugged adapter
7266  */
7267 static int megasas_probe_one(struct pci_dev *pdev,
7268 			     const struct pci_device_id *id)
7269 {
7270 	int rval, pos;
7271 	struct Scsi_Host *host;
7272 	struct megasas_instance *instance;
7273 	u16 control = 0;
7274 
7275 	switch (pdev->device) {
7276 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7277 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7278 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7279 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7280 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7281 		return 1;
7282 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7283 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7284 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7285 		break;
7286 	}
7287 
7288 	/* Reset MSI-X in the kdump kernel */
7289 	if (reset_devices) {
7290 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7291 		if (pos) {
7292 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7293 					     &control);
7294 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7295 				dev_info(&pdev->dev, "resetting MSI-X\n");
7296 				pci_write_config_word(pdev,
7297 						      pos + PCI_MSIX_FLAGS,
7298 						      control &
7299 						      ~PCI_MSIX_FLAGS_ENABLE);
7300 			}
7301 		}
7302 	}
7303 
7304 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7306 	 */
7307 	rval = pci_enable_device_mem(pdev);
7308 
7309 	if (rval) {
7310 		return rval;
7311 	}
7312 
7313 	pci_set_master(pdev);
7314 
7315 	host = scsi_host_alloc(&megasas_template,
7316 			       sizeof(struct megasas_instance));
7317 
7318 	if (!host) {
7319 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7320 		goto fail_alloc_instance;
7321 	}
7322 
7323 	instance = (struct megasas_instance *)host->hostdata;
7324 	memset(instance, 0, sizeof(*instance));
7325 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7326 
7327 	/*
7328 	 * Initialize PCI related and misc parameters
7329 	 */
7330 	instance->pdev = pdev;
7331 	instance->host = host;
7332 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7333 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7334 
7335 	megasas_set_adapter_type(instance);
7336 
7337 	/*
7338 	 * Initialize MFI Firmware
7339 	 */
7340 	if (megasas_init_fw(instance))
7341 		goto fail_init_mfi;
7342 
7343 	if (instance->requestorId) {
7344 		if (instance->PlasmaFW111) {
7345 			instance->vf_affiliation_111 =
7346 				dma_alloc_coherent(&pdev->dev,
7347 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7348 					&instance->vf_affiliation_111_h,
7349 					GFP_KERNEL);
7350 			if (!instance->vf_affiliation_111)
7351 				dev_warn(&pdev->dev, "Can't allocate "
7352 				       "memory for VF affiliation buffer\n");
7353 		} else {
7354 			instance->vf_affiliation =
7355 				dma_alloc_coherent(&pdev->dev,
7356 					(MAX_LOGICAL_DRIVES + 1) *
7357 					sizeof(struct MR_LD_VF_AFFILIATION),
7358 					&instance->vf_affiliation_h,
7359 					GFP_KERNEL);
7360 			if (!instance->vf_affiliation)
7361 				dev_warn(&pdev->dev, "Can't allocate "
7362 				       "memory for VF affiliation buffer\n");
7363 		}
7364 	}
7365 
7366 	/*
7367 	 * Store instance in PCI softstate
7368 	 */
7369 	pci_set_drvdata(pdev, instance);
7370 
7371 	/*
7372 	 * Add this controller to megasas_mgmt_info structure so that it
7373 	 * can be exported to management applications
7374 	 */
7375 	megasas_mgmt_info.count++;
7376 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7377 	megasas_mgmt_info.max_index++;
7378 
7379 	/*
7380 	 * Register with SCSI mid-layer
7381 	 */
7382 	if (megasas_io_attach(instance))
7383 		goto fail_io_attach;
7384 
7385 	instance->unload = 0;
7386 	/*
7387 	 * Trigger SCSI to scan our drives
7388 	 */
7389 	if (!instance->enable_fw_dev_list ||
7390 	    (instance->host_device_list_buf->count > 0))
7391 		scsi_scan_host(host);
7392 
7393 	/*
7394 	 * Initiate AEN (Asynchronous Event Notification)
7395 	 */
7396 	if (megasas_start_aen(instance)) {
7397 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7398 		goto fail_start_aen;
7399 	}
7400 
7401 	megasas_setup_debugfs(instance);
7402 
7403 	/* Get current SR-IOV LD/VF affiliation */
7404 	if (instance->requestorId)
7405 		megasas_get_ld_vf_affiliation(instance, 1);
7406 
7407 	return 0;
7408 
7409 fail_start_aen:
7410 fail_io_attach:
7411 	megasas_mgmt_info.count--;
7412 	megasas_mgmt_info.max_index--;
7413 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7414 
7415 	instance->instancet->disable_intr(instance);
7416 	megasas_destroy_irqs(instance);
7417 
7418 	if (instance->adapter_type != MFI_SERIES)
7419 		megasas_release_fusion(instance);
7420 	else
7421 		megasas_release_mfi(instance);
7422 	if (instance->msix_vectors)
7423 		pci_free_irq_vectors(instance->pdev);
7424 fail_init_mfi:
7425 	scsi_host_put(host);
7426 fail_alloc_instance:
7427 	pci_disable_device(pdev);
7428 
7429 	return -ENODEV;
7430 }
7431 
7432 /**
7433  * megasas_flush_cache -	Requests FW to flush all its caches
7434  * @instance:			Adapter soft state
7435  */
7436 static void megasas_flush_cache(struct megasas_instance *instance)
7437 {
7438 	struct megasas_cmd *cmd;
7439 	struct megasas_dcmd_frame *dcmd;
7440 
7441 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7442 		return;
7443 
7444 	cmd = megasas_get_cmd(instance);
7445 
7446 	if (!cmd)
7447 		return;
7448 
7449 	dcmd = &cmd->frame->dcmd;
7450 
7451 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7452 
7453 	dcmd->cmd = MFI_CMD_DCMD;
7454 	dcmd->cmd_status = 0x0;
7455 	dcmd->sge_count = 0;
7456 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7457 	dcmd->timeout = 0;
7458 	dcmd->pad_0 = 0;
7459 	dcmd->data_xfer_len = 0;
7460 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7461 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7462 
7463 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7464 			!= DCMD_SUCCESS) {
7465 		dev_err(&instance->pdev->dev,
7466 			"return from %s %d\n", __func__, __LINE__);
7467 		return;
7468 	}
7469 
7470 	megasas_return_cmd(instance, cmd);
7471 }
7472 
7473 /**
7474  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7475  * @instance:				Adapter soft state
7476  * @opcode:				Shutdown/Hibernate
7477  */
7478 static void megasas_shutdown_controller(struct megasas_instance *instance,
7479 					u32 opcode)
7480 {
7481 	struct megasas_cmd *cmd;
7482 	struct megasas_dcmd_frame *dcmd;
7483 
7484 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7485 		return;
7486 
7487 	cmd = megasas_get_cmd(instance);
7488 
7489 	if (!cmd)
7490 		return;
7491 
7492 	if (instance->aen_cmd)
7493 		megasas_issue_blocked_abort_cmd(instance,
7494 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7495 	if (instance->map_update_cmd)
7496 		megasas_issue_blocked_abort_cmd(instance,
7497 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7498 	if (instance->jbod_seq_cmd)
7499 		megasas_issue_blocked_abort_cmd(instance,
7500 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7501 
7502 	dcmd = &cmd->frame->dcmd;
7503 
7504 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7505 
7506 	dcmd->cmd = MFI_CMD_DCMD;
7507 	dcmd->cmd_status = 0x0;
7508 	dcmd->sge_count = 0;
7509 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7510 	dcmd->timeout = 0;
7511 	dcmd->pad_0 = 0;
7512 	dcmd->data_xfer_len = 0;
7513 	dcmd->opcode = cpu_to_le32(opcode);
7514 
7515 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7516 			!= DCMD_SUCCESS) {
7517 		dev_err(&instance->pdev->dev,
7518 			"return from %s %d\n", __func__, __LINE__);
7519 		return;
7520 	}
7521 
7522 	megasas_return_cmd(instance, cmd);
7523 }
7524 
7525 #ifdef CONFIG_PM
7526 /**
7527  * megasas_suspend -	driver suspend entry point
7528  * @pdev:		PCI device structure
7529  * @state:		PCI power state to suspend routine
7530  */
7531 static int
7532 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7533 {
7534 	struct megasas_instance *instance;
7535 
7536 	instance = pci_get_drvdata(pdev);
7537 
7538 	if (!instance)
7539 		return 0;
7540 
7541 	instance->unload = 1;
7542 
7543 	dev_info(&pdev->dev, "%s is called\n", __func__);
7544 
7545 	/* Shutdown SR-IOV heartbeat timer */
7546 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7547 		del_timer_sync(&instance->sriov_heartbeat_timer);
7548 
7549 	/* Stop the FW fault detection watchdog */
7550 	if (instance->adapter_type != MFI_SERIES)
7551 		megasas_fusion_stop_watchdog(instance);
7552 
7553 	megasas_flush_cache(instance);
7554 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7555 
	/* Cancel the delayed hotplug work if it is still queued */
7557 	if (instance->ev != NULL) {
7558 		struct megasas_aen_event *ev = instance->ev;
7559 		cancel_delayed_work_sync(&ev->hotplug_work);
7560 		instance->ev = NULL;
7561 	}
7562 
7563 	tasklet_kill(&instance->isr_tasklet);
7564 
7565 	pci_set_drvdata(instance->pdev, instance);
7566 	instance->instancet->disable_intr(instance);
7567 
7568 	megasas_destroy_irqs(instance);
7569 
7570 	if (instance->msix_vectors)
7571 		pci_free_irq_vectors(instance->pdev);
7572 
7573 	pci_save_state(pdev);
7574 	pci_disable_device(pdev);
7575 
7576 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7577 
7578 	return 0;
7579 }
7580 
7581 /**
7582  * megasas_resume-      driver resume entry point
7583  * @pdev:               PCI device structure
7584  */
7585 static int
7586 megasas_resume(struct pci_dev *pdev)
7587 {
7588 	int rval;
7589 	struct Scsi_Host *host;
7590 	struct megasas_instance *instance;
7591 	int irq_flags = PCI_IRQ_LEGACY;
7592 
7593 	instance = pci_get_drvdata(pdev);
7594 
7595 	if (!instance)
7596 		return 0;
7597 
7598 	host = instance->host;
7599 	pci_set_power_state(pdev, PCI_D0);
7600 	pci_enable_wake(pdev, PCI_D0, 0);
7601 	pci_restore_state(pdev);
7602 
7603 	dev_info(&pdev->dev, "%s is called\n", __func__);
7604 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7606 	 */
7607 	rval = pci_enable_device_mem(pdev);
7608 
7609 	if (rval) {
7610 		dev_err(&pdev->dev, "Enable device failed\n");
7611 		return rval;
7612 	}
7613 
7614 	pci_set_master(pdev);
7615 
7616 	/*
7617 	 * We expect the FW state to be READY
7618 	 */
7619 	if (megasas_transition_to_ready(instance, 0))
7620 		goto fail_ready_state;
7621 
7622 	if (megasas_set_dma_mask(instance))
7623 		goto fail_set_dma_mask;
7624 
7625 	/*
7626 	 * Initialize MFI Firmware
7627 	 */
7628 
7629 	atomic_set(&instance->fw_outstanding, 0);
7630 	atomic_set(&instance->ldio_outstanding, 0);
7631 
7632 	/* Now re-enable MSI-X */
7633 	if (instance->msix_vectors) {
7634 		irq_flags = PCI_IRQ_MSIX;
7635 		if (instance->smp_affinity_enable)
7636 			irq_flags |= PCI_IRQ_AFFINITY;
7637 	}
7638 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
7639 				     instance->msix_vectors ?
7640 				     instance->msix_vectors : 1, irq_flags);
7641 	if (rval < 0)
7642 		goto fail_reenable_msix;
7643 
7644 	megasas_setup_reply_map(instance);
7645 
7646 	if (instance->adapter_type != MFI_SERIES) {
7647 		megasas_reset_reply_desc(instance);
7648 		if (megasas_ioc_init_fusion(instance)) {
7649 			megasas_free_cmds(instance);
7650 			megasas_free_cmds_fusion(instance);
7651 			goto fail_init_mfi;
7652 		}
7653 		if (!megasas_get_map_info(instance))
7654 			megasas_sync_map_info(instance);
7655 	} else {
7656 		*instance->producer = 0;
7657 		*instance->consumer = 0;
7658 		if (megasas_issue_init_mfi(instance))
7659 			goto fail_init_mfi;
7660 	}
7661 
7662 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7663 		goto fail_init_mfi;
7664 
7665 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7666 		     (unsigned long)instance);
7667 
7668 	if (instance->msix_vectors ?
7669 			megasas_setup_irqs_msix(instance, 0) :
7670 			megasas_setup_irqs_ioapic(instance))
7671 		goto fail_init_mfi;
7672 
7673 	if (instance->adapter_type != MFI_SERIES)
7674 		megasas_setup_irq_poll(instance);
7675 
7676 	/* Re-launch SR-IOV heartbeat timer */
7677 	if (instance->requestorId) {
7678 		if (!megasas_sriov_start_heartbeat(instance, 0))
7679 			megasas_start_timer(instance);
7680 		else {
7681 			instance->skip_heartbeat_timer_del = 1;
7682 			goto fail_init_mfi;
7683 		}
7684 	}
7685 
7686 	instance->instancet->enable_intr(instance);
7687 	megasas_setup_jbod_map(instance);
7688 	instance->unload = 0;
7689 
7690 	/*
7691 	 * Initiate AEN (Asynchronous Event Notification)
7692 	 */
7693 	if (megasas_start_aen(instance))
7694 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7695 
7696 	/* Re-launch FW fault watchdog */
7697 	if (instance->adapter_type != MFI_SERIES)
7698 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7699 			goto fail_start_watchdog;
7700 
7701 	return 0;
7702 
7703 fail_start_watchdog:
7704 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7705 		del_timer_sync(&instance->sriov_heartbeat_timer);
7706 fail_init_mfi:
7707 	megasas_free_ctrl_dma_buffers(instance);
7708 	megasas_free_ctrl_mem(instance);
7709 	scsi_host_put(host);
7710 
7711 fail_reenable_msix:
7712 fail_set_dma_mask:
7713 fail_ready_state:
7714 
7715 	pci_disable_device(pdev);
7716 
7717 	return -ENODEV;
7718 }
7719 #else
7720 #define megasas_suspend	NULL
7721 #define megasas_resume	NULL
7722 #endif
7723 
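/**
 * megasas_wait_for_adapter_operational -	Wait for an in-progress
 *						controller reset to finish
 * @instance:					Adapter soft state
 *
 * Polls instance->adprecovery once a second, for up to
 * 2 * MEGASAS_RESET_WAIT_TIME seconds, until the HBA is operational or
 * has hit a critical error.
 *
 * Return: 0 if the HBA is operational, 1 otherwise.
 */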
7724 static inline int
7725 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7726 {
7727 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7728 	int i;
7729 	u8 adp_state;
7730 
7731 	for (i = 0; i < wait_time; i++) {
7732 		adp_state = atomic_read(&instance->adprecovery);
7733 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7734 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7735 			break;
7736 
7737 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7738 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7739 
7740 		msleep(1000);
7741 	}
7742 
7743 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7744 		dev_info(&instance->pdev->dev,
7745 			 "%s HBA failed to become operational, adp_state %d\n",
7746 			 __func__, adp_state);
7747 		return 1;
7748 	}
7749 
7750 	return 0;
7751 }
7752 
7753 /**
 * megasas_detach_one -	PCI hot-unplug entry point
7755  * @pdev:		PCI device structure
7756  */
7757 static void megasas_detach_one(struct pci_dev *pdev)
7758 {
7759 	int i;
7760 	struct Scsi_Host *host;
7761 	struct megasas_instance *instance;
7762 	struct fusion_context *fusion;
7763 	u32 pd_seq_map_sz;
7764 
7765 	instance = pci_get_drvdata(pdev);
7766 
7767 	if (!instance)
7768 		return;
7769 
7770 	host = instance->host;
7771 	fusion = instance->ctrl_context;
7772 
7773 	/* Shutdown SR-IOV heartbeat timer */
7774 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7775 		del_timer_sync(&instance->sriov_heartbeat_timer);
7776 
7777 	/* Stop the FW fault detection watchdog */
7778 	if (instance->adapter_type != MFI_SERIES)
7779 		megasas_fusion_stop_watchdog(instance);
7780 
7781 	if (instance->fw_crash_state != UNAVAILABLE)
7782 		megasas_free_host_crash_buffer(instance);
7783 	scsi_remove_host(instance->host);
7784 	instance->unload = 1;
7785 
7786 	if (megasas_wait_for_adapter_operational(instance))
7787 		goto skip_firing_dcmds;
7788 
7789 	megasas_flush_cache(instance);
7790 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7791 
7792 skip_firing_dcmds:
	/* Cancel the delayed hotplug work if it is still queued */
7794 	if (instance->ev != NULL) {
7795 		struct megasas_aen_event *ev = instance->ev;
7796 		cancel_delayed_work_sync(&ev->hotplug_work);
7797 		instance->ev = NULL;
7798 	}
7799 
7800 	/* cancel all wait events */
7801 	wake_up_all(&instance->int_cmd_wait_q);
7802 
7803 	tasklet_kill(&instance->isr_tasklet);
7804 
7805 	/*
7806 	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array.
7808 	 */
7809 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7810 		if (megasas_mgmt_info.instance[i] == instance) {
7811 			megasas_mgmt_info.count--;
7812 			megasas_mgmt_info.instance[i] = NULL;
7813 
7814 			break;
7815 		}
7816 	}
7817 
7818 	instance->instancet->disable_intr(instance);
7819 
7820 	megasas_destroy_irqs(instance);
7821 
7822 	if (instance->msix_vectors)
7823 		pci_free_irq_vectors(instance->pdev);
7824 
7825 	if (instance->adapter_type >= VENTURA_SERIES) {
7826 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7827 			kfree(fusion->stream_detect_by_ld[i]);
7828 		kfree(fusion->stream_detect_by_ld);
7829 		fusion->stream_detect_by_ld = NULL;
7830 	}
7831 
7832 
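	/*
	 * For Fusion adapters, release the Fusion-specific resources and
	 * free the RAID map buffers (ld_map/ld_drv_map) as well as the PD
	 * sequence-number sync buffers used for the JBOD sequence map.
	 * MFI adapters only need megasas_release_mfi().
	 */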
7833 	if (instance->adapter_type != MFI_SERIES) {
7834 		megasas_release_fusion(instance);
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
			 (MAX_PHYSICAL_DEVICES - 1));
7838 		for (i = 0; i < 2 ; i++) {
7839 			if (fusion->ld_map[i])
7840 				dma_free_coherent(&instance->pdev->dev,
7841 						  fusion->max_map_sz,
7842 						  fusion->ld_map[i],
7843 						  fusion->ld_map_phys[i]);
7844 			if (fusion->ld_drv_map[i]) {
7845 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7846 					vfree(fusion->ld_drv_map[i]);
7847 				else
7848 					free_pages((ulong)fusion->ld_drv_map[i],
7849 						   fusion->drv_map_pages);
7850 			}
7851 
7852 			if (fusion->pd_seq_sync[i])
7853 				dma_free_coherent(&instance->pdev->dev,
7854 					pd_seq_map_sz,
7855 					fusion->pd_seq_sync[i],
7856 					fusion->pd_seq_phys[i]);
7857 		}
7858 	} else {
7859 		megasas_release_mfi(instance);
7860 	}
7861 
7862 	if (instance->vf_affiliation)
7863 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7864 				    sizeof(struct MR_LD_VF_AFFILIATION),
7865 				    instance->vf_affiliation,
7866 				    instance->vf_affiliation_h);
7867 
7868 	if (instance->vf_affiliation_111)
7869 		dma_free_coherent(&pdev->dev,
7870 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7871 				    instance->vf_affiliation_111,
7872 				    instance->vf_affiliation_111_h);
7873 
7874 	if (instance->hb_host_mem)
7875 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7876 				    instance->hb_host_mem,
7877 				    instance->hb_host_mem_h);
7878 
7879 	megasas_free_ctrl_dma_buffers(instance);
7880 
7881 	megasas_free_ctrl_mem(instance);
7882 
7883 	megasas_destroy_debugfs(instance);
7884 
7885 	scsi_host_put(host);
7886 
7887 	pci_disable_device(pdev);
7888 }
7889 
7890 /**
7891  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
7893  */
7894 static void megasas_shutdown(struct pci_dev *pdev)
7895 {
7896 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7897 
7898 	if (!instance)
7899 		return;
7900 
7901 	instance->unload = 1;
7902 
7903 	if (megasas_wait_for_adapter_operational(instance))
7904 		goto skip_firing_dcmds;
7905 
7906 	megasas_flush_cache(instance);
7907 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7908 
7909 skip_firing_dcmds:
7910 	instance->instancet->disable_intr(instance);
7911 	megasas_destroy_irqs(instance);
7912 
7913 	if (instance->msix_vectors)
7914 		pci_free_irq_vectors(instance->pdev);
7915 }
7916 
7917 /**
7918  * megasas_mgmt_open -	char node "open" entry point
7919  */
7920 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7921 {
7922 	/*
7923 	 * Allow only those users with admin rights
7924 	 */
7925 	if (!capable(CAP_SYS_ADMIN))
7926 		return -EACCES;
7927 
7928 	return 0;
7929 }
7930 
7931 /**
7932  * megasas_mgmt_fasync -	Async notifier registration from applications
7933  *
7934  * This function adds the calling process to a driver global queue. When an
7935  * event occurs, SIGIO will be sent to all processes in this queue.
7936  */
7937 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7938 {
7939 	int rc;
7940 
7941 	mutex_lock(&megasas_async_queue_mutex);
7942 
7943 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7944 
7945 	mutex_unlock(&megasas_async_queue_mutex);
7946 
7947 	if (rc >= 0) {
7948 		/* For sanity check when we get ioctl */
7949 		filep->private_data = filep;
7950 		return 0;
7951 	}
7952 
7953 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7954 
7955 	return rc;
7956 }
7957 
7958 /**
7959  * megasas_mgmt_poll -  char node "poll" entry point
7960  * */
7961 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7962 {
7963 	__poll_t mask;
7964 	unsigned long flags;
7965 
7966 	poll_wait(file, &megasas_poll_wait, wait);
7967 	spin_lock_irqsave(&poll_aen_lock, flags);
7968 	if (megasas_poll_wait_aen)
7969 		mask = (EPOLLIN | EPOLLRDNORM);
7970 	else
7971 		mask = 0;
7972 	megasas_poll_wait_aen = 0;
7973 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7974 	return mask;
7975 }
7976 
7977 /*
7978  * megasas_set_crash_dump_params_ioctl:
7979  *		Send CRASH_DUMP_MODE DCMD to all controllers
7980  * @cmd:	MFI command frame
7981  */
7982 
7983 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7984 {
7985 	struct megasas_instance *local_instance;
7986 	int i, error = 0;
7987 	int crash_support;
7988 
7989 	crash_support = cmd->frame->dcmd.mbox.w[0];
7990 
7991 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7992 		local_instance = megasas_mgmt_info.instance[i];
7993 		if (local_instance && local_instance->crash_dump_drv_support) {
7994 			if ((atomic_read(&local_instance->adprecovery) ==
7995 				MEGASAS_HBA_OPERATIONAL) &&
7996 				!megasas_set_crash_dump_params(local_instance,
7997 					crash_support)) {
7998 				local_instance->crash_dump_app_support =
7999 					crash_support;
8000 				dev_info(&local_instance->pdev->dev,
8001 					"Application firmware crash "
8002 					"dump mode set success\n");
8003 				error = 0;
8004 			} else {
8005 				dev_info(&local_instance->pdev->dev,
8006 					"Application firmware crash "
8007 					"dump mode set failed\n");
8008 				error = -1;
8009 			}
8010 		}
8011 	}
8012 	return error;
8013 }
8014 
8015 /**
8016  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8017  * @instance:			Adapter soft state
 * @user_ioc:			User's ioctl packet (used to copy status back)
 * @ioc:			Kernel copy of the user's ioctl packet
8019  */
8020 static int
8021 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8022 		      struct megasas_iocpacket __user * user_ioc,
8023 		      struct megasas_iocpacket *ioc)
8024 {
8025 	struct megasas_sge64 *kern_sge64 = NULL;
8026 	struct megasas_sge32 *kern_sge32 = NULL;
8027 	struct megasas_cmd *cmd;
8028 	void *kbuff_arr[MAX_IOCTL_SGE];
8029 	dma_addr_t buf_handle = 0;
8030 	int error = 0, i;
8031 	void *sense = NULL;
8032 	dma_addr_t sense_handle;
8033 	unsigned long *sense_ptr;
8034 	u32 opcode = 0;
8035 
8036 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8037 
8038 	if (ioc->sge_count > MAX_IOCTL_SGE) {
8039 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
8040 		       ioc->sge_count, MAX_IOCTL_SGE);
8041 		return -EINVAL;
8042 	}
8043 
8044 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8045 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8046 	    !instance->support_nvme_passthru) ||
8047 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8048 	    !instance->support_pci_lane_margining)) {
8049 		dev_err(&instance->pdev->dev,
8050 			"Received invalid ioctl command 0x%x\n",
8051 			ioc->frame.hdr.cmd);
8052 		return -ENOTSUPP;
8053 	}
8054 
8055 	cmd = megasas_get_cmd(instance);
8056 	if (!cmd) {
8057 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8058 		return -ENOMEM;
8059 	}
8060 
8061 	/*
8062 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8063 	 * frames into our cmd's frames. cmd->frame's context will get
8064 	 * overwritten when we copy from user's frames. So set that value
8065 	 * alone separately
8066 	 */
8067 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8068 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8069 	cmd->frame->hdr.pad_0 = 0;
8070 
8071 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8072 
8073 	if (instance->consistent_mask_64bit)
8074 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8075 				       MFI_FRAME_SENSE64));
8076 	else
8077 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8078 					       MFI_FRAME_SENSE64));
8079 
8080 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8081 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8082 
8083 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8084 		mutex_lock(&instance->reset_mutex);
8085 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8086 			megasas_return_cmd(instance, cmd);
8087 			mutex_unlock(&instance->reset_mutex);
8088 			return -1;
8089 		}
8090 		mutex_unlock(&instance->reset_mutex);
8091 	}
8092 
8093 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8094 		error = megasas_set_crash_dump_params_ioctl(cmd);
8095 		megasas_return_cmd(instance, cmd);
8096 		return error;
8097 	}
8098 
8099 	/*
8100 	 * The management interface between applications and the fw uses
8101 	 * MFI frames. E.g, RAID configuration changes, LD property changes
	 * etc. are accomplished through different kinds of MFI frames. The
8103 	 * driver needs to care only about substituting user buffers with
8104 	 * kernel buffers in SGLs. The location of SGL is embedded in the
8105 	 * struct iocpacket itself.
8106 	 */
8107 	if (instance->consistent_mask_64bit)
8108 		kern_sge64 = (struct megasas_sge64 *)
8109 			((unsigned long)cmd->frame + ioc->sgl_off);
8110 	else
8111 		kern_sge32 = (struct megasas_sge32 *)
8112 			((unsigned long)cmd->frame + ioc->sgl_off);
8113 
8114 	/*
8115 	 * For each user buffer, create a mirror buffer and copy in
8116 	 */
8117 	for (i = 0; i < ioc->sge_count; i++) {
8118 		if (!ioc->sgl[i].iov_len)
8119 			continue;
8120 
8121 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8122 						    ioc->sgl[i].iov_len,
8123 						    &buf_handle, GFP_KERNEL);
8124 		if (!kbuff_arr[i]) {
8125 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8126 			       "kernel SGL buffer for IOCTL\n");
8127 			error = -ENOMEM;
8128 			goto out;
8129 		}
8130 
8131 		/*
8132 		 * We don't change the dma_coherent_mask, so
8133 		 * dma_alloc_coherent only returns 32bit addresses
8134 		 */
8135 		if (instance->consistent_mask_64bit) {
8136 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8137 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8138 		} else {
8139 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8140 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8141 		}
8142 
8143 		/*
8144 		 * We created a kernel buffer corresponding to the
8145 		 * user buffer. Now copy in from the user buffer
8146 		 */
8147 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8148 				   (u32) (ioc->sgl[i].iov_len))) {
8149 			error = -EFAULT;
8150 			goto out;
8151 		}
8152 	}
8153 
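	/*
	 * If the application expects sense data, allocate a DMA-able sense
	 * buffer and patch its bus address into the frame at the offset
	 * (sense_off) supplied by the application.
	 */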
8154 	if (ioc->sense_len) {
8155 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8156 					     &sense_handle, GFP_KERNEL);
8157 		if (!sense) {
8158 			error = -ENOMEM;
8159 			goto out;
8160 		}
8161 
8162 		sense_ptr =
8163 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8164 		if (instance->consistent_mask_64bit)
8165 			*sense_ptr = cpu_to_le64(sense_handle);
8166 		else
8167 			*sense_ptr = cpu_to_le32(sense_handle);
8168 	}
8169 
8170 	/*
8171 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8172 	 * cmd to the SCSI mid-layer
8173 	 */
8174 	cmd->sync_cmd = 1;
8175 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
8176 		cmd->sync_cmd = 0;
8177 		dev_err(&instance->pdev->dev,
8178 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8179 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8180 			cmd->cmd_status_drv);
8181 		return -EBUSY;
8182 	}
8183 
8184 	cmd->sync_cmd = 0;
8185 
8186 	if (instance->unload == 1) {
8187 		dev_info(&instance->pdev->dev, "Driver unload is in progress "
8188 			"don't submit data to application\n");
8189 		goto out;
8190 	}
8191 	/*
8192 	 * copy out the kernel buffers to user buffers
8193 	 */
8194 	for (i = 0; i < ioc->sge_count; i++) {
8195 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8196 				 ioc->sgl[i].iov_len)) {
8197 			error = -EFAULT;
8198 			goto out;
8199 		}
8200 	}
8201 
8202 	/*
8203 	 * copy out the sense
8204 	 */
8205 	if (ioc->sense_len) {
8206 		/*
8207 		 * sense_ptr points to the location that has the user
8208 		 * sense buffer address
8209 		 */
8210 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8211 				ioc->sense_off);
8212 
8213 		if (copy_to_user((void __user *)((unsigned long)
8214 				 get_unaligned((unsigned long *)sense_ptr)),
8215 				 sense, ioc->sense_len)) {
8216 			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8217 					"sense data\n");
8218 			error = -EFAULT;
8219 			goto out;
8220 		}
8221 	}
8222 
8223 	/*
8224 	 * copy the status codes returned by the fw
8225 	 */
8226 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8227 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8228 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8229 		error = -EFAULT;
8230 	}
8231 
8232 out:
8233 	if (sense) {
8234 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8235 				    sense, sense_handle);
8236 	}
8237 
8238 	for (i = 0; i < ioc->sge_count; i++) {
8239 		if (kbuff_arr[i]) {
8240 			if (instance->consistent_mask_64bit)
8241 				dma_free_coherent(&instance->pdev->dev,
8242 					le32_to_cpu(kern_sge64[i].length),
8243 					kbuff_arr[i],
8244 					le64_to_cpu(kern_sge64[i].phys_addr));
8245 			else
8246 				dma_free_coherent(&instance->pdev->dev,
8247 					le32_to_cpu(kern_sge32[i].length),
8248 					kbuff_arr[i],
8249 					le32_to_cpu(kern_sge32[i].phys_addr));
8250 			kbuff_arr[i] = NULL;
8251 		}
8252 	}
8253 
8254 	megasas_return_cmd(instance, cmd);
8255 	return error;
8256 }
8257 
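/*
 * megasas_mgmt_ioctl_fw -	MEGASAS_IOC_FIRMWARE ioctl handler
 *
 * Copies the megasas_iocpacket from user space, locates the addressed
 * adapter instance and rejects the request if the controller is unloading,
 * in a critical error state, or an SR-IOV VF (unless allow_vf_ioctls is
 * set). Otherwise it serializes against other ioctls via ioctl_sem, waits
 * for the adapter to become operational and hands the packet to
 * megasas_mgmt_fw_ioctl().
 */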
8258 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8259 {
8260 	struct megasas_iocpacket __user *user_ioc =
8261 	    (struct megasas_iocpacket __user *)arg;
8262 	struct megasas_iocpacket *ioc;
8263 	struct megasas_instance *instance;
8264 	int error;
8265 
8266 	ioc = memdup_user(user_ioc, sizeof(*ioc));
8267 	if (IS_ERR(ioc))
8268 		return PTR_ERR(ioc);
8269 
8270 	instance = megasas_lookup_instance(ioc->host_no);
8271 	if (!instance) {
8272 		error = -ENODEV;
8273 		goto out_kfree_ioc;
8274 	}
8275 
8276 	/* Block ioctls in VF mode */
8277 	if (instance->requestorId && !allow_vf_ioctls) {
8278 		error = -ENODEV;
8279 		goto out_kfree_ioc;
8280 	}
8281 
8282 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8283 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8284 		error = -ENODEV;
8285 		goto out_kfree_ioc;
8286 	}
8287 
8288 	if (instance->unload == 1) {
8289 		error = -ENODEV;
8290 		goto out_kfree_ioc;
8291 	}
8292 
8293 	if (down_interruptible(&instance->ioctl_sem)) {
8294 		error = -ERESTARTSYS;
8295 		goto out_kfree_ioc;
8296 	}
8297 
8298 	if  (megasas_wait_for_adapter_operational(instance)) {
8299 		error = -ENODEV;
8300 		goto out_up;
8301 	}
8302 
8303 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8304 out_up:
8305 	up(&instance->ioctl_sem);
8306 
8307 out_kfree_ioc:
8308 	kfree(ioc);
8309 	return error;
8310 }
8311 
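/*
 * megasas_mgmt_ioctl_aen -	MEGASAS_IOC_GET_AEN ioctl handler
 *
 * Copies a struct megasas_aen from user space and registers an AEN with
 * the firmware for the requested sequence number and class/locale word.
 * The caller must have registered for async notification (fasync) first.
 */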
8312 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8313 {
8314 	struct megasas_instance *instance;
8315 	struct megasas_aen aen;
8316 	int error;
8317 
8318 	if (file->private_data != file) {
8319 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8320 		       "called first\n");
8321 		return -EINVAL;
8322 	}
8323 
8324 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8325 		return -EFAULT;
8326 
8327 	instance = megasas_lookup_instance(aen.host_no);
8328 
8329 	if (!instance)
8330 		return -ENODEV;
8331 
8332 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8333 		return -ENODEV;
8334 	}
8335 
8336 	if (instance->unload == 1) {
8337 		return -ENODEV;
8338 	}
8339 
8340 	if  (megasas_wait_for_adapter_operational(instance))
8341 		return -ENODEV;
8342 
8343 	mutex_lock(&instance->reset_mutex);
8344 	error = megasas_register_aen(instance, aen.seq_num,
8345 				     aen.class_locale_word);
8346 	mutex_unlock(&instance->reset_mutex);
8347 	return error;
8348 }
8349 
8350 /**
8351  * megasas_mgmt_ioctl -	char node ioctl entry point
8352  */
8353 static long
8354 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8355 {
8356 	switch (cmd) {
8357 	case MEGASAS_IOC_FIRMWARE:
8358 		return megasas_mgmt_ioctl_fw(file, arg);
8359 
8360 	case MEGASAS_IOC_GET_AEN:
8361 		return megasas_mgmt_ioctl_aen(file, arg);
8362 	}
8363 
8364 	return -ENOTTY;
8365 }
8366 
8367 #ifdef CONFIG_COMPAT
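/*
 * megasas_mgmt_compat_ioctl_fw -	32-bit compat path for
 *					MEGASAS_IOC_FIRMWARE
 *
 * Rebuilds a native megasas_iocpacket in compat user space from the
 * 32-bit compat_megasas_iocpacket (widening the sense and SGL pointers)
 * and forwards it to megasas_mgmt_ioctl_fw().
 */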
8368 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8369 {
8370 	struct compat_megasas_iocpacket __user *cioc =
8371 	    (struct compat_megasas_iocpacket __user *)arg;
8372 	struct megasas_iocpacket __user *ioc =
8373 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8374 	int i;
8375 	int error = 0;
8376 	compat_uptr_t ptr;
8377 	u32 local_sense_off;
8378 	u32 local_sense_len;
8379 	u32 user_sense_off;
8380 
8381 	if (clear_user(ioc, sizeof(*ioc)))
8382 		return -EFAULT;
8383 
8384 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8385 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8386 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8387 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8388 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8389 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8390 		return -EFAULT;
8391 
8392 	/*
8393 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
	 * sense_len is non-zero, so prepare the 64bit value under
8395 	 * the same condition.
8396 	 */
8397 	if (get_user(local_sense_off, &ioc->sense_off) ||
8398 		get_user(local_sense_len, &ioc->sense_len) ||
8399 		get_user(user_sense_off, &cioc->sense_off))
8400 		return -EFAULT;
8401 
8402 	if (local_sense_off != user_sense_off)
8403 		return -EINVAL;
8404 
8405 	if (local_sense_len) {
8406 		void __user **sense_ioc_ptr =
8407 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8408 		compat_uptr_t *sense_cioc_ptr =
8409 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8410 		if (get_user(ptr, sense_cioc_ptr) ||
8411 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8412 			return -EFAULT;
8413 	}
8414 
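	/*
	 * Widen each 32-bit compat iovec: convert the user pointer with
	 * compat_ptr() and copy the length into the native layout.
	 */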
8415 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8416 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8417 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8418 		    copy_in_user(&ioc->sgl[i].iov_len,
8419 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8420 			return -EFAULT;
8421 	}
8422 
8423 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8424 
8425 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8426 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8427 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8428 		return -EFAULT;
8429 	}
8430 	return error;
8431 }
8432 
8433 static long
8434 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8435 			  unsigned long arg)
8436 {
8437 	switch (cmd) {
8438 	case MEGASAS_IOC_FIRMWARE32:
8439 		return megasas_mgmt_compat_ioctl_fw(file, arg);
8440 	case MEGASAS_IOC_GET_AEN:
8441 		return megasas_mgmt_ioctl_aen(file, arg);
8442 	}
8443 
8444 	return -ENOTTY;
8445 }
8446 #endif
8447 
8448 /*
8449  * File operations structure for management interface
8450  */
8451 static const struct file_operations megasas_mgmt_fops = {
8452 	.owner = THIS_MODULE,
8453 	.open = megasas_mgmt_open,
8454 	.fasync = megasas_mgmt_fasync,
8455 	.unlocked_ioctl = megasas_mgmt_ioctl,
8456 	.poll = megasas_mgmt_poll,
8457 #ifdef CONFIG_COMPAT
8458 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8459 #endif
8460 	.llseek = noop_llseek,
8461 };
8462 
8463 /*
8464  * PCI hotplug support registration structure
8465  */
8466 static struct pci_driver megasas_pci_driver = {
8467 
8468 	.name = "megaraid_sas",
8469 	.id_table = megasas_pci_table,
8470 	.probe = megasas_probe_one,
8471 	.remove = megasas_detach_one,
8472 	.suspend = megasas_suspend,
8473 	.resume = megasas_resume,
8474 	.shutdown = megasas_shutdown,
8475 };
8476 
8477 /*
8478  * Sysfs driver attributes
8479  */
8480 static ssize_t version_show(struct device_driver *dd, char *buf)
8481 {
8482 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8483 			MEGASAS_VERSION);
8484 }
8485 static DRIVER_ATTR_RO(version);
8486 
8487 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8488 {
8489 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8490 		MEGASAS_RELDATE);
8491 }
8492 static DRIVER_ATTR_RO(release_date);
8493 
8494 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8495 {
8496 	return sprintf(buf, "%u\n", support_poll_for_event);
8497 }
8498 static DRIVER_ATTR_RO(support_poll_for_event);
8499 
8500 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8501 {
8502 	return sprintf(buf, "%u\n", support_device_change);
8503 }
8504 static DRIVER_ATTR_RO(support_device_change);
8505 
8506 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8507 {
8508 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8509 }
8510 
8511 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8512 			     size_t count)
8513 {
8514 	int retval = count;
8515 
8516 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8517 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8518 		retval = -EINVAL;
8519 	}
8520 	return retval;
8521 }
8522 static DRIVER_ATTR_RW(dbg_lvl);
8523 
8524 static ssize_t
8525 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8526 {
8527 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8528 }
8529 
8530 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8531 
8532 static ssize_t
8533 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8534 {
8535 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8536 }
8537 
8538 static DRIVER_ATTR_RO(support_pci_lane_margining);
8539 
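/*
 * megasas_remove_scsi_device -	Detach a SCSI device from the midlayer and
 *				drop the reference taken by
 *				scsi_device_lookup()
 */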
8540 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8541 {
8542 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8543 	scsi_remove_device(sdev);
8544 	scsi_device_put(sdev);
8545 }
8546 
8547 /**
8548  * megasas_update_device_list -	Update the PD and LD device list from FW
8549  *				after an AEN event notification
8550  * @instance:			Adapter soft state
8551  * @event_type:			Indicates type of event (PD or LD event)
8552  *
8553  * @return:			Success or failure
8554  *
8555  * Issue DCMDs to Firmware to update the internal device list in driver.
8556  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8557  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8558  */
8559 static
8560 int megasas_update_device_list(struct megasas_instance *instance,
8561 			       int event_type)
8562 {
8563 	int dcmd_ret = DCMD_SUCCESS;
8564 
8565 	if (instance->enable_fw_dev_list) {
8566 		dcmd_ret = megasas_host_device_list_query(instance, false);
8567 		if (dcmd_ret != DCMD_SUCCESS)
8568 			goto out;
8569 	} else {
8570 		if (event_type & SCAN_PD_CHANNEL) {
8571 			dcmd_ret = megasas_get_pd_list(instance);
8572 
8573 			if (dcmd_ret != DCMD_SUCCESS)
8574 				goto out;
8575 		}
8576 
8577 		if (event_type & SCAN_VD_CHANNEL) {
8578 			if (!instance->requestorId ||
8579 			    (instance->requestorId &&
8580 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8581 				dcmd_ret = megasas_ld_list_query(instance,
8582 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8583 				if (dcmd_ret != DCMD_SUCCESS)
8584 					goto out;
8585 			}
8586 		}
8587 	}
8588 
8589 out:
8590 	return dcmd_ret;
8591 }
8592 
8593 /**
8594  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8595  *				after an AEN event notification
8596  * @instance:			Adapter soft state
8597  * @scan_type:			Indicates type of devices (PD/LD) to add
8598  * @return			void
8599  */
8600 static
8601 void megasas_add_remove_devices(struct megasas_instance *instance,
8602 				int scan_type)
8603 {
8604 	int i, j;
8605 	u16 pd_index = 0;
8606 	u16 ld_index = 0;
8607 	u16 channel = 0, id = 0;
8608 	struct Scsi_Host *host;
8609 	struct scsi_device *sdev1;
8610 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8611 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8612 
8613 	host = instance->host;
8614 
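	/*
	 * If firmware exposes a consolidated host device list, derive the
	 * SCSI channel and target id from each entry's target_id (system
	 * PDs map to the PD channels, all other entries to the VD channels)
	 * and add any device not already known to the midlayer.
	 */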
8615 	if (instance->enable_fw_dev_list) {
8616 		targetid_list = instance->host_device_list_buf;
8617 		for (i = 0; i < targetid_list->count; i++) {
8618 			targetid_entry = &targetid_list->host_device_list[i];
8619 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8620 				channel = le16_to_cpu(targetid_entry->target_id) /
8621 						MEGASAS_MAX_DEV_PER_CHANNEL;
8622 				id = le16_to_cpu(targetid_entry->target_id) %
8623 						MEGASAS_MAX_DEV_PER_CHANNEL;
8624 			} else {
8625 				channel = MEGASAS_MAX_PD_CHANNELS +
8626 					  (le16_to_cpu(targetid_entry->target_id) /
8627 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8628 				id = le16_to_cpu(targetid_entry->target_id) %
8629 						MEGASAS_MAX_DEV_PER_CHANNEL;
8630 			}
8631 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8632 			if (!sdev1) {
8633 				scsi_add_device(host, channel, id, 0);
8634 			} else {
8635 				scsi_device_put(sdev1);
8636 			}
8637 		}
8638 	}
8639 
8640 	if (scan_type & SCAN_PD_CHANNEL) {
8641 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8642 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8643 				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8644 				sdev1 = scsi_device_lookup(host, i, j, 0);
8645 				if (instance->pd_list[pd_index].driveState ==
8646 							MR_PD_STATE_SYSTEM) {
8647 					if (!sdev1)
8648 						scsi_add_device(host, i, j, 0);
8649 					else
8650 						scsi_device_put(sdev1);
8651 				} else {
8652 					if (sdev1)
8653 						megasas_remove_scsi_device(sdev1);
8654 				}
8655 			}
8656 		}
8657 	}
8658 
8659 	if (scan_type & SCAN_VD_CHANNEL) {
8660 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8661 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8662 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8663 				sdev1 = scsi_device_lookup(host,
8664 						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8665 				if (instance->ld_ids[ld_index] != 0xff) {
8666 					if (!sdev1)
8667 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8668 					else
8669 						scsi_device_put(sdev1);
8670 				} else {
8671 					if (sdev1)
8672 						megasas_remove_scsi_device(sdev1);
8673 				}
8674 			}
8675 		}
8676 	}
8677 
8678 }
8679 
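/*
 * megasas_aen_polling -	Deferred hotplug work triggered by an AEN
 *
 * Decodes the most recently received event, refreshes the PD/LD device
 * lists from firmware when the event requires it, adds or removes SCSI
 * devices accordingly and finally re-registers the AEN with the next
 * sequence number.
 */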
static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	int event_type = 0;
	u32 seq_num;
	int error;
	u8  dcmd_ret = DCMD_SUCCESS;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}

	/* Don't run the event workqueue thread if OCR is running */
	mutex_lock(&instance->reset_mutex);

	instance->ev = NULL;
	if (instance->evt_detail) {
		megasas_decode_evt(instance);

		switch (le32_to_cpu(instance->evt_detail->code)) {

		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			event_type = SCAN_PD_CHANNEL;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
		case MR_EVT_LD_CREATED:
			event_type = SCAN_VD_CHANNEL;
			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
				instance->host->host_no);
			break;

		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			if (dcmd_ret == DCMD_SUCCESS &&
			    instance->snapdump_wait_time) {
				megasas_get_snapdump_properties(instance);
				dev_info(&instance->pdev->dev,
					 "Snap dump wait time\t: %d\n",
					 instance->snapdump_wait_time);
			}
			break;
		default:
			event_type = 0;
			break;
		}
	} else {
		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
		mutex_unlock(&instance->reset_mutex);
		kfree(ev);
		return;
	}

	if (event_type)
		dcmd_ret = megasas_update_device_list(instance, event_type);

	mutex_unlock(&instance->reset_mutex);

	if (event_type && dcmd_ret == DCMD_SUCCESS)
		megasas_add_remove_devices(instance, event_type);

	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

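	/*
	 * If an AEN command is already outstanding with the firmware,
	 * there is nothing to re-register here; just free the event.
	 */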
	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
					class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * When booted in a kdump kernel, minimize the memory footprint by
	 * disabling a few features
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;
	support_pci_lane_margining = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to register device node\n");
		return rval;
	}

	megasas_mgmt_majorno = rval;

	megasas_init_debugfs();

	/*
	 * Register ourselves as PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

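	/*
	 * Export the informational driver attributes in sysfs; each one
	 * created here is removed by the error unwind labels below (in
	 * reverse order) and by megasas_exit().
	 */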
	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_pci_lane_margining);
	if (rval)
		goto err_dcf_support_pci_lane_margining;

	return rval;

err_dcf_support_pci_lane_margining:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);