1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 #include <linux/blk-mq-pci.h>
41 
42 #include <scsi/scsi.h>
43 #include <scsi/scsi_cmnd.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_tcq.h>
47 #include <scsi/scsi_dbg.h>
48 #include "megaraid_sas_fusion.h"
49 #include "megaraid_sas.h"
50 
51 /*
52  * Number of sectors per IO command
53  * Will be set in megasas_init_mfi if the user does not provide it
54  */
55 static unsigned int max_sectors;
56 module_param_named(max_sectors, max_sectors, int, 0444);
57 MODULE_PARM_DESC(max_sectors,
58 	"Maximum number of sectors per IO command");
59 
60 static int msix_disable;
61 module_param(msix_disable, int, 0444);
62 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
63 
64 static unsigned int msix_vectors;
65 module_param(msix_vectors, int, 0444);
66 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
67 
68 static int allow_vf_ioctls;
69 module_param(allow_vf_ioctls, int, 0444);
70 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
71 
72 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
73 module_param(throttlequeuedepth, int, 0444);
74 MODULE_PARM_DESC(throttlequeuedepth,
75 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
76 
77 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
78 module_param(resetwaittime, int, 0444);
79 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180s");
80 
81 static int smp_affinity_enable = 1;
82 module_param(smp_affinity_enable, int, 0444);
83 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
84 
85 static int rdpq_enable = 1;
86 module_param(rdpq_enable, int, 0444);
87 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
88 
89 unsigned int dual_qdepth_disable;
90 module_param(dual_qdepth_disable, int, 0444);
91 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
92 
93 static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
94 module_param(scmd_timeout, int, 0444);
95 MODULE_PARM_DESC(scmd_timeout, "SCSI command timeout (10-90s), default 90s. See megasas_reset_timer.");
96 
97 int perf_mode = -1;
98 module_param(perf_mode, int, 0444);
99 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
100 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
101 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
102 		"1 - iops: High iops queues are not allocated &\n\t\t"
103 		"interrupt coalescing is enabled on all queues\n\t\t"
104 		"2 - latency: High iops queues are not allocated &\n\t\t"
105 		"interrupt coalescing is disabled on all queues\n\t\t"
106 		"default mode is 'balanced'"
107 		);
108 
109 int event_log_level = MFI_EVT_CLASS_CRITICAL;
110 module_param(event_log_level, int, 0644);
111 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2 (CLASS_DEBUG) to 4 (CLASS_DEAD). Default: 2 (CLASS_CRITICAL)");
112 
113 unsigned int enable_sdev_max_qd;
114 module_param(enable_sdev_max_qd, int, 0444);
115 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
116 
117 int host_tagset_enable = 1;
118 module_param(host_tagset_enable, int, 0444);
119 MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable. Default: enable(1)");
120 
121 MODULE_LICENSE("GPL");
122 MODULE_VERSION(MEGASAS_VERSION);
123 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
124 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
125 
126 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
127 static int megasas_get_pd_list(struct megasas_instance *instance);
128 static int megasas_ld_list_query(struct megasas_instance *instance,
129 				 u8 query_type);
130 static int megasas_issue_init_mfi(struct megasas_instance *instance);
131 static int megasas_register_aen(struct megasas_instance *instance,
132 				u32 seq_num, u32 class_locale_word);
133 static void megasas_get_pd_info(struct megasas_instance *instance,
134 				struct scsi_device *sdev);
135 
136 /*
137  * PCI ID table for all supported controllers
138  */
139 static struct pci_device_id megasas_pci_table[] = {
140 
141 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
142 	/* xscale IOP */
143 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
144 	/* ppc IOP */
145 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
146 	/* ppc IOP */
147 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
148 	/* gen2*/
149 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
150 	/* gen2*/
151 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
152 	/* skinny*/
153 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
154 	/* skinny*/
155 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
156 	/* xscale IOP, vega */
157 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
158 	/* xscale IOP */
159 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
160 	/* Fusion */
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
162 	/* Plasma */
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
164 	/* Invader */
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
166 	/* Fury */
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
168 	/* Intruder */
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
170 	/* Intruder 24 port*/
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
172 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
173 	/* VENTURA */
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
175 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
176 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
177 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
178 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
179 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
180 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
181 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
182 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
183 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
184 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
185 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
186 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
187 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
188 	{}
189 };
190 
191 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
192 
193 static int megasas_mgmt_majorno;
194 struct megasas_mgmt_info megasas_mgmt_info;
195 static struct fasync_struct *megasas_async_queue;
196 static DEFINE_MUTEX(megasas_async_queue_mutex);
197 
198 static int megasas_poll_wait_aen;
199 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
200 static u32 support_poll_for_event;
201 u32 megasas_dbg_lvl;
202 static u32 support_device_change;
203 static bool support_nvme_encapsulation;
204 static bool support_pci_lane_margining;
205 
206 /* define lock for aen poll */
207 static spinlock_t poll_aen_lock;
208 
209 extern struct dentry *megasas_debugfs_root;
210 
211 void
212 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
213 		     u8 alt_status);
214 static u32
215 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
216 static int
217 megasas_adp_reset_gen2(struct megasas_instance *instance,
218 		       struct megasas_register_set __iomem *reg_set);
219 static irqreturn_t megasas_isr(int irq, void *devp);
220 static u32
221 megasas_init_adapter_mfi(struct megasas_instance *instance);
222 u32
223 megasas_build_and_issue_cmd(struct megasas_instance *instance,
224 			    struct scsi_cmnd *scmd);
225 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
226 int
227 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
228 	int seconds);
229 void megasas_fusion_ocr_wq(struct work_struct *work);
230 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
231 					 int initial);
232 static int
233 megasas_set_dma_mask(struct megasas_instance *instance);
234 static int
235 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
236 static inline void
237 megasas_free_ctrl_mem(struct megasas_instance *instance);
238 static inline int
239 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
240 static inline void
241 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
242 static inline void
243 megasas_init_ctrl_params(struct megasas_instance *instance);
244 
245 u32 megasas_readl(struct megasas_instance *instance,
246 		  const volatile void __iomem *addr)
247 {
248 	u32 i = 0, ret_val;
249 	/*
250 	 * Due to a HW erratum in Aero controllers, reads of certain
251 	 * Fusion registers could intermittently return all zeroes.
252 	 * This behavior is transient in nature and subsequent reads
253 	 * return a valid value. As a workaround, the driver retries
254 	 * readl up to three times until a non-zero value is read.
255 	 */
256 	if (instance->adapter_type == AERO_SERIES) {
257 		do {
258 			ret_val = readl(addr);
259 			i++;
260 		} while (ret_val == 0 && i < 3);
261 		return ret_val;
262 	} else {
263 		return readl(addr);
264 	}
265 }
266 
267 /**
268  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
269  * @instance:			Adapter soft state
270  * @dcmd:			DCMD frame inside MFI command
271  * @dma_addr:			DMA address of buffer to be passed to FW
272  * @dma_len:			Length of DMA buffer to be passed to FW
273  * @return:			void
274  */
275 void megasas_set_dma_settings(struct megasas_instance *instance,
276 			      struct megasas_dcmd_frame *dcmd,
277 			      dma_addr_t dma_addr, u32 dma_len)
278 {
279 	if (instance->consistent_mask_64bit) {
280 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
281 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
282 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
283 
284 	} else {
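		/*
		 * Consistent DMA mask is 32-bit: use a 32-bit SGE and
		 * do not set MFI_FRAME_SGL64.
		 */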
285 		dcmd->sgl.sge32[0].phys_addr =
286 				cpu_to_le32(lower_32_bits(dma_addr));
287 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
288 		dcmd->flags = cpu_to_le16(dcmd->flags);
289 	}
290 }
291 
292 static void
293 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
294 {
295 	instance->instancet->fire_cmd(instance,
296 		cmd->frame_phys_addr, 0, instance->reg_set);
297 	return;
298 }
299 
300 /**
301  * megasas_get_cmd -	Get a command from the free pool
302  * @instance:		Adapter soft state
303  *
304  * Returns a free command from the pool
305  */
306 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
307 						  *instance)
308 {
309 	unsigned long flags;
310 	struct megasas_cmd *cmd = NULL;
311 
312 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
313 
314 	if (!list_empty(&instance->cmd_pool)) {
315 		cmd = list_entry((&instance->cmd_pool)->next,
316 				 struct megasas_cmd, list);
317 		list_del_init(&cmd->list);
318 	} else {
319 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
320 	}
321 
322 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
323 	return cmd;
324 }
325 
326 /**
327  * megasas_return_cmd -	Return a cmd to free command pool
328  * @instance:		Adapter soft state
329  * @cmd:		Command packet to be returned to free command pool
330  */
331 void
332 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
333 {
334 	unsigned long flags;
335 	u32 blk_tags;
336 	struct megasas_cmd_fusion *cmd_fusion;
337 	struct fusion_context *fusion = instance->ctrl_context;
338 
339 	/* This flag is used only for fusion adapter.
340 	 * Wait for Interrupt for Polled mode DCMD
341 	 */
342 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
343 		return;
344 
345 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
346 
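	/*
	 * On Fusion adapters every internal MFI command is paired with a
	 * fusion command placed past the SCSI command range
	 * (max_scsi_cmds + index); return that paired command as well.
	 */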
347 	if (fusion) {
348 		blk_tags = instance->max_scsi_cmds + cmd->index;
349 		cmd_fusion = fusion->cmd_list[blk_tags];
350 		megasas_return_cmd_fusion(instance, cmd_fusion);
351 	}
352 	cmd->scmd = NULL;
353 	cmd->frame_count = 0;
354 	cmd->flags = 0;
355 	memset(cmd->frame, 0, instance->mfi_frame_size);
356 	cmd->frame->io.context = cpu_to_le32(cmd->index);
357 	if (!fusion && reset_devices)
358 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
359 	list_add(&cmd->list, (&instance->cmd_pool)->next);
360 
361 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
362 
363 }
364 
365 static const char *
366 format_timestamp(uint32_t timestamp)
367 {
368 	static char buffer[32];
369 
370 	if ((timestamp & 0xff000000) == 0xff000000)
371 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
372 		0x00ffffff);
373 	else
374 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
375 	return buffer;
376 }
377 
378 static const char *
379 format_class(int8_t class)
380 {
381 	static char buffer[6];
382 
383 	switch (class) {
384 	case MFI_EVT_CLASS_DEBUG:
385 		return "debug";
386 	case MFI_EVT_CLASS_PROGRESS:
387 		return "progress";
388 	case MFI_EVT_CLASS_INFO:
389 		return "info";
390 	case MFI_EVT_CLASS_WARNING:
391 		return "WARN";
392 	case MFI_EVT_CLASS_CRITICAL:
393 		return "CRIT";
394 	case MFI_EVT_CLASS_FATAL:
395 		return "FATAL";
396 	case MFI_EVT_CLASS_DEAD:
397 		return "DEAD";
398 	default:
399 		snprintf(buffer, sizeof(buffer), "%d", class);
400 		return buffer;
401 	}
402 }
403 
404 /**
405  * megasas_decode_evt - Decode FW AEN event and print critical event
406  * for information.
407  * @instance:			Adapter soft state
408  */
409 static void
410 megasas_decode_evt(struct megasas_instance *instance)
411 {
412 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
413 	union megasas_evt_class_locale class_locale;
414 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
415 
416 	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
417 	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
418 		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
419 		event_log_level = MFI_EVT_CLASS_CRITICAL;
420 	}
421 
422 	if (class_locale.members.class >= event_log_level)
423 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
424 			le32_to_cpu(evt_detail->seq_num),
425 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
426 			(class_locale.members.locale),
427 			format_class(class_locale.members.class),
428 			evt_detail->description);
429 }
430 
431 /*
432  * The following functions are defined for xscale
433  * (deviceid : 1064R, PERC5) controllers
434  */
435 
436 /**
437  * megasas_enable_intr_xscale -	Enables interrupts
438  * @instance:	Adapter soft state
439  */
440 static inline void
441 megasas_enable_intr_xscale(struct megasas_instance *instance)
442 {
443 	struct megasas_register_set __iomem *regs;
444 
445 	regs = instance->reg_set;
446 	writel(0, &(regs)->outbound_intr_mask);
447 
448 	/* Dummy readl to force pci flush */
449 	readl(&regs->outbound_intr_mask);
450 }
451 
452 /**
453  * megasas_disable_intr_xscale - Disables interrupt
454  * @instance:	Adapter soft state
455  */
456 static inline void
457 megasas_disable_intr_xscale(struct megasas_instance *instance)
458 {
459 	struct megasas_register_set __iomem *regs;
460 	u32 mask = 0x1f;
461 
462 	regs = instance->reg_set;
463 	writel(mask, &regs->outbound_intr_mask);
464 	/* Dummy readl to force pci flush */
465 	readl(&regs->outbound_intr_mask);
466 }
467 
468 /**
469  * megasas_read_fw_status_reg_xscale - returns the current FW status value
470  * @instance:	Adapter soft state
471  */
472 static u32
473 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
474 {
475 	return readl(&instance->reg_set->outbound_msg_0);
476 }
477 /**
478  * megasas_clear_intr_xscale -	Check & clear interrupt
479  * @instance:	Adapter soft state
480  */
481 static int
482 megasas_clear_intr_xscale(struct megasas_instance *instance)
483 {
484 	u32 status;
485 	u32 mfiStatus = 0;
486 	struct megasas_register_set __iomem *regs;
487 	regs = instance->reg_set;
488 
489 	/*
490 	 * Check if it is our interrupt
491 	 */
492 	status = readl(&regs->outbound_intr_status);
493 
494 	if (status & MFI_OB_INTR_STATUS_MASK)
495 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
496 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
497 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
498 
499 	/*
500 	 * Clear the interrupt by writing back the same value
501 	 */
502 	if (mfiStatus)
503 		writel(status, &regs->outbound_intr_status);
504 
505 	/* Dummy readl to force pci flush */
506 	readl(&regs->outbound_intr_status);
507 
508 	return mfiStatus;
509 }
510 
511 /**
512  * megasas_fire_cmd_xscale -	Sends command to the FW
513  * @instance:		Adapter soft state
514  * @frame_phys_addr:	Physical address of cmd
515  * @frame_count:	Number of frames for the command
516  * @regs:		MFI register set
517  */
518 static inline void
519 megasas_fire_cmd_xscale(struct megasas_instance *instance,
520 		dma_addr_t frame_phys_addr,
521 		u32 frame_count,
522 		struct megasas_register_set __iomem *regs)
523 {
524 	unsigned long flags;
525 
526 	spin_lock_irqsave(&instance->hba_lock, flags);
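	/*
	 * The frame address (shifted right by 3) and the frame count are
	 * packed into a single inbound queue port write.
	 */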
527 	writel((frame_phys_addr >> 3)|(frame_count),
528 	       &(regs)->inbound_queue_port);
529 	spin_unlock_irqrestore(&instance->hba_lock, flags);
530 }
531 
532 /**
533  * megasas_adp_reset_xscale -  For controller reset
534  * @instance:	Adapter soft state
535  * @regs:	MFI register set
536  */
537 static int
538 megasas_adp_reset_xscale(struct megasas_instance *instance,
539 	struct megasas_register_set __iomem *regs)
540 {
541 	u32 i;
542 	u32 pcidata;
543 
544 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
545 
546 	for (i = 0; i < 3; i++)
547 		msleep(1000); /* sleep for 3 secs */
548 	pcidata  = 0;
549 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
550 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
551 	if (pcidata & 0x2) {
552 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
553 		pcidata &= ~0x2;
554 		pci_write_config_dword(instance->pdev,
555 				MFI_1068_PCSR_OFFSET, pcidata);
556 
557 		for (i = 0; i < 2; i++)
558 			msleep(1000); /* need to wait 2 secs again */
559 
560 		pcidata  = 0;
561 		pci_read_config_dword(instance->pdev,
562 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
563 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
564 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
565 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
566 			pcidata = 0;
567 			pci_write_config_dword(instance->pdev,
568 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
569 		}
570 	}
571 	return 0;
572 }
573 
574 /**
575  * megasas_check_reset_xscale -	For controller reset check
576  * @instance:	Adapter soft state
577  * @regs:	MFI register set
578  */
579 static int
580 megasas_check_reset_xscale(struct megasas_instance *instance,
581 		struct megasas_register_set __iomem *regs)
582 {
583 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
584 	    (le32_to_cpu(*instance->consumer) ==
585 		MEGASAS_ADPRESET_INPROG_SIGN))
586 		return 1;
587 	return 0;
588 }
589 
590 static struct megasas_instance_template megasas_instance_template_xscale = {
591 
592 	.fire_cmd = megasas_fire_cmd_xscale,
593 	.enable_intr = megasas_enable_intr_xscale,
594 	.disable_intr = megasas_disable_intr_xscale,
595 	.clear_intr = megasas_clear_intr_xscale,
596 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
597 	.adp_reset = megasas_adp_reset_xscale,
598 	.check_reset = megasas_check_reset_xscale,
599 	.service_isr = megasas_isr,
600 	.tasklet = megasas_complete_cmd_dpc,
601 	.init_adapter = megasas_init_adapter_mfi,
602 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
603 	.issue_dcmd = megasas_issue_dcmd,
604 };
605 
606 /*
607  * This is the end of set of functions & definitions specific
608  * to xscale (deviceid : 1064R, PERC5) controllers
609  */
610 
611 /*
612  * The following functions are defined for ppc (deviceid : 0x60)
613  * controllers
614  */
615 
616 /**
617  * megasas_enable_intr_ppc -	Enables interrupts
618  * @instance:	Adapter soft state
619  */
620 static inline void
621 megasas_enable_intr_ppc(struct megasas_instance *instance)
622 {
623 	struct megasas_register_set __iomem *regs;
624 
625 	regs = instance->reg_set;
626 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
627 
628 	writel(~0x80000000, &(regs)->outbound_intr_mask);
629 
630 	/* Dummy readl to force pci flush */
631 	readl(&regs->outbound_intr_mask);
632 }
633 
634 /**
635  * megasas_disable_intr_ppc -	Disable interrupt
636  * @instance:	Adapter soft state
637  */
638 static inline void
639 megasas_disable_intr_ppc(struct megasas_instance *instance)
640 {
641 	struct megasas_register_set __iomem *regs;
642 	u32 mask = 0xFFFFFFFF;
643 
644 	regs = instance->reg_set;
645 	writel(mask, &regs->outbound_intr_mask);
646 	/* Dummy readl to force pci flush */
647 	readl(&regs->outbound_intr_mask);
648 }
649 
650 /**
651  * megasas_read_fw_status_reg_ppc - returns the current FW status value
652  * @instance:	Adapter soft state
653  */
654 static u32
655 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
656 {
657 	return readl(&instance->reg_set->outbound_scratch_pad_0);
658 }
659 
660 /**
661  * megasas_clear_intr_ppc -	Check & clear interrupt
662  * @instance:	Adapter soft state
663  */
664 static int
665 megasas_clear_intr_ppc(struct megasas_instance *instance)
666 {
667 	u32 status, mfiStatus = 0;
668 	struct megasas_register_set __iomem *regs;
669 	regs = instance->reg_set;
670 
671 	/*
672 	 * Check if it is our interrupt
673 	 */
674 	status = readl(&regs->outbound_intr_status);
675 
676 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
677 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
678 
679 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
680 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
681 
682 	/*
683 	 * Clear the interrupt by writing back the same value
684 	 */
685 	writel(status, &regs->outbound_doorbell_clear);
686 
687 	/* Dummy readl to force pci flush */
688 	readl(&regs->outbound_doorbell_clear);
689 
690 	return mfiStatus;
691 }
692 
693 /**
694  * megasas_fire_cmd_ppc -	Sends command to the FW
695  * @instance:		Adapter soft state
696  * @frame_phys_addr:	Physical address of cmd
697  * @frame_count:	Number of frames for the command
698  * @regs:		MFI register set
699  */
700 static inline void
701 megasas_fire_cmd_ppc(struct megasas_instance *instance,
702 		dma_addr_t frame_phys_addr,
703 		u32 frame_count,
704 		struct megasas_register_set __iomem *regs)
705 {
706 	unsigned long flags;
707 
708 	spin_lock_irqsave(&instance->hba_lock, flags);
709 	writel((frame_phys_addr | (frame_count<<1))|1,
710 			&(regs)->inbound_queue_port);
711 	spin_unlock_irqrestore(&instance->hba_lock, flags);
712 }
713 
714 /**
715  * megasas_check_reset_ppc -	For controller reset check
716  * @instance:	Adapter soft state
717  * @regs:	MFI register set
718  */
719 static int
720 megasas_check_reset_ppc(struct megasas_instance *instance,
721 			struct megasas_register_set __iomem *regs)
722 {
723 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
724 		return 1;
725 
726 	return 0;
727 }
728 
729 static struct megasas_instance_template megasas_instance_template_ppc = {
730 
731 	.fire_cmd = megasas_fire_cmd_ppc,
732 	.enable_intr = megasas_enable_intr_ppc,
733 	.disable_intr = megasas_disable_intr_ppc,
734 	.clear_intr = megasas_clear_intr_ppc,
735 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
736 	.adp_reset = megasas_adp_reset_xscale,
737 	.check_reset = megasas_check_reset_ppc,
738 	.service_isr = megasas_isr,
739 	.tasklet = megasas_complete_cmd_dpc,
740 	.init_adapter = megasas_init_adapter_mfi,
741 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
742 	.issue_dcmd = megasas_issue_dcmd,
743 };
744 
745 /**
746  * megasas_enable_intr_skinny -	Enables interrupts
747  * @instance:	Adapter soft state
748  */
749 static inline void
750 megasas_enable_intr_skinny(struct megasas_instance *instance)
751 {
752 	struct megasas_register_set __iomem *regs;
753 
754 	regs = instance->reg_set;
755 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
756 
757 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
758 
759 	/* Dummy readl to force pci flush */
760 	readl(&regs->outbound_intr_mask);
761 }
762 
763 /**
764  * megasas_disable_intr_skinny -	Disables interrupt
765  * @instance:	Adapter soft state
766  */
767 static inline void
768 megasas_disable_intr_skinny(struct megasas_instance *instance)
769 {
770 	struct megasas_register_set __iomem *regs;
771 	u32 mask = 0xFFFFFFFF;
772 
773 	regs = instance->reg_set;
774 	writel(mask, &regs->outbound_intr_mask);
775 	/* Dummy readl to force pci flush */
776 	readl(&regs->outbound_intr_mask);
777 }
778 
779 /**
780  * megasas_read_fw_status_reg_skinny - returns the current FW status value
781  * @instance:	Adapter soft state
782  */
783 static u32
784 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
785 {
786 	return readl(&instance->reg_set->outbound_scratch_pad_0);
787 }
788 
789 /**
790  * megasas_clear_intr_skinny -	Check & clear interrupt
791  * @instance:	Adapter soft state
792  */
793 static int
794 megasas_clear_intr_skinny(struct megasas_instance *instance)
795 {
796 	u32 status;
797 	u32 mfiStatus = 0;
798 	struct megasas_register_set __iomem *regs;
799 	regs = instance->reg_set;
800 
801 	/*
802 	 * Check if it is our interrupt
803 	 */
804 	status = readl(&regs->outbound_intr_status);
805 
806 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
807 		return 0;
808 	}
809 
810 	/*
811 	 * Check whether the firmware is in a FAULT state
812 	 */
813 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
814 	    MFI_STATE_FAULT) {
815 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
816 	} else
817 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
818 
819 	/*
820 	 * Clear the interrupt by writing back the same value
821 	 */
822 	writel(status, &regs->outbound_intr_status);
823 
824 	/*
825 	 * dummy read to flush PCI
826 	 */
827 	readl(&regs->outbound_intr_status);
828 
829 	return mfiStatus;
830 }
831 
832 /**
833  * megasas_fire_cmd_skinny -	Sends command to the FW
834  * @instance:		Adapter soft state
835  * @frame_phys_addr:	Physical address of cmd
836  * @frame_count:	Number of frames for the command
837  * @regs:		MFI register set
838  */
839 static inline void
840 megasas_fire_cmd_skinny(struct megasas_instance *instance,
841 			dma_addr_t frame_phys_addr,
842 			u32 frame_count,
843 			struct megasas_register_set __iomem *regs)
844 {
845 	unsigned long flags;
846 
847 	spin_lock_irqsave(&instance->hba_lock, flags);
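	/*
	 * The 64-bit frame address is split across the high and low
	 * inbound queue ports.
	 */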
848 	writel(upper_32_bits(frame_phys_addr),
849 	       &(regs)->inbound_high_queue_port);
850 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
851 	       &(regs)->inbound_low_queue_port);
852 	spin_unlock_irqrestore(&instance->hba_lock, flags);
853 }
854 
855 /**
856  * megasas_check_reset_skinny -	For controller reset check
857  * @instance:	Adapter soft state
858  * @regs:	MFI register set
859  */
860 static int
861 megasas_check_reset_skinny(struct megasas_instance *instance,
862 				struct megasas_register_set __iomem *regs)
863 {
864 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
865 		return 1;
866 
867 	return 0;
868 }
869 
870 static struct megasas_instance_template megasas_instance_template_skinny = {
871 
872 	.fire_cmd = megasas_fire_cmd_skinny,
873 	.enable_intr = megasas_enable_intr_skinny,
874 	.disable_intr = megasas_disable_intr_skinny,
875 	.clear_intr = megasas_clear_intr_skinny,
876 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
877 	.adp_reset = megasas_adp_reset_gen2,
878 	.check_reset = megasas_check_reset_skinny,
879 	.service_isr = megasas_isr,
880 	.tasklet = megasas_complete_cmd_dpc,
881 	.init_adapter = megasas_init_adapter_mfi,
882 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
883 	.issue_dcmd = megasas_issue_dcmd,
884 };
885 
886 
887 /*
888  * The following functions are defined for gen2 (deviceid : 0x78, 0x79)
889  * controllers
890  */
891 
892 /**
893  * megasas_enable_intr_gen2 -  Enables interrupts
894  * @instance:	Adapter soft state
895  */
896 static inline void
897 megasas_enable_intr_gen2(struct megasas_instance *instance)
898 {
899 	struct megasas_register_set __iomem *regs;
900 
901 	regs = instance->reg_set;
902 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
903 
904 	/* write ~0x00000005, i.e. ~(4 | 1), to the intr mask */
905 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
906 
907 	/* Dummy readl to force pci flush */
908 	readl(&regs->outbound_intr_mask);
909 }
910 
911 /**
912  * megasas_disable_intr_gen2 - Disables interrupt
913  * @instance:	Adapter soft state
914  */
915 static inline void
916 megasas_disable_intr_gen2(struct megasas_instance *instance)
917 {
918 	struct megasas_register_set __iomem *regs;
919 	u32 mask = 0xFFFFFFFF;
920 
921 	regs = instance->reg_set;
922 	writel(mask, &regs->outbound_intr_mask);
923 	/* Dummy readl to force pci flush */
924 	readl(&regs->outbound_intr_mask);
925 }
926 
927 /**
928  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
929  * @instance:	Adapter soft state
930  */
931 static u32
932 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
933 {
934 	return readl(&instance->reg_set->outbound_scratch_pad_0);
935 }
936 
937 /**
938  * megasas_clear_intr_gen2 -      Check & clear interrupt
939  * @instance:	Adapter soft state
940  */
941 static int
942 megasas_clear_intr_gen2(struct megasas_instance *instance)
943 {
944 	u32 status;
945 	u32 mfiStatus = 0;
946 	struct megasas_register_set __iomem *regs;
947 	regs = instance->reg_set;
948 
949 	/*
950 	 * Check if it is our interrupt
951 	 */
952 	status = readl(&regs->outbound_intr_status);
953 
954 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
955 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
956 	}
957 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
958 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
959 	}
960 
961 	/*
962 	 * Clear the interrupt by writing back the same value
963 	 */
964 	if (mfiStatus)
965 		writel(status, &regs->outbound_doorbell_clear);
966 
967 	/* Dummy readl to force pci flush */
968 	readl(&regs->outbound_intr_status);
969 
970 	return mfiStatus;
971 }
972 
973 /**
974  * megasas_fire_cmd_gen2 -     Sends command to the FW
975  * @instance:		Adapter soft state
976  * @frame_phys_addr:	Physical address of cmd
977  * @frame_count:	Number of frames for the command
978  * @regs:		MFI register set
979  */
980 static inline void
981 megasas_fire_cmd_gen2(struct megasas_instance *instance,
982 			dma_addr_t frame_phys_addr,
983 			u32 frame_count,
984 			struct megasas_register_set __iomem *regs)
985 {
986 	unsigned long flags;
987 
988 	spin_lock_irqsave(&instance->hba_lock, flags);
989 	writel((frame_phys_addr | (frame_count<<1))|1,
990 			&(regs)->inbound_queue_port);
991 	spin_unlock_irqrestore(&instance->hba_lock, flags);
992 }
993 
994 /**
995  * megasas_adp_reset_gen2 -	For controller reset
996  * @instance:	Adapter soft state
997  * @reg_set:	MFI register set
998  */
999 static int
1000 megasas_adp_reset_gen2(struct megasas_instance *instance,
1001 			struct megasas_register_set __iomem *reg_set)
1002 {
1003 	u32 retry = 0;
1004 	u32 HostDiag;
1005 	u32 __iomem *seq_offset = &reg_set->seq_offset;
1006 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
1007 
1008 	if (instance->instancet == &megasas_instance_template_skinny) {
1009 		seq_offset = &reg_set->fusion_seq_offset;
1010 		hostdiag_offset = &reg_set->fusion_host_diag;
1011 	}
1012 
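	/*
	 * Write the key sequence to the sequence register so that the
	 * host diag register becomes writable (DIAG_WRITE_ENABLE below).
	 */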
1013 	writel(0, seq_offset);
1014 	writel(4, seq_offset);
1015 	writel(0xb, seq_offset);
1016 	writel(2, seq_offset);
1017 	writel(7, seq_offset);
1018 	writel(0xd, seq_offset);
1019 
1020 	msleep(1000);
1021 
1022 	HostDiag = (u32)readl(hostdiag_offset);
1023 
1024 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1025 		msleep(100);
1026 		HostDiag = (u32)readl(hostdiag_offset);
1027 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1028 					retry, HostDiag);
1029 
1030 		if (retry++ >= 100)
1031 			return 1;
1032 
1033 	}
1034 
1035 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1036 
1037 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1038 
1039 	ssleep(10);
1040 
1041 	HostDiag = (u32)readl(hostdiag_offset);
1042 	while (HostDiag & DIAG_RESET_ADAPTER) {
1043 		msleep(100);
1044 		HostDiag = (u32)readl(hostdiag_offset);
1045 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1046 				retry, HostDiag);
1047 
1048 		if (retry++ >= 1000)
1049 			return 1;
1050 
1051 	}
1052 	return 0;
1053 }
1054 
1055 /**
1056  * megasas_check_reset_gen2 -	For controller reset check
1057  * @instance:	Adapter soft state
1058  * @regs:	MFI register set
1059  */
1060 static int
1061 megasas_check_reset_gen2(struct megasas_instance *instance,
1062 		struct megasas_register_set __iomem *regs)
1063 {
1064 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1065 		return 1;
1066 
1067 	return 0;
1068 }
1069 
1070 static struct megasas_instance_template megasas_instance_template_gen2 = {
1071 
1072 	.fire_cmd = megasas_fire_cmd_gen2,
1073 	.enable_intr = megasas_enable_intr_gen2,
1074 	.disable_intr = megasas_disable_intr_gen2,
1075 	.clear_intr = megasas_clear_intr_gen2,
1076 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1077 	.adp_reset = megasas_adp_reset_gen2,
1078 	.check_reset = megasas_check_reset_gen2,
1079 	.service_isr = megasas_isr,
1080 	.tasklet = megasas_complete_cmd_dpc,
1081 	.init_adapter = megasas_init_adapter_mfi,
1082 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1083 	.issue_dcmd = megasas_issue_dcmd,
1084 };
1085 
1086 /*
1087  * This is the end of set of functions & definitions
1088  * specific to gen2 (deviceid : 0x78, 0x79) controllers
1089  */
1090 
1091 /*
1092  * Template added for TB (Fusion)
1093  */
1094 extern struct megasas_instance_template megasas_instance_template_fusion;
1095 
1096 /**
1097  * megasas_issue_polled -	Issues a polling command
1098  * @instance:			Adapter soft state
1099  * @cmd:			Command packet to be issued
1100  *
1101  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1102  */
1103 int
1104 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1105 {
1106 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1107 
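	/*
	 * Completion is detected by polling cmd_status in wait_and_poll(),
	 * so ask the FW not to post this frame to the reply queue.
	 */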
1108 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1109 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1110 
1111 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1112 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1113 			__func__, __LINE__);
1114 		return DCMD_INIT;
1115 	}
1116 
1117 	instance->instancet->issue_dcmd(instance, cmd);
1118 
1119 	return wait_and_poll(instance, cmd, instance->requestorId ?
1120 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1121 }
1122 
1123 /**
1124  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1125  * @instance:			Adapter soft state
1126  * @cmd:			Command to be issued
1127  * @timeout:			Timeout in seconds
1128  *
1129  * This function waits on an event for the command to be returned from ISR.
1130  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1131  * Used to issue ioctl commands.
1132  */
1133 int
1134 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1135 			  struct megasas_cmd *cmd, int timeout)
1136 {
1137 	int ret = 0;
1138 	cmd->cmd_status_drv = DCMD_INIT;
1139 
1140 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1141 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1142 			__func__, __LINE__);
1143 		return DCMD_INIT;
1144 	}
1145 
1146 	instance->instancet->issue_dcmd(instance, cmd);
1147 
1148 	if (timeout) {
1149 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1150 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1151 		if (!ret) {
1152 			dev_err(&instance->pdev->dev,
1153 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1154 				cmd->frame->dcmd.opcode, __func__);
1155 			return DCMD_TIMEOUT;
1156 		}
1157 	} else
1158 		wait_event(instance->int_cmd_wait_q,
1159 				cmd->cmd_status_drv != DCMD_INIT);
1160 
1161 	return cmd->cmd_status_drv;
1162 }
1163 
1164 /**
1165  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1166  * @instance:				Adapter soft state
1167  * @cmd_to_abort:			Previously issued cmd to be aborted
1168  * @timeout:				Timeout in seconds
1169  *
1170  * MFI firmware can abort previously issued AEN command (automatic event
1171  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1172  * cmd and waits for return status.
1173  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1174  */
1175 static int
1176 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1177 				struct megasas_cmd *cmd_to_abort, int timeout)
1178 {
1179 	struct megasas_cmd *cmd;
1180 	struct megasas_abort_frame *abort_fr;
1181 	int ret = 0;
1182 	u32 opcode;
1183 
1184 	cmd = megasas_get_cmd(instance);
1185 
1186 	if (!cmd)
1187 		return -1;
1188 
1189 	abort_fr = &cmd->frame->abort;
1190 
1191 	/*
1192 	 * Prepare and issue the abort frame
1193 	 */
1194 	abort_fr->cmd = MFI_CMD_ABORT;
1195 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1196 	abort_fr->flags = cpu_to_le16(0);
1197 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1198 	abort_fr->abort_mfi_phys_addr_lo =
1199 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1200 	abort_fr->abort_mfi_phys_addr_hi =
1201 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1202 
1203 	cmd->sync_cmd = 1;
1204 	cmd->cmd_status_drv = DCMD_INIT;
1205 
1206 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1207 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1208 			__func__, __LINE__);
1209 		return DCMD_INIT;
1210 	}
1211 
1212 	instance->instancet->issue_dcmd(instance, cmd);
1213 
1214 	if (timeout) {
1215 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1216 		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1217 		if (!ret) {
1218 			opcode = cmd_to_abort->frame->dcmd.opcode;
1219 			dev_err(&instance->pdev->dev,
1220 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1221 				opcode,  __func__);
1222 			return DCMD_TIMEOUT;
1223 		}
1224 	} else
1225 		wait_event(instance->abort_cmd_wait_q,
1226 		cmd->cmd_status_drv != DCMD_INIT);
1227 
1228 	cmd->sync_cmd = 0;
1229 
1230 	megasas_return_cmd(instance, cmd);
1231 	return cmd->cmd_status_drv;
1232 }
1233 
1234 /**
1235  * megasas_make_sgl32 -	Prepares 32-bit SGL
1236  * @instance:		Adapter soft state
1237  * @scp:		SCSI command from the mid-layer
1238  * @mfi_sgl:		SGL to be filled in
1239  *
1240  * If successful, this function returns the number of SG elements. Otherwise,
1241  * it returns -1.
1242  */
1243 static int
1244 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1245 		   union megasas_sgl *mfi_sgl)
1246 {
1247 	int i;
1248 	int sge_count;
1249 	struct scatterlist *os_sgl;
1250 
1251 	sge_count = scsi_dma_map(scp);
1252 	BUG_ON(sge_count < 0);
1253 
1254 	if (sge_count) {
1255 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1256 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1257 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1258 		}
1259 	}
1260 	return sge_count;
1261 }
1262 
1263 /**
1264  * megasas_make_sgl64 -	Prepares 64-bit SGL
1265  * @instance:		Adapter soft state
1266  * @scp:		SCSI command from the mid-layer
1267  * @mfi_sgl:		SGL to be filled in
1268  *
1269  * If successful, this function returns the number of SG elements. Otherwise,
1270  * it returns -1.
1271  */
1272 static int
1273 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1274 		   union megasas_sgl *mfi_sgl)
1275 {
1276 	int i;
1277 	int sge_count;
1278 	struct scatterlist *os_sgl;
1279 
1280 	sge_count = scsi_dma_map(scp);
1281 	BUG_ON(sge_count < 0);
1282 
1283 	if (sge_count) {
1284 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1285 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1286 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1287 		}
1288 	}
1289 	return sge_count;
1290 }
1291 
1292 /**
1293  * megasas_make_sgl_skinny - Prepares IEEE SGL
1294  * @instance:           Adapter soft state
1295  * @scp:                SCSI command from the mid-layer
1296  * @mfi_sgl:            SGL to be filled in
1297  *
1298  * If successful, this function returns the number of SG elements. Otherwise,
1299  * it returns -1.
1300  */
1301 static int
1302 megasas_make_sgl_skinny(struct megasas_instance *instance,
1303 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1304 {
1305 	int i;
1306 	int sge_count;
1307 	struct scatterlist *os_sgl;
1308 
1309 	sge_count = scsi_dma_map(scp);
1310 
1311 	if (sge_count) {
1312 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1313 			mfi_sgl->sge_skinny[i].length =
1314 				cpu_to_le32(sg_dma_len(os_sgl));
1315 			mfi_sgl->sge_skinny[i].phys_addr =
1316 				cpu_to_le64(sg_dma_address(os_sgl));
1317 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1318 		}
1319 	}
1320 	return sge_count;
1321 }
1322 
1323 /**
1324  * megasas_get_frame_count - Computes the number of frames
1325  * @instance		: Adapter soft state
1326  * @sge_count		: number of sg elements
1327  * @frame_type		: type of frame - io or pthru frame
1328  * Returns the number of frames required for the given number of sge's (sge_count)
1329  */
1330 
1331 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1332 			u8 sge_count, u8 frame_type)
1333 {
1334 	int num_cnt;
1335 	int sge_bytes;
1336 	u32 sge_sz;
1337 	u32 frame_count = 0;
1338 
1339 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1340 	    sizeof(struct megasas_sge32);
1341 
1342 	if (instance->flag_ieee) {
1343 		sge_sz = sizeof(struct megasas_sge_skinny);
1344 	}
1345 
1346 	/*
1347 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1348 	 * 3 SGEs for 32-bit SGLs for ldio &
1349 	 * 1 SGEs for 64-bit SGLs and
1350 	 * 2 SGEs for 32-bit SGLs for pthru frame
1351 	 */
1352 	if (unlikely(frame_type == PTHRU_FRAME)) {
1353 		if (instance->flag_ieee == 1) {
1354 			num_cnt = sge_count - 1;
1355 		} else if (IS_DMA64)
1356 			num_cnt = sge_count - 1;
1357 		else
1358 			num_cnt = sge_count - 2;
1359 	} else {
1360 		if (instance->flag_ieee == 1) {
1361 			num_cnt = sge_count - 1;
1362 		} else if (IS_DMA64)
1363 			num_cnt = sge_count - 2;
1364 		else
1365 			num_cnt = sge_count - 3;
1366 	}
1367 
1368 	if (num_cnt > 0) {
1369 		sge_bytes = sge_sz * num_cnt;
1370 
1371 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1372 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1373 	}
1374 	/* Main frame */
1375 	frame_count += 1;
1376 
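	/* Limit the total to 8 frames: one main frame plus up to 7 more */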
1377 	if (frame_count > 7)
1378 		frame_count = 8;
1379 	return frame_count;
1380 }
1381 
1382 /**
1383  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1384  * @instance:		Adapter soft state
1385  * @scp:		SCSI command
1386  * @cmd:		Command to be prepared in
1387  *
1388  * This function prepares CDB commands. These are typically pass-through
1389  * commands to the devices.
1390  */
1391 static int
1392 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1393 		   struct megasas_cmd *cmd)
1394 {
1395 	u32 is_logical;
1396 	u32 device_id;
1397 	u16 flags = 0;
1398 	struct megasas_pthru_frame *pthru;
1399 
1400 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1401 	device_id = MEGASAS_DEV_INDEX(scp);
1402 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1403 
1404 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1405 		flags = MFI_FRAME_DIR_WRITE;
1406 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1407 		flags = MFI_FRAME_DIR_READ;
1408 	else if (scp->sc_data_direction == DMA_NONE)
1409 		flags = MFI_FRAME_DIR_NONE;
1410 
1411 	if (instance->flag_ieee == 1) {
1412 		flags |= MFI_FRAME_IEEE;
1413 	}
1414 
1415 	/*
1416 	 * Prepare the DCDB frame
1417 	 */
1418 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1419 	pthru->cmd_status = 0x0;
1420 	pthru->scsi_status = 0x0;
1421 	pthru->target_id = device_id;
1422 	pthru->lun = scp->device->lun;
1423 	pthru->cdb_len = scp->cmd_len;
1424 	pthru->timeout = 0;
1425 	pthru->pad_0 = 0;
1426 	pthru->flags = cpu_to_le16(flags);
1427 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1428 
1429 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1430 
1431 	/*
1432 	 * If the command is for the tape device, set the
1433 	 * pthru timeout to the os layer timeout value.
1434 	 */
1435 	if (scp->device->type == TYPE_TAPE) {
1436 		if ((scp->request->timeout / HZ) > 0xFFFF)
1437 			pthru->timeout = cpu_to_le16(0xFFFF);
1438 		else
1439 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1440 	}
1441 
1442 	/*
1443 	 * Construct SGL
1444 	 */
1445 	if (instance->flag_ieee == 1) {
1446 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1447 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1448 						      &pthru->sgl);
1449 	} else if (IS_DMA64) {
1450 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1451 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1452 						      &pthru->sgl);
1453 	} else
1454 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1455 						      &pthru->sgl);
1456 
1457 	if (pthru->sge_count > instance->max_num_sge) {
1458 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1459 			pthru->sge_count);
1460 		return 0;
1461 	}
1462 
1463 	/*
1464 	 * Sense info specific
1465 	 */
1466 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1467 	pthru->sense_buf_phys_addr_hi =
1468 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1469 	pthru->sense_buf_phys_addr_lo =
1470 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1471 
1472 	/*
1473 	 * Compute the total number of frames this command consumes. FW uses
1474 	 * this number to pull sufficient number of frames from host memory.
1475 	 */
1476 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1477 							PTHRU_FRAME);
1478 
1479 	return cmd->frame_count;
1480 }
1481 
1482 /**
1483  * megasas_build_ldio -	Prepares IOs to logical devices
1484  * @instance:		Adapter soft state
1485  * @scp:		SCSI command
1486  * @cmd:		Command to be prepared
1487  *
1488  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1489  */
1490 static int
1491 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1492 		   struct megasas_cmd *cmd)
1493 {
1494 	u32 device_id;
1495 	u8 sc = scp->cmnd[0];
1496 	u16 flags = 0;
1497 	struct megasas_io_frame *ldio;
1498 
1499 	device_id = MEGASAS_DEV_INDEX(scp);
1500 	ldio = (struct megasas_io_frame *)cmd->frame;
1501 
1502 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1503 		flags = MFI_FRAME_DIR_WRITE;
1504 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1505 		flags = MFI_FRAME_DIR_READ;
1506 
1507 	if (instance->flag_ieee == 1) {
1508 		flags |= MFI_FRAME_IEEE;
1509 	}
1510 
1511 	/*
1512 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1513 	 */
1514 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1515 	ldio->cmd_status = 0x0;
1516 	ldio->scsi_status = 0x0;
1517 	ldio->target_id = device_id;
1518 	ldio->timeout = 0;
1519 	ldio->reserved_0 = 0;
1520 	ldio->pad_0 = 0;
1521 	ldio->flags = cpu_to_le16(flags);
1522 	ldio->start_lba_hi = 0;
1523 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1524 
1525 	/*
1526 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1527 	 */
1528 	if (scp->cmd_len == 6) {
1529 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1530 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1531 						 ((u32) scp->cmnd[2] << 8) |
1532 						 (u32) scp->cmnd[3]);
1533 
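		/* A 6-byte CDB carries only a 21-bit LBA */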
1534 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1535 	}
1536 
1537 	/*
1538 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1539 	 */
1540 	else if (scp->cmd_len == 10) {
1541 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1542 					      ((u32) scp->cmnd[7] << 8));
1543 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1544 						 ((u32) scp->cmnd[3] << 16) |
1545 						 ((u32) scp->cmnd[4] << 8) |
1546 						 (u32) scp->cmnd[5]);
1547 	}
1548 
1549 	/*
1550 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1551 	 */
1552 	else if (scp->cmd_len == 12) {
1553 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1554 					      ((u32) scp->cmnd[7] << 16) |
1555 					      ((u32) scp->cmnd[8] << 8) |
1556 					      (u32) scp->cmnd[9]);
1557 
1558 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1559 						 ((u32) scp->cmnd[3] << 16) |
1560 						 ((u32) scp->cmnd[4] << 8) |
1561 						 (u32) scp->cmnd[5]);
1562 	}
1563 
1564 	/*
1565 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1566 	 */
1567 	else if (scp->cmd_len == 16) {
1568 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1569 					      ((u32) scp->cmnd[11] << 16) |
1570 					      ((u32) scp->cmnd[12] << 8) |
1571 					      (u32) scp->cmnd[13]);
1572 
1573 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1574 						 ((u32) scp->cmnd[7] << 16) |
1575 						 ((u32) scp->cmnd[8] << 8) |
1576 						 (u32) scp->cmnd[9]);
1577 
1578 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1579 						 ((u32) scp->cmnd[3] << 16) |
1580 						 ((u32) scp->cmnd[4] << 8) |
1581 						 (u32) scp->cmnd[5]);
1582 
1583 	}
1584 
1585 	/*
1586 	 * Construct SGL
1587 	 */
1588 	if (instance->flag_ieee) {
1589 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1590 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1591 					      &ldio->sgl);
1592 	} else if (IS_DMA64) {
1593 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1594 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1595 	} else
1596 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1597 
1598 	if (ldio->sge_count > instance->max_num_sge) {
1599 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1600 			ldio->sge_count);
1601 		return 0;
1602 	}
1603 
1604 	/*
1605 	 * Sense info specific
1606 	 */
1607 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1608 	ldio->sense_buf_phys_addr_hi = 0;
1609 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1610 
1611 	/*
1612 	 * Compute the total number of frames this command consumes. FW uses
1613 	 * this number to pull sufficient number of frames from host memory.
1614 	 */
1615 	cmd->frame_count = megasas_get_frame_count(instance,
1616 			ldio->sge_count, IO_FRAME);
1617 
1618 	return cmd->frame_count;
1619 }
1620 
1621 /**
1622  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1623  *				and whether it's RW or non RW
1624  * @cmd:			SCSI command
1625  *
1626  */
1627 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1628 {
1629 	int ret;
1630 
1631 	switch (cmd->cmnd[0]) {
1632 	case READ_10:
1633 	case WRITE_10:
1634 	case READ_12:
1635 	case WRITE_12:
1636 	case READ_6:
1637 	case WRITE_6:
1638 	case READ_16:
1639 	case WRITE_16:
1640 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1641 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1642 		break;
1643 	default:
1644 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1645 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1646 	}
1647 	return ret;
1648 }
1649 
1650 /**
1651  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1652  *					in FW
1653  * @instance:				Adapter soft state
1654  */
1655 static inline void
1656 megasas_dump_pending_frames(struct megasas_instance *instance)
1657 {
1658 	struct megasas_cmd *cmd;
1659 	int i, n;
1660 	union megasas_sgl *mfi_sgl;
1661 	struct megasas_io_frame *ldio;
1662 	struct megasas_pthru_frame *pthru;
1663 	u32 sgcount;
1664 	u16 max_cmd = instance->max_fw_cmds;
1665 
1666 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1667 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1668 	if (IS_DMA64)
1669 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1670 	else
1671 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1672 
1673 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1674 	for (i = 0; i < max_cmd; i++) {
1675 		cmd = instance->cmd_list[i];
1676 		if (!cmd->scmd)
1677 			continue;
1678 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1679 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1680 			ldio = (struct megasas_io_frame *)cmd->frame;
1681 			mfi_sgl = &ldio->sgl;
1682 			sgcount = ldio->sge_count;
1683 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1684 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1685 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1686 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1687 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1688 		} else {
1689 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1690 			mfi_sgl = &pthru->sgl;
1691 			sgcount = pthru->sge_count;
1692 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1693 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1694 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1695 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1696 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1697 		}
1698 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1699 			for (n = 0; n < sgcount; n++) {
1700 				if (IS_DMA64)
1701 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1702 						le32_to_cpu(mfi_sgl->sge64[n].length),
1703 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1704 				else
1705 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1706 						le32_to_cpu(mfi_sgl->sge32[n].length),
1707 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1708 			}
1709 		}
1710 	} /*for max_cmd*/
1711 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1712 	for (i = 0; i < max_cmd; i++) {
1713 
1714 		cmd = instance->cmd_list[i];
1715 
1716 		if (cmd->sync_cmd == 1)
1717 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1718 	}
1719 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1720 }
1721 
1722 u32
1723 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1724 			    struct scsi_cmnd *scmd)
1725 {
1726 	struct megasas_cmd *cmd;
1727 	u32 frame_count;
1728 
1729 	cmd = megasas_get_cmd(instance);
1730 	if (!cmd)
1731 		return SCSI_MLQUEUE_HOST_BUSY;
1732 
1733 	/*
1734 	 * Logical drive command
1735 	 */
1736 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1737 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1738 	else
1739 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1740 
1741 	if (!frame_count)
1742 		goto out_return_cmd;
1743 
1744 	cmd->scmd = scmd;
1745 	scmd->SCp.ptr = (char *)cmd;
1746 
1747 	/*
1748 	 * Issue the command to the FW
1749 	 */
1750 	atomic_inc(&instance->fw_outstanding);
1751 
1752 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1753 				cmd->frame_count-1, instance->reg_set);
1754 
1755 	return 0;
1756 out_return_cmd:
1757 	megasas_return_cmd(instance, cmd);
1758 	return SCSI_MLQUEUE_HOST_BUSY;
1759 }
1760 
1761 
1762 /**
1763  * megasas_queue_command -	Queue entry point
1764  * @shost:			adapter SCSI host
1765  * @scmd:			SCSI command to be queued
1766  */
1767 static int
1768 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1769 {
1770 	struct megasas_instance *instance;
1771 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1772 
1773 	instance = (struct megasas_instance *)
1774 	    scmd->device->host->hostdata;
1775 
1776 	if (instance->unload == 1) {
1777 		scmd->result = DID_NO_CONNECT << 16;
1778 		scmd->scsi_done(scmd);
1779 		return 0;
1780 	}
1781 
1782 	if (instance->issuepend_done == 0)
1783 		return SCSI_MLQUEUE_HOST_BUSY;
1784 
1785 
1786 	/* Check for an mpio path and adjust behavior */
1787 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1788 		if (megasas_check_mpio_paths(instance, scmd) ==
1789 		    (DID_REQUEUE << 16)) {
1790 			return SCSI_MLQUEUE_HOST_BUSY;
1791 		} else {
1792 			scmd->result = DID_NO_CONNECT << 16;
1793 			scmd->scsi_done(scmd);
1794 			return 0;
1795 		}
1796 	}
1797 
1798 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1799 		scmd->result = DID_NO_CONNECT << 16;
1800 		scmd->scsi_done(scmd);
1801 		return 0;
1802 	}
1803 
1804 	mr_device_priv_data = scmd->device->hostdata;
1805 	if (!mr_device_priv_data) {
1806 		scmd->result = DID_NO_CONNECT << 16;
1807 		scmd->scsi_done(scmd);
1808 		return 0;
1809 	}
1810 
1811 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1812 		return SCSI_MLQUEUE_HOST_BUSY;
1813 
1814 	if (mr_device_priv_data->tm_busy)
1815 		return SCSI_MLQUEUE_DEVICE_BUSY;
1816 
1817 
1818 	scmd->result = 0;
1819 
1820 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1821 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1822 		scmd->device->lun)) {
1823 		scmd->result = DID_BAD_TARGET << 16;
1824 		goto out_done;
1825 	}
1826 
1827 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1828 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1829 	    (!instance->fw_sync_cache_support)) {
1830 		scmd->result = DID_OK << 16;
1831 		goto out_done;
1832 	}
1833 
1834 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1835 
1836  out_done:
1837 	scmd->scsi_done(scmd);
1838 	return 0;
1839 }
1840 
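/**
 * megasas_lookup_instance -	Look up adapter soft state by SCSI host number
 * @host_no:			SCSI host number assigned by the mid-layer
 *
 * Returns the matching megasas_instance from the global management info
 * table, or NULL if no registered adapter matches @host_no.
 */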
1841 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1842 {
1843 	int i;
1844 
1845 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1846 
1847 		if ((megasas_mgmt_info.instance[i]) &&
1848 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1849 			return megasas_mgmt_info.instance[i];
1850 	}
1851 
1852 	return NULL;
1853 }
1854 
/*
 * megasas_set_dynamic_target_properties -
 * Device properties set by the driver may not be static and need to be
 * updated after OCR.
 *
 * set tm_capable.
 * set dma alignment (only for EEDP protection enabled VDs).
 *
 * @sdev: OS provided scsi device
 * @is_target_prop: true, if FW provided target properties
 *
 * Returns void
 */
1867 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1868 					   bool is_target_prop)
1869 {
1870 	u16 pd_index = 0, ld;
1871 	u32 device_id;
1872 	struct megasas_instance *instance;
1873 	struct fusion_context *fusion;
1874 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1875 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1876 	struct MR_LD_RAID *raid;
1877 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1878 
1879 	instance = megasas_lookup_instance(sdev->host->host_no);
1880 	fusion = instance->ctrl_context;
1881 	mr_device_priv_data = sdev->hostdata;
1882 
1883 	if (!fusion || !mr_device_priv_data)
1884 		return;
1885 
1886 	if (MEGASAS_IS_LOGICAL(sdev)) {
1887 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1888 					+ sdev->id;
1889 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1890 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1891 		if (ld >= instance->fw_supported_vd_count)
1892 			return;
1893 		raid = MR_LdRaidGet(ld, local_map_ptr);
1894 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1897 
1898 		mr_device_priv_data->is_tm_capable =
1899 			raid->capability.tmCapable;
1900 
1901 		if (!raid->flags.isEPD)
1902 			sdev->no_write_same = 1;
1903 
1904 	} else if (instance->use_seqnum_jbod_fp) {
1905 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1906 			sdev->id;
1907 		pd_sync = (void *)fusion->pd_seq_sync
1908 				[(instance->pd_seq_map_id - 1) & 1];
1909 		mr_device_priv_data->is_tm_capable =
1910 			pd_sync->seq[pd_index].capability.tmCapable;
1911 	}
1912 
1913 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1914 		/*
1915 		 * If FW provides a target reset timeout value, driver will use
1916 		 * it. If not set, fallback to default values.
1917 		 */
1918 		mr_device_priv_data->target_reset_tmo =
1919 			min_t(u8, instance->max_reset_tmo,
1920 			      instance->tgt_prop->reset_tmo);
1921 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1922 	} else {
1923 		mr_device_priv_data->target_reset_tmo =
1924 						MEGASAS_DEFAULT_TM_TIMEOUT;
1925 		mr_device_priv_data->task_abort_tmo =
1926 						MEGASAS_DEFAULT_TM_TIMEOUT;
1927 	}
1928 }
1929 
1930 /*
1931  * megasas_set_nvme_device_properties -
1932  * set nomerges=2
1933  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1934  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1935  *
 * MR firmware provides the value in KB. The caller of this function
 * converts KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * NVMe page size. In case of a 4K page size,
 * MR firmware provides the value 128, as (32 * 4K) = 128K.
1941  *
1942  * @sdev:				scsi device
1943  * @max_io_size:				maximum io transfer size
1944  *
1945  */
1946 static inline void
1947 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1948 {
1949 	struct megasas_instance *instance;
1950 	u32 mr_nvme_pg_size;
1951 
1952 	instance = (struct megasas_instance *)sdev->host->hostdata;
1953 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1954 				MR_DEFAULT_NVME_PAGE_SIZE);
1955 
1956 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1957 
1958 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1959 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1960 }
1961 
/*
 * megasas_set_fw_assisted_qd -
 * set device queue depth to the FW assisted queue depth, based on the
 * interface type and the target properties; when sdev max qd is enabled,
 * set device queue depth to can_queue.
 *
 * @sdev:				scsi device
 * @is_target_prop:			true, if FW provided target properties.
 */
1970 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1971 						 bool is_target_prop)
1972 {
1973 	u8 interface_type;
1974 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1975 	u32 tgt_device_qd;
1976 	struct megasas_instance *instance;
1977 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1978 
1979 	instance = megasas_lookup_instance(sdev->host->host_no);
1980 	mr_device_priv_data = sdev->hostdata;
1981 	interface_type  = mr_device_priv_data->interface_type;
1982 
1983 	switch (interface_type) {
1984 	case SAS_PD:
1985 		device_qd = MEGASAS_SAS_QD;
1986 		break;
1987 	case SATA_PD:
1988 		device_qd = MEGASAS_SATA_QD;
1989 		break;
1990 	case NVME_PD:
1991 		device_qd = MEGASAS_NVME_QD;
1992 		break;
1993 	}
1994 
1995 	if (is_target_prop) {
1996 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1997 		if (tgt_device_qd)
1998 			device_qd = min(instance->host->can_queue,
1999 					(int)tgt_device_qd);
2000 	}
2001 
2002 	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
2003 		device_qd = instance->host->can_queue;
2004 
2005 	scsi_change_queue_depth(sdev, device_qd);
2006 }
2007 
/*
 * megasas_set_static_target_properties -
 * Device properties set by the driver are static and do not need to be
 * updated after OCR.
 *
 * set io timeout
 * set device queue depth
 * set nvme device properties. see - megasas_set_nvme_device_properties
 *
 * @sdev:				scsi device
 * @is_target_prop:			true, if FW provided target properties.
 */
2020 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2021 						 bool is_target_prop)
2022 {
2023 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2024 	struct megasas_instance *instance;
2025 
2026 	instance = megasas_lookup_instance(sdev->host->host_no);
2027 
2028 	/*
2029 	 * The RAID firmware may require extended timeouts.
2030 	 */
2031 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2032 
2033 	/* max_io_size_kb will be set to non zero for
2034 	 * nvme based vd and syspd.
2035 	 */
2036 	if (is_target_prop)
2037 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2038 
2039 	if (instance->nvme_page_size && max_io_size_kb)
2040 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2041 
2042 	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2043 }
2044 
2045 
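/**
 * megasas_slave_configure -	SCSI mid-layer slave_configure entry point
 * @sdev:			OS provided scsi device
 *
 * Validates system PDs when the PD list is not supported, fetches PD info
 * and target properties from the FW under reset_mutex, and applies the
 * static and dynamic target properties to @sdev.
 */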
2046 static int megasas_slave_configure(struct scsi_device *sdev)
2047 {
2048 	u16 pd_index = 0;
2049 	struct megasas_instance *instance;
2050 	int ret_target_prop = DCMD_FAILED;
2051 	bool is_target_prop = false;
2052 
2053 	instance = megasas_lookup_instance(sdev->host->host_no);
2054 	if (instance->pd_list_not_supported) {
2055 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2056 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2057 				sdev->id;
2058 			if (instance->pd_list[pd_index].driveState !=
2059 				MR_PD_STATE_SYSTEM)
2060 				return -ENXIO;
2061 		}
2062 	}
2063 
2064 	mutex_lock(&instance->reset_mutex);
2065 	/* Send DCMD to Firmware and cache the information */
2066 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2067 		megasas_get_pd_info(instance, sdev);
2068 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
	 */
2072 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2073 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2074 
2075 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2076 	megasas_set_static_target_properties(sdev, is_target_prop);
2077 
2078 	/* This sdev property may change post OCR */
2079 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2080 
2081 	mutex_unlock(&instance->reset_mutex);
2082 
2083 	return 0;
2084 }
2085 
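/**
 * megasas_slave_alloc -	SCSI mid-layer slave_alloc entry point
 * @sdev:			OS provided scsi device
 *
 * Exposes only system PDs (and all logical drives) to the OS scan and
 * allocates the per-device private data used by the driver.
 */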
2086 static int megasas_slave_alloc(struct scsi_device *sdev)
2087 {
2088 	u16 pd_index = 0;
	struct megasas_instance *instance;
2090 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2091 
2092 	instance = megasas_lookup_instance(sdev->host->host_no);
2093 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2094 		/*
2095 		 * Open the OS scan to the SYSTEM PD
2096 		 */
2097 		pd_index =
2098 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2099 			sdev->id;
2100 		if ((instance->pd_list_not_supported ||
2101 			instance->pd_list[pd_index].driveState ==
2102 			MR_PD_STATE_SYSTEM)) {
2103 			goto scan_target;
2104 		}
2105 		return -ENXIO;
2106 	}
2107 
2108 scan_target:
2109 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2110 					GFP_KERNEL);
2111 	if (!mr_device_priv_data)
2112 		return -ENOMEM;
2113 	sdev->hostdata = mr_device_priv_data;
2114 
2115 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2116 		   instance->r1_ldio_hint_default);
2117 	return 0;
2118 }
2119 
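/**
 * megasas_slave_destroy -	SCSI mid-layer slave_destroy entry point
 * @sdev:			OS provided scsi device
 *
 * Frees the per-device private data allocated in megasas_slave_alloc.
 */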
2120 static void megasas_slave_destroy(struct scsi_device *sdev)
2121 {
2122 	kfree(sdev->hostdata);
2123 	sdev->hostdata = NULL;
2124 }
2125 
/*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after the
 *                                       adapter has been killed
 * @instance:				Adapter soft state
 */
2132 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2133 {
2134 	int i;
2135 	struct megasas_cmd *cmd_mfi;
2136 	struct megasas_cmd_fusion *cmd_fusion;
2137 	struct fusion_context *fusion = instance->ctrl_context;
2138 
2139 	/* Find all outstanding ioctls */
2140 	if (fusion) {
2141 		for (i = 0; i < instance->max_fw_cmds; i++) {
2142 			cmd_fusion = fusion->cmd_list[i];
2143 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2144 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2145 				if (cmd_mfi->sync_cmd &&
2146 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2147 					cmd_mfi->frame->hdr.cmd_status =
2148 							MFI_STAT_WRONG_STATE;
2149 					megasas_complete_cmd(instance,
2150 							     cmd_mfi, DID_OK);
2151 				}
2152 			}
2153 		}
2154 	} else {
2155 		for (i = 0; i < instance->max_fw_cmds; i++) {
2156 			cmd_mfi = instance->cmd_list[i];
2157 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2158 				MFI_CMD_ABORT)
2159 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2160 		}
2161 	}
2162 }
2163 
2164 
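/**
 * megaraid_sas_kill_hba -	Stop the controller and block further I/O
 * @instance:			Adapter soft state
 *
 * Moves the adapter to the critical error state, asks the FW to stop
 * (MFI_STOP_ADP) where applicable and completes all outstanding ioctls.
 */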
2165 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2166 {
2167 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2168 		dev_warn(&instance->pdev->dev,
2169 			 "Adapter already dead, skipping kill HBA\n");
2170 		return;
2171 	}
2172 
2173 	/* Set critical error to block I/O & ioctls in case caller didn't */
2174 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2175 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2176 	msleep(1000);
2177 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2178 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2179 		(instance->adapter_type != MFI_SERIES)) {
2180 		if (!instance->requestorId) {
2181 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2182 			/* Flush */
2183 			readl(&instance->reg_set->doorbell);
2184 		}
2185 		if (instance->requestorId && instance->peerIsPresent)
2186 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2187 	} else {
2188 		writel(MFI_STOP_ADP,
2189 			&instance->reg_set->inbound_doorbell);
2190 	}
2191 	/* Complete outstanding ioctls when adapter is killed */
2192 	megasas_complete_outstanding_ioctls(instance);
2193 }
2194 
/**
 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
 *					restored to max value
 * @instance:			Adapter soft state
 */
2201 void
2202 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2203 {
2204 	unsigned long flags;
2205 
2206 	if (instance->flag & MEGASAS_FW_BUSY
2207 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2208 	    && atomic_read(&instance->fw_outstanding) <
2209 	    instance->throttlequeuedepth + 1) {
2210 
2211 		spin_lock_irqsave(instance->host->host_lock, flags);
2212 		instance->flag &= ~MEGASAS_FW_BUSY;
2213 
2214 		instance->host->can_queue = instance->cur_can_queue;
2215 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2216 	}
2217 }
2218 
/**
 * megasas_complete_cmd_dpc -		Completes commands posted by the FW
 * @instance_addr:			Address of adapter soft state
 *
 * Tasklet that walks the reply queue and completes each command the FW has
 * finished, then restores the host queue depth if it had been throttled.
 */
2225 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2226 {
2227 	u32 producer;
2228 	u32 consumer;
2229 	u32 context;
2230 	struct megasas_cmd *cmd;
2231 	struct megasas_instance *instance =
2232 				(struct megasas_instance *)instance_addr;
2233 	unsigned long flags;
2234 
	/* If we have already declared the adapter dead, do not complete cmds */
2236 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2237 		return;
2238 
2239 	spin_lock_irqsave(&instance->completion_lock, flags);
2240 
2241 	producer = le32_to_cpu(*instance->producer);
2242 	consumer = le32_to_cpu(*instance->consumer);
2243 
2244 	while (consumer != producer) {
2245 		context = le32_to_cpu(instance->reply_queue[consumer]);
2246 		if (context >= instance->max_fw_cmds) {
2247 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2248 				context);
2249 			BUG();
2250 		}
2251 
2252 		cmd = instance->cmd_list[context];
2253 
2254 		megasas_complete_cmd(instance, cmd, DID_OK);
2255 
2256 		consumer++;
2257 		if (consumer == (instance->max_fw_cmds + 1)) {
2258 			consumer = 0;
2259 		}
2260 	}
2261 
2262 	*instance->consumer = cpu_to_le32(producer);
2263 
2264 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2265 
2266 	/*
2267 	 * Check if we can restore can_queue
2268 	 */
2269 	megasas_check_and_restore_queue_depth(instance);
2270 }
2271 
2272 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2273 
2274 /**
2275  * megasas_start_timer - Initializes sriov heartbeat timer object
2276  * @instance:		Adapter soft state
2277  *
2278  */
2279 void megasas_start_timer(struct megasas_instance *instance)
2280 {
2281 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2282 
2283 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2284 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2285 	add_timer(timer);
2286 }
2287 
2288 static void
2289 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2290 
2291 static void
2292 process_fw_state_change_wq(struct work_struct *work);
2293 
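/**
 * megasas_do_ocr -	Trigger an online controller reset (OCR)
 * @instance:		Adapter soft state
 *
 * Disables interrupts, moves the adapter into the internal reset state
 * machine, defers pending internal commands and runs the FW state change
 * work to bring the controller back to an operational state.
 */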
2294 static void megasas_do_ocr(struct megasas_instance *instance)
2295 {
2296 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2297 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2298 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2299 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2300 	}
2301 	instance->instancet->disable_intr(instance);
2302 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2303 	instance->issuepend_done = 0;
2304 
2305 	atomic_set(&instance->fw_outstanding, 0);
2306 	megasas_internal_reset_defer_cmds(instance);
2307 	process_fw_state_change_wq(&instance->work_init);
2308 }
2309 
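/**
 * megasas_get_ld_vf_affiliation_111 -	Get LD/VF affiliation (PlasmaFW111)
 * @instance:				Adapter soft state
 * @initial:				Non-zero for the first query after init
 *
 * Issues MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 to fetch the LD/VF affiliation
 * map. Returns 1 if a rescan is needed (new affiliation or DCMD failure),
 * 0 otherwise, and -ENOMEM on allocation failure.
 */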
2310 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2311 					    int initial)
2312 {
2313 	struct megasas_cmd *cmd;
2314 	struct megasas_dcmd_frame *dcmd;
2315 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2316 	dma_addr_t new_affiliation_111_h;
2317 	int ld, retval = 0;
2318 	u8 thisVf;
2319 
2320 	cmd = megasas_get_cmd(instance);
2321 
2322 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
		       "Failed to get cmd for scsi%d\n",
2325 			instance->host->host_no);
2326 		return -ENOMEM;
2327 	}
2328 
2329 	dcmd = &cmd->frame->dcmd;
2330 
2331 	if (!instance->vf_affiliation_111) {
2332 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2333 		       "affiliation for scsi%d\n", instance->host->host_no);
2334 		megasas_return_cmd(instance, cmd);
2335 		return -ENOMEM;
2336 	}
2337 
	if (initial) {
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
	} else {
2342 		new_affiliation_111 =
2343 			dma_alloc_coherent(&instance->pdev->dev,
2344 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2345 					   &new_affiliation_111_h, GFP_KERNEL);
2346 		if (!new_affiliation_111) {
2347 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2348 			       "memory for new affiliation for scsi%d\n",
2349 			       instance->host->host_no);
2350 			megasas_return_cmd(instance, cmd);
2351 			return -ENOMEM;
2352 		}
2353 	}
2354 
2355 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2356 
2357 	dcmd->cmd = MFI_CMD_DCMD;
2358 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2359 	dcmd->sge_count = 1;
2360 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2361 	dcmd->timeout = 0;
2362 	dcmd->pad_0 = 0;
2363 	dcmd->data_xfer_len =
2364 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2365 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2366 
2367 	if (initial)
2368 		dcmd->sgl.sge32[0].phys_addr =
2369 			cpu_to_le32(instance->vf_affiliation_111_h);
2370 	else
2371 		dcmd->sgl.sge32[0].phys_addr =
2372 			cpu_to_le32(new_affiliation_111_h);
2373 
2374 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2375 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2376 
2377 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2378 	       "scsi%d\n", instance->host->host_no);
2379 
2380 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2381 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2382 		       " failed with status 0x%x for scsi%d\n",
2383 		       dcmd->cmd_status, instance->host->host_no);
2384 		retval = 1; /* Do a scan if we couldn't get affiliation */
2385 		goto out;
2386 	}
2387 
2388 	if (!initial) {
2389 		thisVf = new_affiliation_111->thisVf;
2390 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2391 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2392 			    new_affiliation_111->map[ld].policy[thisVf]) {
2393 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2394 				       "Got new LD/VF affiliation for scsi%d\n",
2395 				       instance->host->host_no);
2396 				memcpy(instance->vf_affiliation_111,
2397 				       new_affiliation_111,
2398 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2399 				retval = 1;
2400 				goto out;
2401 			}
2402 	}
2403 out:
2404 	if (new_affiliation_111) {
2405 		dma_free_coherent(&instance->pdev->dev,
2406 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2407 				    new_affiliation_111,
2408 				    new_affiliation_111_h);
2409 	}
2410 
2411 	megasas_return_cmd(instance, cmd);
2412 
2413 	return retval;
2414 }
2415 
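/**
 * megasas_get_ld_vf_affiliation_12 -	Get LD/VF affiliation (non-PlasmaFW111)
 * @instance:				Adapter soft state
 * @initial:				Non-zero for the first query after init
 *
 * Issues MR_DCMD_LD_VF_MAP_GET_ALL_LDS and compares the new LD/VF map with
 * the saved one in both directions. Returns 1 if a rescan is needed,
 * 0 otherwise, and -ENOMEM on allocation failure.
 */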
2416 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2417 					    int initial)
2418 {
2419 	struct megasas_cmd *cmd;
2420 	struct megasas_dcmd_frame *dcmd;
2421 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2422 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2423 	dma_addr_t new_affiliation_h;
2424 	int i, j, retval = 0, found = 0, doscan = 0;
2425 	u8 thisVf;
2426 
2427 	cmd = megasas_get_cmd(instance);
2428 
2429 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2431 		       "Failed to get cmd for scsi%d\n",
2432 		       instance->host->host_no);
2433 		return -ENOMEM;
2434 	}
2435 
2436 	dcmd = &cmd->frame->dcmd;
2437 
2438 	if (!instance->vf_affiliation) {
2439 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2440 		       "affiliation for scsi%d\n", instance->host->host_no);
2441 		megasas_return_cmd(instance, cmd);
2442 		return -ENOMEM;
2443 	}
2444 
	if (initial) {
		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
		       sizeof(struct MR_LD_VF_AFFILIATION));
	} else {
2449 		new_affiliation =
2450 			dma_alloc_coherent(&instance->pdev->dev,
2451 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2452 					   &new_affiliation_h, GFP_KERNEL);
2453 		if (!new_affiliation) {
2454 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2455 			       "memory for new affiliation for scsi%d\n",
2456 			       instance->host->host_no);
2457 			megasas_return_cmd(instance, cmd);
2458 			return -ENOMEM;
2459 		}
2460 	}
2461 
2462 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2463 
2464 	dcmd->cmd = MFI_CMD_DCMD;
2465 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2466 	dcmd->sge_count = 1;
2467 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2468 	dcmd->timeout = 0;
2469 	dcmd->pad_0 = 0;
2470 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2471 		sizeof(struct MR_LD_VF_AFFILIATION));
2472 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2473 
2474 	if (initial)
2475 		dcmd->sgl.sge32[0].phys_addr =
2476 			cpu_to_le32(instance->vf_affiliation_h);
2477 	else
2478 		dcmd->sgl.sge32[0].phys_addr =
2479 			cpu_to_le32(new_affiliation_h);
2480 
2481 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2482 		sizeof(struct MR_LD_VF_AFFILIATION));
2483 
2484 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2485 	       "scsi%d\n", instance->host->host_no);
2486 
2487 
2488 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2489 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2490 		       " failed with status 0x%x for scsi%d\n",
2491 		       dcmd->cmd_status, instance->host->host_no);
2492 		retval = 1; /* Do a scan if we couldn't get affiliation */
2493 		goto out;
2494 	}
2495 
2496 	if (!initial) {
2497 		if (!new_affiliation->ldCount) {
2498 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2499 			       "affiliation for passive path for scsi%d\n",
2500 			       instance->host->host_no);
2501 			retval = 1;
2502 			goto out;
2503 		}
2504 		newmap = new_affiliation->map;
2505 		savedmap = instance->vf_affiliation->map;
2506 		thisVf = new_affiliation->thisVf;
2507 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2508 			found = 0;
2509 			for (j = 0; j < instance->vf_affiliation->ldCount;
2510 			     j++) {
2511 				if (newmap->ref.targetId ==
2512 				    savedmap->ref.targetId) {
2513 					found = 1;
2514 					if (newmap->policy[thisVf] !=
2515 					    savedmap->policy[thisVf]) {
2516 						doscan = 1;
2517 						goto out;
2518 					}
2519 				}
2520 				savedmap = (struct MR_LD_VF_MAP *)
2521 					((unsigned char *)savedmap +
2522 					 savedmap->size);
2523 			}
2524 			if (!found && newmap->policy[thisVf] !=
2525 			    MR_LD_ACCESS_HIDDEN) {
2526 				doscan = 1;
2527 				goto out;
2528 			}
2529 			newmap = (struct MR_LD_VF_MAP *)
2530 				((unsigned char *)newmap + newmap->size);
2531 		}
2532 
2533 		newmap = new_affiliation->map;
2534 		savedmap = instance->vf_affiliation->map;
2535 
2536 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2537 			found = 0;
2538 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2539 				if (savedmap->ref.targetId ==
2540 				    newmap->ref.targetId) {
2541 					found = 1;
2542 					if (savedmap->policy[thisVf] !=
2543 					    newmap->policy[thisVf]) {
2544 						doscan = 1;
2545 						goto out;
2546 					}
2547 				}
2548 				newmap = (struct MR_LD_VF_MAP *)
2549 					((unsigned char *)newmap +
2550 					 newmap->size);
2551 			}
2552 			if (!found && savedmap->policy[thisVf] !=
2553 			    MR_LD_ACCESS_HIDDEN) {
2554 				doscan = 1;
2555 				goto out;
2556 			}
2557 			savedmap = (struct MR_LD_VF_MAP *)
2558 				((unsigned char *)savedmap +
2559 				 savedmap->size);
2560 		}
2561 	}
2562 out:
2563 	if (doscan) {
2564 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2565 		       "affiliation for scsi%d\n", instance->host->host_no);
2566 		memcpy(instance->vf_affiliation, new_affiliation,
2567 		       new_affiliation->size);
2568 		retval = 1;
2569 	}
2570 
2571 	if (new_affiliation)
2572 		dma_free_coherent(&instance->pdev->dev,
2573 				    (MAX_LOGICAL_DRIVES + 1) *
2574 				    sizeof(struct MR_LD_VF_AFFILIATION),
2575 				    new_affiliation, new_affiliation_h);
2576 	megasas_return_cmd(instance, cmd);
2577 
2578 	return retval;
2579 }
2580 
2581 /* This function will get the current SR-IOV LD/VF affiliation */
2582 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2583 	int initial)
2584 {
2585 	int retval;
2586 
2587 	if (instance->PlasmaFW111)
2588 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2589 	else
2590 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2591 	return retval;
2592 }
2593 
2594 /* This function will tell FW to start the SR-IOV heartbeat */
2595 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2596 					 int initial)
2597 {
2598 	struct megasas_cmd *cmd;
2599 	struct megasas_dcmd_frame *dcmd;
2600 	int retval = 0;
2601 
2602 	cmd = megasas_get_cmd(instance);
2603 
2604 	if (!cmd) {
2605 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2606 		       "Failed to get cmd for scsi%d\n",
2607 		       instance->host->host_no);
2608 		return -ENOMEM;
2609 	}
2610 
2611 	dcmd = &cmd->frame->dcmd;
2612 
2613 	if (initial) {
2614 		instance->hb_host_mem =
2615 			dma_alloc_coherent(&instance->pdev->dev,
2616 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2617 					   &instance->hb_host_mem_h,
2618 					   GFP_KERNEL);
2619 		if (!instance->hb_host_mem) {
2620 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2621 			       " memory for heartbeat host memory for scsi%d\n",
2622 			       instance->host->host_no);
2623 			retval = -ENOMEM;
2624 			goto out;
2625 		}
2626 	}
2627 
2628 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2629 
2630 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2631 	dcmd->cmd = MFI_CMD_DCMD;
2632 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2633 	dcmd->sge_count = 1;
2634 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2635 	dcmd->timeout = 0;
2636 	dcmd->pad_0 = 0;
2637 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2638 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2639 
2640 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2641 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2642 
2643 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2644 	       instance->host->host_no);
2645 
2646 	if ((instance->adapter_type != MFI_SERIES) &&
2647 	    !instance->mask_interrupts)
2648 		retval = megasas_issue_blocked_cmd(instance, cmd,
2649 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2650 	else
2651 		retval = megasas_issue_polled(instance, cmd);
2652 
2653 	if (retval) {
2654 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2655 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2656 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2657 			"timed out" : "failed", instance->host->host_no);
2658 		retval = 1;
2659 	}
2660 
2661 out:
2662 	megasas_return_cmd(instance, cmd);
2663 
2664 	return retval;
2665 }
2666 
2667 /* Handler for SR-IOV heartbeat */
2668 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2669 {
2670 	struct megasas_instance *instance =
2671 		from_timer(instance, t, sriov_heartbeat_timer);
2672 
2673 	if (instance->hb_host_mem->HB.fwCounter !=
2674 	    instance->hb_host_mem->HB.driverCounter) {
2675 		instance->hb_host_mem->HB.driverCounter =
2676 			instance->hb_host_mem->HB.fwCounter;
2677 		mod_timer(&instance->sriov_heartbeat_timer,
2678 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2679 	} else {
2680 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2681 		       "completed for scsi%d\n", instance->host->host_no);
2682 		schedule_work(&instance->work_init);
2683 	}
2684 }
2685 
2686 /**
2687  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2688  * @instance:				Adapter soft state
2689  *
2690  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2691  * complete all its outstanding commands. Returns error if one or more IOs
2692  * are pending after this time period. It also marks the controller dead.
2693  */
2694 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2695 {
2696 	int i, sl, outstanding;
2697 	u32 reset_index;
2698 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2699 	unsigned long flags;
2700 	struct list_head clist_local;
2701 	struct megasas_cmd *reset_cmd;
2702 	u32 fw_state;
2703 
2704 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2705 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2706 		__func__, __LINE__);
2707 		return FAILED;
2708 	}
2709 
2710 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2711 
2712 		INIT_LIST_HEAD(&clist_local);
2713 		spin_lock_irqsave(&instance->hba_lock, flags);
2714 		list_splice_init(&instance->internal_reset_pending_q,
2715 				&clist_local);
2716 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2717 
2718 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2719 		for (i = 0; i < wait_time; i++) {
2720 			msleep(1000);
2721 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2722 				break;
2723 		}
2724 
2725 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2726 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2727 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2728 			return FAILED;
2729 		}
2730 
2731 		reset_index = 0;
2732 		while (!list_empty(&clist_local)) {
2733 			reset_cmd = list_entry((&clist_local)->next,
2734 						struct megasas_cmd, list);
2735 			list_del_init(&reset_cmd->list);
2736 			if (reset_cmd->scmd) {
2737 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2738 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2739 					reset_index, reset_cmd,
2740 					reset_cmd->scmd->cmnd[0]);
2741 
2742 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2743 				megasas_return_cmd(instance, reset_cmd);
2744 			} else if (reset_cmd->sync_cmd) {
				dev_notice(&instance->pdev->dev, "%p synch cmds "
						"reset queue\n",
						reset_cmd);
2748 
2749 				reset_cmd->cmd_status_drv = DCMD_INIT;
2750 				instance->instancet->fire_cmd(instance,
2751 						reset_cmd->frame_phys_addr,
2752 						0, instance->reg_set);
2753 			} else {
				dev_notice(&instance->pdev->dev, "%p unexpected "
					"cmds lst\n",
					reset_cmd);
2757 			}
2758 			reset_index++;
2759 		}
2760 
2761 		return SUCCESS;
2762 	}
2763 
2764 	for (i = 0; i < resetwaittime; i++) {
2765 		outstanding = atomic_read(&instance->fw_outstanding);
2766 
2767 		if (!outstanding)
2768 			break;
2769 
2770 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
			       "commands to complete\n", i, outstanding);
			/*
			 * Call the cmd completion routine. Cmds are
			 * completed directly without depending on the isr.
			 */
2777 			megasas_complete_cmd_dpc((unsigned long)instance);
2778 		}
2779 
2780 		msleep(1000);
2781 	}
2782 
2783 	i = 0;
2784 	outstanding = atomic_read(&instance->fw_outstanding);
2785 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2786 
2787 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2788 		goto no_outstanding;
2789 
2790 	if (instance->disableOnlineCtrlReset)
2791 		goto kill_hba_and_failed;
2792 	do {
2793 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2794 			dev_info(&instance->pdev->dev,
2795 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2796 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2797 			if (i == 3)
2798 				goto kill_hba_and_failed;
2799 			megasas_do_ocr(instance);
2800 
2801 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2802 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2803 				__func__, __LINE__);
2804 				return FAILED;
2805 			}
2806 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2807 				__func__, __LINE__);
2808 
2809 			for (sl = 0; sl < 10; sl++)
2810 				msleep(500);
2811 
2812 			outstanding = atomic_read(&instance->fw_outstanding);
2813 
2814 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2815 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2816 				goto no_outstanding;
2817 		}
2818 		i++;
2819 	} while (i <= 3);
2820 
2821 no_outstanding:
2822 
2823 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2824 		__func__, __LINE__);
2825 	return SUCCESS;
2826 
2827 kill_hba_and_failed:
2828 
2829 	/* Reset not supported, kill adapter */
2830 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2831 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2832 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2833 		atomic_read(&instance->fw_outstanding));
2834 	megasas_dump_pending_frames(instance);
2835 	megaraid_sas_kill_hba(instance);
2836 
2837 	return FAILED;
2838 }
2839 
2840 /**
2841  * megasas_generic_reset -	Generic reset routine
2842  * @scmd:			Mid-layer SCSI command
2843  *
2844  * This routine implements a generic reset handler for device, bus and host
2845  * reset requests. Device, bus and host specific reset handlers can use this
2846  * function after they do their specific tasks.
2847  */
2848 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2849 {
2850 	int ret_val;
2851 	struct megasas_instance *instance;
2852 
2853 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2854 
2855 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2856 		 scmd->cmnd[0], scmd->retries);
2857 
2858 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2859 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2860 		return FAILED;
2861 	}
2862 
2863 	ret_val = megasas_wait_for_outstanding(instance);
2864 	if (ret_val == SUCCESS)
2865 		dev_notice(&instance->pdev->dev, "reset successful\n");
2866 	else
2867 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2868 
2869 	return ret_val;
2870 }
2871 
2872 /**
2873  * megasas_reset_timer - quiesce the adapter if required
2874  * @scmd:		scsi cmnd
2875  *
2876  * Sets the FW busy flag and reduces the host->can_queue if the
2877  * cmd has not been completed within the timeout period.
2878  */
2879 static enum
2880 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2881 {
2882 	struct megasas_instance *instance;
2883 	unsigned long flags;
2884 
2885 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2886 				(scmd_timeout * 2) * HZ)) {
2887 		return BLK_EH_DONE;
2888 	}
2889 
2890 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2891 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2892 		/* FW is busy, throttle IO */
2893 		spin_lock_irqsave(instance->host->host_lock, flags);
2894 
2895 		instance->host->can_queue = instance->throttlequeuedepth;
2896 		instance->last_time = jiffies;
2897 		instance->flag |= MEGASAS_FW_BUSY;
2898 
2899 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2900 	}
2901 	return BLK_EH_RESET_TIMER;
2902 }
2903 
2904 /**
2905  * megasas_dump -	This function will print hexdump of provided buffer.
2906  * @buf:		Buffer to be dumped
2907  * @sz:		Size in bytes
2908  * @format:		Different formats of dumping e.g. format=n will
2909  *			cause only 'n' 32 bit words to be dumped in a single
2910  *			line.
2911  */
2912 inline void
2913 megasas_dump(void *buf, int sz, int format)
2914 {
2915 	int i;
2916 	__le32 *buf_loc = (__le32 *)buf;
2917 
2918 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2919 		if ((i % format) == 0) {
2920 			if (i != 0)
2921 				printk(KERN_CONT "\n");
2922 			printk(KERN_CONT "%08x: ", (i * 4));
2923 		}
2924 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2925 	}
2926 	printk(KERN_CONT "\n");
2927 }
2928 
2929 /**
2930  * megasas_dump_reg_set -	This function will print hexdump of register set
2931  * @reg_set:	Register set to be dumped
2932  */
2933 inline void
2934 megasas_dump_reg_set(void __iomem *reg_set)
2935 {
2936 	unsigned int i, sz = 256;
2937 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2938 
2939 	for (i = 0; i < (sz / sizeof(u32)); i++)
2940 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2941 }
2942 
2943 /**
2944  * megasas_dump_fusion_io -	This function will print key details
2945  *				of SCSI IO
2946  * @scmd:			SCSI command pointer of SCSI IO
2947  */
2948 void
2949 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2950 {
2951 	struct megasas_cmd_fusion *cmd;
2952 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2953 	struct megasas_instance *instance;
2954 
2955 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2956 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2957 
2958 	scmd_printk(KERN_INFO, scmd,
2959 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2960 		    scmd, scmd->retries, scmd->allowed);
2961 	scsi_print_command(scmd);
2962 
2963 	if (cmd) {
2964 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2965 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2966 		scmd_printk(KERN_INFO, scmd,
2967 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
2968 			    req_desc->SCSIIO.RequestFlags,
2969 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2970 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2971 
2972 		printk(KERN_INFO "IO request frame:\n");
2973 		megasas_dump(cmd->io_request,
2974 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2975 		printk(KERN_INFO "Chain frame:\n");
2976 		megasas_dump(cmd->sg_frame,
2977 			     instance->max_chain_frame_sz, 8);
2978 	}
2979 
2980 }
2981 
2982 /*
2983  * megasas_dump_sys_regs - This function will dump system registers through
2984  *			    sysfs.
2985  * @reg_set:		    Pointer to System register set.
2986  * @buf:		    Buffer to which output is to be written.
2987  * @return:		    Number of bytes written to buffer.
2988  */
2989 static inline ssize_t
2990 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2991 {
2992 	unsigned int i, sz = 256;
2993 	int bytes_wrote = 0;
2994 	char *loc = (char *)buf;
2995 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2996 
2997 	for (i = 0; i < sz / sizeof(u32); i++) {
2998 		bytes_wrote += scnprintf(loc + bytes_wrote,
2999 					 PAGE_SIZE - bytes_wrote,
3000 					 "%08x: %08x\n", (i * 4),
3001 					 readl(&reg[i]));
3002 	}
3003 	return bytes_wrote;
3004 }
3005 
3006 /**
3007  * megasas_reset_bus_host -	Bus & host reset handler entry point
3008  * @scmd:			Mid-layer SCSI command
3009  */
3010 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3011 {
3012 	int ret;
3013 	struct megasas_instance *instance;
3014 
3015 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3016 
3017 	scmd_printk(KERN_INFO, scmd,
3018 		"OCR is requested due to IO timeout!!\n");
3019 
3020 	scmd_printk(KERN_INFO, scmd,
3021 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3022 		scmd->device->host->shost_state,
3023 		scsi_host_busy(scmd->device->host),
3024 		atomic_read(&instance->fw_outstanding));
3025 	/*
3026 	 * First wait for all commands to complete
3027 	 */
3028 	if (instance->adapter_type == MFI_SERIES) {
3029 		ret = megasas_generic_reset(scmd);
3030 	} else {
3031 		megasas_dump_fusion_io(scmd);
3032 		ret = megasas_reset_fusion(scmd->device->host,
3033 				SCSIIO_TIMEOUT_OCR);
3034 	}
3035 
3036 	return ret;
3037 }
3038 
3039 /**
3040  * megasas_task_abort - Issues task abort request to firmware
3041  *			(supported only for fusion adapters)
3042  * @scmd:		SCSI command pointer
3043  */
3044 static int megasas_task_abort(struct scsi_cmnd *scmd)
3045 {
3046 	int ret;
3047 	struct megasas_instance *instance;
3048 
3049 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3050 
3051 	if (instance->adapter_type != MFI_SERIES)
3052 		ret = megasas_task_abort_fusion(scmd);
3053 	else {
3054 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3055 		ret = FAILED;
3056 	}
3057 
3058 	return ret;
3059 }
3060 
/**
 * megasas_reset_target -   Issues target reset request to firmware
 *                          (supported only for fusion adapters)
 * @scmd:                   SCSI command pointer
 */
3066 static int megasas_reset_target(struct scsi_cmnd *scmd)
3067 {
3068 	int ret;
3069 	struct megasas_instance *instance;
3070 
3071 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3072 
3073 	if (instance->adapter_type != MFI_SERIES)
3074 		ret = megasas_reset_target_fusion(scmd);
3075 	else {
3076 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3077 		ret = FAILED;
3078 	}
3079 
3080 	return ret;
3081 }
3082 
3083 /**
3084  * megasas_bios_param - Returns disk geometry for a disk
3085  * @sdev:		device handle
3086  * @bdev:		block device
3087  * @capacity:		drive capacity
3088  * @geom:		geometry parameters
3089  */
3090 static int
3091 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3092 		 sector_t capacity, int geom[])
3093 {
3094 	int heads;
3095 	int sectors;
3096 	sector_t cylinders;
3097 	unsigned long tmp;
3098 
3099 	/* Default heads (64) & sectors (32) */
3100 	heads = 64;
3101 	sectors = 32;
3102 
3103 	tmp = heads * sectors;
3104 	cylinders = capacity;
3105 
3106 	sector_div(cylinders, tmp);
3107 
	/*
	 * Handle extended translation size for logical drives > 1 GB
	 */
3111 
3112 	if (capacity >= 0x200000) {
3113 		heads = 255;
3114 		sectors = 63;
3115 		tmp = heads*sectors;
3116 		cylinders = capacity;
3117 		sector_div(cylinders, tmp);
3118 	}
3119 
3120 	geom[0] = heads;
3121 	geom[1] = sectors;
3122 	geom[2] = cylinders;
3123 
3124 	return 0;
3125 }
3126 
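/**
 * megasas_map_queues -	Map blk-mq hardware queues to MSI-X vectors
 * @shost:		SCSI host
 *
 * Uses PCI IRQ affinity to map the hardware queues when more than one
 * hardware queue is in use; a single queue needs no mapping.
 */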
3127 static int megasas_map_queues(struct Scsi_Host *shost)
3128 {
3129 	struct megasas_instance *instance;
3130 
3131 	instance = (struct megasas_instance *)shost->hostdata;
3132 
3133 	if (shost->nr_hw_queues == 1)
3134 		return 0;
3135 
3136 	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
3137 			instance->pdev, instance->low_latency_index_start);
3138 }
3139 
3140 static void megasas_aen_polling(struct work_struct *work);
3141 
3142 /**
3143  * megasas_service_aen -	Processes an event notification
3144  * @instance:			Adapter soft state
3145  * @cmd:			AEN command completed by the ISR
3146  *
3147  * For AEN, driver sends a command down to FW that is held by the FW till an
3148  * event occurs. When an event of interest occurs, FW completes the command
3149  * that it was previously holding.
3150  *
 * This routine sends a SIGIO signal to processes that have registered with
 * the driver for AEN.
3153  */
3154 static void
3155 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3156 {
3157 	unsigned long flags;
3158 
3159 	/*
3160 	 * Don't signal app if it is just an aborted previously registered aen
3161 	 */
3162 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3163 		spin_lock_irqsave(&poll_aen_lock, flags);
3164 		megasas_poll_wait_aen = 1;
3165 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3166 		wake_up(&megasas_poll_wait);
3167 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3168 	}
3169 	else
3170 		cmd->abort_aen = 0;
3171 
3172 	instance->aen_cmd = NULL;
3173 
3174 	megasas_return_cmd(instance, cmd);
3175 
3176 	if ((instance->unload == 0) &&
3177 		((instance->issuepend_done == 1))) {
3178 		struct megasas_aen_event *ev;
3179 
3180 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3181 		if (!ev) {
3182 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3183 		} else {
3184 			ev->instance = instance;
3185 			instance->ev = ev;
3186 			INIT_DELAYED_WORK(&ev->hotplug_work,
3187 					  megasas_aen_polling);
3188 			schedule_delayed_work(&ev->hotplug_work, 0);
3189 		}
3190 	}
3191 }
3192 
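/*
 * Sysfs interface for the FW crash dump: fw_crash_buffer_store() selects the
 * byte offset into the crash dump, fw_crash_buffer_show() copies up to one
 * DMA chunk starting at that offset, fw_crash_buffer_size_show() reports the
 * dump size in PAGE_SIZE units, and fw_crash_state allows the application to
 * drive the dump life cycle (AVAILABLE/COPYING/COPIED/COPY_ERROR).
 */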
3193 static ssize_t
3194 fw_crash_buffer_store(struct device *cdev,
3195 	struct device_attribute *attr, const char *buf, size_t count)
3196 {
3197 	struct Scsi_Host *shost = class_to_shost(cdev);
3198 	struct megasas_instance *instance =
3199 		(struct megasas_instance *) shost->hostdata;
3200 	int val = 0;
3201 	unsigned long flags;
3202 
3203 	if (kstrtoint(buf, 0, &val) != 0)
3204 		return -EINVAL;
3205 
3206 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3207 	instance->fw_crash_buffer_offset = val;
3208 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3209 	return strlen(buf);
3210 }
3211 
3212 static ssize_t
3213 fw_crash_buffer_show(struct device *cdev,
3214 	struct device_attribute *attr, char *buf)
3215 {
3216 	struct Scsi_Host *shost = class_to_shost(cdev);
3217 	struct megasas_instance *instance =
3218 		(struct megasas_instance *) shost->hostdata;
3219 	u32 size;
3220 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3221 	unsigned long chunk_left_bytes;
3222 	unsigned long src_addr;
3223 	unsigned long flags;
3224 	u32 buff_offset;
3225 
3226 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3227 	buff_offset = instance->fw_crash_buffer_offset;
3228 	if (!instance->crash_dump_buf &&
3229 		!((instance->fw_crash_state == AVAILABLE) ||
3230 		(instance->fw_crash_state == COPYING))) {
3231 		dev_err(&instance->pdev->dev,
3232 			"Firmware crash dump is not available\n");
3233 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3234 		return -EINVAL;
3235 	}
3236 
3237 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3238 		dev_err(&instance->pdev->dev,
3239 			"Firmware crash dump offset is out of range\n");
3240 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3241 		return 0;
3242 	}
3243 
3244 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3245 	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3246 	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3247 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3248 
3249 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3250 		(buff_offset % dmachunk);
3251 	memcpy(buf, (void *)src_addr, size);
3252 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3253 
3254 	return size;
3255 }
3256 
3257 static ssize_t
3258 fw_crash_buffer_size_show(struct device *cdev,
3259 	struct device_attribute *attr, char *buf)
3260 {
3261 	struct Scsi_Host *shost = class_to_shost(cdev);
3262 	struct megasas_instance *instance =
3263 		(struct megasas_instance *) shost->hostdata;
3264 
3265 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3266 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3267 }
3268 
3269 static ssize_t
3270 fw_crash_state_store(struct device *cdev,
3271 	struct device_attribute *attr, const char *buf, size_t count)
3272 {
3273 	struct Scsi_Host *shost = class_to_shost(cdev);
3274 	struct megasas_instance *instance =
3275 		(struct megasas_instance *) shost->hostdata;
3276 	int val = 0;
3277 	unsigned long flags;
3278 
3279 	if (kstrtoint(buf, 0, &val) != 0)
3280 		return -EINVAL;
3281 
3282 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3283 		dev_err(&instance->pdev->dev, "application updates invalid "
3284 			"firmware crash state\n");
3285 		return -EINVAL;
3286 	}
3287 
3288 	instance->fw_crash_state = val;
3289 
3290 	if ((val == COPIED) || (val == COPY_ERROR)) {
3291 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3292 		megasas_free_host_crash_buffer(instance);
3293 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3294 		if (val == COPY_ERROR)
3295 			dev_info(&instance->pdev->dev, "application failed to "
3296 				"copy Firmware crash dump\n");
3297 		else
3298 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3299 				"copied successfully\n");
3300 	}
3301 	return strlen(buf);
3302 }
3303 
3304 static ssize_t
3305 fw_crash_state_show(struct device *cdev,
3306 	struct device_attribute *attr, char *buf)
3307 {
3308 	struct Scsi_Host *shost = class_to_shost(cdev);
3309 	struct megasas_instance *instance =
3310 		(struct megasas_instance *) shost->hostdata;
3311 
3312 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3313 }
3314 
3315 static ssize_t
3316 page_size_show(struct device *cdev,
3317 	struct device_attribute *attr, char *buf)
3318 {
3319 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3320 }
3321 
3322 static ssize_t
3323 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3324 	char *buf)
3325 {
3326 	struct Scsi_Host *shost = class_to_shost(cdev);
3327 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3328 
3329 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3330 }
3331 
3332 static ssize_t
3333 fw_cmds_outstanding_show(struct device *cdev,
3334 				 struct device_attribute *attr, char *buf)
3335 {
3336 	struct Scsi_Host *shost = class_to_shost(cdev);
3337 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3338 
3339 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3340 }
3341 
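/*
 * enable_sdev_max_qd: when set, device queue depth is raised to the host
 * can_queue instead of the FW assisted per-interface default. Toggling it
 * re-applies the queue depth to every attached scsi device.
 */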
3342 static ssize_t
3343 enable_sdev_max_qd_show(struct device *cdev,
3344 	struct device_attribute *attr, char *buf)
3345 {
3346 	struct Scsi_Host *shost = class_to_shost(cdev);
3347 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3348 
3349 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3350 }
3351 
3352 static ssize_t
3353 enable_sdev_max_qd_store(struct device *cdev,
3354 	struct device_attribute *attr, const char *buf, size_t count)
3355 {
3356 	struct Scsi_Host *shost = class_to_shost(cdev);
3357 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3358 	u32 val = 0;
3359 	bool is_target_prop;
3360 	int ret_target_prop = DCMD_FAILED;
3361 	struct scsi_device *sdev;
3362 
3363 	if (kstrtou32(buf, 0, &val) != 0) {
3364 		pr_err("megasas: could not set enable_sdev_max_qd\n");
3365 		return -EINVAL;
3366 	}
3367 
3368 	mutex_lock(&instance->reset_mutex);
3369 	if (val)
3370 		instance->enable_sdev_max_qd = true;
3371 	else
3372 		instance->enable_sdev_max_qd = false;
3373 
3374 	shost_for_each_device(sdev, shost) {
3375 		ret_target_prop = megasas_get_target_prop(instance, sdev);
3376 		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3377 		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3378 	}
3379 	mutex_unlock(&instance->reset_mutex);
3380 
3381 	return strlen(buf);
3382 }
3383 
3384 static ssize_t
3385 dump_system_regs_show(struct device *cdev,
3386 			       struct device_attribute *attr, char *buf)
3387 {
3388 	struct Scsi_Host *shost = class_to_shost(cdev);
3389 	struct megasas_instance *instance =
3390 			(struct megasas_instance *)shost->hostdata;
3391 
3392 	return megasas_dump_sys_regs(instance->reg_set, buf);
3393 }
3394 
3395 static ssize_t
3396 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3397 			  char *buf)
3398 {
3399 	struct Scsi_Host *shost = class_to_shost(cdev);
3400 	struct megasas_instance *instance =
3401 			(struct megasas_instance *)shost->hostdata;
3402 
3403 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3404 			(unsigned long)instance->map_id);
3405 }
3406 
3407 static DEVICE_ATTR_RW(fw_crash_buffer);
3408 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3409 static DEVICE_ATTR_RW(fw_crash_state);
3410 static DEVICE_ATTR_RO(page_size);
3411 static DEVICE_ATTR_RO(ldio_outstanding);
3412 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3413 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3414 static DEVICE_ATTR_RO(dump_system_regs);
3415 static DEVICE_ATTR_RO(raid_map_id);
3416 
3417 static struct device_attribute *megaraid_host_attrs[] = {
3418 	&dev_attr_fw_crash_buffer_size,
3419 	&dev_attr_fw_crash_buffer,
3420 	&dev_attr_fw_crash_state,
3421 	&dev_attr_page_size,
3422 	&dev_attr_ldio_outstanding,
3423 	&dev_attr_fw_cmds_outstanding,
3424 	&dev_attr_enable_sdev_max_qd,
3425 	&dev_attr_dump_system_regs,
3426 	&dev_attr_raid_map_id,
3427 	NULL,
3428 };
3429 
3430 /*
3431  * Scsi host template for megaraid_sas driver
3432  */
3433 static struct scsi_host_template megasas_template = {
3434 
3435 	.module = THIS_MODULE,
3436 	.name = "Avago SAS based MegaRAID driver",
3437 	.proc_name = "megaraid_sas",
3438 	.slave_configure = megasas_slave_configure,
3439 	.slave_alloc = megasas_slave_alloc,
3440 	.slave_destroy = megasas_slave_destroy,
3441 	.queuecommand = megasas_queue_command,
3442 	.eh_target_reset_handler = megasas_reset_target,
3443 	.eh_abort_handler = megasas_task_abort,
3444 	.eh_host_reset_handler = megasas_reset_bus_host,
3445 	.eh_timed_out = megasas_reset_timer,
3446 	.shost_attrs = megaraid_host_attrs,
3447 	.bios_param = megasas_bios_param,
3448 	.map_queues = megasas_map_queues,
3449 	.change_queue_depth = scsi_change_queue_depth,
3450 	.max_segment_size = 0xffffffff,
3451 };
3452 
3453 /**
3454  * megasas_complete_int_cmd -	Completes an internal command
3455  * @instance:			Adapter soft state
3456  * @cmd:			Command to be completed
3457  *
3458  * The megasas_issue_blocked_cmd() function waits for a command to complete
3459  * after it issues a command. This function wakes up that waiting routine by
3460  * calling wake_up() on the wait queue.
3461  */
3462 static void
3463 megasas_complete_int_cmd(struct megasas_instance *instance,
3464 			 struct megasas_cmd *cmd)
3465 {
3466 	if (cmd->cmd_status_drv == DCMD_INIT)
3467 		cmd->cmd_status_drv =
3468 		(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3469 		DCMD_SUCCESS : DCMD_FAILED;
3470 
3471 	wake_up(&instance->int_cmd_wait_q);
3472 }
3473 
3474 /**
3475  * megasas_complete_abort -	Completes aborting a command
3476  * @instance:			Adapter soft state
3477  * @cmd:			Cmd that was issued to abort another cmd
3478  *
3479  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3480  * after it issues an abort on a previously issued command. This function
3481  * wakes up all functions waiting on the same wait queue.
3482  */
3483 static void
3484 megasas_complete_abort(struct megasas_instance *instance,
3485 		       struct megasas_cmd *cmd)
3486 {
3487 	if (cmd->sync_cmd) {
3488 		cmd->sync_cmd = 0;
3489 		cmd->cmd_status_drv = DCMD_SUCCESS;
3490 		wake_up(&instance->abort_cmd_wait_q);
3491 	}
3492 }
3493 
3494 /**
3495  * megasas_complete_cmd -	Completes a command
3496  * @instance:			Adapter soft state
3497  * @cmd:			Command to be completed
3498  * @alt_status:			If non-zero, use this value as status to
3499  *				SCSI mid-layer instead of the value returned
3500  *				by the FW. This should be used if caller wants
3501  *				an alternate status (as in the case of aborted
3502  *				commands)
3503  */
3504 void
3505 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3506 		     u8 alt_status)
3507 {
3508 	int exception = 0;
3509 	struct megasas_header *hdr = &cmd->frame->hdr;
3510 	unsigned long flags;
3511 	struct fusion_context *fusion = instance->ctrl_context;
3512 	u32 opcode, status;
3513 
3514 	/* flag for the retry reset */
3515 	cmd->retry_for_fw_reset = 0;
3516 
3517 	if (cmd->scmd)
3518 		cmd->scmd->SCp.ptr = NULL;
3519 
3520 	switch (hdr->cmd) {
3521 	case MFI_CMD_INVALID:
		/*
		 * Some older 1068 controller FW may keep a pended
		 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		 * when booting the kdump kernel.  Ignore this command to
		 * prevent a kernel panic on shutdown of the kdump kernel.
		 */
3526 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3527 		       "completed\n");
3528 		dev_warn(&instance->pdev->dev, "If you have a controller "
3529 		       "other than PERC5, please upgrade your firmware\n");
3530 		break;
3531 	case MFI_CMD_PD_SCSI_IO:
3532 	case MFI_CMD_LD_SCSI_IO:
3533 
3534 		/*
3535 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3536 		 * issued either through an IO path or an IOCTL path. If it
3537 		 * was via IOCTL, we will send it to internal completion.
3538 		 */
3539 		if (cmd->sync_cmd) {
3540 			cmd->sync_cmd = 0;
3541 			megasas_complete_int_cmd(instance, cmd);
3542 			break;
3543 		}
3544 		fallthrough;
3545 
3546 	case MFI_CMD_LD_READ:
3547 	case MFI_CMD_LD_WRITE:
3548 
3549 		if (alt_status) {
3550 			cmd->scmd->result = alt_status << 16;
3551 			exception = 1;
3552 		}
3553 
3554 		if (exception) {
3555 
3556 			atomic_dec(&instance->fw_outstanding);
3557 
3558 			scsi_dma_unmap(cmd->scmd);
3559 			cmd->scmd->scsi_done(cmd->scmd);
3560 			megasas_return_cmd(instance, cmd);
3561 
3562 			break;
3563 		}
3564 
3565 		switch (hdr->cmd_status) {
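		/* Translate the MFI frame status into a SCSI midlayer result */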
3566 
3567 		case MFI_STAT_OK:
3568 			cmd->scmd->result = DID_OK << 16;
3569 			break;
3570 
3571 		case MFI_STAT_SCSI_IO_FAILED:
3572 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3573 			cmd->scmd->result =
3574 			    (DID_ERROR << 16) | hdr->scsi_status;
3575 			break;
3576 
3577 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3578 
3579 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3580 
3581 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3582 				memset(cmd->scmd->sense_buffer, 0,
3583 				       SCSI_SENSE_BUFFERSIZE);
3584 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3585 				       hdr->sense_len);
3586 
3587 				cmd->scmd->result |= DRIVER_SENSE << 24;
3588 			}
3589 
3590 			break;
3591 
3592 		case MFI_STAT_LD_OFFLINE:
3593 		case MFI_STAT_DEVICE_NOT_FOUND:
3594 			cmd->scmd->result = DID_BAD_TARGET << 16;
3595 			break;
3596 
3597 		default:
3598 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3599 			       hdr->cmd_status);
3600 			cmd->scmd->result = DID_ERROR << 16;
3601 			break;
3602 		}
3603 
3604 		atomic_dec(&instance->fw_outstanding);
3605 
3606 		scsi_dma_unmap(cmd->scmd);
3607 		cmd->scmd->scsi_done(cmd->scmd);
3608 		megasas_return_cmd(instance, cmd);
3609 
3610 		break;
3611 
3612 	case MFI_CMD_SMP:
3613 	case MFI_CMD_STP:
3614 	case MFI_CMD_NVME:
3615 	case MFI_CMD_TOOLBOX:
3616 		megasas_complete_int_cmd(instance, cmd);
3617 		break;
3618 
3619 	case MFI_CMD_DCMD:
3620 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3621 		/* Check for LD map update */
3622 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3623 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
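			/*
			 * Completion of the async LD map update command:
			 * validate the new RAID map, re-enable fast path IO
			 * if the new map is usable, and re-arm the map update
			 * via megasas_sync_map_info().
			 */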
3624 			fusion->fast_path_io = 0;
3625 			spin_lock_irqsave(instance->host->host_lock, flags);
3626 			status = cmd->frame->hdr.cmd_status;
3627 			instance->map_update_cmd = NULL;
3628 			if (status != MFI_STAT_OK) {
3629 				if (status != MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3631 					       cmd->frame->hdr.cmd_status);
3632 				else {
3633 					megasas_return_cmd(instance, cmd);
3634 					spin_unlock_irqrestore(
3635 						instance->host->host_lock,
3636 						flags);
3637 					break;
3638 				}
3639 			}
3640 
3641 			megasas_return_cmd(instance, cmd);
3642 
3643 			/*
3644 			 * Set fast path IO to ZERO.
3645 			 * Validate Map will set proper value.
3646 			 * Meanwhile all IOs will go as LD IO.
3647 			 */
3648 			if (status == MFI_STAT_OK &&
3649 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3650 				instance->map_id++;
3651 				fusion->fast_path_io = 1;
3652 			} else {
3653 				fusion->fast_path_io = 0;
3654 			}
3655 
3656 			megasas_sync_map_info(instance);
3657 			spin_unlock_irqrestore(instance->host->host_lock,
3658 					       flags);
3659 			break;
3660 		}
3661 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3662 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3663 			spin_lock_irqsave(&poll_aen_lock, flags);
3664 			megasas_poll_wait_aen = 0;
3665 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3666 		}
3667 
3668 		/* FW has an updated PD sequence */
3669 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3670 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3671 
3672 			spin_lock_irqsave(instance->host->host_lock, flags);
3673 			status = cmd->frame->hdr.cmd_status;
3674 			instance->jbod_seq_cmd = NULL;
3675 			megasas_return_cmd(instance, cmd);
3676 
3677 			if (status == MFI_STAT_OK) {
3678 				instance->pd_seq_map_id++;
3679 				/* Re-register a pd sync seq num cmd */
3680 				if (megasas_sync_pd_seq_num(instance, true))
3681 					instance->use_seqnum_jbod_fp = false;
3682 			} else
3683 				instance->use_seqnum_jbod_fp = false;
3684 
3685 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3686 			break;
3687 		}
3688 
3689 		/*
3690 		 * See if got an event notification
3691 		 */
3692 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3693 			megasas_service_aen(instance, cmd);
3694 		else
3695 			megasas_complete_int_cmd(instance, cmd);
3696 
3697 		break;
3698 
3699 	case MFI_CMD_ABORT:
3700 		/*
3701 		 * Cmd issued to abort another cmd returned
3702 		 */
3703 		megasas_complete_abort(instance, cmd);
3704 		break;
3705 
3706 	default:
3707 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3708 		       hdr->cmd);
3709 		megasas_complete_int_cmd(instance, cmd);
3710 		break;
3711 	}
3712 }
3713 
3714 /**
3715  * megasas_issue_pending_cmds_again -	issue all pending cmds
3716  *					in FW again because of the fw reset
3717  * @instance:				Adapter soft state
3718  */
3719 static inline void
3720 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3721 {
3722 	struct megasas_cmd *cmd;
3723 	struct list_head clist_local;
3724 	union megasas_evt_class_locale class_locale;
3725 	unsigned long flags;
3726 	u32 seq_num;
3727 
3728 	INIT_LIST_HEAD(&clist_local);
3729 	spin_lock_irqsave(&instance->hba_lock, flags);
3730 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3731 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3732 
3733 	while (!list_empty(&clist_local)) {
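		/*
		 * Pop each command that was outstanding when the internal
		 * reset began and re-fire it to the FW. A command retried
		 * three times is treated as fatal and the HBA is killed.
		 */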
3734 		cmd = list_entry((&clist_local)->next,
3735 					struct megasas_cmd, list);
3736 		list_del_init(&cmd->list);
3737 
3738 		if (cmd->sync_cmd || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
				"detected to be pending during HBA reset\n",
					cmd, cmd->scmd, cmd->sync_cmd);
3742 
3743 			cmd->retry_for_fw_reset++;
3744 
3745 			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
					"was tried multiple times during reset. "
					"Shutting down the HBA\n",
					cmd, cmd->scmd, cmd->sync_cmd);
3750 				instance->instancet->disable_intr(instance);
3751 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3752 				megaraid_sas_kill_hba(instance);
3753 				return;
3754 			}
3755 		}
3756 
3757 		if (cmd->sync_cmd == 1) {
3758 			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev, "unexpected "
					"cmd attached to internal command!\n");
			}
			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
						"on the internal reset queue, "
						"issue it again.\n", cmd);
3765 			cmd->cmd_status_drv = DCMD_INIT;
3766 			instance->instancet->fire_cmd(instance,
3767 							cmd->frame_phys_addr,
3768 							0, instance->reg_set);
3769 		} else if (cmd->scmd) {
			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
			"detected on the internal queue, issue again.\n",
			cmd, cmd->scmd->cmnd[0]);
3773 
3774 			atomic_inc(&instance->fw_outstanding);
3775 			instance->instancet->fire_cmd(instance,
3776 					cmd->frame_phys_addr,
3777 					cmd->frame_count-1, instance->reg_set);
3778 		} else {
			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
				"internal reset defer list during re-issue\n",
				cmd);
3782 		}
3783 	}
3784 
3785 	if (instance->aen_cmd) {
		dev_notice(&instance->pdev->dev, "returning the outstanding aen_cmd before re-registering AEN\n");
3787 		megasas_return_cmd(instance, instance->aen_cmd);
3788 
3789 		instance->aen_cmd = NULL;
3790 	}
3791 
3792 	/*
3793 	 * Initiate AEN (Asynchronous Event Notification)
3794 	 */
3795 	seq_num = instance->last_seq_num;
3796 	class_locale.members.reserved = 0;
3797 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3798 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3799 
3800 	megasas_register_aen(instance, seq_num, class_locale.word);
3801 }
3802 
3803 /*
3804  * Move the internal reset pending commands to a deferred queue.
3805  *
3806  * We move the commands pending at internal reset time to a
3807  * pending queue. This queue would be flushed after successful
3808  * completion of the internal reset sequence. if the internal reset
3809  * did not complete in time, the kernel reset handler would flush
3810  * these commands.
3811  */
3812 static void
3813 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3814 {
3815 	struct megasas_cmd *cmd;
3816 	int i;
3817 	u16 max_cmd = instance->max_fw_cmds;
3818 	u32 defer_index;
3819 	unsigned long flags;
3820 
3821 	defer_index = 0;
3822 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3823 	for (i = 0; i < max_cmd; i++) {
3824 		cmd = instance->cmd_list[i];
3825 		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
					"onto the defer queue as internal\n",
				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3829 
3830 			if (!list_empty(&cmd->list)) {
				dev_notice(&instance->pdev->dev, "ERROR while"
					" moving this cmd:%p, %d %p, it was "
					"discovered on some list?\n",
					cmd, cmd->sync_cmd, cmd->scmd);
3835 
3836 				list_del_init(&cmd->list);
3837 			}
3838 			defer_index++;
3839 			list_add_tail(&cmd->list,
3840 				&instance->internal_reset_pending_q);
3841 		}
3842 	}
3843 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3844 }
3845 
3846 
3847 static void
3848 process_fw_state_change_wq(struct work_struct *work)
3849 {
3850 	struct megasas_instance *instance =
3851 		container_of(work, struct megasas_instance, work_init);
3852 	u32 wait;
3853 	unsigned long flags;
3854 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, unexpected recovery state %x\n",
				atomic_read(&instance->adprecovery));
		return;
3859 	}
3860 
3861 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
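		/*
		 * FW is in FAULT: quiesce the adapter, reset it, wait for it
		 * to come ready again, re-run the MFI init sequence and then
		 * re-issue the commands deferred at reset time.
		 */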
		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
					"state, restarting it...\n");
3864 
3865 		instance->instancet->disable_intr(instance);
3866 		atomic_set(&instance->fw_outstanding, 0);
3867 
3868 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3869 		instance->instancet->adp_reset(instance, instance->reg_set);
3870 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3871 
		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
					"initiating next stage...\n");

		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
					"state 2 starting...\n");
3877 
		/* wait for about 30 seconds before starting the second init */
3879 		for (wait = 0; wait < 30; wait++) {
3880 			msleep(1000);
3881 		}
3882 
3883 		if (megasas_transition_to_ready(instance, 1)) {
3884 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3885 
3886 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3887 			megaraid_sas_kill_hba(instance);
			return;
3889 		}
3890 
3891 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3892 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3893 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3894 			) {
3895 			*instance->consumer = *instance->producer;
3896 		} else {
3897 			*instance->consumer = 0;
3898 			*instance->producer = 0;
3899 		}
3900 
3901 		megasas_issue_init_mfi(instance);
3902 
3903 		spin_lock_irqsave(&instance->hba_lock, flags);
3904 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3905 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3906 		instance->instancet->enable_intr(instance);
3907 
3908 		megasas_issue_pending_cmds_again(instance);
3909 		instance->issuepend_done = 1;
3910 	}
3911 }
3912 
3913 /**
3914  * megasas_deplete_reply_queue -	Processes all completed commands
3915  * @instance:				Adapter soft state
3916  * @alt_status:				Alternate status to be returned to
3917  *					SCSI mid-layer instead of the status
3918  *					returned by the FW
3919  * Note: this must be called with hba lock held
3920  */
3921 static int
3922 megasas_deplete_reply_queue(struct megasas_instance *instance,
3923 					u8 alt_status)
3924 {
3925 	u32 mfiStatus;
3926 	u32 fw_state;
3927 
	mfiStatus = instance->instancet->check_reset(instance,
					instance->reg_set);
	if (mfiStatus == 1)
		return IRQ_HANDLED;
3932 
3933 	mfiStatus = instance->instancet->clear_intr(instance);
3934 	if (mfiStatus == 0) {
3935 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3936 		if (!instance->msix_vectors)
3937 			return IRQ_NONE;
3938 	}
3939 
3940 	instance->mfiStatus = mfiStatus;
3941 
3942 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
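		/*
		 * FW signalled a state change. If it is in FAULT and online
		 * controller reset is allowed, defer outstanding commands and
		 * hand recovery off to the work queue; otherwise just log it.
		 */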
3943 		fw_state = instance->instancet->read_fw_status_reg(
3944 				instance) & MFI_STATE_MASK;
3945 
3946 		if (fw_state != MFI_STATE_FAULT) {
3947 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3948 						fw_state);
3949 		}
3950 
3951 		if ((fw_state == MFI_STATE_FAULT) &&
3952 				(instance->disableOnlineCtrlReset == 0)) {
3953 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3954 
3955 			if ((instance->pdev->device ==
3956 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3957 				(instance->pdev->device ==
3958 					PCI_DEVICE_ID_DELL_PERC5) ||
3959 				(instance->pdev->device ==
3960 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3961 
3962 				*instance->consumer =
3963 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3964 			}
3965 
3966 
3967 			instance->instancet->disable_intr(instance);
3968 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3969 			instance->issuepend_done = 0;
3970 
3971 			atomic_set(&instance->fw_outstanding, 0);
3972 			megasas_internal_reset_defer_cmds(instance);
3973 
3974 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3975 					fw_state, atomic_read(&instance->adprecovery));
3976 
3977 			schedule_work(&instance->work_init);
3978 			return IRQ_HANDLED;
3979 
3980 		} else {
3981 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3982 				fw_state, instance->disableOnlineCtrlReset);
3983 		}
3984 	}
3985 
3986 	tasklet_schedule(&instance->isr_tasklet);
3987 	return IRQ_HANDLED;
3988 }
3989 
3990 /**
3991  * megasas_isr - isr entry point
3992  * @irq:	IRQ number
3993  * @devp:	IRQ context address
3994  */
3995 static irqreturn_t megasas_isr(int irq, void *devp)
3996 {
3997 	struct megasas_irq_context *irq_context = devp;
3998 	struct megasas_instance *instance = irq_context->instance;
3999 	unsigned long flags;
4000 	irqreturn_t rc;
4001 
4002 	if (atomic_read(&instance->fw_reset_no_pci_access))
4003 		return IRQ_HANDLED;
4004 
4005 	spin_lock_irqsave(&instance->hba_lock, flags);
4006 	rc = megasas_deplete_reply_queue(instance, DID_OK);
4007 	spin_unlock_irqrestore(&instance->hba_lock, flags);
4008 
4009 	return rc;
4010 }
4011 
4012 /**
4013  * megasas_transition_to_ready -	Move the FW to READY state
4014  * @instance:				Adapter soft state
4015  * @ocr:				Adapter reset state
4016  *
 * During initialization, the FW can be in any one of several possible
 * states. If it is in an operational or waiting-for-handshake state, the
 * driver must take steps to bring it to the ready state. Otherwise, it
 * simply waits for the FW to reach the ready state.
4021  */
4022 int
4023 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
4024 {
4025 	int i;
4026 	u8 max_wait;
4027 	u32 fw_state;
4028 	u32 abs_state, curr_abs_state;
4029 
4030 	abs_state = instance->instancet->read_fw_status_reg(instance);
4031 	fw_state = abs_state & MFI_STATE_MASK;
4032 
4033 	if (fw_state != MFI_STATE_READY)
4034 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4035 		       " state\n");
4036 
4037 	while (fw_state != MFI_STATE_READY) {
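		/*
		 * Nudge the FW out of its current state (clear handshake,
		 * ack hotplug, or reset) and then poll until the state
		 * register changes or max_wait seconds elapse.
		 */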
4038 
4039 		switch (fw_state) {
4040 
4041 		case MFI_STATE_FAULT:
4042 			dev_printk(KERN_ERR, &instance->pdev->dev,
4043 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4044 				   abs_state & MFI_STATE_FAULT_CODE,
4045 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4046 			if (ocr) {
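				/*
				 * During OCR a FAULT is expected; give the FW
				 * the full reset wait time to clear it before
				 * bailing out.
				 */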
4047 				max_wait = MEGASAS_RESET_WAIT_TIME;
4048 				break;
4049 			} else {
4050 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4051 				megasas_dump_reg_set(instance->reg_set);
4052 				return -ENODEV;
4053 			}
4054 
4055 		case MFI_STATE_WAIT_HANDSHAKE:
4056 			/*
4057 			 * Set the CLR bit in inbound doorbell
4058 			 */
4059 			if ((instance->pdev->device ==
4060 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4061 				(instance->pdev->device ==
4062 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4063 				(instance->adapter_type != MFI_SERIES))
4064 				writel(
4065 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4066 				  &instance->reg_set->doorbell);
4067 			else
4068 				writel(
4069 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4070 					&instance->reg_set->inbound_doorbell);
4071 
4072 			max_wait = MEGASAS_RESET_WAIT_TIME;
4073 			break;
4074 
4075 		case MFI_STATE_BOOT_MESSAGE_PENDING:
4076 			if ((instance->pdev->device ==
4077 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4078 				(instance->pdev->device ==
4079 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4080 				(instance->adapter_type != MFI_SERIES))
4081 				writel(MFI_INIT_HOTPLUG,
4082 				       &instance->reg_set->doorbell);
4083 			else
4084 				writel(MFI_INIT_HOTPLUG,
4085 					&instance->reg_set->inbound_doorbell);
4086 
4087 			max_wait = MEGASAS_RESET_WAIT_TIME;
4088 			break;
4089 
4090 		case MFI_STATE_OPERATIONAL:
4091 			/*
4092 			 * Bring it to READY state; assuming max wait 10 secs
4093 			 */
4094 			instance->instancet->disable_intr(instance);
4095 			if ((instance->pdev->device ==
4096 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4097 				(instance->pdev->device ==
4098 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4099 				(instance->adapter_type != MFI_SERIES)) {
4100 				writel(MFI_RESET_FLAGS,
4101 					&instance->reg_set->doorbell);
4102 
4103 				if (instance->adapter_type != MFI_SERIES) {
					for (i = 0; i < (10 * 1000); i += 20) {
						if (megasas_readl(instance,
								  &instance->reg_set->doorbell) & 1)
							msleep(20);
						else
							break;
					}
4114 				}
4115 			} else
4116 				writel(MFI_RESET_FLAGS,
4117 					&instance->reg_set->inbound_doorbell);
4118 
4119 			max_wait = MEGASAS_RESET_WAIT_TIME;
4120 			break;
4121 
4122 		case MFI_STATE_UNDEFINED:
4123 			/*
4124 			 * This state should not last for more than 2 seconds
4125 			 */
4126 			max_wait = MEGASAS_RESET_WAIT_TIME;
4127 			break;
4128 
4129 		case MFI_STATE_BB_INIT:
4130 			max_wait = MEGASAS_RESET_WAIT_TIME;
4131 			break;
4132 
4133 		case MFI_STATE_FW_INIT:
4134 			max_wait = MEGASAS_RESET_WAIT_TIME;
4135 			break;
4136 
4137 		case MFI_STATE_FW_INIT_2:
4138 			max_wait = MEGASAS_RESET_WAIT_TIME;
4139 			break;
4140 
4141 		case MFI_STATE_DEVICE_SCAN:
4142 			max_wait = MEGASAS_RESET_WAIT_TIME;
4143 			break;
4144 
4145 		case MFI_STATE_FLUSH_CACHE:
4146 			max_wait = MEGASAS_RESET_WAIT_TIME;
4147 			break;
4148 
4149 		default:
4150 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4151 			       fw_state);
4152 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4153 			megasas_dump_reg_set(instance->reg_set);
4154 			return -ENODEV;
4155 		}
4156 
4157 		/*
		 * The current state should not last for more than max_wait secs
4159 		 */
4160 		for (i = 0; i < max_wait * 50; i++) {
4161 			curr_abs_state = instance->instancet->
4162 				read_fw_status_reg(instance);
4163 
			if (abs_state == curr_abs_state)
				msleep(20);
			else
				break;
4168 		}
4169 
4170 		/*
4171 		 * Return error if fw_state hasn't changed after max_wait
4172 		 */
4173 		if (curr_abs_state == abs_state) {
4174 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4175 			       "in %d secs\n", fw_state, max_wait);
4176 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4177 			megasas_dump_reg_set(instance->reg_set);
4178 			return -ENODEV;
4179 		}
4180 
4181 		abs_state = curr_abs_state;
4182 		fw_state = curr_abs_state & MFI_STATE_MASK;
4183 	}
4184 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4185 
4186 	return 0;
4187 }
4188 
4189 /**
4190  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4191  * @instance:				Adapter soft state
4192  */
4193 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4194 {
4195 	int i;
4196 	u16 max_cmd = instance->max_mfi_cmds;
4197 	struct megasas_cmd *cmd;
4198 
4199 	if (!instance->frame_dma_pool)
4200 		return;
4201 
4202 	/*
4203 	 * Return all frames to pool
4204 	 */
4205 	for (i = 0; i < max_cmd; i++) {
4206 
4207 		cmd = instance->cmd_list[i];
4208 
4209 		if (cmd->frame)
4210 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4211 				      cmd->frame_phys_addr);
4212 
4213 		if (cmd->sense)
4214 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4215 				      cmd->sense_phys_addr);
4216 	}
4217 
4218 	/*
4219 	 * Now destroy the pool itself
4220 	 */
4221 	dma_pool_destroy(instance->frame_dma_pool);
4222 	dma_pool_destroy(instance->sense_dma_pool);
4223 
4224 	instance->frame_dma_pool = NULL;
4225 	instance->sense_dma_pool = NULL;
4226 }
4227 
4228 /**
4229  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4230  * @instance:			Adapter soft state
4231  *
4232  * Each command packet has an embedded DMA memory buffer that is used for
4233  * filling MFI frame and the SG list that immediately follows the frame. This
4234  * function creates those DMA memory buffers for each command packet by using
4235  * PCI pool facility.
4236  */
4237 static int megasas_create_frame_pool(struct megasas_instance *instance)
4238 {
4239 	int i;
4240 	u16 max_cmd;
4241 	u32 frame_count;
4242 	struct megasas_cmd *cmd;
4243 
4244 	max_cmd = instance->max_mfi_cmds;
4245 
4246 	/*
4247 	 * For MFI controllers.
4248 	 * max_num_sge = 60
4249 	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
4250 	 * Total 960 byte (15 MFI frame of 64 byte)
4251 	 *
4252 	 * Fusion adapter require only 3 extra frame.
4253 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4254 	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
4255 	 * Total 192 byte (3 MFI frame of 64 byte)
4256 	 */
4257 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4258 			(15 + 1) : (3 + 1);
4259 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4260 	/*
4261 	 * Use DMA pool facility provided by PCI layer
4262 	 */
4263 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4264 					&instance->pdev->dev,
4265 					instance->mfi_frame_size, 256, 0);
4266 
4267 	if (!instance->frame_dma_pool) {
4268 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4269 		return -ENOMEM;
4270 	}
4271 
4272 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4273 						   &instance->pdev->dev, 128,
4274 						   4, 0);
4275 
4276 	if (!instance->sense_dma_pool) {
4277 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4278 
4279 		dma_pool_destroy(instance->frame_dma_pool);
4280 		instance->frame_dma_pool = NULL;
4281 
4282 		return -ENOMEM;
4283 	}
4284 
4285 	/*
4286 	 * Allocate and attach a frame to each of the commands in cmd_list.
4287 	 * By making cmd->index as the context instead of the &cmd, we can
4288 	 * always use 32bit context regardless of the architecture
4289 	 */
4290 	for (i = 0; i < max_cmd; i++) {
4291 
4292 		cmd = instance->cmd_list[i];
4293 
4294 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4295 					    GFP_KERNEL, &cmd->frame_phys_addr);
4296 
4297 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4298 					    GFP_KERNEL, &cmd->sense_phys_addr);
4299 
4300 		/*
4301 		 * megasas_teardown_frame_pool() takes care of freeing
4302 		 * whatever has been allocated
4303 		 */
4304 		if (!cmd->frame || !cmd->sense) {
4305 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4306 			megasas_teardown_frame_pool(instance);
4307 			return -ENOMEM;
4308 		}
4309 
4310 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4311 		cmd->frame->io.pad_0 = 0;
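		/*
		 * In a kdump kernel (reset_devices) on MFI adapters, pre-mark
		 * each frame as MFI_CMD_INVALID so completions left over from
		 * the crashed kernel are ignored (see the MFI_CMD_INVALID case
		 * in megasas_complete_cmd()).
		 */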
4312 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4313 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4314 	}
4315 
4316 	return 0;
4317 }
4318 
4319 /**
4320  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4321  * @instance:		Adapter soft state
4322  */
4323 void megasas_free_cmds(struct megasas_instance *instance)
4324 {
4325 	int i;
4326 
4327 	/* First free the MFI frame pool */
4328 	megasas_teardown_frame_pool(instance);
4329 
4330 	/* Free all the commands in the cmd_list */
4331 	for (i = 0; i < instance->max_mfi_cmds; i++)
4332 
4333 		kfree(instance->cmd_list[i]);
4334 
4335 	/* Free the cmd_list buffer itself */
4336 	kfree(instance->cmd_list);
4337 	instance->cmd_list = NULL;
4338 
4339 	INIT_LIST_HEAD(&instance->cmd_pool);
4340 }
4341 
4342 /**
4343  * megasas_alloc_cmds -	Allocates the command packets
4344  * @instance:		Adapter soft state
4345  *
4346  * Each command that is issued to the FW, whether IO commands from the OS or
4347  * internal commands like IOCTLs, are wrapped in local data structure called
4348  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4349  * the FW.
4350  *
4351  * Each frame has a 32-bit field called context (tag). This context is used
4352  * to get back the megasas_cmd from the frame when a frame gets completed in
4353  * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32-bit and
 * 64-bit systems to the minimum. We always use 32-bit integers for the
 * context. In this driver, these are the indices into an array, cmd_list.
4357  * This array is used only to look up the megasas_cmd given the context. The
4358  * free commands themselves are maintained in a linked list called cmd_pool.
4359  */
4360 int megasas_alloc_cmds(struct megasas_instance *instance)
4361 {
4362 	int i;
4363 	int j;
4364 	u16 max_cmd;
4365 	struct megasas_cmd *cmd;
4366 
4367 	max_cmd = instance->max_mfi_cmds;
4368 
4369 	/*
4370 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4371 	 * Allocate the dynamic array first and then allocate individual
4372 	 * commands.
4373 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}
4382 
4383 	for (i = 0; i < max_cmd; i++) {
4384 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4385 						GFP_KERNEL);
4386 
4387 		if (!instance->cmd_list[i]) {
4388 
4389 			for (j = 0; j < i; j++)
4390 				kfree(instance->cmd_list[j]);
4391 
4392 			kfree(instance->cmd_list);
4393 			instance->cmd_list = NULL;
4394 
4395 			return -ENOMEM;
4396 		}
4397 	}
4398 
4399 	for (i = 0; i < max_cmd; i++) {
4400 		cmd = instance->cmd_list[i];
4401 		memset(cmd, 0, sizeof(struct megasas_cmd));
4402 		cmd->index = i;
4403 		cmd->scmd = NULL;
4404 		cmd->instance = instance;
4405 
4406 		list_add_tail(&cmd->list, &instance->cmd_pool);
4407 	}
4408 
4409 	/*
4410 	 * Create a frame pool and assign one frame to each cmd
4411 	 */
4412 	if (megasas_create_frame_pool(instance)) {
4413 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4414 		megasas_free_cmds(instance);
4415 		return -ENOMEM;
4416 	}
4417 
4418 	return 0;
4419 }
4420 
4421 /*
4422  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4423  * @instance:				Adapter soft state
4424  *
4425  * Return 0 for only Fusion adapter, if driver load/unload is not in progress
4426  * or FW is not under OCR.
4427  */
4428 inline int
4429 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4430 
4431 	if (instance->adapter_type == MFI_SERIES)
4432 		return KILL_ADAPTER;
4433 	else if (instance->unload ||
4434 			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4435 				 &instance->reset_flags))
4436 		return IGNORE_TIMEOUT;
4437 	else
4438 		return INITIATE_OCR;
4439 }
4440 
4441 static void
4442 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4443 {
4444 	int ret;
4445 	struct megasas_cmd *cmd;
4446 	struct megasas_dcmd_frame *dcmd;
4447 
4448 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4449 	u16 device_id = 0;
4450 
4451 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4452 	cmd = megasas_get_cmd(instance);
4453 
4454 	if (!cmd) {
4455 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4456 		return;
4457 	}
4458 
4459 	dcmd = &cmd->frame->dcmd;
4460 
4461 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4462 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4463 
4464 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4465 	dcmd->cmd = MFI_CMD_DCMD;
4466 	dcmd->cmd_status = 0xFF;
4467 	dcmd->sge_count = 1;
4468 	dcmd->flags = MFI_FRAME_DIR_READ;
4469 	dcmd->timeout = 0;
4470 	dcmd->pad_0 = 0;
4471 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4472 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4473 
4474 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4475 				 sizeof(struct MR_PD_INFO));
4476 
4477 	if ((instance->adapter_type != MFI_SERIES) &&
4478 	    !instance->mask_interrupts)
4479 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4480 	else
4481 		ret = megasas_issue_polled(instance, cmd);
4482 
4483 	switch (ret) {
4484 	case DCMD_SUCCESS:
4485 		mr_device_priv_data = sdev->hostdata;
4486 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4487 		mr_device_priv_data->interface_type =
4488 				instance->pd_info->state.ddf.pdType.intf;
4489 		break;
4490 
4491 	case DCMD_TIMEOUT:
4492 
4493 		switch (dcmd_timeout_ocr_possible(instance)) {
4494 		case INITIATE_OCR:
4495 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4496 			mutex_unlock(&instance->reset_mutex);
4497 			megasas_reset_fusion(instance->host,
4498 				MFI_IO_TIMEOUT_OCR);
4499 			mutex_lock(&instance->reset_mutex);
4500 			break;
4501 		case KILL_ADAPTER:
4502 			megaraid_sas_kill_hba(instance);
4503 			break;
4504 		case IGNORE_TIMEOUT:
4505 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4506 				__func__, __LINE__);
4507 			break;
4508 		}
4509 
4510 		break;
4511 	}
4512 
	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);
4517 }
4518 /*
4519  * megasas_get_pd_list_info -	Returns FW's pd_list structure
4520  * @instance:				Adapter soft state
4521  * @pd_list:				pd_list structure
4522  *
4523  * Issues an internal command (DCMD) to get the FW's controller PD
4524  * list structure.  This information is mainly used to find out SYSTEM
4525  * supported by the FW.
4526  */
4527 static int
4528 megasas_get_pd_list(struct megasas_instance *instance)
4529 {
4530 	int ret = 0, pd_index = 0;
4531 	struct megasas_cmd *cmd;
4532 	struct megasas_dcmd_frame *dcmd;
4533 	struct MR_PD_LIST *ci;
4534 	struct MR_PD_ADDRESS *pd_addr;
4535 
4536 	if (instance->pd_list_not_supported) {
4537 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4538 		"not supported by firmware\n");
4539 		return ret;
4540 	}
4541 
4542 	ci = instance->pd_list_buf;
4543 
4544 	cmd = megasas_get_cmd(instance);
4545 
4546 	if (!cmd) {
4547 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4548 		return -ENOMEM;
4549 	}
4550 
4551 	dcmd = &cmd->frame->dcmd;
4552 
4553 	memset(ci, 0, sizeof(*ci));
4554 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4555 
4556 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4557 	dcmd->mbox.b[1] = 0;
4558 	dcmd->cmd = MFI_CMD_DCMD;
4559 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4560 	dcmd->sge_count = 1;
4561 	dcmd->flags = MFI_FRAME_DIR_READ;
4562 	dcmd->timeout = 0;
4563 	dcmd->pad_0 = 0;
4564 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4565 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4566 
4567 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4568 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4569 
4570 	if ((instance->adapter_type != MFI_SERIES) &&
4571 	    !instance->mask_interrupts)
4572 		ret = megasas_issue_blocked_cmd(instance, cmd,
4573 			MFI_IO_TIMEOUT_SECS);
4574 	else
4575 		ret = megasas_issue_polled(instance, cmd);
4576 
4577 	switch (ret) {
4578 	case DCMD_FAILED:
4579 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4580 			"failed/not supported by firmware\n");
4581 
4582 		if (instance->adapter_type != MFI_SERIES)
4583 			megaraid_sas_kill_hba(instance);
4584 		else
4585 			instance->pd_list_not_supported = 1;
4586 		break;
4587 	case DCMD_TIMEOUT:
4588 
4589 		switch (dcmd_timeout_ocr_possible(instance)) {
4590 		case INITIATE_OCR:
4591 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4592 			/*
4593 			 * DCMD failed from AEN path.
4594 			 * AEN path already hold reset_mutex to avoid PCI access
4595 			 * while OCR is in progress.
4596 			 */
4597 			mutex_unlock(&instance->reset_mutex);
4598 			megasas_reset_fusion(instance->host,
4599 						MFI_IO_TIMEOUT_OCR);
4600 			mutex_lock(&instance->reset_mutex);
4601 			break;
4602 		case KILL_ADAPTER:
4603 			megaraid_sas_kill_hba(instance);
4604 			break;
4605 		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4607 				__func__, __LINE__);
4608 			break;
4609 		}
4610 
4611 		break;
4612 
4613 	case DCMD_SUCCESS:
4614 		pd_addr = ci->addr;
4615 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4616 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4617 				 __func__, le32_to_cpu(ci->count));
4618 
4619 		if ((le32_to_cpu(ci->count) >
4620 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4621 			break;
4622 
4623 		memset(instance->local_pd_list, 0,
4624 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4625 
4626 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
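			/*
			 * local_pd_list is indexed by target ID (deviceId),
			 * not by the running pd_index, so sparse target IDs
			 * are handled.
			 */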
4627 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4628 					le16_to_cpu(pd_addr->deviceId);
4629 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4630 					pd_addr->scsiDevType;
4631 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4632 					MR_PD_STATE_SYSTEM;
4633 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4634 				dev_info(&instance->pdev->dev,
4635 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4636 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4637 					 pd_addr->scsiDevType);
4638 			pd_addr++;
4639 		}
4640 
4641 		memcpy(instance->pd_list, instance->local_pd_list,
4642 			sizeof(instance->pd_list));
4643 		break;
4644 
4645 	}
4646 
4647 	if (ret != DCMD_TIMEOUT)
4648 		megasas_return_cmd(instance, cmd);
4649 
4650 	return ret;
4651 }
4652 
4653 /*
4654  * megasas_get_ld_list_info -	Returns FW's ld_list structure
4655  * @instance:				Adapter soft state
4656  * @ld_list:				ld_list structure
4657  *
4658  * Issues an internal command (DCMD) to get the FW's controller PD
4659  * list structure.  This information is mainly used to find out SYSTEM
4660  * supported by the FW.
4661  */
4662 static int
4663 megasas_get_ld_list(struct megasas_instance *instance)
4664 {
4665 	int ret = 0, ld_index = 0, ids = 0;
4666 	struct megasas_cmd *cmd;
4667 	struct megasas_dcmd_frame *dcmd;
4668 	struct MR_LD_LIST *ci;
4669 	dma_addr_t ci_h = 0;
4670 	u32 ld_count;
4671 
4672 	ci = instance->ld_list_buf;
4673 	ci_h = instance->ld_list_buf_h;
4674 
4675 	cmd = megasas_get_cmd(instance);
4676 
4677 	if (!cmd) {
4678 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4679 		return -ENOMEM;
4680 	}
4681 
4682 	dcmd = &cmd->frame->dcmd;
4683 
4684 	memset(ci, 0, sizeof(*ci));
4685 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4686 
4687 	if (instance->supportmax256vd)
4688 		dcmd->mbox.b[0] = 1;
4689 	dcmd->cmd = MFI_CMD_DCMD;
4690 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4691 	dcmd->sge_count = 1;
4692 	dcmd->flags = MFI_FRAME_DIR_READ;
4693 	dcmd->timeout = 0;
4694 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4695 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4696 	dcmd->pad_0  = 0;
4697 
4698 	megasas_set_dma_settings(instance, dcmd, ci_h,
4699 				 sizeof(struct MR_LD_LIST));
4700 
4701 	if ((instance->adapter_type != MFI_SERIES) &&
4702 	    !instance->mask_interrupts)
4703 		ret = megasas_issue_blocked_cmd(instance, cmd,
4704 			MFI_IO_TIMEOUT_SECS);
4705 	else
4706 		ret = megasas_issue_polled(instance, cmd);
4707 
4708 	ld_count = le32_to_cpu(ci->ldCount);
4709 
4710 	switch (ret) {
4711 	case DCMD_FAILED:
4712 		megaraid_sas_kill_hba(instance);
4713 		break;
4714 	case DCMD_TIMEOUT:
4715 
4716 		switch (dcmd_timeout_ocr_possible(instance)) {
4717 		case INITIATE_OCR:
4718 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4719 			/*
4720 			 * DCMD failed from AEN path.
4721 			 * AEN path already hold reset_mutex to avoid PCI access
4722 			 * while OCR is in progress.
4723 			 */
4724 			mutex_unlock(&instance->reset_mutex);
4725 			megasas_reset_fusion(instance->host,
4726 						MFI_IO_TIMEOUT_OCR);
4727 			mutex_lock(&instance->reset_mutex);
4728 			break;
4729 		case KILL_ADAPTER:
4730 			megaraid_sas_kill_hba(instance);
4731 			break;
4732 		case IGNORE_TIMEOUT:
4733 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4734 				__func__, __LINE__);
4735 			break;
4736 		}
4737 
4738 		break;
4739 
4740 	case DCMD_SUCCESS:
4741 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4742 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4743 				 __func__, ld_count);
4744 
4745 		if (ld_count > instance->fw_supported_vd_count)
4746 			break;
4747 
4748 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4749 
4750 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4751 			if (ci->ldList[ld_index].state != 0) {
4752 				ids = ci->ldList[ld_index].ref.targetId;
4753 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4754 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4755 					dev_info(&instance->pdev->dev,
4756 						 "LD%d: targetID: 0x%03x\n",
4757 						 ld_index, ids);
4758 			}
4759 		}
4760 
4761 		break;
4762 	}
4763 
4764 	if (ret != DCMD_TIMEOUT)
4765 		megasas_return_cmd(instance, cmd);
4766 
4767 	return ret;
4768 }
4769 
4770 /**
4771  * megasas_ld_list_query -	Returns FW's ld_list structure
4772  * @instance:				Adapter soft state
4773  * @query_type:				ld_list structure type
4774  *
4775  * Issues an internal command (DCMD) to get the FW's controller PD
4776  * list structure.  This information is mainly used to find out SYSTEM
4777  * supported by the FW.
4778  */
4779 static int
4780 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4781 {
4782 	int ret = 0, ld_index = 0, ids = 0;
4783 	struct megasas_cmd *cmd;
4784 	struct megasas_dcmd_frame *dcmd;
4785 	struct MR_LD_TARGETID_LIST *ci;
4786 	dma_addr_t ci_h = 0;
4787 	u32 tgtid_count;
4788 
4789 	ci = instance->ld_targetid_list_buf;
4790 	ci_h = instance->ld_targetid_list_buf_h;
4791 
4792 	cmd = megasas_get_cmd(instance);
4793 
4794 	if (!cmd) {
4795 		dev_warn(&instance->pdev->dev,
4796 		         "megasas_ld_list_query: Failed to get cmd\n");
4797 		return -ENOMEM;
4798 	}
4799 
4800 	dcmd = &cmd->frame->dcmd;
4801 
4802 	memset(ci, 0, sizeof(*ci));
4803 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4804 
4805 	dcmd->mbox.b[0] = query_type;
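	/* Request the extended (max 256 VD) target ID list when the FW supports it */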
4806 	if (instance->supportmax256vd)
4807 		dcmd->mbox.b[2] = 1;
4808 
4809 	dcmd->cmd = MFI_CMD_DCMD;
4810 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4811 	dcmd->sge_count = 1;
4812 	dcmd->flags = MFI_FRAME_DIR_READ;
4813 	dcmd->timeout = 0;
4814 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4815 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4816 	dcmd->pad_0  = 0;
4817 
4818 	megasas_set_dma_settings(instance, dcmd, ci_h,
4819 				 sizeof(struct MR_LD_TARGETID_LIST));
4820 
4821 	if ((instance->adapter_type != MFI_SERIES) &&
4822 	    !instance->mask_interrupts)
4823 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4824 	else
4825 		ret = megasas_issue_polled(instance, cmd);
4826 
4827 	switch (ret) {
4828 	case DCMD_FAILED:
4829 		dev_info(&instance->pdev->dev,
4830 			"DCMD not supported by firmware - %s %d\n",
4831 				__func__, __LINE__);
4832 		ret = megasas_get_ld_list(instance);
4833 		break;
4834 	case DCMD_TIMEOUT:
4835 		switch (dcmd_timeout_ocr_possible(instance)) {
4836 		case INITIATE_OCR:
4837 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4838 			/*
4839 			 * DCMD failed from AEN path.
4840 			 * AEN path already hold reset_mutex to avoid PCI access
4841 			 * while OCR is in progress.
4842 			 */
4843 			mutex_unlock(&instance->reset_mutex);
4844 			megasas_reset_fusion(instance->host,
4845 						MFI_IO_TIMEOUT_OCR);
4846 			mutex_lock(&instance->reset_mutex);
4847 			break;
4848 		case KILL_ADAPTER:
4849 			megaraid_sas_kill_hba(instance);
4850 			break;
4851 		case IGNORE_TIMEOUT:
4852 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4853 				__func__, __LINE__);
4854 			break;
4855 		}
4856 
4857 		break;
4858 	case DCMD_SUCCESS:
4859 		tgtid_count = le32_to_cpu(ci->count);
4860 
4861 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4862 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4863 				 __func__, tgtid_count);
4864 
		if (tgtid_count > instance->fw_supported_vd_count)
4866 			break;
4867 
4868 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4869 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4870 			ids = ci->targetId[ld_index];
4871 			instance->ld_ids[ids] = ci->targetId[ld_index];
4872 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4873 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4874 					 ld_index, ci->targetId[ld_index]);
4875 		}
4876 
4877 		break;
4878 	}
4879 
4880 	if (ret != DCMD_TIMEOUT)
4881 		megasas_return_cmd(instance, cmd);
4882 
4883 	return ret;
4884 }
4885 
4886 /**
4887  * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4888  * dcmd.mbox              - reserved
4889  * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4890  * Desc:    This DCMD will return the combined device list
4891  * Status:  MFI_STAT_OK - List returned successfully
4892  *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4893  *                                 disabled
4894  * @instance:			Adapter soft state
4895  * @is_probe:			Driver probe check
4896  * Return:			0 if DCMD succeeded
4897  *				 non-zero if failed
4898  */
4899 static int
4900 megasas_host_device_list_query(struct megasas_instance *instance,
4901 			       bool is_probe)
4902 {
4903 	int ret, i, target_id;
4904 	struct megasas_cmd *cmd;
4905 	struct megasas_dcmd_frame *dcmd;
4906 	struct MR_HOST_DEVICE_LIST *ci;
4907 	u32 count;
4908 	dma_addr_t ci_h;
4909 
4910 	ci = instance->host_device_list_buf;
4911 	ci_h = instance->host_device_list_buf_h;
4912 
4913 	cmd = megasas_get_cmd(instance);
4914 
4915 	if (!cmd) {
4916 		dev_warn(&instance->pdev->dev,
4917 			 "%s: failed to get cmd\n",
4918 			 __func__);
4919 		return -ENOMEM;
4920 	}
4921 
4922 	dcmd = &cmd->frame->dcmd;
4923 
4924 	memset(ci, 0, sizeof(*ci));
4925 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4926 
4927 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
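	/* mbox.b[0]: 0 on the initial probe, 1 on later refreshes of the device list */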
4928 	dcmd->cmd = MFI_CMD_DCMD;
4929 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4930 	dcmd->sge_count = 1;
4931 	dcmd->flags = MFI_FRAME_DIR_READ;
4932 	dcmd->timeout = 0;
4933 	dcmd->pad_0 = 0;
4934 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4935 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4936 
4937 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4938 
4939 	if (!instance->mask_interrupts) {
4940 		ret = megasas_issue_blocked_cmd(instance, cmd,
4941 						MFI_IO_TIMEOUT_SECS);
4942 	} else {
4943 		ret = megasas_issue_polled(instance, cmd);
4944 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4945 	}
4946 
4947 	switch (ret) {
4948 	case DCMD_SUCCESS:
4949 		/* Fill the internal pd_list and ld_ids array based on
4950 		 * targetIds returned by FW
4951 		 */
4952 		count = le32_to_cpu(ci->count);
4953 
4954 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4955 			break;
4956 
4957 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4958 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4959 				 __func__, count);
4960 
4961 		memset(instance->local_pd_list, 0,
4962 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4963 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4964 		for (i = 0; i < count; i++) {
4965 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4966 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4967 				instance->local_pd_list[target_id].tid = target_id;
4968 				instance->local_pd_list[target_id].driveType =
4969 						ci->host_device_list[i].scsi_type;
4970 				instance->local_pd_list[target_id].driveState =
4971 						MR_PD_STATE_SYSTEM;
4972 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4973 					dev_info(&instance->pdev->dev,
4974 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4975 						 i, target_id, ci->host_device_list[i].scsi_type);
4976 			} else {
4977 				instance->ld_ids[target_id] = target_id;
4978 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4979 					dev_info(&instance->pdev->dev,
4980 						 "Device %d: LD targetID: 0x%03x\n",
4981 						 i, target_id);
4982 			}
4983 		}
4984 
4985 		memcpy(instance->pd_list, instance->local_pd_list,
4986 		       sizeof(instance->pd_list));
4987 		break;
4988 
4989 	case DCMD_TIMEOUT:
4990 		switch (dcmd_timeout_ocr_possible(instance)) {
4991 		case INITIATE_OCR:
4992 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4993 			mutex_unlock(&instance->reset_mutex);
4994 			megasas_reset_fusion(instance->host,
4995 				MFI_IO_TIMEOUT_OCR);
4996 			mutex_lock(&instance->reset_mutex);
4997 			break;
4998 		case KILL_ADAPTER:
4999 			megaraid_sas_kill_hba(instance);
5000 			break;
5001 		case IGNORE_TIMEOUT:
5002 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5003 				 __func__, __LINE__);
5004 			break;
5005 		}
5006 		break;
5007 	case DCMD_FAILED:
5008 		dev_err(&instance->pdev->dev,
5009 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
5010 			__func__);
5011 		break;
5012 	}
5013 
5014 	if (ret != DCMD_TIMEOUT)
5015 		megasas_return_cmd(instance, cmd);
5016 
5017 	return ret;
5018 }
5019 
5020 /*
5021  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
5022  * instance			 : Controller's instance
5023 */
5024 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
5025 {
5026 	struct fusion_context *fusion;
5027 	u32 ventura_map_sz = 0;
5028 
5029 	fusion = instance->ctrl_context;
	/* MFI based controllers have no fusion context; nothing to update */
5031 	if (!fusion)
5032 		return;
5033 
5034 	instance->supportmax256vd =
5035 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5036 	/* Below is additional check to address future FW enhancement */
5037 	if (instance->ctrl_info_buf->max_lds > 64)
5038 		instance->supportmax256vd = 1;
5039 
5040 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5041 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5042 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5043 					* MEGASAS_MAX_DEV_PER_CHANNEL;
5044 	if (instance->supportmax256vd) {
5045 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5046 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5047 	} else {
5048 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5049 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5050 	}
5051 
5052 	dev_info(&instance->pdev->dev,
5053 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5054 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5055 		instance->ctrl_info_buf->max_lds);
5056 
5057 	if (instance->max_raid_mapsize) {
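		/*
		 * A non-zero max_raid_mapsize means the FW reports its RAID
		 * map size in MR_MIN_MAP_SIZE (64k) units; otherwise fall
		 * back to the legacy/extended map sizes computed below.
		 */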
5058 		ventura_map_sz = instance->max_raid_mapsize *
5059 						MR_MIN_MAP_SIZE; /* 64k */
5060 		fusion->current_map_sz = ventura_map_sz;
5061 		fusion->max_map_sz = ventura_map_sz;
5062 	} else {
5063 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
5064 					(sizeof(struct MR_LD_SPAN_MAP) *
5065 					(instance->fw_supported_vd_count - 1));
5066 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5067 
5068 		fusion->max_map_sz =
5069 			max(fusion->old_map_sz, fusion->new_map_sz);
5070 
5071 		if (instance->supportmax256vd)
5072 			fusion->current_map_sz = fusion->new_map_sz;
5073 		else
5074 			fusion->current_map_sz = fusion->old_map_sz;
5075 	}
5076 	/* irrespective of FW raid maps, driver raid map is constant */
5077 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5078 }
5079 
5080 /*
5081  * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5082  * dcmd.hdr.length            - number of bytes to read
5083  * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5084  * Desc:			 Fill in snapdump properties
5085  * Status:			 MFI_STAT_OK- Command successful
5086  */
5087 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5088 {
5089 	int ret = 0;
5090 	struct megasas_cmd *cmd;
5091 	struct megasas_dcmd_frame *dcmd;
5092 	struct MR_SNAPDUMP_PROPERTIES *ci;
5093 	dma_addr_t ci_h = 0;
5094 
5095 	ci = instance->snapdump_prop;
5096 	ci_h = instance->snapdump_prop_h;
5097 
5098 	if (!ci)
5099 		return;
5100 
5101 	cmd = megasas_get_cmd(instance);
5102 
5103 	if (!cmd) {
5104 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5105 		return;
5106 	}
5107 
5108 	dcmd = &cmd->frame->dcmd;
5109 
5110 	memset(ci, 0, sizeof(*ci));
5111 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5112 
5113 	dcmd->cmd = MFI_CMD_DCMD;
5114 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5115 	dcmd->sge_count = 1;
5116 	dcmd->flags = MFI_FRAME_DIR_READ;
5117 	dcmd->timeout = 0;
5118 	dcmd->pad_0 = 0;
5119 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5120 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5121 
5122 	megasas_set_dma_settings(instance, dcmd, ci_h,
5123 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5124 
5125 	if (!instance->mask_interrupts) {
5126 		ret = megasas_issue_blocked_cmd(instance, cmd,
5127 						MFI_IO_TIMEOUT_SECS);
5128 	} else {
5129 		ret = megasas_issue_polled(instance, cmd);
5130 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5131 	}
5132 
5133 	switch (ret) {
5134 	case DCMD_SUCCESS:
5135 		instance->snapdump_wait_time =
5136 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5137 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5138 		break;
5139 
5140 	case DCMD_TIMEOUT:
5141 		switch (dcmd_timeout_ocr_possible(instance)) {
5142 		case INITIATE_OCR:
5143 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5144 			mutex_unlock(&instance->reset_mutex);
5145 			megasas_reset_fusion(instance->host,
5146 				MFI_IO_TIMEOUT_OCR);
5147 			mutex_lock(&instance->reset_mutex);
5148 			break;
5149 		case KILL_ADAPTER:
5150 			megaraid_sas_kill_hba(instance);
5151 			break;
5152 		case IGNORE_TIMEOUT:
5153 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5154 				__func__, __LINE__);
5155 			break;
5156 		}
5157 	}
5158 
5159 	if (ret != DCMD_TIMEOUT)
5160 		megasas_return_cmd(instance, cmd);
5161 }
5162 
5163 /**
 * megasas_get_ctrl_info -	Returns FW's controller structure
5165  * @instance:				Adapter soft state
5166  *
5167  * Issues an internal command (DCMD) to get the FW's controller structure.
5168  * This information is mainly used to find out the maximum IO transfer per
5169  * command supported by the FW.
5170  */
5171 int
5172 megasas_get_ctrl_info(struct megasas_instance *instance)
5173 {
5174 	int ret = 0;
5175 	struct megasas_cmd *cmd;
5176 	struct megasas_dcmd_frame *dcmd;
5177 	struct megasas_ctrl_info *ci;
5178 	dma_addr_t ci_h = 0;
5179 
5180 	ci = instance->ctrl_info_buf;
5181 	ci_h = instance->ctrl_info_buf_h;
5182 
5183 	cmd = megasas_get_cmd(instance);
5184 
5185 	if (!cmd) {
5186 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5187 		return -ENOMEM;
5188 	}
5189 
5190 	dcmd = &cmd->frame->dcmd;
5191 
5192 	memset(ci, 0, sizeof(*ci));
5193 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5194 
5195 	dcmd->cmd = MFI_CMD_DCMD;
5196 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5197 	dcmd->sge_count = 1;
5198 	dcmd->flags = MFI_FRAME_DIR_READ;
5199 	dcmd->timeout = 0;
5200 	dcmd->pad_0 = 0;
5201 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5202 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5203 	dcmd->mbox.b[0] = 1;
5204 
5205 	megasas_set_dma_settings(instance, dcmd, ci_h,
5206 				 sizeof(struct megasas_ctrl_info));
5207 
5208 	if ((instance->adapter_type != MFI_SERIES) &&
5209 	    !instance->mask_interrupts) {
5210 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5211 	} else {
5212 		ret = megasas_issue_polled(instance, cmd);
5213 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5214 	}
5215 
5216 	switch (ret) {
5217 	case DCMD_SUCCESS:
5218 		/* Save required controller information in
5219 		 * CPU endianness format.
5220 		 */
5221 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5222 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5223 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5224 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5225 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5226 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5227 
		/* Update the latest Ext VD info.
		 * From the init path, store current firmware details.
		 * From the OCR path, detect any firmware property changes
		 * in case of a firmware upgrade without a system reboot.
		 */
5233 		megasas_update_ext_vd_details(instance);
5234 		instance->support_seqnum_jbod_fp =
5235 			ci->adapterOperations3.useSeqNumJbodFP;
5236 		instance->support_morethan256jbod =
5237 			ci->adapter_operations4.support_pd_map_target_id;
5238 		instance->support_nvme_passthru =
5239 			ci->adapter_operations4.support_nvme_passthru;
5240 		instance->support_pci_lane_margining =
5241 			ci->adapter_operations5.support_pci_lane_margining;
5242 		instance->task_abort_tmo = ci->TaskAbortTO;
5243 		instance->max_reset_tmo = ci->MaxResetTO;
5244 
		/* Check whether controller is iMR or MR */
5246 		instance->is_imr = (ci->memory_size ? 0 : 1);
5247 
5248 		instance->snapdump_wait_time =
5249 			(ci->properties.on_off_properties2.enable_snap_dump ?
5250 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5251 
5252 		instance->enable_fw_dev_list =
5253 			ci->properties.on_off_properties2.enable_fw_dev_list;
5254 
5255 		dev_info(&instance->pdev->dev,
5256 			"controller type\t: %s(%dMB)\n",
5257 			instance->is_imr ? "iMR" : "MR",
5258 			le16_to_cpu(ci->memory_size));
5259 
5260 		instance->disableOnlineCtrlReset =
5261 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5262 		instance->secure_jbod_support =
5263 			ci->adapterOperations3.supportSecurityonJBOD;
5264 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5265 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5266 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5267 			instance->secure_jbod_support ? "Yes" : "No");
5268 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5269 			 instance->support_nvme_passthru ? "Yes" : "No");
5270 		dev_info(&instance->pdev->dev,
5271 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5272 			 instance->task_abort_tmo, instance->max_reset_tmo);
5273 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5274 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5275 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5276 			 instance->support_pci_lane_margining ? "Yes" : "No");
5277 
5278 		break;
5279 
5280 	case DCMD_TIMEOUT:
5281 		switch (dcmd_timeout_ocr_possible(instance)) {
5282 		case INITIATE_OCR:
5283 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5284 			mutex_unlock(&instance->reset_mutex);
5285 			megasas_reset_fusion(instance->host,
5286 				MFI_IO_TIMEOUT_OCR);
5287 			mutex_lock(&instance->reset_mutex);
5288 			break;
5289 		case KILL_ADAPTER:
5290 			megaraid_sas_kill_hba(instance);
5291 			break;
5292 		case IGNORE_TIMEOUT:
5293 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5294 				__func__, __LINE__);
5295 			break;
5296 		}
5297 		break;
5298 	case DCMD_FAILED:
5299 		megaraid_sas_kill_hba(instance);
5300 		break;
5301 
5302 	}
5303 
5304 	if (ret != DCMD_TIMEOUT)
5305 		megasas_return_cmd(instance, cmd);
5306 
5307 	return ret;
5308 }
5309 
/**
 * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
 *					to firmware
 * @instance:				Adapter soft state
 * @crash_buf_state:			tell FW to turn ON/OFF crash dump feature
 *					MR_CRASH_BUF_TURN_OFF = 0
 *					MR_CRASH_BUF_TURN_ON = 1
 *
 * Issues an internal command (DCMD) to set parameters for the crash dump
 * feature. The driver sends the address of the crash dump DMA buffer and sets
 * the mbox to tell FW that the driver supports crash dump. This DCMD is sent
 * only if the crash dump feature is supported by the FW.
 *
 * Return 0 on success, non-zero on failure.
 */
5325 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5326 	u8 crash_buf_state)
5327 {
5328 	int ret = 0;
5329 	struct megasas_cmd *cmd;
5330 	struct megasas_dcmd_frame *dcmd;
5331 
5332 	cmd = megasas_get_cmd(instance);
5333 
5334 	if (!cmd) {
5335 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5336 		return -ENOMEM;
5337 	}
5338 
5339 
5340 	dcmd = &cmd->frame->dcmd;
5341 
5342 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5343 	dcmd->mbox.b[0] = crash_buf_state;
5344 	dcmd->cmd = MFI_CMD_DCMD;
5345 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5346 	dcmd->sge_count = 1;
5347 	dcmd->flags = MFI_FRAME_DIR_NONE;
5348 	dcmd->timeout = 0;
5349 	dcmd->pad_0 = 0;
5350 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5351 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5352 
5353 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5354 				 CRASH_DMA_BUF_SIZE);
5355 
5356 	if ((instance->adapter_type != MFI_SERIES) &&
5357 	    !instance->mask_interrupts)
5358 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5359 	else
5360 		ret = megasas_issue_polled(instance, cmd);
5361 
5362 	if (ret == DCMD_TIMEOUT) {
5363 		switch (dcmd_timeout_ocr_possible(instance)) {
5364 		case INITIATE_OCR:
5365 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5366 			megasas_reset_fusion(instance->host,
5367 					MFI_IO_TIMEOUT_OCR);
5368 			break;
5369 		case KILL_ADAPTER:
5370 			megaraid_sas_kill_hba(instance);
5371 			break;
5372 		case IGNORE_TIMEOUT:
5373 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5374 				__func__, __LINE__);
5375 			break;
5376 		}
5377 	} else
5378 		megasas_return_cmd(instance, cmd);
5379 
5380 	return ret;
5381 }
5382 
5383 /**
5384  * megasas_issue_init_mfi -	Initializes the FW
5385  * @instance:		Adapter soft state
5386  *
5387  * Issues the INIT MFI cmd
5388  */
5389 static int
5390 megasas_issue_init_mfi(struct megasas_instance *instance)
5391 {
5392 	__le32 context;
5393 	struct megasas_cmd *cmd;
5394 	struct megasas_init_frame *init_frame;
5395 	struct megasas_init_queue_info *initq_info;
5396 	dma_addr_t init_frame_h;
5397 	dma_addr_t initq_info_h;
5398 
	/*
	 * Prepare an init frame. Note that the init frame points to the queue
	 * info structure. Each frame has the SGL allocated after the first 64
	 * bytes. For this frame - since we don't need any SGL - we use the
	 * SGL's space as the queue info structure.
	 *
	 * We will not get a NULL command below. We just created the pool.
	 */
5407 	cmd = megasas_get_cmd(instance);
5408 
5409 	init_frame = (struct megasas_init_frame *)cmd->frame;
5410 	initq_info = (struct megasas_init_queue_info *)
5411 		((unsigned long)init_frame + 64);
5412 
5413 	init_frame_h = cmd->frame_phys_addr;
5414 	initq_info_h = init_frame_h + 64;
5415 
5416 	context = init_frame->context;
5417 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5418 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5419 	init_frame->context = context;
5420 
5421 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5422 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5423 
5424 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5425 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5426 
5427 	init_frame->cmd = MFI_CMD_INIT;
5428 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5429 	init_frame->queue_info_new_phys_addr_lo =
5430 		cpu_to_le32(lower_32_bits(initq_info_h));
5431 	init_frame->queue_info_new_phys_addr_hi =
5432 		cpu_to_le32(upper_32_bits(initq_info_h));
5433 
5434 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5435 
5436 	/*
5437 	 * disable the intr before firing the init frame to FW
5438 	 */
5439 	instance->instancet->disable_intr(instance);
5440 
5441 	/*
5442 	 * Issue the init frame in polled mode
5443 	 */
5444 
5445 	if (megasas_issue_polled(instance, cmd)) {
5446 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5447 		megasas_return_cmd(instance, cmd);
5448 		goto fail_fw_init;
5449 	}
5450 
5451 	megasas_return_cmd(instance, cmd);
5452 
5453 	return 0;
5454 
5455 fail_fw_init:
5456 	return -EINVAL;
5457 }
5458 
5459 static u32
5460 megasas_init_adapter_mfi(struct megasas_instance *instance)
5461 {
5462 	u32 context_sz;
5463 	u32 reply_q_sz;
5464 
5465 	/*
5466 	 * Get various operational parameters from status register
5467 	 */
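	/*
	 * As used below, bits 0-15 of the FW status register report the max
	 * outstanding FW commands and bits 16-23 report the max SGE count.
	 */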
5468 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmds that the driver may send)
	 * does not exceed the max cmds that the FW can support.
	 */
5474 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5475 	instance->max_mfi_cmds = instance->max_fw_cmds;
5476 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5477 					0x10;
5478 	/*
5479 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5480 	 * are reserved for IOCTL + driver's internal DCMDs.
5481 	 */
5482 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5483 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5484 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5485 			MEGASAS_SKINNY_INT_CMDS);
5486 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5487 	} else {
5488 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5489 			MEGASAS_INT_CMDS);
5490 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5491 	}
5492 
5493 	instance->cur_can_queue = instance->max_scsi_cmds;
5494 	/*
5495 	 * Create a pool of commands
5496 	 */
5497 	if (megasas_alloc_cmds(instance))
5498 		goto fail_alloc_cmds;
5499 
5500 	/*
5501 	 * Allocate memory for reply queue. Length of reply queue should
5502 	 * be _one_ more than the maximum commands handled by the firmware.
5503 	 *
	 * Note: When FW completes commands, it places corresponding context
5505 	 * values in this circular reply queue. This circular queue is a fairly
5506 	 * typical producer-consumer queue. FW is the producer (of completed
5507 	 * commands) and the driver is the consumer.
5508 	 */
5509 	context_sz = sizeof(u32);
5510 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5511 
5512 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5513 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5514 
5515 	if (!instance->reply_queue) {
5516 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5517 		goto fail_reply_queue;
5518 	}
5519 
5520 	if (megasas_issue_init_mfi(instance))
5521 		goto fail_fw_init;
5522 
5523 	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
			"Fail from %s %d\n", instance->unique_id,
5526 			__func__, __LINE__);
5527 		goto fail_fw_init;
5528 	}
5529 
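	/*
	 * Bit 26 (0x04000000) of the FW status register reports IEEE
	 * support and is used to set flag_ieee below.
	 */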
5530 	instance->fw_support_ieee = 0;
5531 	instance->fw_support_ieee =
5532 		(instance->instancet->read_fw_status_reg(instance) &
5533 		0x04000000);
5534 
	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
5536 			instance->fw_support_ieee);
5537 
5538 	if (instance->fw_support_ieee)
5539 		instance->flag_ieee = 1;
5540 
5541 	return 0;
5542 
5543 fail_fw_init:
5544 
5545 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5546 			    instance->reply_queue, instance->reply_queue_h);
5547 fail_reply_queue:
5548 	megasas_free_cmds(instance);
5549 
5550 fail_alloc_cmds:
5551 	return 1;
5552 }
5553 
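/*
 * megasas_setup_irq_poll -	initialize irq_poll for each reply queue
 * @instance:			Adapter soft state
 *
 * Register one irq_poll context per MSI-X vector (or a single context when
 * running with a legacy interrupt), using the reply threshold as the poll
 * budget and megasas_irqpoll() as the poll handler.
 */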
5554 static
5555 void megasas_setup_irq_poll(struct megasas_instance *instance)
5556 {
5557 	struct megasas_irq_context *irq_ctx;
5558 	u32 count, i;
5559 
5560 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5561 
5562 	/* Initialize IRQ poll */
5563 	for (i = 0; i < count; i++) {
5564 		irq_ctx = &instance->irq_context[i];
5565 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5566 		irq_ctx->irq_poll_scheduled = false;
5567 		irq_poll_init(&irq_ctx->irqpoll,
5568 			      instance->threshold_reply_count,
5569 			      megasas_irqpoll);
5570 	}
5571 }
5572 
/**
 * megasas_setup_irqs_ioapic -		register legacy interrupts.
 * @instance:				Adapter soft state
 *
 * Do not enable interrupts, only set up the ISR.
 *
 * Return 0 on success.
 */
5581 static int
5582 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5583 {
5584 	struct pci_dev *pdev;
5585 
5586 	pdev = instance->pdev;
5587 	instance->irq_context[0].instance = instance;
5588 	instance->irq_context[0].MSIxIndex = 0;
5589 	snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5590 		"megasas", instance->host->host_no);
5591 	if (request_irq(pci_irq_vector(pdev, 0),
5592 			instance->instancet->service_isr, IRQF_SHARED,
5593 			instance->irq_context->name, &instance->irq_context[0])) {
5594 		dev_err(&instance->pdev->dev,
5595 				"Failed to register IRQ from %s %d\n",
5596 				__func__, __LINE__);
5597 		return -1;
5598 	}
5599 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5600 	instance->low_latency_index_start = 0;
5601 	return 0;
5602 }
5603 
/**
 * megasas_setup_irqs_msix -		register MSI-X interrupts.
 * @instance:				Adapter soft state
 * @is_probe:				Driver probe check
 *
 * Do not enable interrupts, only set up the ISRs.
 *
 * Return 0 on success.
 */
5613 static int
5614 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5615 {
5616 	int i, j;
5617 	struct pci_dev *pdev;
5618 
5619 	pdev = instance->pdev;
5620 
5621 	/* Try MSI-x */
5622 	for (i = 0; i < instance->msix_vectors; i++) {
5623 		instance->irq_context[i].instance = instance;
5624 		instance->irq_context[i].MSIxIndex = i;
5625 		snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5626 			"megasas", instance->host->host_no, i);
5627 		if (request_irq(pci_irq_vector(pdev, i),
5628 			instance->instancet->service_isr, 0, instance->irq_context[i].name,
5629 			&instance->irq_context[i])) {
5630 			dev_err(&instance->pdev->dev,
5631 				"Failed to register IRQ for vector %d.\n", i);
5632 			for (j = 0; j < i; j++) {
5633 				if (j < instance->low_latency_index_start)
5634 					irq_set_affinity_hint(
5635 						pci_irq_vector(pdev, j), NULL);
5636 				free_irq(pci_irq_vector(pdev, j),
5637 					 &instance->irq_context[j]);
5638 			}
			/* Retry IRQ registration for IO_APIC */
5640 			instance->msix_vectors = 0;
5641 			instance->msix_load_balance = false;
5642 			if (is_probe) {
5643 				pci_free_irq_vectors(instance->pdev);
5644 				return megasas_setup_irqs_ioapic(instance);
5645 			} else {
5646 				return -1;
5647 			}
5648 		}
5649 	}
5650 
5651 	return 0;
5652 }
5653 
/*
 * megasas_destroy_irqs -		unregister interrupts.
 * @instance:				Adapter soft state
 * return:				void
 */
5659 static void
5660 megasas_destroy_irqs(struct megasas_instance *instance) {
5661 
5662 	int i;
5663 	int count;
5664 	struct megasas_irq_context *irq_ctx;
5665 
5666 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5667 	if (instance->adapter_type != MFI_SERIES) {
5668 		for (i = 0; i < count; i++) {
5669 			irq_ctx = &instance->irq_context[i];
5670 			irq_poll_disable(&irq_ctx->irqpoll);
5671 		}
5672 	}
5673 
5674 	if (instance->msix_vectors)
5675 		for (i = 0; i < instance->msix_vectors; i++) {
5676 			if (i < instance->low_latency_index_start)
5677 				irq_set_affinity_hint(
5678 				    pci_irq_vector(instance->pdev, i), NULL);
5679 			free_irq(pci_irq_vector(instance->pdev, i),
5680 				 &instance->irq_context[i]);
5681 		}
5682 	else
5683 		free_irq(pci_irq_vector(instance->pdev, 0),
5684 			 &instance->irq_context[0]);
5685 }
5686 
5687 /**
5688  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5689  * @instance:				Adapter soft state
5690  *
 * return:				void
5692  */
5693 void
5694 megasas_setup_jbod_map(struct megasas_instance *instance)
5695 {
5696 	int i;
5697 	struct fusion_context *fusion = instance->ctrl_context;
5698 	u32 pd_seq_map_sz;
5699 
5700 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5701 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5702 
5703 	instance->use_seqnum_jbod_fp =
5704 		instance->support_seqnum_jbod_fp;
5705 	if (reset_devices || !fusion ||
5706 		!instance->support_seqnum_jbod_fp) {
5707 		dev_info(&instance->pdev->dev,
5708 			"JBOD sequence map is disabled %s %d\n",
5709 			__func__, __LINE__);
5710 		instance->use_seqnum_jbod_fp = false;
5711 		return;
5712 	}
5713 
5714 	if (fusion->pd_seq_sync[0])
5715 		goto skip_alloc;
5716 
5717 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5718 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5719 			(&instance->pdev->dev, pd_seq_map_sz,
5720 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5721 		if (!fusion->pd_seq_sync[i]) {
5722 			dev_err(&instance->pdev->dev,
5723 				"Failed to allocate memory from %s %d\n",
5724 				__func__, __LINE__);
5725 			if (i == 1) {
5726 				dma_free_coherent(&instance->pdev->dev,
5727 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5728 					fusion->pd_seq_phys[0]);
5729 				fusion->pd_seq_sync[0] = NULL;
5730 			}
5731 			instance->use_seqnum_jbod_fp = false;
5732 			return;
5733 		}
5734 	}
5735 
5736 skip_alloc:
5737 	if (!megasas_sync_pd_seq_num(instance, false) &&
5738 		!megasas_sync_pd_seq_num(instance, true))
5739 		instance->use_seqnum_jbod_fp = true;
5740 	else
5741 		instance->use_seqnum_jbod_fp = false;
5742 }
5743 
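/*
 * megasas_setup_reply_map -	build the CPU to reply queue mapping
 * @instance:			Adapter soft state
 *
 * Map each CPU to the reply queue whose MSI-X affinity mask covers that CPU.
 * If an affinity mask is unavailable for any queue, fall back to spreading
 * all possible CPUs across the I/O queues in round-robin order, starting at
 * low_latency_index_start.
 */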
5744 static void megasas_setup_reply_map(struct megasas_instance *instance)
5745 {
5746 	const struct cpumask *mask;
5747 	unsigned int queue, cpu, low_latency_index_start;
5748 
5749 	low_latency_index_start = instance->low_latency_index_start;
5750 
5751 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5752 		mask = pci_irq_get_affinity(instance->pdev, queue);
5753 		if (!mask)
5754 			goto fallback;
5755 
5756 		for_each_cpu(cpu, mask)
5757 			instance->reply_map[cpu] = queue;
5758 	}
5759 	return;
5760 
5761 fallback:
5762 	queue = low_latency_index_start;
5763 	for_each_possible_cpu(cpu) {
5764 		instance->reply_map[cpu] = queue;
5765 		if (queue == (instance->msix_vectors - 1))
5766 			queue = low_latency_index_start;
5767 		else
5768 			queue++;
5769 	}
5770 }
5771 
5772 /**
5773  * megasas_get_device_list -	Get the PD and LD device list from FW.
5774  * @instance:			Adapter soft state
5775  * @return:			Success or failure
5776  *
5777  * Issue DCMDs to Firmware to get the PD and LD list.
5778  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5779  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5780  */
5781 static
5782 int megasas_get_device_list(struct megasas_instance *instance)
5783 {
5784 	memset(instance->pd_list, 0,
5785 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5786 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5787 
5788 	if (instance->enable_fw_dev_list) {
5789 		if (megasas_host_device_list_query(instance, true))
5790 			return FAILED;
5791 	} else {
5792 		if (megasas_get_pd_list(instance) < 0) {
5793 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5794 			return FAILED;
5795 		}
5796 
5797 		if (megasas_ld_list_query(instance,
5798 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5799 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5800 			return FAILED;
5801 		}
5802 	}
5803 
5804 	return SUCCESS;
5805 }
5806 
5807 /**
5808  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5809  * @instance:					Adapter soft state
5810  * return:					void
5811  */
5812 static inline void
5813 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5814 {
5815 	int i;
5816 	int local_numa_node;
5817 
5818 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5819 		local_numa_node = dev_to_node(&instance->pdev->dev);
5820 
5821 		for (i = 0; i < instance->low_latency_index_start; i++)
5822 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5823 				cpumask_of_node(local_numa_node));
5824 	}
5825 }
5826 
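/*
 * __megasas_alloc_irq_vectors -	allocate MSI-X vectors
 * @instance:				Adapter soft state
 *
 * The first low_latency_index_start vectors are requested as pre-vectors,
 * i.e. excluded from affinity spreading, while the remaining vectors are
 * spread across CPUs by the PCI core when SMP affinity is enabled.
 *
 * Returns the number of vectors allocated, or a negative error code.
 */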
5827 static int
5828 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5829 {
5830 	int i, irq_flags;
5831 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5832 	struct irq_affinity *descp = &desc;
5833 
5834 	irq_flags = PCI_IRQ_MSIX;
5835 
5836 	if (instance->smp_affinity_enable)
5837 		irq_flags |= PCI_IRQ_AFFINITY;
5838 	else
5839 		descp = NULL;
5840 
5841 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5842 		instance->low_latency_index_start,
5843 		instance->msix_vectors, irq_flags, descp);
5844 
5845 	return i;
5846 }
5847 
5848 /**
5849  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5850  * @instance:			Adapter soft state
5851  * return:			void
5852  */
5853 static void
5854 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5855 {
5856 	int i;
5857 	unsigned int num_msix_req;
5858 
5859 	i = __megasas_alloc_irq_vectors(instance);
5860 
5861 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5862 	    (i != instance->msix_vectors)) {
5863 		if (instance->msix_vectors)
5864 			pci_free_irq_vectors(instance->pdev);
5865 		/* Disable Balanced IOPS mode and try realloc vectors */
5866 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5867 		instance->low_latency_index_start = 1;
5868 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5869 
5870 		instance->msix_vectors = min(num_msix_req,
5871 				instance->msix_vectors);
5872 
5873 		i = __megasas_alloc_irq_vectors(instance);
5874 
5875 	}
5876 
5877 	dev_info(&instance->pdev->dev,
5878 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5879 
5880 	if (i > 0)
5881 		instance->msix_vectors = i;
5882 	else
5883 		instance->msix_vectors = 0;
5884 
5885 	if (instance->smp_affinity_enable)
5886 		megasas_set_high_iops_queue_affinity_hint(instance);
5887 }
5888 
5889 /**
5890  * megasas_init_fw -	Initializes the FW
5891  * @instance:		Adapter soft state
5892  *
5893  * This is the main function for initializing firmware
5894  */
5895 
5896 static int megasas_init_fw(struct megasas_instance *instance)
5897 {
5898 	u32 max_sectors_1;
5899 	u32 max_sectors_2, tmp_sectors, msix_enable;
5900 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5901 	resource_size_t base_addr;
5902 	void *base_addr_phys;
5903 	struct megasas_ctrl_info *ctrl_info = NULL;
5904 	unsigned long bar_list;
5905 	int i, j, loop;
5906 	struct IOV_111 *iovPtr;
5907 	struct fusion_context *fusion;
5908 	bool intr_coalescing;
5909 	unsigned int num_msix_req;
5910 	u16 lnksta, speed;
5911 
5912 	fusion = instance->ctrl_context;
5913 
5914 	/* Find first memory bar */
5915 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5916 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5917 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5918 					 "megasas: LSI")) {
5919 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5920 		return -EBUSY;
5921 	}
5922 
5923 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5924 	instance->reg_set = ioremap(base_addr, 8192);
5925 
5926 	if (!instance->reg_set) {
5927 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5928 		goto fail_ioremap;
5929 	}
5930 
5931 	base_addr_phys = &base_addr;
5932 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5933 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5934 		   instance->bar, base_addr_phys, instance->reg_set);
5935 
5936 	if (instance->adapter_type != MFI_SERIES)
5937 		instance->instancet = &megasas_instance_template_fusion;
5938 	else {
5939 		switch (instance->pdev->device) {
5940 		case PCI_DEVICE_ID_LSI_SAS1078R:
5941 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5942 			instance->instancet = &megasas_instance_template_ppc;
5943 			break;
5944 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5945 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5946 			instance->instancet = &megasas_instance_template_gen2;
5947 			break;
5948 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5949 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5950 			instance->instancet = &megasas_instance_template_skinny;
5951 			break;
5952 		case PCI_DEVICE_ID_LSI_SAS1064R:
5953 		case PCI_DEVICE_ID_DELL_PERC5:
5954 		default:
5955 			instance->instancet = &megasas_instance_template_xscale;
5956 			instance->pd_list_not_supported = 1;
5957 			break;
5958 		}
5959 	}
5960 
5961 	if (megasas_transition_to_ready(instance, 0)) {
5962 		dev_info(&instance->pdev->dev,
5963 			 "Failed to transition controller to ready from %s!\n",
5964 			 __func__);
5965 		if (instance->adapter_type != MFI_SERIES) {
5966 			status_reg = instance->instancet->read_fw_status_reg(
5967 					instance);
5968 			if (status_reg & MFI_RESET_ADAPTER) {
5969 				if (megasas_adp_reset_wait_for_ready
5970 					(instance, true, 0) == FAILED)
5971 					goto fail_ready_state;
5972 			} else {
5973 				goto fail_ready_state;
5974 			}
5975 		} else {
5976 			atomic_set(&instance->fw_reset_no_pci_access, 1);
5977 			instance->instancet->adp_reset
5978 				(instance, instance->reg_set);
5979 			atomic_set(&instance->fw_reset_no_pci_access, 0);
5980 
			/* Wait for about 30 seconds before retrying */
5982 			ssleep(30);
5983 
5984 			if (megasas_transition_to_ready(instance, 0))
5985 				goto fail_ready_state;
5986 		}
5987 
5988 		dev_info(&instance->pdev->dev,
5989 			 "FW restarted successfully from %s!\n",
5990 			 __func__);
5991 	}
5992 
5993 	megasas_init_ctrl_params(instance);
5994 
5995 	if (megasas_set_dma_mask(instance))
5996 		goto fail_ready_state;
5997 
5998 	if (megasas_alloc_ctrl_mem(instance))
5999 		goto fail_alloc_dma_buf;
6000 
6001 	if (megasas_alloc_ctrl_dma_buffers(instance))
6002 		goto fail_alloc_dma_buf;
6003 
6004 	fusion = instance->ctrl_context;
6005 
6006 	if (instance->adapter_type >= VENTURA_SERIES) {
6007 		scratch_pad_2 =
6008 			megasas_readl(instance,
6009 				      &instance->reg_set->outbound_scratch_pad_2);
6010 		instance->max_raid_mapsize = ((scratch_pad_2 >>
6011 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
6012 			MR_MAX_RAID_MAP_SIZE_MASK);
6013 	}
6014 
6015 	instance->enable_sdev_max_qd = enable_sdev_max_qd;
6016 
6017 	switch (instance->adapter_type) {
6018 	case VENTURA_SERIES:
6019 		fusion->pcie_bw_limitation = true;
6020 		break;
6021 	case AERO_SERIES:
6022 		fusion->r56_div_offload = true;
6023 		break;
6024 	default:
6025 		break;
6026 	}
6027 
6028 	/* Check if MSI-X is supported while in ready state */
6029 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
6030 		       0x4000000) >> 0x1a;
6031 	if (msix_enable && !msix_disable) {
6032 
6033 		scratch_pad_1 = megasas_readl
6034 			(instance, &instance->reg_set->outbound_scratch_pad_1);
6035 		/* Check max MSI-X vectors */
6036 		if (fusion) {
6037 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
6038 				/* Thunderbolt Series*/
6039 				instance->msix_vectors = (scratch_pad_1
6040 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6041 			} else {
6042 				instance->msix_vectors = ((scratch_pad_1
6043 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6044 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6045 
6046 				/*
6047 				 * For Invader series, > 8 MSI-x vectors
6048 				 * supported by FW/HW implies combined
6049 				 * reply queue mode is enabled.
6050 				 * For Ventura series, > 16 MSI-x vectors
6051 				 * supported by FW/HW implies combined
6052 				 * reply queue mode is enabled.
6053 				 */
6054 				switch (instance->adapter_type) {
6055 				case INVADER_SERIES:
6056 					if (instance->msix_vectors > 8)
6057 						instance->msix_combined = true;
6058 					break;
6059 				case AERO_SERIES:
6060 				case VENTURA_SERIES:
6061 					if (instance->msix_vectors > 16)
6062 						instance->msix_combined = true;
6063 					break;
6064 				}
6065 
6066 				if (rdpq_enable)
6067 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6068 								1 : 0;
6069 
6070 				if (instance->adapter_type >= INVADER_SERIES &&
6071 				    !instance->msix_combined) {
6072 					instance->msix_load_balance = true;
6073 					instance->smp_affinity_enable = false;
6074 				}
6075 
				/* Save reply post host index addresses 1-15 to
				 * local memory. Index 0 is already saved from
				 * reg offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
				 */
6080 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6081 					instance->reply_post_host_index_addr[loop] =
6082 						(u32 __iomem *)
6083 						((u8 __iomem *)instance->reg_set +
6084 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6085 						+ (loop * 0x10));
6086 				}
6087 			}
6088 
6089 			dev_info(&instance->pdev->dev,
				 "firmware supports msix\t: (%d)\n",
6091 				 instance->msix_vectors);
6092 			if (msix_vectors)
6093 				instance->msix_vectors = min(msix_vectors,
6094 					instance->msix_vectors);
6095 		} else /* MFI adapters */
6096 			instance->msix_vectors = 1;
6097 
6098 
		/*
		 * For Aero (if some conditions are met), the driver will configure a
		 * few additional reply queues with interrupt coalescing enabled.
		 * These queues with interrupt coalescing enabled are called
		 * High IOPS queues and the rest of the reply queues (based on the
		 * number of logical CPUs) are termed Low latency queues.
		 *
		 * Total number of reply queues = High IOPS queues + Low latency queues
		 *
		 * For the rest of the fusion adapters, 1 additional reply queue is
		 * reserved for management commands, and the rest of the reply queues
		 * (based on the number of logical CPUs) are used for IOs and
		 * referenced as IO queues.
		 * Total number of reply queues = 1 + IO queues
		 *
		 * MFI adapters support a single MSI-X vector, so a single reply
		 * queue is used for both IO and management commands.
		 */
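		/*
		 * For example, in balanced mode the driver requests
		 * MR_HIGH_IOPS_QUEUE_COUNT high IOPS queues plus one low
		 * latency queue per online CPU, and the total is capped by
		 * the MSI-X vector count exposed by FW (see below).
		 */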
6117 
6118 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6119 								true : false;
6120 		if (intr_coalescing &&
6121 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6122 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6123 			instance->perf_mode = MR_BALANCED_PERF_MODE;
6124 		else
6125 			instance->perf_mode = MR_LATENCY_PERF_MODE;
6126 
6127 
6128 		if (instance->adapter_type == AERO_SERIES) {
6129 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6130 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6131 
			/*
			 * For Aero, if the PCIe link speed is < 16 GT/s, the driver should
			 * operate in latency perf mode and enable the R1 PCI bandwidth
			 * algorithm.
			 */
6136 			if (speed < 0x4) {
6137 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6138 				fusion->pcie_bw_limitation = true;
6139 			}
6140 
			/*
			 * Performance mode settings provided through the module parameter
			 * perf_mode take effect only for:
			 * 1. The Aero family of adapters.
			 * 2. When the user sets the module parameter perf_mode in the
			 *    range 0-2.
			 */
6147 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6148 				(perf_mode <= MR_LATENCY_PERF_MODE))
6149 				instance->perf_mode = perf_mode;
6150 			/*
6151 			 * If intr coalescing is not supported by controller FW, then IOPS
6152 			 * and Balanced modes are not feasible.
6153 			 */
6154 			if (!intr_coalescing)
6155 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6156 
6157 		}
6158 
6159 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6160 			instance->low_latency_index_start =
6161 				MR_HIGH_IOPS_QUEUE_COUNT;
6162 		else
6163 			instance->low_latency_index_start = 1;
6164 
6165 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6166 
6167 		instance->msix_vectors = min(num_msix_req,
6168 				instance->msix_vectors);
6169 
6170 		megasas_alloc_irq_vectors(instance);
6171 		if (!instance->msix_vectors)
6172 			instance->msix_load_balance = false;
6173 	}
	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used for all MPT based adapters.
	 */
6178 	if (instance->msix_combined) {
6179 		instance->reply_post_host_index_addr[0] =
6180 				(u32 *)((u8 *)instance->reg_set +
6181 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6182 	} else {
6183 		instance->reply_post_host_index_addr[0] =
6184 			(u32 *)((u8 *)instance->reg_set +
6185 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6186 	}
6187 
6188 	if (!instance->msix_vectors) {
6189 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6190 		if (i < 0)
6191 			goto fail_init_adapter;
6192 	}
6193 
6194 	megasas_setup_reply_map(instance);
6195 
6196 	dev_info(&instance->pdev->dev,
6197 		"current msix/online cpus\t: (%d/%d)\n",
6198 		instance->msix_vectors, (unsigned int)num_online_cpus());
6199 	dev_info(&instance->pdev->dev,
6200 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6201 
6202 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6203 		(unsigned long)instance);
6204 
	/*
	 * Below are the default values for legacy firmware
	 * (non-fusion based controllers).
	 */
6209 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6210 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6211 	/* Get operational params, sge flags, send init cmd to controller */
6212 	if (instance->instancet->init_adapter(instance))
6213 		goto fail_init_adapter;
6214 
6215 	if (instance->adapter_type >= VENTURA_SERIES) {
6216 		scratch_pad_3 =
6217 			megasas_readl(instance,
6218 				      &instance->reg_set->outbound_scratch_pad_3);
6219 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6220 			MR_DEFAULT_NVME_PAGE_SHIFT)
6221 			instance->nvme_page_size =
6222 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6223 
6224 		dev_info(&instance->pdev->dev,
6225 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6226 	}
6227 
6228 	if (instance->msix_vectors ?
6229 		megasas_setup_irqs_msix(instance, 1) :
6230 		megasas_setup_irqs_ioapic(instance))
6231 		goto fail_init_adapter;
6232 
6233 	if (instance->adapter_type != MFI_SERIES)
6234 		megasas_setup_irq_poll(instance);
6235 
6236 	instance->instancet->enable_intr(instance);
6237 
6238 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6239 
6240 	megasas_setup_jbod_map(instance);
6241 
6242 	if (megasas_get_device_list(instance) != SUCCESS) {
6243 		dev_err(&instance->pdev->dev,
6244 			"%s: megasas_get_device_list failed\n",
6245 			__func__);
6246 		goto fail_get_ld_pd_list;
6247 	}
6248 
6249 	/* stream detection initialization */
6250 	if (instance->adapter_type >= VENTURA_SERIES) {
6251 		fusion->stream_detect_by_ld =
6252 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6253 				sizeof(struct LD_STREAM_DETECT *),
6254 				GFP_KERNEL);
6255 		if (!fusion->stream_detect_by_ld) {
6256 			dev_err(&instance->pdev->dev,
6257 				"unable to allocate stream detection for pool of LDs\n");
6258 			goto fail_get_ld_pd_list;
6259 		}
6260 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6261 			fusion->stream_detect_by_ld[i] =
6262 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6263 				GFP_KERNEL);
6264 			if (!fusion->stream_detect_by_ld[i]) {
6265 				dev_err(&instance->pdev->dev,
					"unable to allocate stream detect by LD\n");
6267 				for (j = 0; j < i; ++j)
6268 					kfree(fusion->stream_detect_by_ld[j]);
6269 				kfree(fusion->stream_detect_by_ld);
6270 				fusion->stream_detect_by_ld = NULL;
6271 				goto fail_get_ld_pd_list;
6272 			}
6273 			fusion->stream_detect_by_ld[i]->mru_bit_map
6274 				= MR_STREAM_BITMAP;
6275 		}
6276 	}
6277 
	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. The driver should use the minimum of the two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so the number always ended up as
	 * zero.
	 */
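	/*
	 * Worked example with illustrative values only: stripe_sz_ops.min = 7
	 * gives 1 << 7 = 128 sectors per strip; with max_strips_per_io = 42,
	 * max_sectors_1 = 128 * 42 = 5376 sectors, which is then limited by
	 * the max_request_size reported by the controller.
	 */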
6287 	tmp_sectors = 0;
6288 	ctrl_info = instance->ctrl_info_buf;
6289 
6290 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6291 		le16_to_cpu(ctrl_info->max_strips_per_io);
6292 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6293 
6294 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6295 
6296 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6297 	instance->passive = ctrl_info->cluster.passive;
6298 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6299 	instance->UnevenSpanSupport =
6300 		ctrl_info->adapterOperations2.supportUnevenSpans;
6301 	if (instance->UnevenSpanSupport) {
6302 		struct fusion_context *fusion = instance->ctrl_context;
6303 		if (MR_ValidateMapInfo(instance, instance->map_id))
6304 			fusion->fast_path_io = 1;
6305 		else
6306 			fusion->fast_path_io = 0;
6307 
6308 	}
6309 	if (ctrl_info->host_interface.SRIOV) {
6310 		instance->requestorId = ctrl_info->iov.requestorId;
6311 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6312 			if (!ctrl_info->adapterOperations2.activePassive)
6313 			    instance->PlasmaFW111 = 1;
6314 
6315 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6316 			    instance->PlasmaFW111 ? "1.11" : "new");
6317 
6318 			if (instance->PlasmaFW111) {
6319 			    iovPtr = (struct IOV_111 *)
6320 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6321 			    instance->requestorId = iovPtr->requestorId;
6322 			}
6323 		}
6324 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6325 			instance->requestorId);
6326 	}
6327 
6328 	instance->crash_dump_fw_support =
6329 		ctrl_info->adapterOperations3.supportCrashDump;
6330 	instance->crash_dump_drv_support =
6331 		(instance->crash_dump_fw_support &&
6332 		instance->crash_dump_buf);
6333 	if (instance->crash_dump_drv_support)
6334 		megasas_set_crash_dump_params(instance,
6335 			MR_CRASH_BUF_TURN_OFF);
6336 
6337 	else {
6338 		if (instance->crash_dump_buf)
6339 			dma_free_coherent(&instance->pdev->dev,
6340 				CRASH_DMA_BUF_SIZE,
6341 				instance->crash_dump_buf,
6342 				instance->crash_dump_h);
6343 		instance->crash_dump_buf = NULL;
6344 	}
6345 
6346 	if (instance->snapdump_wait_time) {
6347 		megasas_get_snapdump_properties(instance);
6348 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6349 			 instance->snapdump_wait_time);
6350 	}
6351 
6352 	dev_info(&instance->pdev->dev,
6353 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6354 		le16_to_cpu(ctrl_info->pci.vendor_id),
6355 		le16_to_cpu(ctrl_info->pci.device_id),
6356 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6357 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6358 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6359 		instance->UnevenSpanSupport ? "yes" : "no");
6360 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6361 		instance->crash_dump_drv_support ? "yes" : "no");
6362 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6363 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6364 
6365 	instance->max_sectors_per_req = instance->max_num_sge *
6366 						SGE_BUFFER_SIZE / 512;
6367 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6368 		instance->max_sectors_per_req = tmp_sectors;
6369 
6370 	/* Check for valid throttlequeuedepth module parameter */
6371 	if (throttlequeuedepth &&
6372 			throttlequeuedepth <= instance->max_scsi_cmds)
6373 		instance->throttlequeuedepth = throttlequeuedepth;
6374 	else
6375 		instance->throttlequeuedepth =
6376 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6377 
6378 	if ((resetwaittime < 1) ||
6379 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6380 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6381 
6382 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6383 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6384 
6385 	/* Launch SR-IOV heartbeat timer */
6386 	if (instance->requestorId) {
6387 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6388 			megasas_start_timer(instance);
6389 		} else {
6390 			instance->skip_heartbeat_timer_del = 1;
6391 			goto fail_get_ld_pd_list;
6392 		}
6393 	}
6394 
	/*
	 * Create and start the watchdog thread, which monitors the
	 * controller state every second and triggers OCR when the
	 * controller enters the fault state.
	 */
6400 	if (instance->adapter_type != MFI_SERIES)
6401 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6402 			goto fail_start_watchdog;
6403 
6404 	return 0;
6405 
6406 fail_start_watchdog:
6407 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6408 		del_timer_sync(&instance->sriov_heartbeat_timer);
6409 fail_get_ld_pd_list:
6410 	instance->instancet->disable_intr(instance);
6411 	megasas_destroy_irqs(instance);
6412 fail_init_adapter:
6413 	if (instance->msix_vectors)
6414 		pci_free_irq_vectors(instance->pdev);
6415 	instance->msix_vectors = 0;
6416 fail_alloc_dma_buf:
6417 	megasas_free_ctrl_dma_buffers(instance);
6418 	megasas_free_ctrl_mem(instance);
6419 fail_ready_state:
6420 	iounmap(instance->reg_set);
6421 
6422 fail_ioremap:
6423 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6424 
6425 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6426 		__func__, __LINE__);
6427 	return -EINVAL;
6428 }
6429 
6430 /**
6431  * megasas_release_mfi -	Reverses the FW initialization
6432  * @instance:			Adapter soft state
6433  */
6434 static void megasas_release_mfi(struct megasas_instance *instance)
6435 {
	u32 reply_q_sz = sizeof(u32) * (instance->max_mfi_cmds + 1);
6437 
6438 	if (instance->reply_queue)
6439 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6440 			    instance->reply_queue, instance->reply_queue_h);
6441 
6442 	megasas_free_cmds(instance);
6443 
6444 	iounmap(instance->reg_set);
6445 
6446 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6447 }
6448 
6449 /**
6450  * megasas_get_seq_num -	Gets latest event sequence numbers
6451  * @instance:			Adapter soft state
6452  * @eli:			FW event log sequence numbers information
6453  *
 * FW maintains a log of all events in a non-volatile area. Upper layers
 * usually query the latest sequence number of the events, the sequence number
 * at boot, etc. They "read" all the events below the latest sequence number
 * by issuing a direct FW cmd (DCMD). For future events (beyond the latest
 * sequence number), they subscribe to AEN (asynchronous event notification)
 * and wait for the events to happen.
6460  */
6461 static int
6462 megasas_get_seq_num(struct megasas_instance *instance,
6463 		    struct megasas_evt_log_info *eli)
6464 {
6465 	struct megasas_cmd *cmd;
6466 	struct megasas_dcmd_frame *dcmd;
6467 	struct megasas_evt_log_info *el_info;
6468 	dma_addr_t el_info_h = 0;
6469 	int ret;
6470 
6471 	cmd = megasas_get_cmd(instance);
6472 
6473 	if (!cmd) {
6474 		return -ENOMEM;
6475 	}
6476 
6477 	dcmd = &cmd->frame->dcmd;
6478 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6479 				     sizeof(struct megasas_evt_log_info),
6480 				     &el_info_h, GFP_KERNEL);
6481 	if (!el_info) {
6482 		megasas_return_cmd(instance, cmd);
6483 		return -ENOMEM;
6484 	}
6485 
6486 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6487 
6488 	dcmd->cmd = MFI_CMD_DCMD;
6489 	dcmd->cmd_status = 0x0;
6490 	dcmd->sge_count = 1;
6491 	dcmd->flags = MFI_FRAME_DIR_READ;
6492 	dcmd->timeout = 0;
6493 	dcmd->pad_0 = 0;
6494 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6495 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6496 
6497 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6498 				 sizeof(struct megasas_evt_log_info));
6499 
6500 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6501 	if (ret != DCMD_SUCCESS) {
6502 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6503 			__func__, __LINE__);
6504 		goto dcmd_failed;
6505 	}
6506 
	/*
	 * Copy the data back into the caller's buffer
	 */
6510 	eli->newest_seq_num = el_info->newest_seq_num;
6511 	eli->oldest_seq_num = el_info->oldest_seq_num;
6512 	eli->clear_seq_num = el_info->clear_seq_num;
6513 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6514 	eli->boot_seq_num = el_info->boot_seq_num;
6515 
6516 dcmd_failed:
6517 	dma_free_coherent(&instance->pdev->dev,
6518 			sizeof(struct megasas_evt_log_info),
6519 			el_info, el_info_h);
6520 
6521 	megasas_return_cmd(instance, cmd);
6522 
6523 	return ret;
6524 }
6525 
6526 /**
6527  * megasas_register_aen -	Registers for asynchronous event notification
6528  * @instance:			Adapter soft state
6529  * @seq_num:			The starting sequence number
 * @class_locale_word:		Class and locale of the events to subscribe to
 *
 * This function subscribes for AEN for events beyond @seq_num. It requests
 * to be notified if and only if the event matches @class_locale_word.
6534  */
6535 static int
6536 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6537 		     u32 class_locale_word)
6538 {
6539 	int ret_val;
6540 	struct megasas_cmd *cmd;
6541 	struct megasas_dcmd_frame *dcmd;
6542 	union megasas_evt_class_locale curr_aen;
6543 	union megasas_evt_class_locale prev_aen;
6544 
	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to have already been subscribed
	 * to.
	 *
	 * If the old cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is a superset of both
	 * the old and the current one, and re-issue it to the FW.
	 */
6557 
6558 	curr_aen.word = class_locale_word;
6559 
6560 	if (instance->aen_cmd) {
6561 
6562 		prev_aen.word =
6563 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6564 
6565 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6566 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6567 			dev_info(&instance->pdev->dev,
				 "%s %d out of range class %d sent by application\n",
6569 				 __func__, __LINE__, curr_aen.members.class);
6570 			return 0;
6571 		}
6572 
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such a hierarchy; they are
		 * bitmap values.
		 */
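		/*
		 * Example: if the new request adds a locale bit that the
		 * pending registration lacks, the locales are OR-ed together
		 * and the class widened to the smaller (more inclusive) value
		 * before the AEN is re-registered below.
		 */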
6583 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6584 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6585 		      curr_aen.members.locale)) {
6586 			/*
6587 			 * Previously issued event registration includes
6588 			 * current request. Nothing to do.
6589 			 */
6590 			return 0;
6591 		} else {
6592 			curr_aen.members.locale |= prev_aen.members.locale;
6593 
6594 			if (prev_aen.members.class < curr_aen.members.class)
6595 				curr_aen.members.class = prev_aen.members.class;
6596 
6597 			instance->aen_cmd->abort_aen = 1;
6598 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6599 								  instance->
6600 								  aen_cmd, 30);
6601 
6602 			if (ret_val) {
6603 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6604 				       "previous AEN command\n");
6605 				return ret_val;
6606 			}
6607 		}
6608 	}
6609 
6610 	cmd = megasas_get_cmd(instance);
6611 
6612 	if (!cmd)
6613 		return -ENOMEM;
6614 
6615 	dcmd = &cmd->frame->dcmd;
6616 
6617 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6618 
6619 	/*
6620 	 * Prepare DCMD for aen registration
6621 	 */
6622 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6623 
6624 	dcmd->cmd = MFI_CMD_DCMD;
6625 	dcmd->cmd_status = 0x0;
6626 	dcmd->sge_count = 1;
6627 	dcmd->flags = MFI_FRAME_DIR_READ;
6628 	dcmd->timeout = 0;
6629 	dcmd->pad_0 = 0;
6630 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6631 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6632 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6633 	instance->last_seq_num = seq_num;
6634 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6635 
6636 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6637 				 sizeof(struct megasas_evt_detail));
6638 
6639 	if (instance->aen_cmd != NULL) {
6640 		megasas_return_cmd(instance, cmd);
6641 		return 0;
6642 	}
6643 
6644 	/*
6645 	 * Store reference to the cmd used to register for AEN. When an
6646 	 * application wants us to register for AEN, we have to abort this
6647 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6648 	 */
6649 	instance->aen_cmd = cmd;
6650 
6651 	/*
6652 	 * Issue the aen registration frame
6653 	 */
6654 	instance->instancet->issue_dcmd(instance, cmd);
6655 
6656 	return 0;
6657 }
6658 
6659 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6660  *
 * This DCMD fetches a few properties of the LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
6665  *
6666  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6667  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6668  *                       0 = system PD, 1 = LD.
6669  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6670  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6671  *
6672  * @instance:		Adapter soft state
6673  * @sdev:		OS provided scsi device
6674  *
 * Returns 0 on success, non-zero on failure.
6676  */
6677 int
6678 megasas_get_target_prop(struct megasas_instance *instance,
6679 			struct scsi_device *sdev)
6680 {
6681 	int ret;
6682 	struct megasas_cmd *cmd;
6683 	struct megasas_dcmd_frame *dcmd;
6684 	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6685 			sdev->id;
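	/*
	 * e.g., a device on channel 1 with SCSI id 5 maps to
	 * targetId = (1 % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + 5.
	 */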
6686 
6687 	cmd = megasas_get_cmd(instance);
6688 
6689 	if (!cmd) {
6690 		dev_err(&instance->pdev->dev,
6691 			"Failed to get cmd %s\n", __func__);
6692 		return -ENOMEM;
6693 	}
6694 
6695 	dcmd = &cmd->frame->dcmd;
6696 
6697 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6698 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6699 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6700 
6701 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6702 	dcmd->cmd = MFI_CMD_DCMD;
6703 	dcmd->cmd_status = 0xFF;
6704 	dcmd->sge_count = 1;
6705 	dcmd->flags = MFI_FRAME_DIR_READ;
6706 	dcmd->timeout = 0;
6707 	dcmd->pad_0 = 0;
6708 	dcmd->data_xfer_len =
6709 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6710 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6711 
6712 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6713 				 sizeof(struct MR_TARGET_PROPERTIES));
6714 
6715 	if ((instance->adapter_type != MFI_SERIES) &&
6716 	    !instance->mask_interrupts)
6717 		ret = megasas_issue_blocked_cmd(instance,
6718 						cmd, MFI_IO_TIMEOUT_SECS);
6719 	else
6720 		ret = megasas_issue_polled(instance, cmd);
6721 
6722 	switch (ret) {
6723 	case DCMD_TIMEOUT:
6724 		switch (dcmd_timeout_ocr_possible(instance)) {
6725 		case INITIATE_OCR:
6726 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6727 			mutex_unlock(&instance->reset_mutex);
6728 			megasas_reset_fusion(instance->host,
6729 					     MFI_IO_TIMEOUT_OCR);
6730 			mutex_lock(&instance->reset_mutex);
6731 			break;
6732 		case KILL_ADAPTER:
6733 			megaraid_sas_kill_hba(instance);
6734 			break;
6735 		case IGNORE_TIMEOUT:
6736 			dev_info(&instance->pdev->dev,
6737 				 "Ignore DCMD timeout: %s %d\n",
6738 				 __func__, __LINE__);
6739 			break;
6740 		}
6741 		break;
6742 
6743 	default:
6744 		megasas_return_cmd(instance, cmd);
6745 	}
6746 	if (ret != DCMD_SUCCESS)
6747 		dev_err(&instance->pdev->dev,
6748 			"return from %s %d return value %d\n",
6749 			__func__, __LINE__, ret);
6750 
6751 	return ret;
6752 }
6753 
6754 /**
6755  * megasas_start_aen -	Subscribes to AEN during driver load time
6756  * @instance:		Adapter soft state
6757  */
6758 static int megasas_start_aen(struct megasas_instance *instance)
6759 {
6760 	struct megasas_evt_log_info eli;
6761 	union megasas_evt_class_locale class_locale;
6762 
6763 	/*
6764 	 * Get the latest sequence number from FW
6765 	 */
6766 	memset(&eli, 0, sizeof(eli));
6767 
6768 	if (megasas_get_seq_num(instance, &eli))
6769 		return -1;
6770 
6771 	/*
6772 	 * Register AEN with FW for latest sequence number plus 1
6773 	 */
6774 	class_locale.members.reserved = 0;
6775 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6776 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6777 
6778 	return megasas_register_aen(instance,
6779 			le32_to_cpu(eli.newest_seq_num) + 1,
6780 			class_locale.word);
6781 }
6782 
6783 /**
6784  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6785  * @instance:		Adapter soft state
6786  */
6787 static int megasas_io_attach(struct megasas_instance *instance)
6788 {
6789 	struct Scsi_Host *host = instance->host;
6790 
6791 	/*
6792 	 * Export parameters required by SCSI mid-layer
6793 	 */
6794 	host->unique_id = instance->unique_id;
6795 	host->can_queue = instance->max_scsi_cmds;
6796 	host->this_id = instance->init_id;
6797 	host->sg_tablesize = instance->max_num_sge;
6798 
6799 	if (instance->fw_support_ieee)
6800 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6801 
6802 	/*
6803 	 * Check if the module parameter value for max_sectors can be used
6804 	 */
6805 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6806 		instance->max_sectors_per_req = max_sectors;
6807 	else {
6808 		if (max_sectors) {
6809 			if (((instance->pdev->device ==
6810 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6811 				(instance->pdev->device ==
6812 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6813 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6814 				instance->max_sectors_per_req = max_sectors;
			} else {
				dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
					"and <= %d (or < 1MB for GEN2 controller)\n",
					instance->max_sectors_per_req);
			}
6820 		}
6821 	}
6822 
6823 	host->max_sectors = instance->max_sectors_per_req;
6824 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6825 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6826 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6827 	host->max_lun = MEGASAS_MAX_LUN;
6828 	host->max_cmd_len = 16;
6829 
	/* Use a shared host tagset only for fusion adapters
	 * if there are managed interrupts (SMP affinity enabled case).
	 * kdump runs with a single MSI-X vector, so the shared host
	 * tagset is also disabled there.
	 */
6834 
6835 	host->host_tagset = 0;
6836 	host->nr_hw_queues = 1;
6837 
6838 	if ((instance->adapter_type != MFI_SERIES) &&
6839 		(instance->msix_vectors > instance->low_latency_index_start) &&
6840 		host_tagset_enable &&
6841 		instance->smp_affinity_enable) {
6842 		host->host_tagset = 1;
6843 		host->nr_hw_queues = instance->msix_vectors -
6844 			instance->low_latency_index_start;
6845 	}
6846 
6847 	dev_info(&instance->pdev->dev,
6848 		"Max firmware commands: %d shared with nr_hw_queues = %d\n",
6849 		instance->max_fw_cmds, host->nr_hw_queues);
6850 	/*
6851 	 * Notify the mid-layer about the new controller
6852 	 */
6853 	if (scsi_add_host(host, &instance->pdev->dev)) {
6854 		dev_err(&instance->pdev->dev,
6855 			"Failed to add host from %s %d\n",
6856 			__func__, __LINE__);
6857 		return -ENODEV;
6858 	}
6859 
6860 	return 0;
6861 }
6862 
6863 /**
6864  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6865  *
6866  * @instance:		Adapter soft state
6867  * Description:
6868  *
 * For Ventura, driver/FW will operate with 63-bit DMA addresses.
 *
 * For Invader:
 *	By default, driver/FW will operate with 32-bit DMA addresses
 *	for consistent DMA mapping, but if the 32-bit consistent
 *	DMA mask fails, the driver will try a 63-bit consistent
 *	mask, provided the FW is truly 63-bit DMA capable.
 *
 * For older controllers (Thunderbolt and MFI based adapters):
 *	driver/FW will operate with 32-bit consistent DMA addresses.
6879  */
6880 static int
6881 megasas_set_dma_mask(struct megasas_instance *instance)
6882 {
6883 	u64 consistent_mask;
6884 	struct pci_dev *pdev;
6885 	u32 scratch_pad_1;
6886 
6887 	pdev = instance->pdev;
6888 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6889 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6890 
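	/*
	 * Try the widest streaming DMA mask first (63 bit); if that fails,
	 * fall back to a 32 bit mask before giving up.
	 */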
6891 	if (IS_DMA64) {
6892 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6893 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6894 			goto fail_set_dma_mask;
6895 
6896 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6897 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6898 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6899 			/*
6900 			 * Both the consistent mask and the 32 bit fallback failed.
6901 			 * If the FW can handle 64 bit DMA, retry with a 63 bit mask.
6902 			 */
6903 			scratch_pad_1 = megasas_readl
6904 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6905 
6906 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6907 				goto fail_set_dma_mask;
6908 			else if (dma_set_mask_and_coherent(&pdev->dev,
6909 							   DMA_BIT_MASK(63)))
6910 				goto fail_set_dma_mask;
6911 		}
6912 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6913 		goto fail_set_dma_mask;
6914 
6915 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6916 		instance->consistent_mask_64bit = false;
6917 	else
6918 		instance->consistent_mask_64bit = true;
6919 
6920 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6921 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6922 		 (instance->consistent_mask_64bit ? "63" : "32"));
6923 
6924 	return 0;
6925 
6926 fail_set_dma_mask:
6927 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6928 	return -1;
6929 
6930 }
6931 
6932 /*
6933  * megasas_set_adapter_type -	Set adapter type.
6934  *				Supported controllers can be divided in
6935  *				different categories-
6936  *					enum MR_ADAPTER_TYPE {
6937  *						MFI_SERIES = 1,
6938  *						THUNDERBOLT_SERIES = 2,
6939  *						INVADER_SERIES = 3,
6940  *						VENTURA_SERIES = 4,
6941  *						AERO_SERIES = 5,
6942  *					};
6943  * @instance:			Adapter soft state
6944  * return:			void
6945  */
6946 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6947 {
6948 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6949 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6950 		instance->adapter_type = MFI_SERIES;
6951 	} else {
6952 		switch (instance->pdev->device) {
6953 		case PCI_DEVICE_ID_LSI_AERO_10E1:
6954 		case PCI_DEVICE_ID_LSI_AERO_10E2:
6955 		case PCI_DEVICE_ID_LSI_AERO_10E5:
6956 		case PCI_DEVICE_ID_LSI_AERO_10E6:
6957 			instance->adapter_type = AERO_SERIES;
6958 			break;
6959 		case PCI_DEVICE_ID_LSI_VENTURA:
6960 		case PCI_DEVICE_ID_LSI_CRUSADER:
6961 		case PCI_DEVICE_ID_LSI_HARPOON:
6962 		case PCI_DEVICE_ID_LSI_TOMCAT:
6963 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6964 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6965 			instance->adapter_type = VENTURA_SERIES;
6966 			break;
6967 		case PCI_DEVICE_ID_LSI_FUSION:
6968 		case PCI_DEVICE_ID_LSI_PLASMA:
6969 			instance->adapter_type = THUNDERBOLT_SERIES;
6970 			break;
6971 		case PCI_DEVICE_ID_LSI_INVADER:
6972 		case PCI_DEVICE_ID_LSI_INTRUDER:
6973 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6974 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6975 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6976 		case PCI_DEVICE_ID_LSI_FURY:
6977 			instance->adapter_type = INVADER_SERIES;
6978 			break;
6979 		default: /* For all other supported controllers */
6980 			instance->adapter_type = MFI_SERIES;
6981 			break;
6982 		}
6983 	}
6984 }
6985 
6986 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6987 {
6988 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6989 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
6990 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6991 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6992 
6993 	if (!instance->producer || !instance->consumer) {
6994 		dev_err(&instance->pdev->dev,
6995 			"Failed to allocate memory for producer, consumer\n");
6996 		return -1;
6997 	}
6998 
6999 	*instance->producer = 0;
7000 	*instance->consumer = 0;
7001 	return 0;
7002 }
7003 
7004 /**
7005  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
7006  *				structures which are not common across MFI
7007  *				adapters and fusion adapters.
7008  *				For MFI based adapters, allocate producer and
7009  *				consumer buffers. For fusion adapters, allocate
7010  *				memory for fusion context.
7011  * @instance:			Adapter soft state
7012  * return:			0 for SUCCESS
7013  */
7014 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
7015 {
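	/*
	 * reply_map holds the reply queue (MSI-X vector) index used for each
	 * CPU; it is filled in later by megasas_setup_reply_map().
	 */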
7016 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
7017 				      GFP_KERNEL);
7018 	if (!instance->reply_map)
7019 		return -ENOMEM;
7020 
7021 	switch (instance->adapter_type) {
7022 	case MFI_SERIES:
7023 		if (megasas_alloc_mfi_ctrl_mem(instance))
7024 			goto fail;
7025 		break;
7026 	case AERO_SERIES:
7027 	case VENTURA_SERIES:
7028 	case THUNDERBOLT_SERIES:
7029 	case INVADER_SERIES:
7030 		if (megasas_alloc_fusion_context(instance))
7031 			goto fail;
7032 		break;
7033 	}
7034 
7035 	return 0;
7036  fail:
7037 	kfree(instance->reply_map);
7038 	instance->reply_map = NULL;
7039 	return -ENOMEM;
7040 }
7041 
7042 /*
7043  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
7044  *				producer, consumer buffers for MFI adapters
7045  *
7046  * @instance -			Adapter soft instance
7047  *
7048  */
7049 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
7050 {
7051 	kfree(instance->reply_map);
7052 	if (instance->adapter_type == MFI_SERIES) {
7053 		if (instance->producer)
7054 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7055 					    instance->producer,
7056 					    instance->producer_h);
7057 		if (instance->consumer)
7058 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7059 					    instance->consumer,
7060 					    instance->consumer_h);
7061 	} else {
7062 		megasas_free_fusion_context(instance);
7063 	}
7064 }
7065 
7066 /**
7067  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
7068  *					driver load time
7069  *
7070  * @instance:				Adapter soft instance
7071  *
7072  * @return:				0 for SUCCESS
7073  */
7074 static inline
7075 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7076 {
7077 	struct pci_dev *pdev = instance->pdev;
7078 	struct fusion_context *fusion = instance->ctrl_context;
7079 
7080 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7081 			sizeof(struct megasas_evt_detail),
7082 			&instance->evt_detail_h, GFP_KERNEL);
7083 
7084 	if (!instance->evt_detail) {
7085 		dev_err(&instance->pdev->dev,
7086 			"Failed to allocate event detail buffer\n");
7087 		return -ENOMEM;
7088 	}
7089 
7090 	if (fusion) {
7091 		fusion->ioc_init_request =
7092 			dma_alloc_coherent(&pdev->dev,
7093 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7094 					   &fusion->ioc_init_request_phys,
7095 					   GFP_KERNEL);
7096 
7097 		if (!fusion->ioc_init_request) {
7098 			dev_err(&pdev->dev,
7099 				"Failed to allocate ioc init request buffer\n");
7100 			return -ENOMEM;
7101 		}
7102 
7103 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7104 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7105 				&instance->snapdump_prop_h, GFP_KERNEL);
7106 
7107 		if (!instance->snapdump_prop)
7108 			dev_err(&pdev->dev,
7109 				"Failed to allocate snapdump properties buffer\n");
7110 
7111 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7112 							HOST_DEVICE_LIST_SZ,
7113 							&instance->host_device_list_buf_h,
7114 							GFP_KERNEL);
7115 
7116 		if (!instance->host_device_list_buf) {
7117 			dev_err(&pdev->dev,
7118 				"Failed to allocate targetid list buffer\n");
7119 			return -ENOMEM;
7120 		}
7121 
7122 	}
7123 
7124 	instance->pd_list_buf =
7125 		dma_alloc_coherent(&pdev->dev,
7126 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7127 				     &instance->pd_list_buf_h, GFP_KERNEL);
7128 
7129 	if (!instance->pd_list_buf) {
7130 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7131 		return -ENOMEM;
7132 	}
7133 
7134 	instance->ctrl_info_buf =
7135 		dma_alloc_coherent(&pdev->dev,
7136 				     sizeof(struct megasas_ctrl_info),
7137 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7138 
7139 	if (!instance->ctrl_info_buf) {
7140 		dev_err(&pdev->dev,
7141 			"Failed to allocate controller info buffer\n");
7142 		return -ENOMEM;
7143 	}
7144 
7145 	instance->ld_list_buf =
7146 		dma_alloc_coherent(&pdev->dev,
7147 				     sizeof(struct MR_LD_LIST),
7148 				     &instance->ld_list_buf_h, GFP_KERNEL);
7149 
7150 	if (!instance->ld_list_buf) {
7151 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7152 		return -ENOMEM;
7153 	}
7154 
7155 	instance->ld_targetid_list_buf =
7156 		dma_alloc_coherent(&pdev->dev,
7157 				sizeof(struct MR_LD_TARGETID_LIST),
7158 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7159 
7160 	if (!instance->ld_targetid_list_buf) {
7161 		dev_err(&pdev->dev,
7162 			"Failed to allocate LD targetid list buffer\n");
7163 		return -ENOMEM;
7164 	}
7165 
7166 	if (!reset_devices) {
7167 		instance->system_info_buf =
7168 			dma_alloc_coherent(&pdev->dev,
7169 					sizeof(struct MR_DRV_SYSTEM_INFO),
7170 					&instance->system_info_h, GFP_KERNEL);
7171 		instance->pd_info =
7172 			dma_alloc_coherent(&pdev->dev,
7173 					sizeof(struct MR_PD_INFO),
7174 					&instance->pd_info_h, GFP_KERNEL);
7175 		instance->tgt_prop =
7176 			dma_alloc_coherent(&pdev->dev,
7177 					sizeof(struct MR_TARGET_PROPERTIES),
7178 					&instance->tgt_prop_h, GFP_KERNEL);
7179 		instance->crash_dump_buf =
7180 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7181 					&instance->crash_dump_h, GFP_KERNEL);
7182 
7183 		if (!instance->system_info_buf)
7184 			dev_err(&instance->pdev->dev,
7185 				"Failed to allocate system info buffer\n");
7186 
7187 		if (!instance->pd_info)
7188 			dev_err(&instance->pdev->dev,
7189 				"Failed to allocate pd_info buffer\n");
7190 
7191 		if (!instance->tgt_prop)
7192 			dev_err(&instance->pdev->dev,
7193 				"Failed to allocate tgt_prop buffer\n");
7194 
7195 		if (!instance->crash_dump_buf)
7196 			dev_err(&instance->pdev->dev,
7197 				"Failed to allocate crash dump buffer\n");
7198 	}
7199 
7200 	return 0;
7201 }
7202 
7203 /*
7204  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7205  *					during driver load time
7206  *
7207  * @instance -				Adapter soft instance
7208  *
7209  */
7210 static inline
7211 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7212 {
7213 	struct pci_dev *pdev = instance->pdev;
7214 	struct fusion_context *fusion = instance->ctrl_context;
7215 
7216 	if (instance->evt_detail)
7217 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7218 				    instance->evt_detail,
7219 				    instance->evt_detail_h);
7220 
7221 	if (fusion && fusion->ioc_init_request)
7222 		dma_free_coherent(&pdev->dev,
7223 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7224 				  fusion->ioc_init_request,
7225 				  fusion->ioc_init_request_phys);
7226 
7227 	if (instance->pd_list_buf)
7228 		dma_free_coherent(&pdev->dev,
7229 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7230 				    instance->pd_list_buf,
7231 				    instance->pd_list_buf_h);
7232 
7233 	if (instance->ld_list_buf)
7234 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7235 				    instance->ld_list_buf,
7236 				    instance->ld_list_buf_h);
7237 
7238 	if (instance->ld_targetid_list_buf)
7239 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7240 				    instance->ld_targetid_list_buf,
7241 				    instance->ld_targetid_list_buf_h);
7242 
7243 	if (instance->ctrl_info_buf)
7244 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7245 				    instance->ctrl_info_buf,
7246 				    instance->ctrl_info_buf_h);
7247 
7248 	if (instance->system_info_buf)
7249 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7250 				    instance->system_info_buf,
7251 				    instance->system_info_h);
7252 
7253 	if (instance->pd_info)
7254 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7255 				    instance->pd_info, instance->pd_info_h);
7256 
7257 	if (instance->tgt_prop)
7258 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7259 				    instance->tgt_prop, instance->tgt_prop_h);
7260 
7261 	if (instance->crash_dump_buf)
7262 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7263 				    instance->crash_dump_buf,
7264 				    instance->crash_dump_h);
7265 
7266 	if (instance->snapdump_prop)
7267 		dma_free_coherent(&pdev->dev,
7268 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7269 				  instance->snapdump_prop,
7270 				  instance->snapdump_prop_h);
7271 
7272 	if (instance->host_device_list_buf)
7273 		dma_free_coherent(&pdev->dev,
7274 				  HOST_DEVICE_LIST_SZ,
7275 				  instance->host_device_list_buf,
7276 				  instance->host_device_list_buf_h);
7277 
7278 }
7279 
7280 /*
7281  * megasas_init_ctrl_params -		Initialize controller's instance
7282  *					parameters before FW init
7283  * @instance -				Adapter soft instance
7284  * @return -				void
7285  */
7286 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7287 {
7288 	instance->fw_crash_state = UNAVAILABLE;
7289 
7290 	megasas_poll_wait_aen = 0;
7291 	instance->issuepend_done = 1;
7292 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7293 
7294 	/*
7295 	 * Initialize locks and queues
7296 	 */
7297 	INIT_LIST_HEAD(&instance->cmd_pool);
7298 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7299 
7300 	atomic_set(&instance->fw_outstanding, 0);
7301 	atomic64_set(&instance->total_io_count, 0);
7302 
7303 	init_waitqueue_head(&instance->int_cmd_wait_q);
7304 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7305 
7306 	spin_lock_init(&instance->crashdump_lock);
7307 	spin_lock_init(&instance->mfi_pool_lock);
7308 	spin_lock_init(&instance->hba_lock);
7309 	spin_lock_init(&instance->stream_lock);
7310 	spin_lock_init(&instance->completion_lock);
7311 
7312 	mutex_init(&instance->reset_mutex);
7313 
7314 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7315 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7316 		instance->flag_ieee = 1;
7317 
7318 	megasas_dbg_lvl = 0;
7319 	instance->flag = 0;
7320 	instance->unload = 1;
7321 	instance->last_time = 0;
7322 	instance->disableOnlineCtrlReset = 1;
7323 	instance->UnevenSpanSupport = 0;
7324 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7325 	instance->msix_load_balance = false;
7326 
7327 	if (instance->adapter_type != MFI_SERIES)
7328 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7329 	else
7330 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7331 }
7332 
7333 /**
7334  * megasas_probe_one -	PCI hotplug entry point
7335  * @pdev:		PCI device structure
7336  * @id:			PCI ids of supported hotplugged adapter
7337  */
7338 static int megasas_probe_one(struct pci_dev *pdev,
7339 			     const struct pci_device_id *id)
7340 {
7341 	int rval, pos;
7342 	struct Scsi_Host *host;
7343 	struct megasas_instance *instance;
7344 	u16 control = 0;
7345 
7346 	switch (pdev->device) {
7347 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7348 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7349 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7350 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7351 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7352 		return 1;
7353 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7354 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7355 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7356 		break;
7357 	}
7358 
7359 	/* Reset MSI-X in the kdump kernel */
7360 	if (reset_devices) {
7361 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7362 		if (pos) {
7363 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7364 					     &control);
7365 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7366 				dev_info(&pdev->dev, "resetting MSI-X\n");
7367 				pci_write_config_word(pdev,
7368 						      pos + PCI_MSIX_FLAGS,
7369 						      control &
7370 						      ~PCI_MSIX_FLAGS_ENABLE);
7371 			}
7372 		}
7373 	}
7374 
7375 	/*
7376 	 * PCI prepping: enable device, set bus mastering and DMA mask
7377 	 */
7378 	rval = pci_enable_device_mem(pdev);
7379 
7380 	if (rval) {
7381 		return rval;
7382 	}
7383 
7384 	pci_set_master(pdev);
7385 
7386 	host = scsi_host_alloc(&megasas_template,
7387 			       sizeof(struct megasas_instance));
7388 
7389 	if (!host) {
7390 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7391 		goto fail_alloc_instance;
7392 	}
7393 
7394 	instance = (struct megasas_instance *)host->hostdata;
7395 	memset(instance, 0, sizeof(*instance));
7396 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7397 
7398 	/*
7399 	 * Initialize PCI related and misc parameters
7400 	 */
7401 	instance->pdev = pdev;
7402 	instance->host = host;
7403 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7404 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7405 
7406 	megasas_set_adapter_type(instance);
7407 
7408 	/*
7409 	 * Initialize MFI Firmware
7410 	 */
7411 	if (megasas_init_fw(instance))
7412 		goto fail_init_mfi;
7413 
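	/*
	 * When running as an SR-IOV VF (requestorId is set), allocate a DMA
	 * buffer used to query the LD/VF affiliation from firmware.
	 */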
7414 	if (instance->requestorId) {
7415 		if (instance->PlasmaFW111) {
7416 			instance->vf_affiliation_111 =
7417 				dma_alloc_coherent(&pdev->dev,
7418 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7419 					&instance->vf_affiliation_111_h,
7420 					GFP_KERNEL);
7421 			if (!instance->vf_affiliation_111)
7422 				dev_warn(&pdev->dev, "Can't allocate "
7423 				       "memory for VF affiliation buffer\n");
7424 		} else {
7425 			instance->vf_affiliation =
7426 				dma_alloc_coherent(&pdev->dev,
7427 					(MAX_LOGICAL_DRIVES + 1) *
7428 					sizeof(struct MR_LD_VF_AFFILIATION),
7429 					&instance->vf_affiliation_h,
7430 					GFP_KERNEL);
7431 			if (!instance->vf_affiliation)
7432 				dev_warn(&pdev->dev, "Can't allocate "
7433 				       "memory for VF affiliation buffer\n");
7434 		}
7435 	}
7436 
7437 	/*
7438 	 * Store instance in PCI softstate
7439 	 */
7440 	pci_set_drvdata(pdev, instance);
7441 
7442 	/*
7443 	 * Add this controller to megasas_mgmt_info structure so that it
7444 	 * can be exported to management applications
7445 	 */
7446 	megasas_mgmt_info.count++;
7447 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7448 	megasas_mgmt_info.max_index++;
7449 
7450 	/*
7451 	 * Register with SCSI mid-layer
7452 	 */
7453 	if (megasas_io_attach(instance))
7454 		goto fail_io_attach;
7455 
7456 	instance->unload = 0;
7457 	/*
7458 	 * Trigger SCSI to scan our drives
7459 	 */
7460 	if (!instance->enable_fw_dev_list ||
7461 	    (instance->host_device_list_buf->count > 0))
7462 		scsi_scan_host(host);
7463 
7464 	/*
7465 	 * Initiate AEN (Asynchronous Event Notification)
7466 	 */
7467 	if (megasas_start_aen(instance)) {
7468 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7469 		goto fail_start_aen;
7470 	}
7471 
7472 	megasas_setup_debugfs(instance);
7473 
7474 	/* Get current SR-IOV LD/VF affiliation */
7475 	if (instance->requestorId)
7476 		megasas_get_ld_vf_affiliation(instance, 1);
7477 
7478 	return 0;
7479 
7480 fail_start_aen:
7481 fail_io_attach:
7482 	megasas_mgmt_info.count--;
7483 	megasas_mgmt_info.max_index--;
7484 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7485 
7486 	instance->instancet->disable_intr(instance);
7487 	megasas_destroy_irqs(instance);
7488 
7489 	if (instance->adapter_type != MFI_SERIES)
7490 		megasas_release_fusion(instance);
7491 	else
7492 		megasas_release_mfi(instance);
7493 	if (instance->msix_vectors)
7494 		pci_free_irq_vectors(instance->pdev);
7495 fail_init_mfi:
7496 	scsi_host_put(host);
7497 fail_alloc_instance:
7498 	pci_disable_device(pdev);
7499 
7500 	return -ENODEV;
7501 }
7502 
7503 /**
7504  * megasas_flush_cache -	Requests FW to flush all its caches
7505  * @instance:			Adapter soft state
7506  */
7507 static void megasas_flush_cache(struct megasas_instance *instance)
7508 {
7509 	struct megasas_cmd *cmd;
7510 	struct megasas_dcmd_frame *dcmd;
7511 
7512 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7513 		return;
7514 
7515 	cmd = megasas_get_cmd(instance);
7516 
7517 	if (!cmd)
7518 		return;
7519 
7520 	dcmd = &cmd->frame->dcmd;
7521 
7522 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7523 
7524 	dcmd->cmd = MFI_CMD_DCMD;
7525 	dcmd->cmd_status = 0x0;
7526 	dcmd->sge_count = 0;
7527 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7528 	dcmd->timeout = 0;
7529 	dcmd->pad_0 = 0;
7530 	dcmd->data_xfer_len = 0;
7531 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
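	/* Flush both the controller cache and the disk drive caches */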
7532 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7533 
7534 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7535 			!= DCMD_SUCCESS) {
7536 		dev_err(&instance->pdev->dev,
7537 			"return from %s %d\n", __func__, __LINE__);
7538 		return;
7539 	}
7540 
7541 	megasas_return_cmd(instance, cmd);
7542 }
7543 
7544 /**
7545  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7546  * @instance:				Adapter soft state
7547  * @opcode:				Shutdown/Hibernate
7548  */
7549 static void megasas_shutdown_controller(struct megasas_instance *instance,
7550 					u32 opcode)
7551 {
7552 	struct megasas_cmd *cmd;
7553 	struct megasas_dcmd_frame *dcmd;
7554 
7555 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7556 		return;
7557 
7558 	cmd = megasas_get_cmd(instance);
7559 
7560 	if (!cmd)
7561 		return;
7562 
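	/*
	 * Abort any outstanding AEN, map update and JBOD sequence number
	 * commands before asking the FW to shut down.
	 */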
7563 	if (instance->aen_cmd)
7564 		megasas_issue_blocked_abort_cmd(instance,
7565 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7566 	if (instance->map_update_cmd)
7567 		megasas_issue_blocked_abort_cmd(instance,
7568 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7569 	if (instance->jbod_seq_cmd)
7570 		megasas_issue_blocked_abort_cmd(instance,
7571 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7572 
7573 	dcmd = &cmd->frame->dcmd;
7574 
7575 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7576 
7577 	dcmd->cmd = MFI_CMD_DCMD;
7578 	dcmd->cmd_status = 0x0;
7579 	dcmd->sge_count = 0;
7580 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7581 	dcmd->timeout = 0;
7582 	dcmd->pad_0 = 0;
7583 	dcmd->data_xfer_len = 0;
7584 	dcmd->opcode = cpu_to_le32(opcode);
7585 
7586 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7587 			!= DCMD_SUCCESS) {
7588 		dev_err(&instance->pdev->dev,
7589 			"return from %s %d\n", __func__, __LINE__);
7590 		return;
7591 	}
7592 
7593 	megasas_return_cmd(instance, cmd);
7594 }
7595 
7596 /**
7597  * megasas_suspend -	driver suspend entry point
7598  * @dev:		Device structure
7599  */
7600 static int __maybe_unused
7601 megasas_suspend(struct device *dev)
7602 {
7603 	struct megasas_instance *instance;
7604 
7605 	instance = dev_get_drvdata(dev);
7606 
7607 	if (!instance)
7608 		return 0;
7609 
7610 	instance->unload = 1;
7611 
7612 	dev_info(dev, "%s is called\n", __func__);
7613 
7614 	/* Shutdown SR-IOV heartbeat timer */
7615 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7616 		del_timer_sync(&instance->sriov_heartbeat_timer);
7617 
7618 	/* Stop the FW fault detection watchdog */
7619 	if (instance->adapter_type != MFI_SERIES)
7620 		megasas_fusion_stop_watchdog(instance);
7621 
7622 	megasas_flush_cache(instance);
7623 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7624 
7625 	/* Cancel the delayed hotplug work if it is still queued */
7626 	if (instance->ev != NULL) {
7627 		struct megasas_aen_event *ev = instance->ev;
7628 		cancel_delayed_work_sync(&ev->hotplug_work);
7629 		instance->ev = NULL;
7630 	}
7631 
7632 	tasklet_kill(&instance->isr_tasklet);
7633 
7634 	pci_set_drvdata(instance->pdev, instance);
7635 	instance->instancet->disable_intr(instance);
7636 
7637 	megasas_destroy_irqs(instance);
7638 
7639 	if (instance->msix_vectors)
7640 		pci_free_irq_vectors(instance->pdev);
7641 
7642 	return 0;
7643 }
7644 
7645 /**
7646  * megasas_resume-      driver resume entry point
7647  * @dev:		Device structure
7648  */
7649 static int __maybe_unused
7650 megasas_resume(struct device *dev)
7651 {
7652 	int rval;
7653 	struct Scsi_Host *host;
7654 	struct megasas_instance *instance;
7655 	u32 status_reg;
7656 
7657 	instance = dev_get_drvdata(dev);
7658 
7659 	if (!instance)
7660 		return 0;
7661 
7662 	host = instance->host;
7663 
7664 	dev_info(dev, "%s is called\n", __func__);
7665 
7666 	/*
7667 	 * We expect the FW state to be READY
7668 	 */
7669 
7670 	if (megasas_transition_to_ready(instance, 0)) {
7671 		dev_info(&instance->pdev->dev,
7672 			 "Failed to transition controller to ready from %s!\n",
7673 			 __func__);
7674 		if (instance->adapter_type != MFI_SERIES) {
7675 			status_reg =
7676 				instance->instancet->read_fw_status_reg(instance);
7677 			if (!(status_reg & MFI_RESET_ADAPTER) ||
7678 				((megasas_adp_reset_wait_for_ready
7679 				(instance, true, 0)) == FAILED))
7680 				goto fail_ready_state;
7681 		} else {
7682 			atomic_set(&instance->fw_reset_no_pci_access, 1);
7683 			instance->instancet->adp_reset
7684 				(instance, instance->reg_set);
7685 			atomic_set(&instance->fw_reset_no_pci_access, 0);
7686 
7687 			/* wait about 30 seconds before retrying */
7688 			ssleep(30);
7689 
7690 			if (megasas_transition_to_ready(instance, 0))
7691 				goto fail_ready_state;
7692 		}
7693 
7694 		dev_info(&instance->pdev->dev,
7695 			 "FW restarted successfully from %s!\n",
7696 			 __func__);
7697 	}
7698 	if (megasas_set_dma_mask(instance))
7699 		goto fail_set_dma_mask;
7700 
7701 	/*
7702 	 * Initialize MFI Firmware
7703 	 */
7704 
7705 	atomic_set(&instance->fw_outstanding, 0);
7706 	atomic_set(&instance->ldio_outstanding, 0);
7707 
7708 	/* Now re-enable MSI-X */
7709 	if (instance->msix_vectors)
7710 		megasas_alloc_irq_vectors(instance);
7711 
7712 	if (!instance->msix_vectors) {
7713 		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7714 					     PCI_IRQ_LEGACY);
7715 		if (rval < 0)
7716 			goto fail_reenable_msix;
7717 	}
7718 
7719 	megasas_setup_reply_map(instance);
7720 
7721 	if (instance->adapter_type != MFI_SERIES) {
7722 		megasas_reset_reply_desc(instance);
7723 		if (megasas_ioc_init_fusion(instance)) {
7724 			megasas_free_cmds(instance);
7725 			megasas_free_cmds_fusion(instance);
7726 			goto fail_init_mfi;
7727 		}
7728 		if (!megasas_get_map_info(instance))
7729 			megasas_sync_map_info(instance);
7730 	} else {
7731 		*instance->producer = 0;
7732 		*instance->consumer = 0;
7733 		if (megasas_issue_init_mfi(instance))
7734 			goto fail_init_mfi;
7735 	}
7736 
7737 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7738 		goto fail_init_mfi;
7739 
7740 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7741 		     (unsigned long)instance);
7742 
7743 	if (instance->msix_vectors ?
7744 			megasas_setup_irqs_msix(instance, 0) :
7745 			megasas_setup_irqs_ioapic(instance))
7746 		goto fail_init_mfi;
7747 
7748 	if (instance->adapter_type != MFI_SERIES)
7749 		megasas_setup_irq_poll(instance);
7750 
7751 	/* Re-launch SR-IOV heartbeat timer */
7752 	if (instance->requestorId) {
7753 		if (!megasas_sriov_start_heartbeat(instance, 0))
7754 			megasas_start_timer(instance);
7755 		else {
7756 			instance->skip_heartbeat_timer_del = 1;
7757 			goto fail_init_mfi;
7758 		}
7759 	}
7760 
7761 	instance->instancet->enable_intr(instance);
7762 	megasas_setup_jbod_map(instance);
7763 	instance->unload = 0;
7764 
7765 	/*
7766 	 * Initiate AEN (Asynchronous Event Notification)
7767 	 */
7768 	if (megasas_start_aen(instance))
7769 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7770 
7771 	/* Re-launch FW fault watchdog */
7772 	if (instance->adapter_type != MFI_SERIES)
7773 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7774 			goto fail_start_watchdog;
7775 
7776 	return 0;
7777 
7778 fail_start_watchdog:
7779 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7780 		del_timer_sync(&instance->sriov_heartbeat_timer);
7781 fail_init_mfi:
7782 	megasas_free_ctrl_dma_buffers(instance);
7783 	megasas_free_ctrl_mem(instance);
7784 	scsi_host_put(host);
7785 
7786 fail_reenable_msix:
7787 fail_set_dma_mask:
7788 fail_ready_state:
7789 
7790 	return -ENODEV;
7791 }
7792 
7793 static inline int
7794 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7795 {
7796 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7797 	int i;
7798 	u8 adp_state;
7799 
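	/*
	 * Poll the adapter recovery state once per second, for at most
	 * 2 * MEGASAS_RESET_WAIT_TIME seconds.
	 */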
7800 	for (i = 0; i < wait_time; i++) {
7801 		adp_state = atomic_read(&instance->adprecovery);
7802 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7803 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7804 			break;
7805 
7806 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7807 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7808 
7809 		msleep(1000);
7810 	}
7811 
7812 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7813 		dev_info(&instance->pdev->dev,
7814 			 "%s HBA failed to become operational, adp_state %d\n",
7815 			 __func__, adp_state);
7816 		return 1;
7817 	}
7818 
7819 	return 0;
7820 }
7821 
7822 /**
7823  * megasas_detach_one -	PCI hot-unplug entry point
7824  * @pdev:		PCI device structure
7825  */
7826 static void megasas_detach_one(struct pci_dev *pdev)
7827 {
7828 	int i;
7829 	struct Scsi_Host *host;
7830 	struct megasas_instance *instance;
7831 	struct fusion_context *fusion;
7832 	u32 pd_seq_map_sz;
7833 
7834 	instance = pci_get_drvdata(pdev);
7835 
7836 	if (!instance)
7837 		return;
7838 
7839 	host = instance->host;
7840 	fusion = instance->ctrl_context;
7841 
7842 	/* Shutdown SR-IOV heartbeat timer */
7843 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7844 		del_timer_sync(&instance->sriov_heartbeat_timer);
7845 
7846 	/* Stop the FW fault detection watchdog */
7847 	if (instance->adapter_type != MFI_SERIES)
7848 		megasas_fusion_stop_watchdog(instance);
7849 
7850 	if (instance->fw_crash_state != UNAVAILABLE)
7851 		megasas_free_host_crash_buffer(instance);
7852 	scsi_remove_host(instance->host);
7853 	instance->unload = 1;
7854 
7855 	if (megasas_wait_for_adapter_operational(instance))
7856 		goto skip_firing_dcmds;
7857 
7858 	megasas_flush_cache(instance);
7859 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7860 
7861 skip_firing_dcmds:
7862 	/* Cancel the delayed hotplug work if it is still queued */
7863 	if (instance->ev != NULL) {
7864 		struct megasas_aen_event *ev = instance->ev;
7865 		cancel_delayed_work_sync(&ev->hotplug_work);
7866 		instance->ev = NULL;
7867 	}
7868 
7869 	/* cancel all wait events */
7870 	wake_up_all(&instance->int_cmd_wait_q);
7871 
7872 	tasklet_kill(&instance->isr_tasklet);
7873 
7874 	/*
7875 	 * Take the instance off the instance array. Note that we will not
7876 	 * decrement max_index; we let this array remain a sparse array.
7877 	 */
7878 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7879 		if (megasas_mgmt_info.instance[i] == instance) {
7880 			megasas_mgmt_info.count--;
7881 			megasas_mgmt_info.instance[i] = NULL;
7882 
7883 			break;
7884 		}
7885 	}
7886 
7887 	instance->instancet->disable_intr(instance);
7888 
7889 	megasas_destroy_irqs(instance);
7890 
7891 	if (instance->msix_vectors)
7892 		pci_free_irq_vectors(instance->pdev);
7893 
7894 	if (instance->adapter_type >= VENTURA_SERIES) {
7895 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7896 			kfree(fusion->stream_detect_by_ld[i]);
7897 		kfree(fusion->stream_detect_by_ld);
7898 		fusion->stream_detect_by_ld = NULL;
7899 	}
7900 
7901 
7902 	if (instance->adapter_type != MFI_SERIES) {
7903 		megasas_release_fusion(instance);
7904 		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7905 				(sizeof(struct MR_PD_CFG_SEQ) *
7906 				 (MAX_PHYSICAL_DEVICES - 1));
7907 		for (i = 0; i < 2 ; i++) {
7908 			if (fusion->ld_map[i])
7909 				dma_free_coherent(&instance->pdev->dev,
7910 						  fusion->max_map_sz,
7911 						  fusion->ld_map[i],
7912 						  fusion->ld_map_phys[i]);
7913 			if (fusion->ld_drv_map[i]) {
7914 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7915 					vfree(fusion->ld_drv_map[i]);
7916 				else
7917 					free_pages((ulong)fusion->ld_drv_map[i],
7918 						   fusion->drv_map_pages);
7919 			}
7920 
7921 			if (fusion->pd_seq_sync[i])
7922 				dma_free_coherent(&instance->pdev->dev,
7923 					pd_seq_map_sz,
7924 					fusion->pd_seq_sync[i],
7925 					fusion->pd_seq_phys[i]);
7926 		}
7927 	} else {
7928 		megasas_release_mfi(instance);
7929 	}
7930 
7931 	if (instance->vf_affiliation)
7932 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7933 				    sizeof(struct MR_LD_VF_AFFILIATION),
7934 				    instance->vf_affiliation,
7935 				    instance->vf_affiliation_h);
7936 
7937 	if (instance->vf_affiliation_111)
7938 		dma_free_coherent(&pdev->dev,
7939 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7940 				    instance->vf_affiliation_111,
7941 				    instance->vf_affiliation_111_h);
7942 
7943 	if (instance->hb_host_mem)
7944 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7945 				    instance->hb_host_mem,
7946 				    instance->hb_host_mem_h);
7947 
7948 	megasas_free_ctrl_dma_buffers(instance);
7949 
7950 	megasas_free_ctrl_mem(instance);
7951 
7952 	megasas_destroy_debugfs(instance);
7953 
7954 	scsi_host_put(host);
7955 
7956 	pci_disable_device(pdev);
7957 }
7958 
7959 /**
7960  * megasas_shutdown -	Shutdown entry point
7961  * @pdev:		PCI device structure
7962  */
7963 static void megasas_shutdown(struct pci_dev *pdev)
7964 {
7965 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7966 
7967 	if (!instance)
7968 		return;
7969 
7970 	instance->unload = 1;
7971 
7972 	if (megasas_wait_for_adapter_operational(instance))
7973 		goto skip_firing_dcmds;
7974 
7975 	megasas_flush_cache(instance);
7976 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7977 
7978 skip_firing_dcmds:
7979 	instance->instancet->disable_intr(instance);
7980 	megasas_destroy_irqs(instance);
7981 
7982 	if (instance->msix_vectors)
7983 		pci_free_irq_vectors(instance->pdev);
7984 }
7985 
7986 /*
7987  * megasas_mgmt_open -	char node "open" entry point
7988  * @inode:	char node inode
7989  * @filep:	char node file
7990  */
7991 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7992 {
7993 	/*
7994 	 * Allow only those users with admin rights
7995 	 */
7996 	if (!capable(CAP_SYS_ADMIN))
7997 		return -EACCES;
7998 
7999 	return 0;
8000 }
8001 
8002 /*
8003  * megasas_mgmt_fasync -	Async notifier registration from applications
8004  * @fd:		char node file descriptor number
8005  * @filep:	char node file
8006  * @mode:	notifier on/off
8007  *
8008  * This function adds the calling process to a driver global queue. When an
8009  * event occurs, SIGIO will be sent to all processes in this queue.
8010  */
8011 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
8012 {
8013 	int rc;
8014 
8015 	mutex_lock(&megasas_async_queue_mutex);
8016 
8017 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
8018 
8019 	mutex_unlock(&megasas_async_queue_mutex);
8020 
8021 	if (rc >= 0) {
8022 		/* Used as a sanity check when an ioctl is issued later */
8023 		filep->private_data = filep;
8024 		return 0;
8025 	}
8026 
8027 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
8028 
8029 	return rc;
8030 }
8031 
8032 /*
8033  * megasas_mgmt_poll -  char node "poll" entry point
8034  * @filep:	char node file
8035  * @wait:	Events to poll for
8036  */
8037 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8038 {
8039 	__poll_t mask;
8040 	unsigned long flags;
8041 
8042 	poll_wait(file, &megasas_poll_wait, wait);
8043 	spin_lock_irqsave(&poll_aen_lock, flags);
8044 	if (megasas_poll_wait_aen)
8045 		mask = (EPOLLIN | EPOLLRDNORM);
8046 	else
8047 		mask = 0;
8048 	megasas_poll_wait_aen = 0;
8049 	spin_unlock_irqrestore(&poll_aen_lock, flags);
8050 	return mask;
8051 }
8052 
8053 /*
8054  * megasas_set_crash_dump_params_ioctl:
8055  *		Send CRASH_DUMP_MODE DCMD to all controllers
8056  * @cmd:	MFI command frame
8057  */
8058 
8059 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8060 {
8061 	struct megasas_instance *local_instance;
8062 	int i, error = 0;
8063 	int crash_support;
8064 
8065 	crash_support = cmd->frame->dcmd.mbox.w[0];
8066 
8067 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8068 		local_instance = megasas_mgmt_info.instance[i];
8069 		if (local_instance && local_instance->crash_dump_drv_support) {
8070 			if ((atomic_read(&local_instance->adprecovery) ==
8071 				MEGASAS_HBA_OPERATIONAL) &&
8072 				!megasas_set_crash_dump_params(local_instance,
8073 					crash_support)) {
8074 				local_instance->crash_dump_app_support =
8075 					crash_support;
8076 				dev_info(&local_instance->pdev->dev,
8077 					"Application firmware crash "
8078 					"dump mode set success\n");
8079 				error = 0;
8080 			} else {
8081 				dev_info(&local_instance->pdev->dev,
8082 					"Application firmware crash "
8083 					"dump mode set failed\n");
8084 				error = -1;
8085 			}
8086 		}
8087 	}
8088 	return error;
8089 }
8090 
8091 /**
8092  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8093  * @instance:			Adapter soft state
8094  * @user_ioc:			User's ioctl packet
8095  * @ioc:			ioctl packet
8096  */
8097 static int
8098 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8099 		      struct megasas_iocpacket __user * user_ioc,
8100 		      struct megasas_iocpacket *ioc)
8101 {
8102 	struct megasas_sge64 *kern_sge64 = NULL;
8103 	struct megasas_sge32 *kern_sge32 = NULL;
8104 	struct megasas_cmd *cmd;
8105 	void *kbuff_arr[MAX_IOCTL_SGE];
8106 	dma_addr_t buf_handle = 0;
8107 	int error = 0, i;
8108 	void *sense = NULL;
8109 	dma_addr_t sense_handle;
8110 	void *sense_ptr;
8111 	u32 opcode = 0;
8112 	int ret = DCMD_SUCCESS;
8113 
8114 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8115 
8116 	if (ioc->sge_count > MAX_IOCTL_SGE) {
8117 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
8118 		       ioc->sge_count, MAX_IOCTL_SGE);
8119 		return -EINVAL;
8120 	}
8121 
8122 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8123 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8124 	    !instance->support_nvme_passthru) ||
8125 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8126 	    !instance->support_pci_lane_margining)) {
8127 		dev_err(&instance->pdev->dev,
8128 			"Received invalid ioctl command 0x%x\n",
8129 			ioc->frame.hdr.cmd);
8130 		return -ENOTSUPP;
8131 	}
8132 
8133 	cmd = megasas_get_cmd(instance);
8134 	if (!cmd) {
8135 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8136 		return -ENOMEM;
8137 	}
8138 
8139 	/*
8140 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8141 	 * frames into our cmd's frames. cmd->frame's context will get
8142 	 * overwritten when we copy from user's frames. So set that value
8143 	 * alone separately
8144 	 */
8145 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8146 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8147 	cmd->frame->hdr.pad_0 = 0;
8148 
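	/*
	 * Management frames use plain (non-IEEE) SGLs; select the 32 bit or
	 * 64 bit SGL/sense address format based on the coherent DMA mask.
	 */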
8149 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8150 
8151 	if (instance->consistent_mask_64bit)
8152 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8153 				       MFI_FRAME_SENSE64));
8154 	else
8155 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8156 					       MFI_FRAME_SENSE64));
8157 
8158 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8159 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8160 
8161 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8162 		mutex_lock(&instance->reset_mutex);
8163 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8164 			megasas_return_cmd(instance, cmd);
8165 			mutex_unlock(&instance->reset_mutex);
8166 			return -1;
8167 		}
8168 		mutex_unlock(&instance->reset_mutex);
8169 	}
8170 
8171 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8172 		error = megasas_set_crash_dump_params_ioctl(cmd);
8173 		megasas_return_cmd(instance, cmd);
8174 		return error;
8175 	}
8176 
8177 	/*
8178 	 * The management interface between applications and the fw uses
8179 	 * MFI frames. E.g, RAID configuration changes, LD property changes
8180 	 * etc. are accomplished through different kinds of MFI frames. The
8181 	 * driver needs to care only about substituting user buffers with
8182 	 * kernel buffers in SGLs. The location of SGL is embedded in the
8183 	 * struct iocpacket itself.
8184 	 */
8185 	if (instance->consistent_mask_64bit)
8186 		kern_sge64 = (struct megasas_sge64 *)
8187 			((unsigned long)cmd->frame + ioc->sgl_off);
8188 	else
8189 		kern_sge32 = (struct megasas_sge32 *)
8190 			((unsigned long)cmd->frame + ioc->sgl_off);
8191 
8192 	/*
8193 	 * For each user buffer, create a mirror buffer and copy in
8194 	 */
8195 	for (i = 0; i < ioc->sge_count; i++) {
8196 		if (!ioc->sgl[i].iov_len)
8197 			continue;
8198 
8199 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8200 						    ioc->sgl[i].iov_len,
8201 						    &buf_handle, GFP_KERNEL);
8202 		if (!kbuff_arr[i]) {
8203 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8204 			       "kernel SGL buffer for IOCTL\n");
8205 			error = -ENOMEM;
8206 			goto out;
8207 		}
8208 
8209 		/*
8210 		 * We don't change the dma_coherent_mask, so
8211 		 * dma_alloc_coherent only returns 32bit addresses
8212 		 */
8213 		if (instance->consistent_mask_64bit) {
8214 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8215 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8216 		} else {
8217 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8218 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8219 		}
8220 
8221 		/*
8222 		 * We created a kernel buffer corresponding to the
8223 		 * user buffer. Now copy in from the user buffer
8224 		 */
8225 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8226 				   (u32) (ioc->sgl[i].iov_len))) {
8227 			error = -EFAULT;
8228 			goto out;
8229 		}
8230 	}
8231 
8232 	if (ioc->sense_len) {
8233 		/* make sure the pointer is part of the frame */
8234 		if (ioc->sense_off >
8235 		    (sizeof(union megasas_frame) - sizeof(__le64))) {
8236 			error = -EINVAL;
8237 			goto out;
8238 		}
8239 
8240 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8241 					     &sense_handle, GFP_KERNEL);
8242 		if (!sense) {
8243 			error = -ENOMEM;
8244 			goto out;
8245 		}
8246 
8247 		/* always store 64 bits regardless of addressing */
8248 		sense_ptr = (void *)cmd->frame + ioc->sense_off;
8249 		put_unaligned_le64(sense_handle, sense_ptr);
8250 	}
8251 
8252 	/*
8253 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8254 	 * cmd to the SCSI mid-layer
8255 	 */
8256 	cmd->sync_cmd = 1;
8257 
8258 	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8259 	switch (ret) {
8260 	case DCMD_INIT:
8261 	case DCMD_BUSY:
8262 		cmd->sync_cmd = 0;
8263 		dev_err(&instance->pdev->dev,
8264 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8265 			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8266 			 cmd->cmd_status_drv);
8267 		error = -EBUSY;
8268 		goto out;
8269 	}
8270 
8271 	cmd->sync_cmd = 0;
8272 
8273 	if (instance->unload == 1) {
8274 		dev_info(&instance->pdev->dev, "Driver unload is in progress; "
8275 			"not copying data back to the application\n");
8276 		goto out;
8277 	}
8278 	/*
8279 	 * copy out the kernel buffers to user buffers
8280 	 */
8281 	for (i = 0; i < ioc->sge_count; i++) {
8282 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8283 				 ioc->sgl[i].iov_len)) {
8284 			error = -EFAULT;
8285 			goto out;
8286 		}
8287 	}
8288 
8289 	/*
8290 	 * copy out the sense
8291 	 */
8292 	if (ioc->sense_len) {
8293 		void __user *uptr;
8294 		/*
8295 		 * sense_ptr points to the location that has the user
8296 		 * sense buffer address
8297 		 */
8298 		sense_ptr = (void *)ioc->frame.raw + ioc->sense_off;
8299 		if (in_compat_syscall())
8300 			uptr = compat_ptr(get_unaligned((compat_uptr_t *)
8301 							sense_ptr));
8302 		else
8303 			uptr = get_unaligned((void __user **)sense_ptr);
8304 
8305 		if (copy_to_user(uptr, sense, ioc->sense_len)) {
8306 			dev_err(&instance->pdev->dev,
8307 				"Failed to copy sense data out to user\n");
8308 			error = -EFAULT;
8309 			goto out;
8310 		}
8311 	}
8312 
8313 	/*
8314 	 * copy the status codes returned by the fw
8315 	 */
8316 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8317 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8318 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8319 		error = -EFAULT;
8320 	}
8321 
8322 out:
8323 	if (sense) {
8324 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8325 				    sense, sense_handle);
8326 	}
8327 
8328 	for (i = 0; i < ioc->sge_count; i++) {
8329 		if (kbuff_arr[i]) {
8330 			if (instance->consistent_mask_64bit)
8331 				dma_free_coherent(&instance->pdev->dev,
8332 					le32_to_cpu(kern_sge64[i].length),
8333 					kbuff_arr[i],
8334 					le64_to_cpu(kern_sge64[i].phys_addr));
8335 			else
8336 				dma_free_coherent(&instance->pdev->dev,
8337 					le32_to_cpu(kern_sge32[i].length),
8338 					kbuff_arr[i],
8339 					le32_to_cpu(kern_sge32[i].phys_addr));
8340 			kbuff_arr[i] = NULL;
8341 		}
8342 	}
8343 
8344 	megasas_return_cmd(instance, cmd);
8345 	return error;
8346 }
8347 
8348 static struct megasas_iocpacket *
8349 megasas_compat_iocpacket_get_user(void __user *arg)
8350 {
8351 	struct megasas_iocpacket *ioc;
8352 	struct compat_megasas_iocpacket __user *cioc = arg;
8353 	size_t size;
8354 	int err = -EFAULT;
8355 	int i;
8356 
8357 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
8358 	if (!ioc)
8359 		return ERR_PTR(-ENOMEM);
8360 	size = offsetof(struct megasas_iocpacket, frame) + sizeof(ioc->frame);
8361 	if (copy_from_user(ioc, arg, size))
8362 		goto out;
8363 
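	/*
	 * In the compat layout iov_base is a 32 bit pointer; widen each SGL
	 * entry into the native iocpacket.
	 */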
8364 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8365 		compat_uptr_t iov_base;
8366 
8367 		if (get_user(iov_base, &cioc->sgl[i].iov_base) ||
8368 		    get_user(ioc->sgl[i].iov_len, &cioc->sgl[i].iov_len))
8369 			goto out;
8370 
8371 		ioc->sgl[i].iov_base = compat_ptr(iov_base);
8372 	}
8373 
8374 	return ioc;
8375 out:
8376 	kfree(ioc);
8377 	return ERR_PTR(err);
8378 }
8379 
8380 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8381 {
8382 	struct megasas_iocpacket __user *user_ioc =
8383 	    (struct megasas_iocpacket __user *)arg;
8384 	struct megasas_iocpacket *ioc;
8385 	struct megasas_instance *instance;
8386 	int error;
8387 
8388 	if (in_compat_syscall())
8389 		ioc = megasas_compat_iocpacket_get_user(user_ioc);
8390 	else
8391 		ioc = memdup_user(user_ioc, sizeof(struct megasas_iocpacket));
8392 
8393 	if (IS_ERR(ioc))
8394 		return PTR_ERR(ioc);
8395 
8396 	instance = megasas_lookup_instance(ioc->host_no);
8397 	if (!instance) {
8398 		error = -ENODEV;
8399 		goto out_kfree_ioc;
8400 	}
8401 
8402 	/* Block ioctls in VF mode */
8403 	if (instance->requestorId && !allow_vf_ioctls) {
8404 		error = -ENODEV;
8405 		goto out_kfree_ioc;
8406 	}
8407 
8408 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8409 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8410 		error = -ENODEV;
8411 		goto out_kfree_ioc;
8412 	}
8413 
8414 	if (instance->unload == 1) {
8415 		error = -ENODEV;
8416 		goto out_kfree_ioc;
8417 	}
8418 
8419 	if (down_interruptible(&instance->ioctl_sem)) {
8420 		error = -ERESTARTSYS;
8421 		goto out_kfree_ioc;
8422 	}
8423 
8424 	if  (megasas_wait_for_adapter_operational(instance)) {
8425 		error = -ENODEV;
8426 		goto out_up;
8427 	}
8428 
8429 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8430 out_up:
8431 	up(&instance->ioctl_sem);
8432 
8433 out_kfree_ioc:
8434 	kfree(ioc);
8435 	return error;
8436 }
8437 
8438 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8439 {
8440 	struct megasas_instance *instance;
8441 	struct megasas_aen aen;
8442 	int error;
8443 
8444 	if (file->private_data != file) {
8445 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8446 		       "called first\n");
8447 		return -EINVAL;
8448 	}
8449 
8450 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8451 		return -EFAULT;
8452 
8453 	instance = megasas_lookup_instance(aen.host_no);
8454 
8455 	if (!instance)
8456 		return -ENODEV;
8457 
8458 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8459 		return -ENODEV;
8460 	}
8461 
8462 	if (instance->unload == 1) {
8463 		return -ENODEV;
8464 	}
8465 
8466 	if  (megasas_wait_for_adapter_operational(instance))
8467 		return -ENODEV;
8468 
8469 	mutex_lock(&instance->reset_mutex);
8470 	error = megasas_register_aen(instance, aen.seq_num,
8471 				     aen.class_locale_word);
8472 	mutex_unlock(&instance->reset_mutex);
8473 	return error;
8474 }
8475 
8476 /**
8477  * megasas_mgmt_ioctl -	char node ioctl entry point
8478  * @file:	char device file pointer
8479  * @cmd:	ioctl command
8480  * @arg:	ioctl command arguments address
8481  */
8482 static long
8483 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8484 {
8485 	switch (cmd) {
8486 	case MEGASAS_IOC_FIRMWARE:
8487 		return megasas_mgmt_ioctl_fw(file, arg);
8488 
8489 	case MEGASAS_IOC_GET_AEN:
8490 		return megasas_mgmt_ioctl_aen(file, arg);
8491 	}
8492 
8493 	return -ENOTTY;
8494 }
8495 
8496 #ifdef CONFIG_COMPAT
8497 static long
8498 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8499 			  unsigned long arg)
8500 {
8501 	switch (cmd) {
8502 	case MEGASAS_IOC_FIRMWARE32:
8503 		return megasas_mgmt_ioctl_fw(file, arg);
8504 	case MEGASAS_IOC_GET_AEN:
8505 		return megasas_mgmt_ioctl_aen(file, arg);
8506 	}
8507 
8508 	return -ENOTTY;
8509 }
8510 #endif
8511 
8512 /*
8513  * File operations structure for management interface
8514  */
8515 static const struct file_operations megasas_mgmt_fops = {
8516 	.owner = THIS_MODULE,
8517 	.open = megasas_mgmt_open,
8518 	.fasync = megasas_mgmt_fasync,
8519 	.unlocked_ioctl = megasas_mgmt_ioctl,
8520 	.poll = megasas_mgmt_poll,
8521 #ifdef CONFIG_COMPAT
8522 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8523 #endif
8524 	.llseek = noop_llseek,
8525 };
8526 
8527 static SIMPLE_DEV_PM_OPS(megasas_pm_ops, megasas_suspend, megasas_resume);
8528 
8529 /*
8530  * PCI hotplug support registration structure
8531  */
8532 static struct pci_driver megasas_pci_driver = {
8533 
8534 	.name = "megaraid_sas",
8535 	.id_table = megasas_pci_table,
8536 	.probe = megasas_probe_one,
8537 	.remove = megasas_detach_one,
8538 	.driver.pm = &megasas_pm_ops,
8539 	.shutdown = megasas_shutdown,
8540 };
8541 
8542 /*
8543  * Sysfs driver attributes
8544  */
8545 static ssize_t version_show(struct device_driver *dd, char *buf)
8546 {
8547 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8548 			MEGASAS_VERSION);
8549 }
8550 static DRIVER_ATTR_RO(version);
8551 
8552 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8553 {
8554 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8555 		MEGASAS_RELDATE);
8556 }
8557 static DRIVER_ATTR_RO(release_date);
8558 
8559 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8560 {
8561 	return sprintf(buf, "%u\n", support_poll_for_event);
8562 }
8563 static DRIVER_ATTR_RO(support_poll_for_event);
8564 
8565 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8566 {
8567 	return sprintf(buf, "%u\n", support_device_change);
8568 }
8569 static DRIVER_ATTR_RO(support_device_change);
8570 
8571 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8572 {
8573 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8574 }
8575 
8576 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8577 			     size_t count)
8578 {
8579 	int retval = count;
8580 
8581 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8582 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8583 		retval = -EINVAL;
8584 	}
8585 	return retval;
8586 }
8587 static DRIVER_ATTR_RW(dbg_lvl);
8588 
8589 static ssize_t
8590 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8591 {
8592 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8593 }
8594 
8595 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8596 
8597 static ssize_t
8598 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8599 {
8600 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8601 }
8602 
8603 static DRIVER_ATTR_RO(support_pci_lane_margining);
8604 
8605 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8606 {
8607 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8608 	scsi_remove_device(sdev);
8609 	scsi_device_put(sdev);
8610 }
8611 
8612 /**
8613  * megasas_update_device_list -	Update the PD and LD device list from FW
8614  *				after an AEN event notification
8615  * @instance:			Adapter soft state
8616  * @event_type:			Indicates type of event (PD or LD event)
8617  *
8618  * @return:			Success or failure
8619  *
8620  * Issue DCMDs to Firmware to update the internal device list in driver.
8621  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8622  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8623  */
8624 static
8625 int megasas_update_device_list(struct megasas_instance *instance,
8626 			       int event_type)
8627 {
8628 	int dcmd_ret = DCMD_SUCCESS;
8629 
8630 	if (instance->enable_fw_dev_list) {
8631 		dcmd_ret = megasas_host_device_list_query(instance, false);
8632 		if (dcmd_ret != DCMD_SUCCESS)
8633 			goto out;
8634 	} else {
8635 		if (event_type & SCAN_PD_CHANNEL) {
8636 			dcmd_ret = megasas_get_pd_list(instance);
8637 
8638 			if (dcmd_ret != DCMD_SUCCESS)
8639 				goto out;
8640 		}
8641 
8642 		if (event_type & SCAN_VD_CHANNEL) {
8643 			if (!instance->requestorId ||
8644 			    (instance->requestorId &&
8645 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8646 				dcmd_ret = megasas_ld_list_query(instance,
8647 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8648 				if (dcmd_ret != DCMD_SUCCESS)
8649 					goto out;
8650 			}
8651 		}
8652 	}
8653 
8654 out:
8655 	return dcmd_ret;
8656 }
8657 
8658 /**
8659  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8660  *				after an AEN event notification
8661  * @instance:			Adapter soft state
8662  * @scan_type:			Indicates type of devices (PD/LD) to add
8663  * @return			void
8664  */
8665 static
8666 void megasas_add_remove_devices(struct megasas_instance *instance,
8667 				int scan_type)
8668 {
8669 	int i, j;
8670 	u16 pd_index = 0;
8671 	u16 ld_index = 0;
8672 	u16 channel = 0, id = 0;
8673 	struct Scsi_Host *host;
8674 	struct scsi_device *sdev1;
8675 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8676 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8677 
8678 	host = instance->host;
8679 
8680 	if (instance->enable_fw_dev_list) {
8681 		targetid_list = instance->host_device_list_buf;
8682 		for (i = 0; i < targetid_list->count; i++) {
8683 			targetid_entry = &targetid_list->host_device_list[i];
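			/*
			 * Map the FW target ID to a SCSI channel/id pair:
			 * system PDs use the PD channels, LDs are placed
			 * after MEGASAS_MAX_PD_CHANNELS.
			 */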
8684 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8685 				channel = le16_to_cpu(targetid_entry->target_id) /
8686 						MEGASAS_MAX_DEV_PER_CHANNEL;
8687 				id = le16_to_cpu(targetid_entry->target_id) %
8688 						MEGASAS_MAX_DEV_PER_CHANNEL;
8689 			} else {
8690 				channel = MEGASAS_MAX_PD_CHANNELS +
8691 					  (le16_to_cpu(targetid_entry->target_id) /
8692 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8693 				id = le16_to_cpu(targetid_entry->target_id) %
8694 						MEGASAS_MAX_DEV_PER_CHANNEL;
8695 			}
8696 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8697 			if (!sdev1) {
8698 				scsi_add_device(host, channel, id, 0);
8699 			} else {
8700 				scsi_device_put(sdev1);
8701 			}
8702 		}
8703 	}
8704 
	if (scan_type & SCAN_PD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
				sdev1 = scsi_device_lookup(host, i, j, 0);
				if (instance->pd_list[pd_index].driveState ==
							MR_PD_STATE_SYSTEM) {
					if (!sdev1)
						scsi_add_device(host, i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}

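	/*
	 * Sync the LD channels: an ld_ids[] entry of 0xff means no logical
	 * drive is configured at that target, so its sdev is removed.
	 */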
	if (scan_type & SCAN_VD_CHANNEL) {
		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
				sdev1 = scsi_device_lookup(host,
						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
				if (instance->ld_ids[ld_index] != 0xff) {
					if (!sdev1)
						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
					else
						scsi_device_put(sdev1);
				} else {
					if (sdev1)
						megasas_remove_scsi_device(sdev1);
				}
			}
		}
	}
}

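/**
 * megasas_aen_polling -	Deferred work to process an AEN from FW
 * @work:			Embedded work_struct of struct megasas_aen_event
 *
 * Decode the latest event, refresh the driver's device list when the event
 * affects PD/LD configuration, add/remove SCSI devices accordingly, and
 * re-register an AEN with the firmware for the next sequence number.
 */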
static void
megasas_aen_polling(struct work_struct *work)
{
	struct megasas_aen_event *ev =
		container_of(work, struct megasas_aen_event, hotplug_work.work);
	struct megasas_instance *instance = ev->instance;
	union megasas_evt_class_locale class_locale;
	int event_type = 0;
	u32 seq_num;
	int error;
	u8  dcmd_ret = DCMD_SUCCESS;

	if (!instance) {
		printk(KERN_ERR "invalid instance!\n");
		kfree(ev);
		return;
	}

	/* Don't run the event workqueue thread if OCR is running */
	mutex_lock(&instance->reset_mutex);

	instance->ev = NULL;
	if (instance->evt_detail) {
		megasas_decode_evt(instance);

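		/* Map the event code to the channel type(s) that need a rescan */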
		switch (le32_to_cpu(instance->evt_detail->code)) {

		case MR_EVT_PD_INSERTED:
		case MR_EVT_PD_REMOVED:
			event_type = SCAN_PD_CHANNEL;
			break;

		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
		case MR_EVT_LD_CREATED:
			event_type = SCAN_VD_CHANNEL;
			break;

		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
				instance->host->host_no);
			break;

		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			if (dcmd_ret == DCMD_SUCCESS &&
			    instance->snapdump_wait_time) {
				megasas_get_snapdump_properties(instance);
				dev_info(&instance->pdev->dev,
					 "Snap dump wait time\t: %d\n",
					 instance->snapdump_wait_time);
			}
			break;
		default:
			event_type = 0;
			break;
		}
	} else {
		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
		mutex_unlock(&instance->reset_mutex);
		kfree(ev);
		return;
	}

	if (event_type)
		dcmd_ret = megasas_update_device_list(instance, event_type);

	mutex_unlock(&instance->reset_mutex);

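	/*
	 * Device add/remove touches the SCSI mid-layer (and may sleep), so do
	 * it outside reset_mutex and only when the device list refresh above
	 * succeeded.
	 */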
	if (event_type && dcmd_ret == DCMD_SUCCESS)
		megasas_add_remove_devices(instance, event_type);

	if (dcmd_ret == DCMD_SUCCESS)
		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
	else
		seq_num = instance->last_seq_num;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

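	/* An AEN command is already outstanding with FW; nothing more to do */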
	if (instance->aen_cmd != NULL) {
		kfree(ev);
		return;
	}

	mutex_lock(&instance->reset_mutex);
	error = megasas_register_aen(instance, seq_num,
					class_locale.word);
	if (error)
		dev_err(&instance->pdev->dev,
			"register aen failed error %x\n", error);

	mutex_unlock(&instance->reset_mutex);
	kfree(ev);
}

/**
 * megasas_init - Driver load entry point
 */
static int __init megasas_init(void)
{
	int rval;

	/*
	 * Booted in a kdump kernel; minimize the memory footprint by
	 * disabling a few features
	 */
	if (reset_devices) {
		msix_vectors = 1;
		rdpq_enable = 0;
		dual_qdepth_disable = 1;
	}

	/*
	 * Announce driver version and other information
	 */
	pr_info("megasas: %s\n", MEGASAS_VERSION);

	spin_lock_init(&poll_aen_lock);

	support_poll_for_event = 2;
	support_device_change = 1;
	support_nvme_encapsulation = true;
	support_pci_lane_margining = true;

	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));

	/*
	 * Register character device node
	 */
	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);

	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to register management character device\n");
		return rval;
	}

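	/* register_chrdev() with major 0 returns a dynamically allocated major */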
	megasas_mgmt_majorno = rval;

	megasas_init_debugfs();

	/*
	 * Register ourselves as a PCI hotplug module
	 */
	rval = pci_register_driver(&megasas_pci_driver);

	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
		goto err_pcidrv;
	}

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		pr_warn("megaraid_sas: provided event log level is out of range, setting it to the default 2 (CLASS_CRITICAL); permissible range is -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

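	/*
	 * Export driver-level sysfs attributes; on failure unwind the ones
	 * already created, in reverse order.
	 */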
	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_version);
	if (rval)
		goto err_dcf_attr_ver;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_release_date);
	if (rval)
		goto err_dcf_rel_date;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_poll_for_event);
	if (rval)
		goto err_dcf_support_poll_for_event;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_dbg_lvl);
	if (rval)
		goto err_dcf_dbg_lvl;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_device_change);
	if (rval)
		goto err_dcf_support_device_change;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_nvme_encapsulation);
	if (rval)
		goto err_dcf_support_nvme_encapsulation;

	rval = driver_create_file(&megasas_pci_driver.driver,
				  &driver_attr_support_pci_lane_margining);
	if (rval)
		goto err_dcf_support_pci_lane_margining;

	return rval;

err_dcf_support_pci_lane_margining:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);

err_dcf_support_nvme_encapsulation:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);

err_dcf_support_device_change:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
err_dcf_support_poll_for_event:
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
err_dcf_rel_date:
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
err_dcf_attr_ver:
	pci_unregister_driver(&megasas_pci_driver);
err_pcidrv:
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
	return rval;
}

/**
 * megasas_exit - Driver unload entry point
 */
static void __exit megasas_exit(void)
{
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_dbg_lvl);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_poll_for_event);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_device_change);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_release_date);
	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_nvme_encapsulation);
	driver_remove_file(&megasas_pci_driver.driver,
			   &driver_attr_support_pci_lane_margining);

	pci_unregister_driver(&megasas_pci_driver);
	megasas_exit_debugfs();
	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
}

module_init(megasas_init);
module_exit(megasas_exit);