1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Linux MegaRAID driver for SAS based RAID controllers
4  *
5  *  Copyright (c) 2003-2013  LSI Corporation
6  *  Copyright (c) 2013-2016  Avago Technologies
7  *  Copyright (c) 2016-2018  Broadcom Inc.
8  *
9  *  Authors: Broadcom Inc.
10  *           Sreenivas Bagalkote
11  *           Sumant Patro
12  *           Bo Yang
13  *           Adam Radford
14  *           Kashyap Desai <kashyap.desai@broadcom.com>
15  *           Sumit Saxena <sumit.saxena@broadcom.com>
16  *
17  *  Send feedback to: megaraidlinux.pdl@broadcom.com
18  */
19 
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
40 
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
49 
50 /*
51  * Number of sectors per IO command
52  * Will be set in megasas_init_mfi if user does not provide
53  */
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 	"Maximum number of sectors per IO command");
58 
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
62 
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
66 
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
70 
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
75 
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
78 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
79 
80 int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
82 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
83 
84 int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
86 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
87 
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
91 
92 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
94 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
95 
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 		"interrupt coalescing is enabled only on high iops queues\n\t\t"
101 		"1 - iops: High iops queues are not allocated &\n\t\t"
102 		"interrupt coalescing is enabled on all queues\n\t\t"
103 		"2 - latency: High iops queues are not allocated &\n\t\t"
104 		"interrupt coalescing is disabled on all queues\n\t\t"
105 		"default mode is 'balanced'"
106 		);
107 
108 MODULE_LICENSE("GPL");
109 MODULE_VERSION(MEGASAS_VERSION);
110 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
111 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
112 
113 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
114 static int megasas_get_pd_list(struct megasas_instance *instance);
115 static int megasas_ld_list_query(struct megasas_instance *instance,
116 				 u8 query_type);
117 static int megasas_issue_init_mfi(struct megasas_instance *instance);
118 static int megasas_register_aen(struct megasas_instance *instance,
119 				u32 seq_num, u32 class_locale_word);
120 static void megasas_get_pd_info(struct megasas_instance *instance,
121 				struct scsi_device *sdev);
122 
123 /*
124  * PCI ID table for all supported controllers
125  */
126 static struct pci_device_id megasas_pci_table[] = {
127 
128 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
129 	/* xscale IOP */
130 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
131 	/* ppc IOP */
132 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
133 	/* ppc IOP */
134 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
135 	/* gen2*/
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
137 	/* gen2*/
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
139 	/* skinny*/
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
141 	/* skinny*/
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
143 	/* xscale IOP, vega */
144 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
145 	/* xscale IOP */
146 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
147 	/* Fusion */
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
149 	/* Plasma */
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
151 	/* Invader */
152 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
153 	/* Fury */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
155 	/* Intruder */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
157 	/* Intruder 24 port*/
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
159 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
160 	/* VENTURA */
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
162 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
168 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
169 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
170 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
171 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
172 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
173 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
174 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
175 	{}
176 };
177 
178 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
179 
180 static int megasas_mgmt_majorno;
181 struct megasas_mgmt_info megasas_mgmt_info;
182 static struct fasync_struct *megasas_async_queue;
183 static DEFINE_MUTEX(megasas_async_queue_mutex);
184 
185 static int megasas_poll_wait_aen;
186 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
187 static u32 support_poll_for_event;
188 u32 megasas_dbg_lvl;
189 static u32 support_device_change;
190 static bool support_nvme_encapsulation;
191 static bool support_pci_lane_margining;
192 
193 /* define lock for aen poll */
194 spinlock_t poll_aen_lock;
195 
196 extern struct dentry *megasas_debugfs_root;
197 extern void megasas_init_debugfs(void);
198 extern void megasas_exit_debugfs(void);
199 extern void megasas_setup_debugfs(struct megasas_instance *instance);
200 extern void megasas_destroy_debugfs(struct megasas_instance *instance);
201 
202 void
203 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
204 		     u8 alt_status);
205 static u32
206 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
207 static int
208 megasas_adp_reset_gen2(struct megasas_instance *instance,
209 		       struct megasas_register_set __iomem *reg_set);
210 static irqreturn_t megasas_isr(int irq, void *devp);
211 static u32
212 megasas_init_adapter_mfi(struct megasas_instance *instance);
213 u32
214 megasas_build_and_issue_cmd(struct megasas_instance *instance,
215 			    struct scsi_cmnd *scmd);
216 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
217 int
218 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
219 	int seconds);
220 void megasas_fusion_ocr_wq(struct work_struct *work);
221 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
222 					 int initial);
223 static int
224 megasas_set_dma_mask(struct megasas_instance *instance);
225 static int
226 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
227 static inline void
228 megasas_free_ctrl_mem(struct megasas_instance *instance);
229 static inline int
230 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
231 static inline void
232 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
233 static inline void
234 megasas_init_ctrl_params(struct megasas_instance *instance);
235 
236 u32 megasas_readl(struct megasas_instance *instance,
237 		  const volatile void __iomem *addr)
238 {
239 	u32 i = 0, ret_val;
240 	/*
241 	 * Due to a HW errata in Aero controllers, reads to certain
242 	 * Fusion registers could intermittently return all zeroes.
243 	 * This behavior is transient in nature and subsequent reads will
244 	 * return valid value. As a workaround in driver, retry readl for
245 	 * upto three times until a non-zero value is read.
246 	 */
247 	if (instance->adapter_type == AERO_SERIES) {
248 		do {
249 			ret_val = readl(addr);
250 			i++;
251 		} while (ret_val == 0 && i < 3);
252 		return ret_val;
253 	} else {
254 		return readl(addr);
255 	}
256 }
257 
258 /**
259  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
260  * @instance:			Adapter soft state
261  * @dcmd:			DCMD frame inside MFI command
262  * @dma_addr:			DMA address of buffer to be passed to FW
263  * @dma_len:			Length of DMA buffer to be passed to FW
264  * @return:			void
265  */
266 void megasas_set_dma_settings(struct megasas_instance *instance,
267 			      struct megasas_dcmd_frame *dcmd,
268 			      dma_addr_t dma_addr, u32 dma_len)
269 {
270 	if (instance->consistent_mask_64bit) {
271 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
272 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
273 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
274 
275 	} else {
276 		dcmd->sgl.sge32[0].phys_addr =
277 				cpu_to_le32(lower_32_bits(dma_addr));
278 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
279 		dcmd->flags = cpu_to_le16(dcmd->flags);
280 	}
281 }
282 
283 void
284 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
285 {
286 	instance->instancet->fire_cmd(instance,
287 		cmd->frame_phys_addr, 0, instance->reg_set);
288 	return;
289 }
290 
291 /**
292  * megasas_get_cmd -	Get a command from the free pool
293  * @instance:		Adapter soft state
294  *
295  * Returns a free command from the pool
296  */
297 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
298 						  *instance)
299 {
300 	unsigned long flags;
301 	struct megasas_cmd *cmd = NULL;
302 
303 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
304 
305 	if (!list_empty(&instance->cmd_pool)) {
306 		cmd = list_entry((&instance->cmd_pool)->next,
307 				 struct megasas_cmd, list);
308 		list_del_init(&cmd->list);
309 	} else {
310 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
311 	}
312 
313 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
314 	return cmd;
315 }
316 
317 /**
318  * megasas_return_cmd -	Return a cmd to free command pool
319  * @instance:		Adapter soft state
320  * @cmd:		Command packet to be returned to free command pool
321  */
322 void
323 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
324 {
325 	unsigned long flags;
326 	u32 blk_tags;
327 	struct megasas_cmd_fusion *cmd_fusion;
328 	struct fusion_context *fusion = instance->ctrl_context;
329 
330 	/* This flag is used only for fusion adapter.
331 	 * Wait for Interrupt for Polled mode DCMD
332 	 */
333 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
334 		return;
335 
336 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
337 
338 	if (fusion) {
339 		blk_tags = instance->max_scsi_cmds + cmd->index;
340 		cmd_fusion = fusion->cmd_list[blk_tags];
341 		megasas_return_cmd_fusion(instance, cmd_fusion);
342 	}
343 	cmd->scmd = NULL;
344 	cmd->frame_count = 0;
345 	cmd->flags = 0;
346 	memset(cmd->frame, 0, instance->mfi_frame_size);
347 	cmd->frame->io.context = cpu_to_le32(cmd->index);
348 	if (!fusion && reset_devices)
349 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
350 	list_add(&cmd->list, (&instance->cmd_pool)->next);
351 
352 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
353 
354 }
355 
356 static const char *
357 format_timestamp(uint32_t timestamp)
358 {
359 	static char buffer[32];
360 
361 	if ((timestamp & 0xff000000) == 0xff000000)
362 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
363 		0x00ffffff);
364 	else
365 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
366 	return buffer;
367 }
368 
369 static const char *
370 format_class(int8_t class)
371 {
372 	static char buffer[6];
373 
374 	switch (class) {
375 	case MFI_EVT_CLASS_DEBUG:
376 		return "debug";
377 	case MFI_EVT_CLASS_PROGRESS:
378 		return "progress";
379 	case MFI_EVT_CLASS_INFO:
380 		return "info";
381 	case MFI_EVT_CLASS_WARNING:
382 		return "WARN";
383 	case MFI_EVT_CLASS_CRITICAL:
384 		return "CRIT";
385 	case MFI_EVT_CLASS_FATAL:
386 		return "FATAL";
387 	case MFI_EVT_CLASS_DEAD:
388 		return "DEAD";
389 	default:
390 		snprintf(buffer, sizeof(buffer), "%d", class);
391 		return buffer;
392 	}
393 }
394 
395 /**
396   * megasas_decode_evt: Decode FW AEN event and print critical event
397   * for information.
398   * @instance:			Adapter soft state
399   */
400 static void
401 megasas_decode_evt(struct megasas_instance *instance)
402 {
403 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
404 	union megasas_evt_class_locale class_locale;
405 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
406 
407 	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
408 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
409 			le32_to_cpu(evt_detail->seq_num),
410 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
411 			(class_locale.members.locale),
412 			format_class(class_locale.members.class),
413 			evt_detail->description);
414 }
415 
416 /**
417 *	The following functions are defined for xscale
418 *	(deviceid : 1064R, PERC5) controllers
419 */
420 
421 /**
422  * megasas_enable_intr_xscale -	Enables interrupts
423  * @regs:			MFI register set
424  */
425 static inline void
426 megasas_enable_intr_xscale(struct megasas_instance *instance)
427 {
428 	struct megasas_register_set __iomem *regs;
429 
430 	regs = instance->reg_set;
431 	writel(0, &(regs)->outbound_intr_mask);
432 
433 	/* Dummy readl to force pci flush */
434 	readl(&regs->outbound_intr_mask);
435 }
436 
437 /**
438  * megasas_disable_intr_xscale -Disables interrupt
439  * @regs:			MFI register set
440  */
441 static inline void
442 megasas_disable_intr_xscale(struct megasas_instance *instance)
443 {
444 	struct megasas_register_set __iomem *regs;
445 	u32 mask = 0x1f;
446 
447 	regs = instance->reg_set;
448 	writel(mask, &regs->outbound_intr_mask);
449 	/* Dummy readl to force pci flush */
450 	readl(&regs->outbound_intr_mask);
451 }
452 
453 /**
454  * megasas_read_fw_status_reg_xscale - returns the current FW status value
455  * @regs:			MFI register set
456  */
457 static u32
458 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
459 {
460 	return readl(&instance->reg_set->outbound_msg_0);
461 }
462 /**
463  * megasas_clear_interrupt_xscale -	Check & clear interrupt
464  * @regs:				MFI register set
465  */
466 static int
467 megasas_clear_intr_xscale(struct megasas_instance *instance)
468 {
469 	u32 status;
470 	u32 mfiStatus = 0;
471 	struct megasas_register_set __iomem *regs;
472 	regs = instance->reg_set;
473 
474 	/*
475 	 * Check if it is our interrupt
476 	 */
477 	status = readl(&regs->outbound_intr_status);
478 
479 	if (status & MFI_OB_INTR_STATUS_MASK)
480 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
481 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
482 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
483 
484 	/*
485 	 * Clear the interrupt by writing back the same value
486 	 */
487 	if (mfiStatus)
488 		writel(status, &regs->outbound_intr_status);
489 
490 	/* Dummy readl to force pci flush */
491 	readl(&regs->outbound_intr_status);
492 
493 	return mfiStatus;
494 }
495 
496 /**
497  * megasas_fire_cmd_xscale -	Sends command to the FW
498  * @frame_phys_addr :		Physical address of cmd
499  * @frame_count :		Number of frames for the command
500  * @regs :			MFI register set
501  */
502 static inline void
503 megasas_fire_cmd_xscale(struct megasas_instance *instance,
504 		dma_addr_t frame_phys_addr,
505 		u32 frame_count,
506 		struct megasas_register_set __iomem *regs)
507 {
508 	unsigned long flags;
509 
510 	spin_lock_irqsave(&instance->hba_lock, flags);
511 	writel((frame_phys_addr >> 3)|(frame_count),
512 	       &(regs)->inbound_queue_port);
513 	spin_unlock_irqrestore(&instance->hba_lock, flags);
514 }
515 
516 /**
517  * megasas_adp_reset_xscale -  For controller reset
518  * @regs:                              MFI register set
519  */
520 static int
521 megasas_adp_reset_xscale(struct megasas_instance *instance,
522 	struct megasas_register_set __iomem *regs)
523 {
524 	u32 i;
525 	u32 pcidata;
526 
527 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
528 
529 	for (i = 0; i < 3; i++)
530 		msleep(1000); /* sleep for 3 secs */
531 	pcidata  = 0;
532 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
533 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
534 	if (pcidata & 0x2) {
535 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
536 		pcidata &= ~0x2;
537 		pci_write_config_dword(instance->pdev,
538 				MFI_1068_PCSR_OFFSET, pcidata);
539 
540 		for (i = 0; i < 2; i++)
541 			msleep(1000); /* need to wait 2 secs again */
542 
543 		pcidata  = 0;
544 		pci_read_config_dword(instance->pdev,
545 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
546 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
547 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
548 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
549 			pcidata = 0;
550 			pci_write_config_dword(instance->pdev,
551 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
552 		}
553 	}
554 	return 0;
555 }
556 
557 /**
558  * megasas_check_reset_xscale -	For controller reset check
559  * @regs:				MFI register set
560  */
561 static int
562 megasas_check_reset_xscale(struct megasas_instance *instance,
563 		struct megasas_register_set __iomem *regs)
564 {
565 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
566 	    (le32_to_cpu(*instance->consumer) ==
567 		MEGASAS_ADPRESET_INPROG_SIGN))
568 		return 1;
569 	return 0;
570 }
571 
572 static struct megasas_instance_template megasas_instance_template_xscale = {
573 
574 	.fire_cmd = megasas_fire_cmd_xscale,
575 	.enable_intr = megasas_enable_intr_xscale,
576 	.disable_intr = megasas_disable_intr_xscale,
577 	.clear_intr = megasas_clear_intr_xscale,
578 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
579 	.adp_reset = megasas_adp_reset_xscale,
580 	.check_reset = megasas_check_reset_xscale,
581 	.service_isr = megasas_isr,
582 	.tasklet = megasas_complete_cmd_dpc,
583 	.init_adapter = megasas_init_adapter_mfi,
584 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
585 	.issue_dcmd = megasas_issue_dcmd,
586 };
587 
588 /**
589 *	This is the end of set of functions & definitions specific
590 *	to xscale (deviceid : 1064R, PERC5) controllers
591 */
592 
593 /**
594 *	The following functions are defined for ppc (deviceid : 0x60)
595 *	controllers
596 */
597 
598 /**
599  * megasas_enable_intr_ppc -	Enables interrupts
600  * @regs:			MFI register set
601  */
602 static inline void
603 megasas_enable_intr_ppc(struct megasas_instance *instance)
604 {
605 	struct megasas_register_set __iomem *regs;
606 
607 	regs = instance->reg_set;
608 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
609 
610 	writel(~0x80000000, &(regs)->outbound_intr_mask);
611 
612 	/* Dummy readl to force pci flush */
613 	readl(&regs->outbound_intr_mask);
614 }
615 
616 /**
617  * megasas_disable_intr_ppc -	Disable interrupt
618  * @regs:			MFI register set
619  */
620 static inline void
621 megasas_disable_intr_ppc(struct megasas_instance *instance)
622 {
623 	struct megasas_register_set __iomem *regs;
624 	u32 mask = 0xFFFFFFFF;
625 
626 	regs = instance->reg_set;
627 	writel(mask, &regs->outbound_intr_mask);
628 	/* Dummy readl to force pci flush */
629 	readl(&regs->outbound_intr_mask);
630 }
631 
632 /**
633  * megasas_read_fw_status_reg_ppc - returns the current FW status value
634  * @regs:			MFI register set
635  */
636 static u32
637 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
638 {
639 	return readl(&instance->reg_set->outbound_scratch_pad_0);
640 }
641 
642 /**
643  * megasas_clear_interrupt_ppc -	Check & clear interrupt
644  * @regs:				MFI register set
645  */
646 static int
647 megasas_clear_intr_ppc(struct megasas_instance *instance)
648 {
649 	u32 status, mfiStatus = 0;
650 	struct megasas_register_set __iomem *regs;
651 	regs = instance->reg_set;
652 
653 	/*
654 	 * Check if it is our interrupt
655 	 */
656 	status = readl(&regs->outbound_intr_status);
657 
658 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
659 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
660 
661 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
662 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
663 
664 	/*
665 	 * Clear the interrupt by writing back the same value
666 	 */
667 	writel(status, &regs->outbound_doorbell_clear);
668 
669 	/* Dummy readl to force pci flush */
670 	readl(&regs->outbound_doorbell_clear);
671 
672 	return mfiStatus;
673 }
674 
675 /**
676  * megasas_fire_cmd_ppc -	Sends command to the FW
677  * @frame_phys_addr :		Physical address of cmd
678  * @frame_count :		Number of frames for the command
679  * @regs :			MFI register set
680  */
681 static inline void
682 megasas_fire_cmd_ppc(struct megasas_instance *instance,
683 		dma_addr_t frame_phys_addr,
684 		u32 frame_count,
685 		struct megasas_register_set __iomem *regs)
686 {
687 	unsigned long flags;
688 
689 	spin_lock_irqsave(&instance->hba_lock, flags);
690 	writel((frame_phys_addr | (frame_count<<1))|1,
691 			&(regs)->inbound_queue_port);
692 	spin_unlock_irqrestore(&instance->hba_lock, flags);
693 }
694 
695 /**
696  * megasas_check_reset_ppc -	For controller reset check
697  * @regs:				MFI register set
698  */
699 static int
700 megasas_check_reset_ppc(struct megasas_instance *instance,
701 			struct megasas_register_set __iomem *regs)
702 {
703 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
704 		return 1;
705 
706 	return 0;
707 }
708 
709 static struct megasas_instance_template megasas_instance_template_ppc = {
710 
711 	.fire_cmd = megasas_fire_cmd_ppc,
712 	.enable_intr = megasas_enable_intr_ppc,
713 	.disable_intr = megasas_disable_intr_ppc,
714 	.clear_intr = megasas_clear_intr_ppc,
715 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
716 	.adp_reset = megasas_adp_reset_xscale,
717 	.check_reset = megasas_check_reset_ppc,
718 	.service_isr = megasas_isr,
719 	.tasklet = megasas_complete_cmd_dpc,
720 	.init_adapter = megasas_init_adapter_mfi,
721 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
722 	.issue_dcmd = megasas_issue_dcmd,
723 };
724 
725 /**
726  * megasas_enable_intr_skinny -	Enables interrupts
727  * @regs:			MFI register set
728  */
729 static inline void
730 megasas_enable_intr_skinny(struct megasas_instance *instance)
731 {
732 	struct megasas_register_set __iomem *regs;
733 
734 	regs = instance->reg_set;
735 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
736 
737 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
738 
739 	/* Dummy readl to force pci flush */
740 	readl(&regs->outbound_intr_mask);
741 }
742 
743 /**
744  * megasas_disable_intr_skinny -	Disables interrupt
745  * @regs:			MFI register set
746  */
747 static inline void
748 megasas_disable_intr_skinny(struct megasas_instance *instance)
749 {
750 	struct megasas_register_set __iomem *regs;
751 	u32 mask = 0xFFFFFFFF;
752 
753 	regs = instance->reg_set;
754 	writel(mask, &regs->outbound_intr_mask);
755 	/* Dummy readl to force pci flush */
756 	readl(&regs->outbound_intr_mask);
757 }
758 
759 /**
760  * megasas_read_fw_status_reg_skinny - returns the current FW status value
761  * @regs:			MFI register set
762  */
763 static u32
764 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
765 {
766 	return readl(&instance->reg_set->outbound_scratch_pad_0);
767 }
768 
769 /**
770  * megasas_clear_interrupt_skinny -	Check & clear interrupt
771  * @regs:				MFI register set
772  */
773 static int
774 megasas_clear_intr_skinny(struct megasas_instance *instance)
775 {
776 	u32 status;
777 	u32 mfiStatus = 0;
778 	struct megasas_register_set __iomem *regs;
779 	regs = instance->reg_set;
780 
781 	/*
782 	 * Check if it is our interrupt
783 	 */
784 	status = readl(&regs->outbound_intr_status);
785 
786 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
787 		return 0;
788 	}
789 
790 	/*
791 	 * Check if it is our interrupt
792 	 */
793 	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
794 	    MFI_STATE_FAULT) {
795 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
796 	} else
797 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
798 
799 	/*
800 	 * Clear the interrupt by writing back the same value
801 	 */
802 	writel(status, &regs->outbound_intr_status);
803 
804 	/*
805 	 * dummy read to flush PCI
806 	 */
807 	readl(&regs->outbound_intr_status);
808 
809 	return mfiStatus;
810 }
811 
812 /**
813  * megasas_fire_cmd_skinny -	Sends command to the FW
814  * @frame_phys_addr :		Physical address of cmd
815  * @frame_count :		Number of frames for the command
816  * @regs :			MFI register set
817  */
818 static inline void
819 megasas_fire_cmd_skinny(struct megasas_instance *instance,
820 			dma_addr_t frame_phys_addr,
821 			u32 frame_count,
822 			struct megasas_register_set __iomem *regs)
823 {
824 	unsigned long flags;
825 
826 	spin_lock_irqsave(&instance->hba_lock, flags);
827 	writel(upper_32_bits(frame_phys_addr),
828 	       &(regs)->inbound_high_queue_port);
829 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
830 	       &(regs)->inbound_low_queue_port);
831 	spin_unlock_irqrestore(&instance->hba_lock, flags);
832 }
833 
834 /**
835  * megasas_check_reset_skinny -	For controller reset check
836  * @regs:				MFI register set
837  */
838 static int
839 megasas_check_reset_skinny(struct megasas_instance *instance,
840 				struct megasas_register_set __iomem *regs)
841 {
842 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
843 		return 1;
844 
845 	return 0;
846 }
847 
848 static struct megasas_instance_template megasas_instance_template_skinny = {
849 
850 	.fire_cmd = megasas_fire_cmd_skinny,
851 	.enable_intr = megasas_enable_intr_skinny,
852 	.disable_intr = megasas_disable_intr_skinny,
853 	.clear_intr = megasas_clear_intr_skinny,
854 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
855 	.adp_reset = megasas_adp_reset_gen2,
856 	.check_reset = megasas_check_reset_skinny,
857 	.service_isr = megasas_isr,
858 	.tasklet = megasas_complete_cmd_dpc,
859 	.init_adapter = megasas_init_adapter_mfi,
860 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
861 	.issue_dcmd = megasas_issue_dcmd,
862 };
863 
864 
865 /**
866 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
867 *	controllers
868 */
869 
870 /**
871  * megasas_enable_intr_gen2 -  Enables interrupts
872  * @regs:                      MFI register set
873  */
874 static inline void
875 megasas_enable_intr_gen2(struct megasas_instance *instance)
876 {
877 	struct megasas_register_set __iomem *regs;
878 
879 	regs = instance->reg_set;
880 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
881 
882 	/* write ~0x00000005 (4 & 1) to the intr mask*/
883 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
884 
885 	/* Dummy readl to force pci flush */
886 	readl(&regs->outbound_intr_mask);
887 }
888 
889 /**
890  * megasas_disable_intr_gen2 - Disables interrupt
891  * @regs:                      MFI register set
892  */
893 static inline void
894 megasas_disable_intr_gen2(struct megasas_instance *instance)
895 {
896 	struct megasas_register_set __iomem *regs;
897 	u32 mask = 0xFFFFFFFF;
898 
899 	regs = instance->reg_set;
900 	writel(mask, &regs->outbound_intr_mask);
901 	/* Dummy readl to force pci flush */
902 	readl(&regs->outbound_intr_mask);
903 }
904 
905 /**
906  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
907  * @regs:                      MFI register set
908  */
909 static u32
910 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
911 {
912 	return readl(&instance->reg_set->outbound_scratch_pad_0);
913 }
914 
915 /**
916  * megasas_clear_interrupt_gen2 -      Check & clear interrupt
917  * @regs:                              MFI register set
918  */
919 static int
920 megasas_clear_intr_gen2(struct megasas_instance *instance)
921 {
922 	u32 status;
923 	u32 mfiStatus = 0;
924 	struct megasas_register_set __iomem *regs;
925 	regs = instance->reg_set;
926 
927 	/*
928 	 * Check if it is our interrupt
929 	 */
930 	status = readl(&regs->outbound_intr_status);
931 
932 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
933 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
934 	}
935 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
936 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
937 	}
938 
939 	/*
940 	 * Clear the interrupt by writing back the same value
941 	 */
942 	if (mfiStatus)
943 		writel(status, &regs->outbound_doorbell_clear);
944 
945 	/* Dummy readl to force pci flush */
946 	readl(&regs->outbound_intr_status);
947 
948 	return mfiStatus;
949 }
950 /**
951  * megasas_fire_cmd_gen2 -     Sends command to the FW
952  * @frame_phys_addr :          Physical address of cmd
953  * @frame_count :              Number of frames for the command
954  * @regs :                     MFI register set
955  */
956 static inline void
957 megasas_fire_cmd_gen2(struct megasas_instance *instance,
958 			dma_addr_t frame_phys_addr,
959 			u32 frame_count,
960 			struct megasas_register_set __iomem *regs)
961 {
962 	unsigned long flags;
963 
964 	spin_lock_irqsave(&instance->hba_lock, flags);
965 	writel((frame_phys_addr | (frame_count<<1))|1,
966 			&(regs)->inbound_queue_port);
967 	spin_unlock_irqrestore(&instance->hba_lock, flags);
968 }
969 
970 /**
971  * megasas_adp_reset_gen2 -	For controller reset
972  * @regs:				MFI register set
973  */
974 static int
975 megasas_adp_reset_gen2(struct megasas_instance *instance,
976 			struct megasas_register_set __iomem *reg_set)
977 {
978 	u32 retry = 0 ;
979 	u32 HostDiag;
980 	u32 __iomem *seq_offset = &reg_set->seq_offset;
981 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
982 
983 	if (instance->instancet == &megasas_instance_template_skinny) {
984 		seq_offset = &reg_set->fusion_seq_offset;
985 		hostdiag_offset = &reg_set->fusion_host_diag;
986 	}
987 
988 	writel(0, seq_offset);
989 	writel(4, seq_offset);
990 	writel(0xb, seq_offset);
991 	writel(2, seq_offset);
992 	writel(7, seq_offset);
993 	writel(0xd, seq_offset);
994 
995 	msleep(1000);
996 
997 	HostDiag = (u32)readl(hostdiag_offset);
998 
999 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1000 		msleep(100);
1001 		HostDiag = (u32)readl(hostdiag_offset);
1002 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1003 					retry, HostDiag);
1004 
1005 		if (retry++ >= 100)
1006 			return 1;
1007 
1008 	}
1009 
1010 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1011 
1012 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1013 
1014 	ssleep(10);
1015 
1016 	HostDiag = (u32)readl(hostdiag_offset);
1017 	while (HostDiag & DIAG_RESET_ADAPTER) {
1018 		msleep(100);
1019 		HostDiag = (u32)readl(hostdiag_offset);
1020 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1021 				retry, HostDiag);
1022 
1023 		if (retry++ >= 1000)
1024 			return 1;
1025 
1026 	}
1027 	return 0;
1028 }
1029 
1030 /**
1031  * megasas_check_reset_gen2 -	For controller reset check
1032  * @regs:				MFI register set
1033  */
1034 static int
1035 megasas_check_reset_gen2(struct megasas_instance *instance,
1036 		struct megasas_register_set __iomem *regs)
1037 {
1038 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1039 		return 1;
1040 
1041 	return 0;
1042 }
1043 
1044 static struct megasas_instance_template megasas_instance_template_gen2 = {
1045 
1046 	.fire_cmd = megasas_fire_cmd_gen2,
1047 	.enable_intr = megasas_enable_intr_gen2,
1048 	.disable_intr = megasas_disable_intr_gen2,
1049 	.clear_intr = megasas_clear_intr_gen2,
1050 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1051 	.adp_reset = megasas_adp_reset_gen2,
1052 	.check_reset = megasas_check_reset_gen2,
1053 	.service_isr = megasas_isr,
1054 	.tasklet = megasas_complete_cmd_dpc,
1055 	.init_adapter = megasas_init_adapter_mfi,
1056 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1057 	.issue_dcmd = megasas_issue_dcmd,
1058 };
1059 
1060 /**
1061 *	This is the end of set of functions & definitions
1062 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1063 */
1064 
1065 /*
1066  * Template added for TB (Fusion)
1067  */
1068 extern struct megasas_instance_template megasas_instance_template_fusion;
1069 
1070 /**
1071  * megasas_issue_polled -	Issues a polling command
1072  * @instance:			Adapter soft state
1073  * @cmd:			Command packet to be issued
1074  *
1075  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1076  */
1077 int
1078 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1079 {
1080 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1081 
1082 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1083 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1084 
1085 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1086 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1087 			__func__, __LINE__);
1088 		return DCMD_NOT_FIRED;
1089 	}
1090 
1091 	instance->instancet->issue_dcmd(instance, cmd);
1092 
1093 	return wait_and_poll(instance, cmd, instance->requestorId ?
1094 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1095 }
1096 
1097 /**
1098  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1099  * @instance:			Adapter soft state
1100  * @cmd:			Command to be issued
1101  * @timeout:			Timeout in seconds
1102  *
1103  * This function waits on an event for the command to be returned from ISR.
1104  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1105  * Used to issue ioctl commands.
1106  */
1107 int
1108 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1109 			  struct megasas_cmd *cmd, int timeout)
1110 {
1111 	int ret = 0;
1112 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1113 
1114 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1115 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1116 			__func__, __LINE__);
1117 		return DCMD_NOT_FIRED;
1118 	}
1119 
1120 	instance->instancet->issue_dcmd(instance, cmd);
1121 
1122 	if (timeout) {
1123 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1124 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1125 		if (!ret) {
1126 			dev_err(&instance->pdev->dev,
1127 				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1128 				cmd->frame->dcmd.opcode, __func__);
1129 			return DCMD_TIMEOUT;
1130 		}
1131 	} else
1132 		wait_event(instance->int_cmd_wait_q,
1133 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1134 
1135 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1136 		DCMD_SUCCESS : DCMD_FAILED;
1137 }
1138 
1139 /**
1140  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1141  * @instance:				Adapter soft state
1142  * @cmd_to_abort:			Previously issued cmd to be aborted
1143  * @timeout:				Timeout in seconds
1144  *
1145  * MFI firmware can abort previously issued AEN comamnd (automatic event
1146  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1147  * cmd and waits for return status.
1148  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1149  */
1150 static int
1151 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1152 				struct megasas_cmd *cmd_to_abort, int timeout)
1153 {
1154 	struct megasas_cmd *cmd;
1155 	struct megasas_abort_frame *abort_fr;
1156 	int ret = 0;
1157 	u32 opcode;
1158 
1159 	cmd = megasas_get_cmd(instance);
1160 
1161 	if (!cmd)
1162 		return -1;
1163 
1164 	abort_fr = &cmd->frame->abort;
1165 
1166 	/*
1167 	 * Prepare and issue the abort frame
1168 	 */
1169 	abort_fr->cmd = MFI_CMD_ABORT;
1170 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1171 	abort_fr->flags = cpu_to_le16(0);
1172 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1173 	abort_fr->abort_mfi_phys_addr_lo =
1174 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1175 	abort_fr->abort_mfi_phys_addr_hi =
1176 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1177 
1178 	cmd->sync_cmd = 1;
1179 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1180 
1181 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1182 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1183 			__func__, __LINE__);
1184 		return DCMD_NOT_FIRED;
1185 	}
1186 
1187 	instance->instancet->issue_dcmd(instance, cmd);
1188 
1189 	if (timeout) {
1190 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1191 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1192 		if (!ret) {
1193 			opcode = cmd_to_abort->frame->dcmd.opcode;
1194 			dev_err(&instance->pdev->dev,
1195 				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1196 				opcode,  __func__);
1197 			return DCMD_TIMEOUT;
1198 		}
1199 	} else
1200 		wait_event(instance->abort_cmd_wait_q,
1201 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1202 
1203 	cmd->sync_cmd = 0;
1204 
1205 	megasas_return_cmd(instance, cmd);
1206 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1207 		DCMD_SUCCESS : DCMD_FAILED;
1208 }
1209 
1210 /**
1211  * megasas_make_sgl32 -	Prepares 32-bit SGL
1212  * @instance:		Adapter soft state
1213  * @scp:		SCSI command from the mid-layer
1214  * @mfi_sgl:		SGL to be filled in
1215  *
1216  * If successful, this function returns the number of SG elements. Otherwise,
1217  * it returnes -1.
1218  */
1219 static int
1220 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1221 		   union megasas_sgl *mfi_sgl)
1222 {
1223 	int i;
1224 	int sge_count;
1225 	struct scatterlist *os_sgl;
1226 
1227 	sge_count = scsi_dma_map(scp);
1228 	BUG_ON(sge_count < 0);
1229 
1230 	if (sge_count) {
1231 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1232 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1233 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1234 		}
1235 	}
1236 	return sge_count;
1237 }
1238 
1239 /**
1240  * megasas_make_sgl64 -	Prepares 64-bit SGL
1241  * @instance:		Adapter soft state
1242  * @scp:		SCSI command from the mid-layer
1243  * @mfi_sgl:		SGL to be filled in
1244  *
1245  * If successful, this function returns the number of SG elements. Otherwise,
1246  * it returnes -1.
1247  */
1248 static int
1249 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1250 		   union megasas_sgl *mfi_sgl)
1251 {
1252 	int i;
1253 	int sge_count;
1254 	struct scatterlist *os_sgl;
1255 
1256 	sge_count = scsi_dma_map(scp);
1257 	BUG_ON(sge_count < 0);
1258 
1259 	if (sge_count) {
1260 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1261 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1262 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1263 		}
1264 	}
1265 	return sge_count;
1266 }
1267 
1268 /**
1269  * megasas_make_sgl_skinny - Prepares IEEE SGL
1270  * @instance:           Adapter soft state
1271  * @scp:                SCSI command from the mid-layer
1272  * @mfi_sgl:            SGL to be filled in
1273  *
1274  * If successful, this function returns the number of SG elements. Otherwise,
1275  * it returnes -1.
1276  */
1277 static int
1278 megasas_make_sgl_skinny(struct megasas_instance *instance,
1279 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1280 {
1281 	int i;
1282 	int sge_count;
1283 	struct scatterlist *os_sgl;
1284 
1285 	sge_count = scsi_dma_map(scp);
1286 
1287 	if (sge_count) {
1288 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1289 			mfi_sgl->sge_skinny[i].length =
1290 				cpu_to_le32(sg_dma_len(os_sgl));
1291 			mfi_sgl->sge_skinny[i].phys_addr =
1292 				cpu_to_le64(sg_dma_address(os_sgl));
1293 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1294 		}
1295 	}
1296 	return sge_count;
1297 }
1298 
1299  /**
1300  * megasas_get_frame_count - Computes the number of frames
1301  * @frame_type		: type of frame- io or pthru frame
1302  * @sge_count		: number of sg elements
1303  *
1304  * Returns the number of frames required for numnber of sge's (sge_count)
1305  */
1306 
1307 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1308 			u8 sge_count, u8 frame_type)
1309 {
1310 	int num_cnt;
1311 	int sge_bytes;
1312 	u32 sge_sz;
1313 	u32 frame_count = 0;
1314 
1315 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1316 	    sizeof(struct megasas_sge32);
1317 
1318 	if (instance->flag_ieee) {
1319 		sge_sz = sizeof(struct megasas_sge_skinny);
1320 	}
1321 
1322 	/*
1323 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1324 	 * 3 SGEs for 32-bit SGLs for ldio &
1325 	 * 1 SGEs for 64-bit SGLs and
1326 	 * 2 SGEs for 32-bit SGLs for pthru frame
1327 	 */
1328 	if (unlikely(frame_type == PTHRU_FRAME)) {
1329 		if (instance->flag_ieee == 1) {
1330 			num_cnt = sge_count - 1;
1331 		} else if (IS_DMA64)
1332 			num_cnt = sge_count - 1;
1333 		else
1334 			num_cnt = sge_count - 2;
1335 	} else {
1336 		if (instance->flag_ieee == 1) {
1337 			num_cnt = sge_count - 1;
1338 		} else if (IS_DMA64)
1339 			num_cnt = sge_count - 2;
1340 		else
1341 			num_cnt = sge_count - 3;
1342 	}
1343 
1344 	if (num_cnt > 0) {
1345 		sge_bytes = sge_sz * num_cnt;
1346 
1347 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1348 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1349 	}
1350 	/* Main frame */
1351 	frame_count += 1;
1352 
1353 	if (frame_count > 7)
1354 		frame_count = 8;
1355 	return frame_count;
1356 }
1357 
1358 /**
1359  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1360  * @instance:		Adapter soft state
1361  * @scp:		SCSI command
1362  * @cmd:		Command to be prepared in
1363  *
1364  * This function prepares CDB commands. These are typcially pass-through
1365  * commands to the devices.
1366  */
1367 static int
1368 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1369 		   struct megasas_cmd *cmd)
1370 {
1371 	u32 is_logical;
1372 	u32 device_id;
1373 	u16 flags = 0;
1374 	struct megasas_pthru_frame *pthru;
1375 
1376 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1377 	device_id = MEGASAS_DEV_INDEX(scp);
1378 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1379 
1380 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1381 		flags = MFI_FRAME_DIR_WRITE;
1382 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1383 		flags = MFI_FRAME_DIR_READ;
1384 	else if (scp->sc_data_direction == DMA_NONE)
1385 		flags = MFI_FRAME_DIR_NONE;
1386 
1387 	if (instance->flag_ieee == 1) {
1388 		flags |= MFI_FRAME_IEEE;
1389 	}
1390 
1391 	/*
1392 	 * Prepare the DCDB frame
1393 	 */
1394 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1395 	pthru->cmd_status = 0x0;
1396 	pthru->scsi_status = 0x0;
1397 	pthru->target_id = device_id;
1398 	pthru->lun = scp->device->lun;
1399 	pthru->cdb_len = scp->cmd_len;
1400 	pthru->timeout = 0;
1401 	pthru->pad_0 = 0;
1402 	pthru->flags = cpu_to_le16(flags);
1403 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1404 
1405 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1406 
1407 	/*
1408 	 * If the command is for the tape device, set the
1409 	 * pthru timeout to the os layer timeout value.
1410 	 */
1411 	if (scp->device->type == TYPE_TAPE) {
1412 		if ((scp->request->timeout / HZ) > 0xFFFF)
1413 			pthru->timeout = cpu_to_le16(0xFFFF);
1414 		else
1415 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1416 	}
1417 
1418 	/*
1419 	 * Construct SGL
1420 	 */
1421 	if (instance->flag_ieee == 1) {
1422 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1423 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1424 						      &pthru->sgl);
1425 	} else if (IS_DMA64) {
1426 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1427 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1428 						      &pthru->sgl);
1429 	} else
1430 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1431 						      &pthru->sgl);
1432 
1433 	if (pthru->sge_count > instance->max_num_sge) {
1434 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1435 			pthru->sge_count);
1436 		return 0;
1437 	}
1438 
1439 	/*
1440 	 * Sense info specific
1441 	 */
1442 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1443 	pthru->sense_buf_phys_addr_hi =
1444 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1445 	pthru->sense_buf_phys_addr_lo =
1446 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1447 
1448 	/*
1449 	 * Compute the total number of frames this command consumes. FW uses
1450 	 * this number to pull sufficient number of frames from host memory.
1451 	 */
1452 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1453 							PTHRU_FRAME);
1454 
1455 	return cmd->frame_count;
1456 }
1457 
1458 /**
1459  * megasas_build_ldio -	Prepares IOs to logical devices
1460  * @instance:		Adapter soft state
1461  * @scp:		SCSI command
1462  * @cmd:		Command to be prepared
1463  *
1464  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1465  */
1466 static int
1467 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1468 		   struct megasas_cmd *cmd)
1469 {
1470 	u32 device_id;
1471 	u8 sc = scp->cmnd[0];
1472 	u16 flags = 0;
1473 	struct megasas_io_frame *ldio;
1474 
1475 	device_id = MEGASAS_DEV_INDEX(scp);
1476 	ldio = (struct megasas_io_frame *)cmd->frame;
1477 
1478 	if (scp->sc_data_direction == DMA_TO_DEVICE)
1479 		flags = MFI_FRAME_DIR_WRITE;
1480 	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1481 		flags = MFI_FRAME_DIR_READ;
1482 
1483 	if (instance->flag_ieee == 1) {
1484 		flags |= MFI_FRAME_IEEE;
1485 	}
1486 
1487 	/*
1488 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1489 	 */
1490 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1491 	ldio->cmd_status = 0x0;
1492 	ldio->scsi_status = 0x0;
1493 	ldio->target_id = device_id;
1494 	ldio->timeout = 0;
1495 	ldio->reserved_0 = 0;
1496 	ldio->pad_0 = 0;
1497 	ldio->flags = cpu_to_le16(flags);
1498 	ldio->start_lba_hi = 0;
1499 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1500 
1501 	/*
1502 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1503 	 */
1504 	if (scp->cmd_len == 6) {
1505 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1506 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1507 						 ((u32) scp->cmnd[2] << 8) |
1508 						 (u32) scp->cmnd[3]);
1509 
1510 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1511 	}
1512 
1513 	/*
1514 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1515 	 */
1516 	else if (scp->cmd_len == 10) {
1517 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1518 					      ((u32) scp->cmnd[7] << 8));
1519 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1520 						 ((u32) scp->cmnd[3] << 16) |
1521 						 ((u32) scp->cmnd[4] << 8) |
1522 						 (u32) scp->cmnd[5]);
1523 	}
1524 
1525 	/*
1526 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1527 	 */
1528 	else if (scp->cmd_len == 12) {
1529 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1530 					      ((u32) scp->cmnd[7] << 16) |
1531 					      ((u32) scp->cmnd[8] << 8) |
1532 					      (u32) scp->cmnd[9]);
1533 
1534 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1535 						 ((u32) scp->cmnd[3] << 16) |
1536 						 ((u32) scp->cmnd[4] << 8) |
1537 						 (u32) scp->cmnd[5]);
1538 	}
1539 
1540 	/*
1541 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1542 	 */
1543 	else if (scp->cmd_len == 16) {
1544 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1545 					      ((u32) scp->cmnd[11] << 16) |
1546 					      ((u32) scp->cmnd[12] << 8) |
1547 					      (u32) scp->cmnd[13]);
1548 
1549 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1550 						 ((u32) scp->cmnd[7] << 16) |
1551 						 ((u32) scp->cmnd[8] << 8) |
1552 						 (u32) scp->cmnd[9]);
1553 
1554 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1555 						 ((u32) scp->cmnd[3] << 16) |
1556 						 ((u32) scp->cmnd[4] << 8) |
1557 						 (u32) scp->cmnd[5]);
1558 
1559 	}
1560 
1561 	/*
1562 	 * Construct SGL
1563 	 */
1564 	if (instance->flag_ieee) {
1565 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1566 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1567 					      &ldio->sgl);
1568 	} else if (IS_DMA64) {
1569 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1570 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1571 	} else
1572 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1573 
1574 	if (ldio->sge_count > instance->max_num_sge) {
1575 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1576 			ldio->sge_count);
1577 		return 0;
1578 	}
1579 
1580 	/*
1581 	 * Sense info specific
1582 	 */
1583 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1584 	ldio->sense_buf_phys_addr_hi = 0;
1585 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1586 
1587 	/*
1588 	 * Compute the total number of frames this command consumes. FW uses
1589 	 * this number to pull sufficient number of frames from host memory.
1590 	 */
1591 	cmd->frame_count = megasas_get_frame_count(instance,
1592 			ldio->sge_count, IO_FRAME);
1593 
1594 	return cmd->frame_count;
1595 }
1596 
1597 /**
1598  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1599  *				and whether it's RW or non RW
1600  * @scmd:			SCSI command
1601  *
1602  */
1603 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1604 {
1605 	int ret;
1606 
1607 	switch (cmd->cmnd[0]) {
1608 	case READ_10:
1609 	case WRITE_10:
1610 	case READ_12:
1611 	case WRITE_12:
1612 	case READ_6:
1613 	case WRITE_6:
1614 	case READ_16:
1615 	case WRITE_16:
1616 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1617 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1618 		break;
1619 	default:
1620 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1621 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1622 	}
1623 	return ret;
1624 }
1625 
1626  /**
1627  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1628  *					in FW
1629  * @instance:				Adapter soft state
1630  */
1631 static inline void
1632 megasas_dump_pending_frames(struct megasas_instance *instance)
1633 {
1634 	struct megasas_cmd *cmd;
1635 	int i,n;
1636 	union megasas_sgl *mfi_sgl;
1637 	struct megasas_io_frame *ldio;
1638 	struct megasas_pthru_frame *pthru;
1639 	u32 sgcount;
1640 	u16 max_cmd = instance->max_fw_cmds;
1641 
1642 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1643 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1644 	if (IS_DMA64)
1645 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1646 	else
1647 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1648 
1649 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1650 	for (i = 0; i < max_cmd; i++) {
1651 		cmd = instance->cmd_list[i];
1652 		if (!cmd->scmd)
1653 			continue;
1654 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1655 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1656 			ldio = (struct megasas_io_frame *)cmd->frame;
1657 			mfi_sgl = &ldio->sgl;
1658 			sgcount = ldio->sge_count;
1659 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1660 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1661 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1662 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1663 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1664 		} else {
1665 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1666 			mfi_sgl = &pthru->sgl;
1667 			sgcount = pthru->sge_count;
1668 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1669 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1670 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1671 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1672 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1673 		}
1674 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1675 			for (n = 0; n < sgcount; n++) {
1676 				if (IS_DMA64)
1677 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1678 						le32_to_cpu(mfi_sgl->sge64[n].length),
1679 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1680 				else
1681 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1682 						le32_to_cpu(mfi_sgl->sge32[n].length),
1683 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1684 			}
1685 		}
1686 	} /*for max_cmd*/
1687 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1688 	for (i = 0; i < max_cmd; i++) {
1689 
1690 		cmd = instance->cmd_list[i];
1691 
1692 		if (cmd->sync_cmd == 1)
1693 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1694 	}
1695 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1696 }
1697 
1698 u32
1699 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1700 			    struct scsi_cmnd *scmd)
1701 {
1702 	struct megasas_cmd *cmd;
1703 	u32 frame_count;
1704 
1705 	cmd = megasas_get_cmd(instance);
1706 	if (!cmd)
1707 		return SCSI_MLQUEUE_HOST_BUSY;
1708 
1709 	/*
1710 	 * Logical drive command
1711 	 */
1712 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1713 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1714 	else
1715 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1716 
1717 	if (!frame_count)
1718 		goto out_return_cmd;
1719 
1720 	cmd->scmd = scmd;
1721 	scmd->SCp.ptr = (char *)cmd;
1722 
1723 	/*
1724 	 * Issue the command to the FW
1725 	 */
1726 	atomic_inc(&instance->fw_outstanding);
1727 
1728 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1729 				cmd->frame_count-1, instance->reg_set);
1730 
1731 	return 0;
1732 out_return_cmd:
1733 	megasas_return_cmd(instance, cmd);
1734 	return SCSI_MLQUEUE_HOST_BUSY;
1735 }
1736 
1737 
1738 /**
1739  * megasas_queue_command -	Queue entry point
1740  * @scmd:			SCSI command to be queued
1741  * @done:			Callback entry point
1742  */
1743 static int
1744 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1745 {
1746 	struct megasas_instance *instance;
1747 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1748 
1749 	instance = (struct megasas_instance *)
1750 	    scmd->device->host->hostdata;
1751 
1752 	if (instance->unload == 1) {
1753 		scmd->result = DID_NO_CONNECT << 16;
1754 		scmd->scsi_done(scmd);
1755 		return 0;
1756 	}
1757 
1758 	if (instance->issuepend_done == 0)
1759 		return SCSI_MLQUEUE_HOST_BUSY;
1760 
1761 
1762 	/* Check for an mpio path and adjust behavior */
1763 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1764 		if (megasas_check_mpio_paths(instance, scmd) ==
1765 		    (DID_REQUEUE << 16)) {
1766 			return SCSI_MLQUEUE_HOST_BUSY;
1767 		} else {
1768 			scmd->result = DID_NO_CONNECT << 16;
1769 			scmd->scsi_done(scmd);
1770 			return 0;
1771 		}
1772 	}
1773 
1774 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1775 		scmd->result = DID_NO_CONNECT << 16;
1776 		scmd->scsi_done(scmd);
1777 		return 0;
1778 	}
1779 
1780 	mr_device_priv_data = scmd->device->hostdata;
1781 	if (!mr_device_priv_data) {
1782 		scmd->result = DID_NO_CONNECT << 16;
1783 		scmd->scsi_done(scmd);
1784 		return 0;
1785 	}
1786 
1787 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1788 		return SCSI_MLQUEUE_HOST_BUSY;
1789 
1790 	if (mr_device_priv_data->tm_busy)
1791 		return SCSI_MLQUEUE_DEVICE_BUSY;
1792 
1793 
1794 	scmd->result = 0;
1795 
1796 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1797 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1798 		scmd->device->lun)) {
1799 		scmd->result = DID_BAD_TARGET << 16;
1800 		goto out_done;
1801 	}
1802 
1803 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1804 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1805 	    (!instance->fw_sync_cache_support)) {
1806 		scmd->result = DID_OK << 16;
1807 		goto out_done;
1808 	}
1809 
1810 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1811 
1812  out_done:
1813 	scmd->scsi_done(scmd);
1814 	return 0;
1815 }
1816 
1817 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1818 {
1819 	int i;
1820 
1821 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1822 
1823 		if ((megasas_mgmt_info.instance[i]) &&
1824 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1825 			return megasas_mgmt_info.instance[i];
1826 	}
1827 
1828 	return NULL;
1829 }
1830 
1831 /*
1832 * megasas_set_dynamic_target_properties -
1833 * Device property set by driver may not be static and it is required to be
1834 * updated after OCR
1835 *
1836 * set tm_capable.
1837 * set dma alignment (only for eedp protection enable vd).
1838 *
1839 * @sdev: OS provided scsi device
1840 *
1841 * Returns void
1842 */
1843 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1844 					   bool is_target_prop)
1845 {
1846 	u16 pd_index = 0, ld;
1847 	u32 device_id;
1848 	struct megasas_instance *instance;
1849 	struct fusion_context *fusion;
1850 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1851 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1852 	struct MR_LD_RAID *raid;
1853 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1854 
1855 	instance = megasas_lookup_instance(sdev->host->host_no);
1856 	fusion = instance->ctrl_context;
1857 	mr_device_priv_data = sdev->hostdata;
1858 
1859 	if (!fusion || !mr_device_priv_data)
1860 		return;
1861 
1862 	if (MEGASAS_IS_LOGICAL(sdev)) {
1863 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1864 					+ sdev->id;
1865 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1866 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1867 		if (ld >= instance->fw_supported_vd_count)
1868 			return;
1869 		raid = MR_LdRaidGet(ld, local_map_ptr);
1870 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1873 
1874 		mr_device_priv_data->is_tm_capable =
1875 			raid->capability.tmCapable;
1876 	} else if (instance->use_seqnum_jbod_fp) {
1877 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1878 			sdev->id;
1879 		pd_sync = (void *)fusion->pd_seq_sync
1880 				[(instance->pd_seq_map_id - 1) & 1];
1881 		mr_device_priv_data->is_tm_capable =
1882 			pd_sync->seq[pd_index].capability.tmCapable;
1883 	}
1884 
1885 	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1886 		/*
1887 		 * If FW provides a target reset timeout value, driver will use
1888 		 * it. If not set, fallback to default values.
1889 		 */
1890 		mr_device_priv_data->target_reset_tmo =
1891 			min_t(u8, instance->max_reset_tmo,
1892 			      instance->tgt_prop->reset_tmo);
1893 		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1894 	} else {
1895 		mr_device_priv_data->target_reset_tmo =
1896 						MEGASAS_DEFAULT_TM_TIMEOUT;
1897 		mr_device_priv_data->task_abort_tmo =
1898 						MEGASAS_DEFAULT_TM_TIMEOUT;
1899 	}
1900 }
1901 
1902 /*
1903  * megasas_set_nvme_device_properties -
1904  * set nomerges=2
1905  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1906  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1907  *
1908  * MR firmware provides value in KB. Caller of this function converts
1909  * kb into bytes.
1910  *
1911  * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1912  * MR firmware provides value 128 as (32 * 4K) = 128K.
1913  *
1914  * @sdev:				scsi device
1915  * @max_io_size:				maximum io transfer size
1916  *
1917  */
1918 static inline void
1919 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1920 {
1921 	struct megasas_instance *instance;
1922 	u32 mr_nvme_pg_size;
1923 
1924 	instance = (struct megasas_instance *)sdev->host->hostdata;
1925 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1926 				MR_DEFAULT_NVME_PAGE_SIZE);
1927 
1928 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1929 
1930 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1931 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1932 }
1933 
1934 
1935 /*
1936  * megasas_set_static_target_properties -
1937  * Device property set by driver are static and it is not required to be
1938  * updated after OCR.
1939  *
1940  * set io timeout
1941  * set device queue depth
1942  * set nvme device properties. see - megasas_set_nvme_device_properties
1943  *
1944  * @sdev:				scsi device
1945  * @is_target_prop			true, if fw provided target properties.
1946  */
1947 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1948 						 bool is_target_prop)
1949 {
1950 	u8 interface_type;
1951 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1952 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1953 	u32 tgt_device_qd;
1954 	struct megasas_instance *instance;
1955 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1956 
1957 	instance = megasas_lookup_instance(sdev->host->host_no);
1958 	mr_device_priv_data = sdev->hostdata;
1959 	interface_type  = mr_device_priv_data->interface_type;
1960 
1961 	/*
1962 	 * The RAID firmware may require extended timeouts.
1963 	 */
1964 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1965 
1966 	switch (interface_type) {
1967 	case SAS_PD:
1968 		device_qd = MEGASAS_SAS_QD;
1969 		break;
1970 	case SATA_PD:
1971 		device_qd = MEGASAS_SATA_QD;
1972 		break;
1973 	case NVME_PD:
1974 		device_qd = MEGASAS_NVME_QD;
1975 		break;
1976 	}
1977 
1978 	if (is_target_prop) {
1979 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1980 		if (tgt_device_qd &&
1981 		    (tgt_device_qd <= instance->host->can_queue))
1982 			device_qd = tgt_device_qd;
1983 
		/* max_io_size_kb will be non-zero for
		 * NVMe based VDs and system PDs.
		 */
1987 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1988 	}
1989 
1990 	if (instance->nvme_page_size && max_io_size_kb)
1991 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1992 
1993 	scsi_change_queue_depth(sdev, device_qd);
1994 
1995 }
1996 
1997 
1998 static int megasas_slave_configure(struct scsi_device *sdev)
1999 {
2000 	u16 pd_index = 0;
2001 	struct megasas_instance *instance;
2002 	int ret_target_prop = DCMD_FAILED;
2003 	bool is_target_prop = false;
2004 
2005 	instance = megasas_lookup_instance(sdev->host->host_no);
2006 	if (instance->pd_list_not_supported) {
2007 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2008 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2009 				sdev->id;
2010 			if (instance->pd_list[pd_index].driveState !=
2011 				MR_PD_STATE_SYSTEM)
2012 				return -ENXIO;
2013 		}
2014 	}
2015 
2016 	mutex_lock(&instance->reset_mutex);
2017 	/* Send DCMD to Firmware and cache the information */
2018 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2019 		megasas_get_pd_info(instance, sdev);
2020 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * In that case do not send MR_DCMD_DRV_GET_TARGET_PROP.
	 */
2024 	if ((instance->tgt_prop) && (instance->nvme_page_size))
2025 		ret_target_prop = megasas_get_target_prop(instance, sdev);
2026 
2027 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2028 	megasas_set_static_target_properties(sdev, is_target_prop);
2029 
2030 	/* This sdev property may change post OCR */
2031 	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2032 
2033 	mutex_unlock(&instance->reset_mutex);
2034 
2035 	return 0;
2036 }
2037 
2038 static int megasas_slave_alloc(struct scsi_device *sdev)
2039 {
2040 	u16 pd_index = 0;
	struct megasas_instance *instance;
2042 	struct MR_PRIV_DEVICE *mr_device_priv_data;
2043 
2044 	instance = megasas_lookup_instance(sdev->host->host_no);
2045 	if (!MEGASAS_IS_LOGICAL(sdev)) {
2046 		/*
2047 		 * Open the OS scan to the SYSTEM PD
2048 		 */
2049 		pd_index =
2050 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2051 			sdev->id;
2052 		if ((instance->pd_list_not_supported ||
2053 			instance->pd_list[pd_index].driveState ==
2054 			MR_PD_STATE_SYSTEM)) {
2055 			goto scan_target;
2056 		}
2057 		return -ENXIO;
2058 	}
2059 
2060 scan_target:
2061 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2062 					GFP_KERNEL);
2063 	if (!mr_device_priv_data)
2064 		return -ENOMEM;
2065 	sdev->hostdata = mr_device_priv_data;
2066 
2067 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2068 		   instance->r1_ldio_hint_default);
2069 	return 0;
2070 }
2071 
2072 static void megasas_slave_destroy(struct scsi_device *sdev)
2073 {
2074 	kfree(sdev->hostdata);
2075 	sdev->hostdata = NULL;
2076 }
2077 
2078 /*
2079 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2080 *                                       kill adapter
2081 * @instance:				Adapter soft state
2082 *
2083 */
2084 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2085 {
2086 	int i;
2087 	struct megasas_cmd *cmd_mfi;
2088 	struct megasas_cmd_fusion *cmd_fusion;
2089 	struct fusion_context *fusion = instance->ctrl_context;
2090 
2091 	/* Find all outstanding ioctls */
2092 	if (fusion) {
2093 		for (i = 0; i < instance->max_fw_cmds; i++) {
2094 			cmd_fusion = fusion->cmd_list[i];
2095 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2096 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2097 				if (cmd_mfi->sync_cmd &&
2098 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2099 					cmd_mfi->frame->hdr.cmd_status =
2100 							MFI_STAT_WRONG_STATE;
2101 					megasas_complete_cmd(instance,
2102 							     cmd_mfi, DID_OK);
2103 				}
2104 			}
2105 		}
2106 	} else {
2107 		for (i = 0; i < instance->max_fw_cmds; i++) {
2108 			cmd_mfi = instance->cmd_list[i];
2109 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2110 				MFI_CMD_ABORT)
2111 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2112 		}
2113 	}
2114 }
2115 
2116 
2117 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2118 {
2119 	/* Set critical error to block I/O & ioctls in case caller didn't */
2120 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2121 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2122 	msleep(1000);
2123 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2124 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2125 		(instance->adapter_type != MFI_SERIES)) {
2126 		if (!instance->requestorId) {
2127 			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2128 			/* Flush */
2129 			readl(&instance->reg_set->doorbell);
2130 		}
2131 		if (instance->requestorId && instance->peerIsPresent)
2132 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2133 	} else {
2134 		writel(MFI_STOP_ADP,
2135 			&instance->reg_set->inbound_doorbell);
2136 	}
2137 	/* Complete outstanding ioctls when adapter is killed */
2138 	megasas_complete_outstanding_ioctls(instance);
2139 }
2140 
2141  /**
2142   * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2143   *					restored to max value
2144   * @instance:			Adapter soft state
2145   *
2146   */
2147 void
2148 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2149 {
2150 	unsigned long flags;
2151 
2152 	if (instance->flag & MEGASAS_FW_BUSY
2153 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2154 	    && atomic_read(&instance->fw_outstanding) <
2155 	    instance->throttlequeuedepth + 1) {
2156 
2157 		spin_lock_irqsave(instance->host->host_lock, flags);
2158 		instance->flag &= ~MEGASAS_FW_BUSY;
2159 
2160 		instance->host->can_queue = instance->cur_can_queue;
2161 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2162 	}
2163 }
2164 
2165 /**
2166  * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
2167  * @instance_addr:			Address of adapter soft state
2168  *
2169  * Tasklet to complete cmds
2170  */
2171 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2172 {
2173 	u32 producer;
2174 	u32 consumer;
2175 	u32 context;
2176 	struct megasas_cmd *cmd;
2177 	struct megasas_instance *instance =
2178 				(struct megasas_instance *)instance_addr;
2179 	unsigned long flags;
2180 
	/* If we have already declared adapter dead, do not complete cmds */
2182 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2183 		return;
2184 
2185 	spin_lock_irqsave(&instance->completion_lock, flags);
2186 
2187 	producer = le32_to_cpu(*instance->producer);
2188 	consumer = le32_to_cpu(*instance->consumer);
2189 
2190 	while (consumer != producer) {
2191 		context = le32_to_cpu(instance->reply_queue[consumer]);
2192 		if (context >= instance->max_fw_cmds) {
2193 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2194 				context);
2195 			BUG();
2196 		}
2197 
2198 		cmd = instance->cmd_list[context];
2199 
2200 		megasas_complete_cmd(instance, cmd, DID_OK);
2201 
2202 		consumer++;
2203 		if (consumer == (instance->max_fw_cmds + 1)) {
2204 			consumer = 0;
2205 		}
2206 	}
2207 
2208 	*instance->consumer = cpu_to_le32(producer);
2209 
2210 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2211 
2212 	/*
2213 	 * Check if we can restore can_queue
2214 	 */
2215 	megasas_check_and_restore_queue_depth(instance);
2216 }
2217 
2218 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2219 
2220 /**
2221  * megasas_start_timer - Initializes sriov heartbeat timer object
2222  * @instance:		Adapter soft state
2223  *
2224  */
2225 void megasas_start_timer(struct megasas_instance *instance)
2226 {
2227 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2228 
2229 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2230 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2231 	add_timer(timer);
2232 }
2233 
2234 static void
2235 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2236 
2237 static void
2238 process_fw_state_change_wq(struct work_struct *work);
2239 
2240 void megasas_do_ocr(struct megasas_instance *instance)
2241 {
2242 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2243 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2244 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2245 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2246 	}
2247 	instance->instancet->disable_intr(instance);
2248 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2249 	instance->issuepend_done = 0;
2250 
2251 	atomic_set(&instance->fw_outstanding, 0);
2252 	megasas_internal_reset_defer_cmds(instance);
2253 	process_fw_state_change_wq(&instance->work_init);
2254 }
2255 
2256 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2257 					    int initial)
2258 {
2259 	struct megasas_cmd *cmd;
2260 	struct megasas_dcmd_frame *dcmd;
2261 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2262 	dma_addr_t new_affiliation_111_h;
2263 	int ld, retval = 0;
2264 	u8 thisVf;
2265 
2266 	cmd = megasas_get_cmd(instance);
2267 
2268 	if (!cmd) {
2269 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2270 		       "Failed to get cmd for scsi%d\n",
2271 			instance->host->host_no);
2272 		return -ENOMEM;
2273 	}
2274 
2275 	dcmd = &cmd->frame->dcmd;
2276 
2277 	if (!instance->vf_affiliation_111) {
2278 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2279 		       "affiliation for scsi%d\n", instance->host->host_no);
2280 		megasas_return_cmd(instance, cmd);
2281 		return -ENOMEM;
2282 	}
2283 
	if (initial)
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2287 	else {
2288 		new_affiliation_111 =
2289 			dma_alloc_coherent(&instance->pdev->dev,
2290 					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2291 					   &new_affiliation_111_h, GFP_KERNEL);
2292 		if (!new_affiliation_111) {
2293 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2294 			       "memory for new affiliation for scsi%d\n",
2295 			       instance->host->host_no);
2296 			megasas_return_cmd(instance, cmd);
2297 			return -ENOMEM;
2298 		}
2299 	}
2300 
2301 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2302 
2303 	dcmd->cmd = MFI_CMD_DCMD;
2304 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2305 	dcmd->sge_count = 1;
2306 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2307 	dcmd->timeout = 0;
2308 	dcmd->pad_0 = 0;
2309 	dcmd->data_xfer_len =
2310 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2311 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2312 
2313 	if (initial)
2314 		dcmd->sgl.sge32[0].phys_addr =
2315 			cpu_to_le32(instance->vf_affiliation_111_h);
2316 	else
2317 		dcmd->sgl.sge32[0].phys_addr =
2318 			cpu_to_le32(new_affiliation_111_h);
2319 
2320 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2321 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2322 
2323 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2324 	       "scsi%d\n", instance->host->host_no);
2325 
2326 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2327 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2328 		       " failed with status 0x%x for scsi%d\n",
2329 		       dcmd->cmd_status, instance->host->host_no);
2330 		retval = 1; /* Do a scan if we couldn't get affiliation */
2331 		goto out;
2332 	}
2333 
2334 	if (!initial) {
2335 		thisVf = new_affiliation_111->thisVf;
2336 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2337 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2338 			    new_affiliation_111->map[ld].policy[thisVf]) {
2339 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2340 				       "Got new LD/VF affiliation for scsi%d\n",
2341 				       instance->host->host_no);
2342 				memcpy(instance->vf_affiliation_111,
2343 				       new_affiliation_111,
2344 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2345 				retval = 1;
2346 				goto out;
2347 			}
2348 	}
2349 out:
2350 	if (new_affiliation_111) {
2351 		dma_free_coherent(&instance->pdev->dev,
2352 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2353 				    new_affiliation_111,
2354 				    new_affiliation_111_h);
2355 	}
2356 
2357 	megasas_return_cmd(instance, cmd);
2358 
2359 	return retval;
2360 }
2361 
2362 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2363 					    int initial)
2364 {
2365 	struct megasas_cmd *cmd;
2366 	struct megasas_dcmd_frame *dcmd;
2367 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2368 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2369 	dma_addr_t new_affiliation_h;
2370 	int i, j, retval = 0, found = 0, doscan = 0;
2371 	u8 thisVf;
2372 
2373 	cmd = megasas_get_cmd(instance);
2374 
2375 	if (!cmd) {
2376 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2377 		       "Failed to get cmd for scsi%d\n",
2378 		       instance->host->host_no);
2379 		return -ENOMEM;
2380 	}
2381 
2382 	dcmd = &cmd->frame->dcmd;
2383 
2384 	if (!instance->vf_affiliation) {
2385 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2386 		       "affiliation for scsi%d\n", instance->host->host_no);
2387 		megasas_return_cmd(instance, cmd);
2388 		return -ENOMEM;
2389 	}
2390 
2391 	if (initial)
2392 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2393 		       sizeof(struct MR_LD_VF_AFFILIATION));
2394 	else {
2395 		new_affiliation =
2396 			dma_alloc_coherent(&instance->pdev->dev,
2397 					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2398 					   &new_affiliation_h, GFP_KERNEL);
2399 		if (!new_affiliation) {
2400 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2401 			       "memory for new affiliation for scsi%d\n",
2402 			       instance->host->host_no);
2403 			megasas_return_cmd(instance, cmd);
2404 			return -ENOMEM;
2405 		}
2406 	}
2407 
2408 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2409 
2410 	dcmd->cmd = MFI_CMD_DCMD;
2411 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2412 	dcmd->sge_count = 1;
2413 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2414 	dcmd->timeout = 0;
2415 	dcmd->pad_0 = 0;
2416 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2417 		sizeof(struct MR_LD_VF_AFFILIATION));
2418 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2419 
2420 	if (initial)
2421 		dcmd->sgl.sge32[0].phys_addr =
2422 			cpu_to_le32(instance->vf_affiliation_h);
2423 	else
2424 		dcmd->sgl.sge32[0].phys_addr =
2425 			cpu_to_le32(new_affiliation_h);
2426 
2427 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2428 		sizeof(struct MR_LD_VF_AFFILIATION));
2429 
2430 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2431 	       "scsi%d\n", instance->host->host_no);
2432 
2433 
2434 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2435 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2436 		       " failed with status 0x%x for scsi%d\n",
2437 		       dcmd->cmd_status, instance->host->host_no);
2438 		retval = 1; /* Do a scan if we couldn't get affiliation */
2439 		goto out;
2440 	}
2441 
2442 	if (!initial) {
2443 		if (!new_affiliation->ldCount) {
2444 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2445 			       "affiliation for passive path for scsi%d\n",
2446 			       instance->host->host_no);
2447 			retval = 1;
2448 			goto out;
2449 		}
2450 		newmap = new_affiliation->map;
2451 		savedmap = instance->vf_affiliation->map;
2452 		thisVf = new_affiliation->thisVf;
2453 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2454 			found = 0;
2455 			for (j = 0; j < instance->vf_affiliation->ldCount;
2456 			     j++) {
2457 				if (newmap->ref.targetId ==
2458 				    savedmap->ref.targetId) {
2459 					found = 1;
2460 					if (newmap->policy[thisVf] !=
2461 					    savedmap->policy[thisVf]) {
2462 						doscan = 1;
2463 						goto out;
2464 					}
2465 				}
2466 				savedmap = (struct MR_LD_VF_MAP *)
2467 					((unsigned char *)savedmap +
2468 					 savedmap->size);
2469 			}
2470 			if (!found && newmap->policy[thisVf] !=
2471 			    MR_LD_ACCESS_HIDDEN) {
2472 				doscan = 1;
2473 				goto out;
2474 			}
2475 			newmap = (struct MR_LD_VF_MAP *)
2476 				((unsigned char *)newmap + newmap->size);
2477 		}
2478 
2479 		newmap = new_affiliation->map;
2480 		savedmap = instance->vf_affiliation->map;
2481 
2482 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2483 			found = 0;
2484 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2485 				if (savedmap->ref.targetId ==
2486 				    newmap->ref.targetId) {
2487 					found = 1;
2488 					if (savedmap->policy[thisVf] !=
2489 					    newmap->policy[thisVf]) {
2490 						doscan = 1;
2491 						goto out;
2492 					}
2493 				}
2494 				newmap = (struct MR_LD_VF_MAP *)
2495 					((unsigned char *)newmap +
2496 					 newmap->size);
2497 			}
2498 			if (!found && savedmap->policy[thisVf] !=
2499 			    MR_LD_ACCESS_HIDDEN) {
2500 				doscan = 1;
2501 				goto out;
2502 			}
2503 			savedmap = (struct MR_LD_VF_MAP *)
2504 				((unsigned char *)savedmap +
2505 				 savedmap->size);
2506 		}
2507 	}
2508 out:
2509 	if (doscan) {
2510 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2511 		       "affiliation for scsi%d\n", instance->host->host_no);
2512 		memcpy(instance->vf_affiliation, new_affiliation,
2513 		       new_affiliation->size);
2514 		retval = 1;
2515 	}
2516 
2517 	if (new_affiliation)
2518 		dma_free_coherent(&instance->pdev->dev,
2519 				    (MAX_LOGICAL_DRIVES + 1) *
2520 				    sizeof(struct MR_LD_VF_AFFILIATION),
2521 				    new_affiliation, new_affiliation_h);
2522 	megasas_return_cmd(instance, cmd);
2523 
2524 	return retval;
2525 }
2526 
2527 /* This function will get the current SR-IOV LD/VF affiliation */
2528 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2529 	int initial)
2530 {
2531 	int retval;
2532 
2533 	if (instance->PlasmaFW111)
2534 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2535 	else
2536 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2537 	return retval;
2538 }
2539 
2540 /* This function will tell FW to start the SR-IOV heartbeat */
2541 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2542 					 int initial)
2543 {
2544 	struct megasas_cmd *cmd;
2545 	struct megasas_dcmd_frame *dcmd;
2546 	int retval = 0;
2547 
2548 	cmd = megasas_get_cmd(instance);
2549 
2550 	if (!cmd) {
2551 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2552 		       "Failed to get cmd for scsi%d\n",
2553 		       instance->host->host_no);
2554 		return -ENOMEM;
2555 	}
2556 
2557 	dcmd = &cmd->frame->dcmd;
2558 
2559 	if (initial) {
2560 		instance->hb_host_mem =
2561 			dma_alloc_coherent(&instance->pdev->dev,
2562 					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2563 					   &instance->hb_host_mem_h,
2564 					   GFP_KERNEL);
2565 		if (!instance->hb_host_mem) {
2566 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2567 			       " memory for heartbeat host memory for scsi%d\n",
2568 			       instance->host->host_no);
2569 			retval = -ENOMEM;
2570 			goto out;
2571 		}
2572 	}
2573 
2574 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2575 
2576 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2577 	dcmd->cmd = MFI_CMD_DCMD;
2578 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2579 	dcmd->sge_count = 1;
2580 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2581 	dcmd->timeout = 0;
2582 	dcmd->pad_0 = 0;
2583 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2584 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2585 
2586 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2587 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2588 
2589 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2590 	       instance->host->host_no);
2591 
2592 	if ((instance->adapter_type != MFI_SERIES) &&
2593 	    !instance->mask_interrupts)
2594 		retval = megasas_issue_blocked_cmd(instance, cmd,
2595 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2596 	else
2597 		retval = megasas_issue_polled(instance, cmd);
2598 
2599 	if (retval) {
2600 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2601 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2602 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2603 			"timed out" : "failed", instance->host->host_no);
2604 		retval = 1;
2605 	}
2606 
2607 out:
2608 	megasas_return_cmd(instance, cmd);
2609 
2610 	return retval;
2611 }
2612 
2613 /* Handler for SR-IOV heartbeat */
2614 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2615 {
2616 	struct megasas_instance *instance =
2617 		from_timer(instance, t, sriov_heartbeat_timer);
2618 
2619 	if (instance->hb_host_mem->HB.fwCounter !=
2620 	    instance->hb_host_mem->HB.driverCounter) {
2621 		instance->hb_host_mem->HB.driverCounter =
2622 			instance->hb_host_mem->HB.fwCounter;
2623 		mod_timer(&instance->sriov_heartbeat_timer,
2624 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2625 	} else {
2626 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2627 		       "completed for scsi%d\n", instance->host->host_no);
2628 		schedule_work(&instance->work_init);
2629 	}
2630 }
2631 
2632 /**
2633  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2634  * @instance:				Adapter soft state
2635  *
2636  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2637  * complete all its outstanding commands. Returns error if one or more IOs
2638  * are pending after this time period. It also marks the controller dead.
2639  */
2640 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2641 {
2642 	int i, sl, outstanding;
2643 	u32 reset_index;
2644 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2645 	unsigned long flags;
2646 	struct list_head clist_local;
2647 	struct megasas_cmd *reset_cmd;
2648 	u32 fw_state;
2649 
2650 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2651 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2652 		__func__, __LINE__);
2653 		return FAILED;
2654 	}
2655 
2656 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2657 
2658 		INIT_LIST_HEAD(&clist_local);
2659 		spin_lock_irqsave(&instance->hba_lock, flags);
2660 		list_splice_init(&instance->internal_reset_pending_q,
2661 				&clist_local);
2662 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2663 
2664 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2665 		for (i = 0; i < wait_time; i++) {
2666 			msleep(1000);
2667 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2668 				break;
2669 		}
2670 
2671 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2672 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2673 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2674 			return FAILED;
2675 		}
2676 
2677 		reset_index = 0;
2678 		while (!list_empty(&clist_local)) {
2679 			reset_cmd = list_entry((&clist_local)->next,
2680 						struct megasas_cmd, list);
2681 			list_del_init(&reset_cmd->list);
2682 			if (reset_cmd->scmd) {
2683 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2684 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2685 					reset_index, reset_cmd,
2686 					reset_cmd->scmd->cmnd[0]);
2687 
2688 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2689 				megasas_return_cmd(instance, reset_cmd);
2690 			} else if (reset_cmd->sync_cmd) {
2691 				dev_notice(&instance->pdev->dev, "%p synch cmds"
2692 						"reset queue\n",
2693 						reset_cmd);
2694 
2695 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2696 				instance->instancet->fire_cmd(instance,
2697 						reset_cmd->frame_phys_addr,
2698 						0, instance->reg_set);
2699 			} else {
2700 				dev_notice(&instance->pdev->dev, "%p unexpected"
2701 					"cmds lst\n",
2702 					reset_cmd);
2703 			}
2704 			reset_index++;
2705 		}
2706 
2707 		return SUCCESS;
2708 	}
2709 
2710 	for (i = 0; i < resetwaittime; i++) {
2711 		outstanding = atomic_read(&instance->fw_outstanding);
2712 
2713 		if (!outstanding)
2714 			break;
2715 
2716 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2717 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2718 			       "commands to complete\n",i,outstanding);
2719 			/*
2720 			 * Call cmd completion routine. Cmd to be
2721 			 * be completed directly without depending on isr.
2722 			 */
2723 			megasas_complete_cmd_dpc((unsigned long)instance);
2724 		}
2725 
2726 		msleep(1000);
2727 	}
2728 
2729 	i = 0;
2730 	outstanding = atomic_read(&instance->fw_outstanding);
2731 	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2732 
2733 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2734 		goto no_outstanding;
2735 
2736 	if (instance->disableOnlineCtrlReset)
2737 		goto kill_hba_and_failed;
2738 	do {
2739 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2740 			dev_info(&instance->pdev->dev,
2741 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2742 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2743 			if (i == 3)
2744 				goto kill_hba_and_failed;
2745 			megasas_do_ocr(instance);
2746 
2747 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2748 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2749 				__func__, __LINE__);
2750 				return FAILED;
2751 			}
2752 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2753 				__func__, __LINE__);
2754 
2755 			for (sl = 0; sl < 10; sl++)
2756 				msleep(500);
2757 
2758 			outstanding = atomic_read(&instance->fw_outstanding);
2759 
2760 			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2761 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2762 				goto no_outstanding;
2763 		}
2764 		i++;
2765 	} while (i <= 3);
2766 
2767 no_outstanding:
2768 
2769 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2770 		__func__, __LINE__);
2771 	return SUCCESS;
2772 
2773 kill_hba_and_failed:
2774 
2775 	/* Reset not supported, kill adapter */
2776 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2777 		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2778 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2779 		atomic_read(&instance->fw_outstanding));
2780 	megasas_dump_pending_frames(instance);
2781 	megaraid_sas_kill_hba(instance);
2782 
2783 	return FAILED;
2784 }
2785 
2786 /**
2787  * megasas_generic_reset -	Generic reset routine
2788  * @scmd:			Mid-layer SCSI command
2789  *
2790  * This routine implements a generic reset handler for device, bus and host
2791  * reset requests. Device, bus and host specific reset handlers can use this
2792  * function after they do their specific tasks.
2793  */
2794 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2795 {
2796 	int ret_val;
2797 	struct megasas_instance *instance;
2798 
2799 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2800 
2801 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2802 		 scmd->cmnd[0], scmd->retries);
2803 
2804 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2805 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2806 		return FAILED;
2807 	}
2808 
2809 	ret_val = megasas_wait_for_outstanding(instance);
2810 	if (ret_val == SUCCESS)
2811 		dev_notice(&instance->pdev->dev, "reset successful\n");
2812 	else
2813 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2814 
2815 	return ret_val;
2816 }
2817 
2818 /**
2819  * megasas_reset_timer - quiesce the adapter if required
2820  * @scmd:		scsi cmnd
2821  *
2822  * Sets the FW busy flag and reduces the host->can_queue if the
2823  * cmd has not been completed within the timeout period.
2824  */
2825 static enum
2826 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2827 {
2828 	struct megasas_instance *instance;
2829 	unsigned long flags;
2830 
2831 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2832 				(scmd_timeout * 2) * HZ)) {
2833 		return BLK_EH_DONE;
2834 	}
2835 
2836 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2837 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2838 		/* FW is busy, throttle IO */
2839 		spin_lock_irqsave(instance->host->host_lock, flags);
2840 
2841 		instance->host->can_queue = instance->throttlequeuedepth;
2842 		instance->last_time = jiffies;
2843 		instance->flag |= MEGASAS_FW_BUSY;
2844 
2845 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2846 	}
2847 	return BLK_EH_RESET_TIMER;
2848 }
2849 
2850 /**
2851  * megasas_dump -	This function will print hexdump of provided buffer.
2852  * @buf:		Buffer to be dumped
2853  * @sz:		Size in bytes
2854  * @format:		Different formats of dumping e.g. format=n will
2855  *			cause only 'n' 32 bit words to be dumped in a single
2856  *			line.
2857  */
2858 inline void
2859 megasas_dump(void *buf, int sz, int format)
2860 {
2861 	int i;
2862 	__le32 *buf_loc = (__le32 *)buf;
2863 
2864 	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2865 		if ((i % format) == 0) {
2866 			if (i != 0)
2867 				printk(KERN_CONT "\n");
2868 			printk(KERN_CONT "%08x: ", (i * 4));
2869 		}
2870 		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2871 	}
2872 	printk(KERN_CONT "\n");
2873 }
2874 
2875 /**
2876  * megasas_dump_reg_set -	This function will print hexdump of register set
2877  * @buf:			Buffer to be dumped
2878  * @sz:				Size in bytes
2879  * @format:			Different formats of dumping e.g. format=n will
2880  *				cause only 'n' 32 bit words to be dumped in a
2881  *				single line.
2882  */
2883 inline void
2884 megasas_dump_reg_set(void __iomem *reg_set)
2885 {
2886 	unsigned int i, sz = 256;
2887 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2888 
2889 	for (i = 0; i < (sz / sizeof(u32)); i++)
2890 		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2891 }
2892 
2893 /**
2894  * megasas_dump_fusion_io -	This function will print key details
2895  *				of SCSI IO
2896  * @scmd:			SCSI command pointer of SCSI IO
2897  */
2898 void
2899 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2900 {
2901 	struct megasas_cmd_fusion *cmd;
2902 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2903 	struct megasas_instance *instance;
2904 
2905 	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2906 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2907 
2908 	scmd_printk(KERN_INFO, scmd,
2909 		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
2910 		    scmd, scmd->retries, scmd->allowed);
2911 	scsi_print_command(scmd);
2912 
2913 	if (cmd) {
2914 		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2915 		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2916 		scmd_printk(KERN_INFO, scmd,
2917 			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
2918 			    req_desc->SCSIIO.RequestFlags,
2919 			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2920 			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2921 
2922 		printk(KERN_INFO "IO request frame:\n");
2923 		megasas_dump(cmd->io_request,
2924 			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2925 		printk(KERN_INFO "Chain frame:\n");
2926 		megasas_dump(cmd->sg_frame,
2927 			     instance->max_chain_frame_sz, 8);
2928 	}
2929 
2930 }
2931 
2932 /*
2933  * megasas_dump_sys_regs - This function will dump system registers through
2934  *			    sysfs.
2935  * @reg_set:		    Pointer to System register set.
2936  * @buf:		    Buffer to which output is to be written.
2937  * @return:		    Number of bytes written to buffer.
2938  */
2939 static inline ssize_t
2940 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2941 {
2942 	unsigned int i, sz = 256;
2943 	int bytes_wrote = 0;
2944 	char *loc = (char *)buf;
2945 	u32 __iomem *reg = (u32 __iomem *)reg_set;
2946 
2947 	for (i = 0; i < sz / sizeof(u32); i++) {
		bytes_wrote += scnprintf(loc + bytes_wrote,
					 PAGE_SIZE - bytes_wrote,
					 "%08x: %08x\n", (i * 4),
					 readl(&reg[i]));
2951 	}
2952 	return bytes_wrote;
2953 }
2954 
2955 /**
2956  * megasas_reset_bus_host -	Bus & host reset handler entry point
2957  */
2958 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2959 {
2960 	int ret;
2961 	struct megasas_instance *instance;
2962 
2963 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2964 
2965 	scmd_printk(KERN_INFO, scmd,
2966 		"OCR is requested due to IO timeout!!\n");
2967 
2968 	scmd_printk(KERN_INFO, scmd,
2969 		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
2970 		scmd->device->host->shost_state,
2971 		scsi_host_busy(scmd->device->host),
2972 		atomic_read(&instance->fw_outstanding));
2973 	/*
2974 	 * First wait for all commands to complete
2975 	 */
2976 	if (instance->adapter_type == MFI_SERIES) {
2977 		ret = megasas_generic_reset(scmd);
2978 	} else {
2979 		megasas_dump_fusion_io(scmd);
2980 		ret = megasas_reset_fusion(scmd->device->host,
2981 				SCSIIO_TIMEOUT_OCR);
2982 	}
2983 
2984 	return ret;
2985 }
2986 
2987 /**
2988  * megasas_task_abort - Issues task abort request to firmware
2989  *			(supported only for fusion adapters)
2990  * @scmd:		SCSI command pointer
2991  */
2992 static int megasas_task_abort(struct scsi_cmnd *scmd)
2993 {
2994 	int ret;
2995 	struct megasas_instance *instance;
2996 
2997 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2998 
2999 	if (instance->adapter_type != MFI_SERIES)
3000 		ret = megasas_task_abort_fusion(scmd);
3001 	else {
3002 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3003 		ret = FAILED;
3004 	}
3005 
3006 	return ret;
3007 }
3008 
3009 /**
3010  * megasas_reset_target:  Issues target reset request to firmware
3011  *                        (supported only for fusion adapters)
3012  * @scmd:                 SCSI command pointer
3013  */
3014 static int megasas_reset_target(struct scsi_cmnd *scmd)
3015 {
3016 	int ret;
3017 	struct megasas_instance *instance;
3018 
3019 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3020 
3021 	if (instance->adapter_type != MFI_SERIES)
3022 		ret = megasas_reset_target_fusion(scmd);
3023 	else {
3024 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3025 		ret = FAILED;
3026 	}
3027 
3028 	return ret;
3029 }
3030 
3031 /**
3032  * megasas_bios_param - Returns disk geometry for a disk
3033  * @sdev:		device handle
3034  * @bdev:		block device
3035  * @capacity:		drive capacity
3036  * @geom:		geometry parameters
3037  */
3038 static int
3039 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3040 		 sector_t capacity, int geom[])
3041 {
3042 	int heads;
3043 	int sectors;
3044 	sector_t cylinders;
3045 	unsigned long tmp;
3046 
3047 	/* Default heads (64) & sectors (32) */
3048 	heads = 64;
3049 	sectors = 32;
3050 
3051 	tmp = heads * sectors;
3052 	cylinders = capacity;
3053 
3054 	sector_div(cylinders, tmp);
3055 
3056 	/*
3057 	 * Handle extended translation size for logical drives > 1Gb
3058 	 */
3059 
3060 	if (capacity >= 0x200000) {
3061 		heads = 255;
3062 		sectors = 63;
3063 		tmp = heads*sectors;
3064 		cylinders = capacity;
3065 		sector_div(cylinders, tmp);
3066 	}
3067 
3068 	geom[0] = heads;
3069 	geom[1] = sectors;
3070 	geom[2] = cylinders;
3071 
3072 	return 0;
3073 }
3074 
3075 static void megasas_aen_polling(struct work_struct *work);
3076 
3077 /**
3078  * megasas_service_aen -	Processes an event notification
3079  * @instance:			Adapter soft state
3080  * @cmd:			AEN command completed by the ISR
3081  *
3082  * For AEN, driver sends a command down to FW that is held by the FW till an
3083  * event occurs. When an event of interest occurs, FW completes the command
3084  * that it was previously holding.
3085  *
 * This routine sends a SIGIO signal to processes that have registered with
 * the driver for AEN.
3088  */
3089 static void
3090 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3091 {
3092 	unsigned long flags;
3093 
3094 	/*
3095 	 * Don't signal app if it is just an aborted previously registered aen
3096 	 */
3097 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3098 		spin_lock_irqsave(&poll_aen_lock, flags);
3099 		megasas_poll_wait_aen = 1;
3100 		spin_unlock_irqrestore(&poll_aen_lock, flags);
3101 		wake_up(&megasas_poll_wait);
3102 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3103 	}
3104 	else
3105 		cmd->abort_aen = 0;
3106 
3107 	instance->aen_cmd = NULL;
3108 
3109 	megasas_return_cmd(instance, cmd);
3110 
3111 	if ((instance->unload == 0) &&
3112 		((instance->issuepend_done == 1))) {
3113 		struct megasas_aen_event *ev;
3114 
3115 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3116 		if (!ev) {
3117 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3118 		} else {
3119 			ev->instance = instance;
3120 			instance->ev = ev;
3121 			INIT_DELAYED_WORK(&ev->hotplug_work,
3122 					  megasas_aen_polling);
3123 			schedule_delayed_work(&ev->hotplug_work, 0);
3124 		}
3125 	}
3126 }
3127 
3128 static ssize_t
3129 fw_crash_buffer_store(struct device *cdev,
3130 	struct device_attribute *attr, const char *buf, size_t count)
3131 {
3132 	struct Scsi_Host *shost = class_to_shost(cdev);
3133 	struct megasas_instance *instance =
3134 		(struct megasas_instance *) shost->hostdata;
3135 	int val = 0;
3136 	unsigned long flags;
3137 
3138 	if (kstrtoint(buf, 0, &val) != 0)
3139 		return -EINVAL;
3140 
3141 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3142 	instance->fw_crash_buffer_offset = val;
3143 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3144 	return strlen(buf);
3145 }
3146 
3147 static ssize_t
3148 fw_crash_buffer_show(struct device *cdev,
3149 	struct device_attribute *attr, char *buf)
3150 {
3151 	struct Scsi_Host *shost = class_to_shost(cdev);
3152 	struct megasas_instance *instance =
3153 		(struct megasas_instance *) shost->hostdata;
3154 	u32 size;
3155 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3156 	unsigned long src_addr;
3157 	unsigned long flags;
3158 	u32 buff_offset;
3159 
3160 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3161 	buff_offset = instance->fw_crash_buffer_offset;
3162 	if (!instance->crash_dump_buf &&
3163 		!((instance->fw_crash_state == AVAILABLE) ||
3164 		(instance->fw_crash_state == COPYING))) {
3165 		dev_err(&instance->pdev->dev,
3166 			"Firmware crash dump is not available\n");
3167 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3168 		return -EINVAL;
3169 	}
3170 
3171 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3172 		dev_err(&instance->pdev->dev,
3173 			"Firmware crash dump offset is out of range\n");
3174 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3175 		return 0;
3176 	}
3177 
3178 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3179 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3180 
3181 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3182 		(buff_offset % dmachunk);
3183 	memcpy(buf, (void *)src_addr, size);
3184 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3185 
3186 	return size;
3187 }
3188 
3189 static ssize_t
3190 fw_crash_buffer_size_show(struct device *cdev,
3191 	struct device_attribute *attr, char *buf)
3192 {
3193 	struct Scsi_Host *shost = class_to_shost(cdev);
3194 	struct megasas_instance *instance =
3195 		(struct megasas_instance *) shost->hostdata;
3196 
3197 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3198 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3199 }
3200 
3201 static ssize_t
3202 fw_crash_state_store(struct device *cdev,
3203 	struct device_attribute *attr, const char *buf, size_t count)
3204 {
3205 	struct Scsi_Host *shost = class_to_shost(cdev);
3206 	struct megasas_instance *instance =
3207 		(struct megasas_instance *) shost->hostdata;
3208 	int val = 0;
3209 	unsigned long flags;
3210 
3211 	if (kstrtoint(buf, 0, &val) != 0)
3212 		return -EINVAL;
3213 
	if (val <= AVAILABLE || val > COPY_ERROR) {
3215 		dev_err(&instance->pdev->dev, "application updates invalid "
3216 			"firmware crash state\n");
3217 		return -EINVAL;
3218 	}
3219 
3220 	instance->fw_crash_state = val;
3221 
3222 	if ((val == COPIED) || (val == COPY_ERROR)) {
3223 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3224 		megasas_free_host_crash_buffer(instance);
3225 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3226 		if (val == COPY_ERROR)
3227 			dev_info(&instance->pdev->dev, "application failed to "
3228 				"copy Firmware crash dump\n");
3229 		else
3230 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3231 				"copied successfully\n");
3232 	}
3233 	return strlen(buf);
3234 }
3235 
3236 static ssize_t
3237 fw_crash_state_show(struct device *cdev,
3238 	struct device_attribute *attr, char *buf)
3239 {
3240 	struct Scsi_Host *shost = class_to_shost(cdev);
3241 	struct megasas_instance *instance =
3242 		(struct megasas_instance *) shost->hostdata;
3243 
3244 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3245 }
3246 
3247 static ssize_t
3248 page_size_show(struct device *cdev,
3249 	struct device_attribute *attr, char *buf)
3250 {
3251 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3252 }
3253 
3254 static ssize_t
3255 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3256 	char *buf)
3257 {
3258 	struct Scsi_Host *shost = class_to_shost(cdev);
3259 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3260 
3261 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3262 }
3263 
3264 static ssize_t
3265 fw_cmds_outstanding_show(struct device *cdev,
3266 				 struct device_attribute *attr, char *buf)
3267 {
3268 	struct Scsi_Host *shost = class_to_shost(cdev);
3269 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3270 
3271 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3272 }
3273 
3274 static ssize_t
3275 dump_system_regs_show(struct device *cdev,
3276 			       struct device_attribute *attr, char *buf)
3277 {
3278 	struct Scsi_Host *shost = class_to_shost(cdev);
3279 	struct megasas_instance *instance =
3280 			(struct megasas_instance *)shost->hostdata;
3281 
3282 	return megasas_dump_sys_regs(instance->reg_set, buf);
3283 }
3284 
3285 static ssize_t
3286 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3287 			  char *buf)
3288 {
3289 	struct Scsi_Host *shost = class_to_shost(cdev);
3290 	struct megasas_instance *instance =
3291 			(struct megasas_instance *)shost->hostdata;
3292 
3293 	return snprintf(buf, PAGE_SIZE, "%ld\n",
3294 			(unsigned long)instance->map_id);
3295 }
3296 
3297 static DEVICE_ATTR_RW(fw_crash_buffer);
3298 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3299 static DEVICE_ATTR_RW(fw_crash_state);
3300 static DEVICE_ATTR_RO(page_size);
3301 static DEVICE_ATTR_RO(ldio_outstanding);
3302 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3303 static DEVICE_ATTR_RO(dump_system_regs);
3304 static DEVICE_ATTR_RO(raid_map_id);
3305 
3306 struct device_attribute *megaraid_host_attrs[] = {
3307 	&dev_attr_fw_crash_buffer_size,
3308 	&dev_attr_fw_crash_buffer,
3309 	&dev_attr_fw_crash_state,
3310 	&dev_attr_page_size,
3311 	&dev_attr_ldio_outstanding,
3312 	&dev_attr_fw_cmds_outstanding,
3313 	&dev_attr_dump_system_regs,
3314 	&dev_attr_raid_map_id,
3315 	NULL,
3316 };
3317 
3318 /*
3319  * Scsi host template for megaraid_sas driver
3320  */
3321 static struct scsi_host_template megasas_template = {
3322 
3323 	.module = THIS_MODULE,
3324 	.name = "Avago SAS based MegaRAID driver",
3325 	.proc_name = "megaraid_sas",
3326 	.slave_configure = megasas_slave_configure,
3327 	.slave_alloc = megasas_slave_alloc,
3328 	.slave_destroy = megasas_slave_destroy,
3329 	.queuecommand = megasas_queue_command,
3330 	.eh_target_reset_handler = megasas_reset_target,
3331 	.eh_abort_handler = megasas_task_abort,
3332 	.eh_host_reset_handler = megasas_reset_bus_host,
3333 	.eh_timed_out = megasas_reset_timer,
3334 	.shost_attrs = megaraid_host_attrs,
3335 	.bios_param = megasas_bios_param,
3336 	.change_queue_depth = scsi_change_queue_depth,
3337 	.no_write_same = 1,
3338 };
3339 
3340 /**
3341  * megasas_complete_int_cmd -	Completes an internal command
3342  * @instance:			Adapter soft state
3343  * @cmd:			Command to be completed
3344  *
3345  * The megasas_issue_blocked_cmd() function waits for a command to complete
3346  * after it issues a command. This function wakes up that waiting routine by
3347  * calling wake_up() on the wait queue.
3348  */
3349 static void
3350 megasas_complete_int_cmd(struct megasas_instance *instance,
3351 			 struct megasas_cmd *cmd)
3352 {
3353 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3354 	wake_up(&instance->int_cmd_wait_q);
3355 }
3356 
3357 /**
3358  * megasas_complete_abort -	Completes aborting a command
3359  * @instance:			Adapter soft state
3360  * @cmd:			Cmd that was issued to abort another cmd
3361  *
3362  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3363  * after it issues an abort on a previously issued command. This function
3364  * wakes up all functions waiting on the same wait queue.
3365  */
3366 static void
3367 megasas_complete_abort(struct megasas_instance *instance,
3368 		       struct megasas_cmd *cmd)
3369 {
3370 	if (cmd->sync_cmd) {
3371 		cmd->sync_cmd = 0;
3372 		cmd->cmd_status_drv = 0;
3373 		wake_up(&instance->abort_cmd_wait_q);
3374 	}
3375 }
3376 
3377 /**
3378  * megasas_complete_cmd -	Completes a command
3379  * @instance:			Adapter soft state
3380  * @cmd:			Command to be completed
3381  * @alt_status:			If non-zero, use this value as status to
3382  *				SCSI mid-layer instead of the value returned
3383  *				by the FW. This should be used if caller wants
3384  *				an alternate status (as in the case of aborted
3385  *				commands)
3386  */
3387 void
3388 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3389 		     u8 alt_status)
3390 {
3391 	int exception = 0;
3392 	struct megasas_header *hdr = &cmd->frame->hdr;
3393 	unsigned long flags;
3394 	struct fusion_context *fusion = instance->ctrl_context;
3395 	u32 opcode, status;
3396 
3397 	/* flag for the retry reset */
3398 	cmd->retry_for_fw_reset = 0;
3399 
3400 	if (cmd->scmd)
3401 		cmd->scmd->SCp.ptr = NULL;
3402 
3403 	switch (hdr->cmd) {
3404 	case MFI_CMD_INVALID:
3405 		/* Some older 1068 controller FW may keep a pended
3406 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3407 		   when booting the kdump kernel.  Ignore this command to
3408 		   prevent a kernel panic on shutdown of the kdump kernel. */
3409 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3410 		       "completed\n");
3411 		dev_warn(&instance->pdev->dev, "If you have a controller "
3412 		       "other than PERC5, please upgrade your firmware\n");
3413 		break;
3414 	case MFI_CMD_PD_SCSI_IO:
3415 	case MFI_CMD_LD_SCSI_IO:
3416 
3417 		/*
3418 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3419 		 * issued either through an IO path or an IOCTL path. If it
3420 		 * was via IOCTL, we will send it to internal completion.
3421 		 */
3422 		if (cmd->sync_cmd) {
3423 			cmd->sync_cmd = 0;
3424 			megasas_complete_int_cmd(instance, cmd);
3425 			break;
3426 		}
3427 		/* fall through */
3428 
3429 	case MFI_CMD_LD_READ:
3430 	case MFI_CMD_LD_WRITE:
3431 
3432 		if (alt_status) {
3433 			cmd->scmd->result = alt_status << 16;
3434 			exception = 1;
3435 		}
3436 
3437 		if (exception) {
3438 
3439 			atomic_dec(&instance->fw_outstanding);
3440 
3441 			scsi_dma_unmap(cmd->scmd);
3442 			cmd->scmd->scsi_done(cmd->scmd);
3443 			megasas_return_cmd(instance, cmd);
3444 
3445 			break;
3446 		}
3447 
3448 		switch (hdr->cmd_status) {
3449 
3450 		case MFI_STAT_OK:
3451 			cmd->scmd->result = DID_OK << 16;
3452 			break;
3453 
3454 		case MFI_STAT_SCSI_IO_FAILED:
3455 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3456 			cmd->scmd->result =
3457 			    (DID_ERROR << 16) | hdr->scsi_status;
3458 			break;
3459 
3460 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3461 
3462 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3463 
3464 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3465 				memset(cmd->scmd->sense_buffer, 0,
3466 				       SCSI_SENSE_BUFFERSIZE);
3467 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3468 				       hdr->sense_len);
3469 
3470 				cmd->scmd->result |= DRIVER_SENSE << 24;
3471 			}
3472 
3473 			break;
3474 
3475 		case MFI_STAT_LD_OFFLINE:
3476 		case MFI_STAT_DEVICE_NOT_FOUND:
3477 			cmd->scmd->result = DID_BAD_TARGET << 16;
3478 			break;
3479 
3480 		default:
3481 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3482 			       hdr->cmd_status);
3483 			cmd->scmd->result = DID_ERROR << 16;
3484 			break;
3485 		}
3486 
3487 		atomic_dec(&instance->fw_outstanding);
3488 
3489 		scsi_dma_unmap(cmd->scmd);
3490 		cmd->scmd->scsi_done(cmd->scmd);
3491 		megasas_return_cmd(instance, cmd);
3492 
3493 		break;
3494 
3495 	case MFI_CMD_SMP:
3496 	case MFI_CMD_STP:
3497 	case MFI_CMD_NVME:
3498 	case MFI_CMD_TOOLBOX:
3499 		megasas_complete_int_cmd(instance, cmd);
3500 		break;
3501 
3502 	case MFI_CMD_DCMD:
3503 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3504 		/* Check for LD map update */
3505 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3506 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3507 			fusion->fast_path_io = 0;
3508 			spin_lock_irqsave(instance->host->host_lock, flags);
3509 			status = cmd->frame->hdr.cmd_status;
3510 			instance->map_update_cmd = NULL;
3511 			if (status != MFI_STAT_OK) {
3512 				if (status != MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
					       cmd->frame->hdr.cmd_status);
3515 				else {
3516 					megasas_return_cmd(instance, cmd);
3517 					spin_unlock_irqrestore(
3518 						instance->host->host_lock,
3519 						flags);
3520 					break;
3521 				}
3522 			}
3523 
3524 			megasas_return_cmd(instance, cmd);
3525 
3526 			/*
3527 			 * Set fast path IO to ZERO.
3528 			 * Validate Map will set proper value.
3529 			 * Meanwhile all IOs will go as LD IO.
3530 			 */
3531 			if (status == MFI_STAT_OK &&
3532 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3533 				instance->map_id++;
3534 				fusion->fast_path_io = 1;
3535 			} else {
3536 				fusion->fast_path_io = 0;
3537 			}
3538 
3539 			megasas_sync_map_info(instance);
3540 			spin_unlock_irqrestore(instance->host->host_lock,
3541 					       flags);
3542 			break;
3543 		}
3544 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3545 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3546 			spin_lock_irqsave(&poll_aen_lock, flags);
3547 			megasas_poll_wait_aen = 0;
3548 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3549 		}
3550 
3551 		/* FW has an updated PD sequence */
3552 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3553 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3554 
3555 			spin_lock_irqsave(instance->host->host_lock, flags);
3556 			status = cmd->frame->hdr.cmd_status;
3557 			instance->jbod_seq_cmd = NULL;
3558 			megasas_return_cmd(instance, cmd);
3559 
3560 			if (status == MFI_STAT_OK) {
3561 				instance->pd_seq_map_id++;
3562 				/* Re-register a pd sync seq num cmd */
3563 				if (megasas_sync_pd_seq_num(instance, true))
3564 					instance->use_seqnum_jbod_fp = false;
			} else {
				instance->use_seqnum_jbod_fp = false;
			}
3567 
3568 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3569 			break;
3570 		}
3571 
3572 		/*
3573 		 * See if got an event notification
3574 		 */
3575 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3576 			megasas_service_aen(instance, cmd);
3577 		else
3578 			megasas_complete_int_cmd(instance, cmd);
3579 
3580 		break;
3581 
3582 	case MFI_CMD_ABORT:
3583 		/*
3584 		 * Cmd issued to abort another cmd returned
3585 		 */
3586 		megasas_complete_abort(instance, cmd);
3587 		break;
3588 
3589 	default:
3590 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3591 		       hdr->cmd);
3592 		megasas_complete_int_cmd(instance, cmd);
3593 		break;
3594 	}
3595 }
3596 
3597 /**
3598  * megasas_issue_pending_cmds_again -	issue all pending cmds
3599  *					in FW again because of the fw reset
3600  * @instance:				Adapter soft state
3601  */
3602 static inline void
3603 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3604 {
3605 	struct megasas_cmd *cmd;
3606 	struct list_head clist_local;
3607 	union megasas_evt_class_locale class_locale;
3608 	unsigned long flags;
3609 	u32 seq_num;
3610 
3611 	INIT_LIST_HEAD(&clist_local);
3612 	spin_lock_irqsave(&instance->hba_lock, flags);
3613 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3614 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3615 
3616 	while (!list_empty(&clist_local)) {
3617 		cmd = list_entry((&clist_local)->next,
3618 					struct megasas_cmd, list);
3619 		list_del_init(&cmd->list);
3620 
3621 		if (cmd->sync_cmd || cmd->scmd) {
			dev_notice(&instance->pdev->dev,
				"command %p, %p:%d detected to be pending during HBA reset\n",
				cmd, cmd->scmd, cmd->sync_cmd);
3625 
3626 			cmd->retry_for_fw_reset++;
3627 
3628 			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev,
					"cmd %p, %p:%d was tried multiple times during reset. Shutting down the HBA\n",
					cmd, cmd->scmd, cmd->sync_cmd);
3633 				instance->instancet->disable_intr(instance);
3634 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3635 				megaraid_sas_kill_hba(instance);
3636 				return;
3637 			}
3638 		}
3639 
3640 		if (cmd->sync_cmd == 1) {
3641 			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev,
					"unexpected cmd attached to internal command!\n");
3644 			}
			dev_notice(&instance->pdev->dev,
				"%p synchronous cmd on the internal reset queue, issue it again\n",
				cmd);
3648 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3649 			instance->instancet->fire_cmd(instance,
3650 							cmd->frame_phys_addr,
3651 							0, instance->reg_set);
3652 		} else if (cmd->scmd) {
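			/*
			 * fw_outstanding was zeroed when the adapter reset
			 * began, so account for this command again before
			 * re-firing it below.
			 */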
			dev_notice(&instance->pdev->dev,
				"%p scsi cmd [%02x] detected on the internal queue, issue it again\n",
				cmd, cmd->scmd->cmnd[0]);
3656 
3657 			atomic_inc(&instance->fw_outstanding);
3658 			instance->instancet->fire_cmd(instance,
3659 					cmd->frame_phys_addr,
3660 					cmd->frame_count-1, instance->reg_set);
3661 		} else {
			dev_notice(&instance->pdev->dev,
				"%p unexpected cmd on the internal reset defer list during re-issue\n",
				cmd);
3665 		}
3666 	}
3667 
3668 	if (instance->aen_cmd) {
3669 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3670 		megasas_return_cmd(instance, instance->aen_cmd);
3671 
3672 		instance->aen_cmd = NULL;
3673 	}
3674 
3675 	/*
3676 	 * Initiate AEN (Asynchronous Event Notification)
3677 	 */
3678 	seq_num = instance->last_seq_num;
3679 	class_locale.members.reserved = 0;
3680 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3681 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3682 
3683 	megasas_register_aen(instance, seq_num, class_locale.word);
3684 }
3685 
3686 /**
3687  * Move the internal reset pending commands to a deferred queue.
3688  *
3689  * We move the commands pending at internal reset time to a
3690  * pending queue. This queue would be flushed after successful
3691  * completion of the internal reset sequence. if the internal reset
3692  * did not complete in time, the kernel reset handler would flush
3693  * these commands.
3694  **/
3695 static void
3696 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3697 {
3698 	struct megasas_cmd *cmd;
3699 	int i;
3700 	u16 max_cmd = instance->max_fw_cmds;
3701 	u32 defer_index;
3702 	unsigned long flags;
3703 
3704 	defer_index = 0;
3705 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3706 	for (i = 0; i < max_cmd; i++) {
3707 		cmd = instance->cmd_list[i];
3708 		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev,
				"moving cmd[%d]:%p:%d:%p to the defer queue as internal\n",
				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3712 
3713 			if (!list_empty(&cmd->list)) {
				dev_notice(&instance->pdev->dev,
					"ERROR while moving this cmd:%p, %d %p, it was discovered on some list?\n",
					cmd, cmd->sync_cmd, cmd->scmd);
3718 
3719 				list_del_init(&cmd->list);
3720 			}
3721 			defer_index++;
3722 			list_add_tail(&cmd->list,
3723 				&instance->internal_reset_pending_q);
3724 		}
3725 	}
3726 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3727 }
3728 
3729 
3730 static void
3731 process_fw_state_change_wq(struct work_struct *work)
3732 {
3733 	struct megasas_instance *instance =
3734 		container_of(work, struct megasas_instance, work_init);
3735 	u32 wait;
3736 	unsigned long flags;
3737 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3739 		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3740 				atomic_read(&instance->adprecovery));
		return;
3742 	}
3743 
3744 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev,
			"FW detected to be in FAULT state, restarting it...\n");
3747 
3748 		instance->instancet->disable_intr(instance);
3749 		atomic_set(&instance->fw_outstanding, 0);
3750 
3751 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3752 		instance->instancet->adp_reset(instance, instance->reg_set);
3753 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3754 
		dev_notice(&instance->pdev->dev,
			"FW restarted successfully, initiating next stage...\n");
3757 
		dev_notice(&instance->pdev->dev,
			"HBA recovery state machine, state 2 starting...\n");
3760 
		/* wait about 30 seconds before starting the second init */
		for (wait = 0; wait < 30; wait++)
			msleep(1000);
3765 
3766 		if (megasas_transition_to_ready(instance, 1)) {
3767 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3768 
3769 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3770 			megaraid_sas_kill_hba(instance);
			return;
3772 		}
3773 
3774 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3775 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3776 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3777 			) {
3778 			*instance->consumer = *instance->producer;
3779 		} else {
3780 			*instance->consumer = 0;
3781 			*instance->producer = 0;
3782 		}
3783 
3784 		megasas_issue_init_mfi(instance);
3785 
3786 		spin_lock_irqsave(&instance->hba_lock, flags);
3787 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3788 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3789 		instance->instancet->enable_intr(instance);
3790 
3791 		megasas_issue_pending_cmds_again(instance);
3792 		instance->issuepend_done = 1;
3793 	}
3794 }
3795 
3796 /**
3797  * megasas_deplete_reply_queue -	Processes all completed commands
3798  * @instance:				Adapter soft state
3799  * @alt_status:				Alternate status to be returned to
3800  *					SCSI mid-layer instead of the status
3801  *					returned by the FW
3802  * Note: this must be called with hba lock held
3803  */
3804 static int
3805 megasas_deplete_reply_queue(struct megasas_instance *instance,
3806 					u8 alt_status)
3807 {
3808 	u32 mfiStatus;
3809 	u32 fw_state;
3810 
	mfiStatus = instance->instancet->check_reset(instance,
						     instance->reg_set);
	if (mfiStatus == 1)
		return IRQ_HANDLED;
3815 
3816 	mfiStatus = instance->instancet->clear_intr(instance);
3817 	if (mfiStatus == 0) {
3818 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3819 		if (!instance->msix_vectors)
3820 			return IRQ_NONE;
3821 	}
3822 
3823 	instance->mfiStatus = mfiStatus;
3824 
	if (mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE) {
3826 		fw_state = instance->instancet->read_fw_status_reg(
3827 				instance) & MFI_STATE_MASK;
3828 
3829 		if (fw_state != MFI_STATE_FAULT) {
3830 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3831 						fw_state);
3832 		}
3833 
3834 		if ((fw_state == MFI_STATE_FAULT) &&
3835 				(instance->disableOnlineCtrlReset == 0)) {
3836 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3837 
3838 			if ((instance->pdev->device ==
3839 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3840 				(instance->pdev->device ==
3841 					PCI_DEVICE_ID_DELL_PERC5) ||
3842 				(instance->pdev->device ==
3843 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3844 
3845 				*instance->consumer =
3846 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3847 			}
3848 
3849 
3850 			instance->instancet->disable_intr(instance);
3851 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3852 			instance->issuepend_done = 0;
3853 
3854 			atomic_set(&instance->fw_outstanding, 0);
3855 			megasas_internal_reset_defer_cmds(instance);
3856 
3857 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3858 					fw_state, atomic_read(&instance->adprecovery));
3859 
3860 			schedule_work(&instance->work_init);
3861 			return IRQ_HANDLED;
3862 
3863 		} else {
3864 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3865 				fw_state, instance->disableOnlineCtrlReset);
3866 		}
3867 	}
3868 
3869 	tasklet_schedule(&instance->isr_tasklet);
3870 	return IRQ_HANDLED;
3871 }
3872 /**
3873  * megasas_isr - isr entry point
3874  */
3875 static irqreturn_t megasas_isr(int irq, void *devp)
3876 {
3877 	struct megasas_irq_context *irq_context = devp;
3878 	struct megasas_instance *instance = irq_context->instance;
3879 	unsigned long flags;
3880 	irqreturn_t rc;
3881 
3882 	if (atomic_read(&instance->fw_reset_no_pci_access))
3883 		return IRQ_HANDLED;
3884 
3885 	spin_lock_irqsave(&instance->hba_lock, flags);
3886 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3887 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3888 
3889 	return rc;
3890 }
3891 
3892 /**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Set when called from the adapter reset
 *					path; a FAULTed FW is then waited on
 *					instead of failing immediately
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it has to wait for the FW to reach the ready state on its own.
3900  */
3901 int
3902 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3903 {
3904 	int i;
3905 	u8 max_wait;
3906 	u32 fw_state;
3907 	u32 abs_state, curr_abs_state;
3908 
3909 	abs_state = instance->instancet->read_fw_status_reg(instance);
3910 	fw_state = abs_state & MFI_STATE_MASK;
3911 
3912 	if (fw_state != MFI_STATE_READY)
3913 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3914 		       " state\n");
3915 
3916 	while (fw_state != MFI_STATE_READY) {
3917 
3918 		switch (fw_state) {
3919 
3920 		case MFI_STATE_FAULT:
3921 			dev_printk(KERN_ERR, &instance->pdev->dev,
3922 				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
3923 				   abs_state & MFI_STATE_FAULT_CODE,
3924 				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
3925 			if (ocr) {
3926 				max_wait = MEGASAS_RESET_WAIT_TIME;
3927 				break;
3928 			} else {
3929 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
3930 				megasas_dump_reg_set(instance->reg_set);
3931 				return -ENODEV;
3932 			}
3933 
3934 		case MFI_STATE_WAIT_HANDSHAKE:
3935 			/*
3936 			 * Set the CLR bit in inbound doorbell
3937 			 */
3938 			if ((instance->pdev->device ==
3939 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3940 				(instance->pdev->device ==
3941 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3942 				(instance->adapter_type != MFI_SERIES))
3943 				writel(
3944 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3945 				  &instance->reg_set->doorbell);
3946 			else
3947 				writel(
3948 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3949 					&instance->reg_set->inbound_doorbell);
3950 
3951 			max_wait = MEGASAS_RESET_WAIT_TIME;
3952 			break;
3953 
3954 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3955 			if ((instance->pdev->device ==
3956 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3957 				(instance->pdev->device ==
3958 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3959 				(instance->adapter_type != MFI_SERIES))
3960 				writel(MFI_INIT_HOTPLUG,
3961 				       &instance->reg_set->doorbell);
3962 			else
3963 				writel(MFI_INIT_HOTPLUG,
3964 					&instance->reg_set->inbound_doorbell);
3965 
3966 			max_wait = MEGASAS_RESET_WAIT_TIME;
3967 			break;
3968 
3969 		case MFI_STATE_OPERATIONAL:
3970 			/*
3971 			 * Bring it to READY state; assuming max wait 10 secs
3972 			 */
3973 			instance->instancet->disable_intr(instance);
3974 			if ((instance->pdev->device ==
3975 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3976 				(instance->pdev->device ==
3977 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3978 				(instance->adapter_type != MFI_SERIES)) {
3979 				writel(MFI_RESET_FLAGS,
3980 					&instance->reg_set->doorbell);
3981 
3982 				if (instance->adapter_type != MFI_SERIES) {
3983 					for (i = 0; i < (10 * 1000); i += 20) {
						if (megasas_readl(instance,
						    &instance->reg_set->doorbell) & 1)
							msleep(20);
						else
							break;
3992 					}
3993 				}
3994 			} else
3995 				writel(MFI_RESET_FLAGS,
3996 					&instance->reg_set->inbound_doorbell);
3997 
3998 			max_wait = MEGASAS_RESET_WAIT_TIME;
3999 			break;
4000 
4001 		case MFI_STATE_UNDEFINED:
4002 			/*
4003 			 * This state should not last for more than 2 seconds
4004 			 */
4005 			max_wait = MEGASAS_RESET_WAIT_TIME;
4006 			break;
4007 
4008 		case MFI_STATE_BB_INIT:
4009 			max_wait = MEGASAS_RESET_WAIT_TIME;
4010 			break;
4011 
4012 		case MFI_STATE_FW_INIT:
4013 			max_wait = MEGASAS_RESET_WAIT_TIME;
4014 			break;
4015 
4016 		case MFI_STATE_FW_INIT_2:
4017 			max_wait = MEGASAS_RESET_WAIT_TIME;
4018 			break;
4019 
4020 		case MFI_STATE_DEVICE_SCAN:
4021 			max_wait = MEGASAS_RESET_WAIT_TIME;
4022 			break;
4023 
4024 		case MFI_STATE_FLUSH_CACHE:
4025 			max_wait = MEGASAS_RESET_WAIT_TIME;
4026 			break;
4027 
4028 		default:
4029 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4030 			       fw_state);
4031 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4032 			megasas_dump_reg_set(instance->reg_set);
4033 			return -ENODEV;
4034 		}
4035 
4036 		/*
4037 		 * The cur_state should not last for more than max_wait secs
4038 		 */
4039 		for (i = 0; i < max_wait * 50; i++) {
			curr_abs_state =
				instance->instancet->read_fw_status_reg(instance);

			if (abs_state == curr_abs_state)
				msleep(20);
			else
				break;
4047 		}
4048 
4049 		/*
4050 		 * Return error if fw_state hasn't changed after max_wait
4051 		 */
4052 		if (curr_abs_state == abs_state) {
4053 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4054 			       "in %d secs\n", fw_state, max_wait);
4055 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4056 			megasas_dump_reg_set(instance->reg_set);
4057 			return -ENODEV;
4058 		}
4059 
4060 		abs_state = curr_abs_state;
4061 		fw_state = curr_abs_state & MFI_STATE_MASK;
4062 	}
4063 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4064 
4065 	return 0;
4066 }
4067 
4068 /**
4069  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4070  * @instance:				Adapter soft state
4071  */
4072 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4073 {
4074 	int i;
4075 	u16 max_cmd = instance->max_mfi_cmds;
4076 	struct megasas_cmd *cmd;
4077 
4078 	if (!instance->frame_dma_pool)
4079 		return;
4080 
4081 	/*
4082 	 * Return all frames to pool
4083 	 */
4084 	for (i = 0; i < max_cmd; i++) {
4085 
4086 		cmd = instance->cmd_list[i];
4087 
4088 		if (cmd->frame)
4089 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4090 				      cmd->frame_phys_addr);
4091 
4092 		if (cmd->sense)
4093 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4094 				      cmd->sense_phys_addr);
4095 	}
4096 
4097 	/*
4098 	 * Now destroy the pool itself
4099 	 */
4100 	dma_pool_destroy(instance->frame_dma_pool);
4101 	dma_pool_destroy(instance->sense_dma_pool);
4102 
4103 	instance->frame_dma_pool = NULL;
4104 	instance->sense_dma_pool = NULL;
4105 }
4106 
4107 /**
4108  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4109  * @instance:			Adapter soft state
4110  *
 * Each command packet has an embedded DMA memory buffer that is used to
 * hold the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet
 * using the DMA pool facility.
4115  */
4116 static int megasas_create_frame_pool(struct megasas_instance *instance)
4117 {
4118 	int i;
4119 	u16 max_cmd;
4120 	u32 frame_count;
4121 	struct megasas_cmd *cmd;
4122 
4123 	max_cmd = instance->max_mfi_cmds;
4124 
4125 	/*
4126 	 * For MFI controllers.
4127 	 * max_num_sge = 60
4128 	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
4129 	 * Total 960 byte (15 MFI frame of 64 byte)
4130 	 *
4131 	 * Fusion adapter require only 3 extra frame.
4132 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4133 	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
4134 	 * Total 192 byte (3 MFI frame of 64 byte)
4135 	 */
4136 	frame_count = (instance->adapter_type == MFI_SERIES) ?
4137 			(15 + 1) : (3 + 1);
4138 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
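	/*
	 * With 64-byte MFI frames this works out to 16 * 64 = 1024 bytes
	 * per command for MFI series and 4 * 64 = 256 bytes for Fusion
	 * adapters (one base frame plus the extra frames counted above).
	 */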
4139 	/*
4140 	 * Use DMA pool facility provided by PCI layer
4141 	 */
4142 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4143 					&instance->pdev->dev,
4144 					instance->mfi_frame_size, 256, 0);
4145 
4146 	if (!instance->frame_dma_pool) {
4147 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4148 		return -ENOMEM;
4149 	}
4150 
4151 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4152 						   &instance->pdev->dev, 128,
4153 						   4, 0);
4154 
4155 	if (!instance->sense_dma_pool) {
4156 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4157 
4158 		dma_pool_destroy(instance->frame_dma_pool);
4159 		instance->frame_dma_pool = NULL;
4160 
4161 		return -ENOMEM;
4162 	}
4163 
4164 	/*
4165 	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By using cmd->index as the context instead of &cmd, we can always
	 * use a 32-bit context regardless of the architecture
4168 	 */
4169 	for (i = 0; i < max_cmd; i++) {
4170 
4171 		cmd = instance->cmd_list[i];
4172 
4173 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4174 					    GFP_KERNEL, &cmd->frame_phys_addr);
4175 
4176 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4177 					    GFP_KERNEL, &cmd->sense_phys_addr);
4178 
4179 		/*
4180 		 * megasas_teardown_frame_pool() takes care of freeing
4181 		 * whatever has been allocated
4182 		 */
4183 		if (!cmd->frame || !cmd->sense) {
4184 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4185 			megasas_teardown_frame_pool(instance);
4186 			return -ENOMEM;
4187 		}
4188 
4189 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4190 		cmd->frame->io.pad_0 = 0;
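		/*
		 * In a kdump kernel (reset_devices), pre-mark MFI frames as
		 * MFI_CMD_INVALID so that a stale completion left over from
		 * the crashed kernel that maps to this frame is ignored (see
		 * the MFI_CMD_INVALID handling in megasas_complete_cmd()).
		 */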
4191 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4192 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4193 	}
4194 
4195 	return 0;
4196 }
4197 
4198 /**
4199  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4200  * @instance:		Adapter soft state
4201  */
4202 void megasas_free_cmds(struct megasas_instance *instance)
4203 {
4204 	int i;
4205 
4206 	/* First free the MFI frame pool */
4207 	megasas_teardown_frame_pool(instance);
4208 
4209 	/* Free all the commands in the cmd_list */
	for (i = 0; i < instance->max_mfi_cmds; i++)
		kfree(instance->cmd_list[i]);
4213 
4214 	/* Free the cmd_list buffer itself */
4215 	kfree(instance->cmd_list);
4216 	instance->cmd_list = NULL;
4217 
4218 	INIT_LIST_HEAD(&instance->cmd_pool);
4219 }
4220 
4221 /**
4222  * megasas_alloc_cmds -	Allocates the command packets
4223  * @instance:		Adapter soft state
4224  *
 * Each command that is issued to the FW, whether an IO command from the OS
 * or an internal command like an IOCTL, is wrapped in a local data structure
 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
 * actually issued to the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum. We always use 32-bit integers for the context. In
 * this driver, the 32-bit values are indices into the cmd_list array.
 * This array is used only to look up the megasas_cmd given the context. The
 * free commands themselves are maintained in a linked list called cmd_pool.
4238  */
4239 int megasas_alloc_cmds(struct megasas_instance *instance)
4240 {
4241 	int i;
4242 	int j;
4243 	u16 max_cmd;
4244 	struct megasas_cmd *cmd;
4245 
4246 	max_cmd = instance->max_mfi_cmds;
4247 
4248 	/*
4249 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4250 	 * Allocate the dynamic array first and then allocate individual
4251 	 * commands.
4252 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);
4254 
4255 	if (!instance->cmd_list) {
4256 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4257 		return -ENOMEM;
4258 	}
4259 
4262 	for (i = 0; i < max_cmd; i++) {
4263 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4264 						GFP_KERNEL);
4265 
4266 		if (!instance->cmd_list[i]) {
4267 
4268 			for (j = 0; j < i; j++)
4269 				kfree(instance->cmd_list[j]);
4270 
4271 			kfree(instance->cmd_list);
4272 			instance->cmd_list = NULL;
4273 
4274 			return -ENOMEM;
4275 		}
4276 	}
4277 
4278 	for (i = 0; i < max_cmd; i++) {
4279 		cmd = instance->cmd_list[i];
4280 		memset(cmd, 0, sizeof(struct megasas_cmd));
4281 		cmd->index = i;
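		/*
		 * cmd->index doubles as the 32-bit frame context (tag);
		 * megasas_create_frame_pool() stores it in frame->io.context
		 * so the ISR can map a completed frame back to its cmd.
		 */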
4282 		cmd->scmd = NULL;
4283 		cmd->instance = instance;
4284 
4285 		list_add_tail(&cmd->list, &instance->cmd_pool);
4286 	}
4287 
4288 	/*
4289 	 * Create a frame pool and assign one frame to each cmd
4290 	 */
4291 	if (megasas_create_frame_pool(instance)) {
4292 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4293 		megasas_free_cmds(instance);
4294 		return -ENOMEM;
4295 	}
4296 
4297 	return 0;
4298 }
4299 
4300 /*
4301  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4302  * @instance:				Adapter soft state
4303  *
 * For MFI adapters OCR is not supported, so KILL_ADAPTER is returned.
 * For Fusion adapters, return IGNORE_TIMEOUT if driver load/unload or an
 * OCR is already in progress; otherwise return INITIATE_OCR.
4306  */
4307 inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4310 	if (instance->adapter_type == MFI_SERIES)
4311 		return KILL_ADAPTER;
4312 	else if (instance->unload ||
4313 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4314 		return IGNORE_TIMEOUT;
4315 	else
4316 		return INITIATE_OCR;
4317 }
4318 
4319 static void
4320 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4321 {
4322 	int ret;
4323 	struct megasas_cmd *cmd;
4324 	struct megasas_dcmd_frame *dcmd;
4325 
4326 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4327 	u16 device_id = 0;
4328 
4329 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
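	/*
	 * Flatten (channel, target) into the per-controller device ID that
	 * is passed to FW in the DCMD mailbox below.
	 */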
4330 	cmd = megasas_get_cmd(instance);
4331 
4332 	if (!cmd) {
4333 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4334 		return;
4335 	}
4336 
4337 	dcmd = &cmd->frame->dcmd;
4338 
4339 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4340 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4341 
4342 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4343 	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4345 	dcmd->sge_count = 1;
4346 	dcmd->flags = MFI_FRAME_DIR_READ;
4347 	dcmd->timeout = 0;
4348 	dcmd->pad_0 = 0;
4349 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4350 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4351 
4352 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4353 				 sizeof(struct MR_PD_INFO));
4354 
4355 	if ((instance->adapter_type != MFI_SERIES) &&
4356 	    !instance->mask_interrupts)
4357 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4358 	else
4359 		ret = megasas_issue_polled(instance, cmd);
4360 
4361 	switch (ret) {
4362 	case DCMD_SUCCESS:
4363 		mr_device_priv_data = sdev->hostdata;
4364 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4365 		mr_device_priv_data->interface_type =
4366 				instance->pd_info->state.ddf.pdType.intf;
4367 		break;
4368 
4369 	case DCMD_TIMEOUT:
4370 
4371 		switch (dcmd_timeout_ocr_possible(instance)) {
4372 		case INITIATE_OCR:
4373 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4374 			mutex_unlock(&instance->reset_mutex);
4375 			megasas_reset_fusion(instance->host,
4376 				MFI_IO_TIMEOUT_OCR);
4377 			mutex_lock(&instance->reset_mutex);
4378 			break;
4379 		case KILL_ADAPTER:
4380 			megaraid_sas_kill_hba(instance);
4381 			break;
4382 		case IGNORE_TIMEOUT:
4383 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4384 				__func__, __LINE__);
4385 			break;
4386 		}
4387 
4388 		break;
4389 	}
4390 
4391 	if (ret != DCMD_TIMEOUT)
		megasas_return_cmd(instance, cmd);
}
4396 /*
4397  * megasas_get_pd_list_info -	Returns FW's pd_list structure
4398  * @instance:				Adapter soft state
4399  * @pd_list:				pd_list structure
4400  *
4401  * Issues an internal command (DCMD) to get the FW's controller PD
4402  * list structure.  This information is mainly used to find out SYSTEM
4403  * supported by the FW.
4404  */
4405 static int
4406 megasas_get_pd_list(struct megasas_instance *instance)
4407 {
4408 	int ret = 0, pd_index = 0;
4409 	struct megasas_cmd *cmd;
4410 	struct megasas_dcmd_frame *dcmd;
4411 	struct MR_PD_LIST *ci;
4412 	struct MR_PD_ADDRESS *pd_addr;
4413 
4414 	if (instance->pd_list_not_supported) {
4415 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4416 		"not supported by firmware\n");
4417 		return ret;
4418 	}
4419 
4420 	ci = instance->pd_list_buf;
4421 
4422 	cmd = megasas_get_cmd(instance);
4423 
4424 	if (!cmd) {
4425 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4426 		return -ENOMEM;
4427 	}
4428 
4429 	dcmd = &cmd->frame->dcmd;
4430 
4431 	memset(ci, 0, sizeof(*ci));
4432 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4433 
4434 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4435 	dcmd->mbox.b[1] = 0;
4436 	dcmd->cmd = MFI_CMD_DCMD;
4437 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4438 	dcmd->sge_count = 1;
4439 	dcmd->flags = MFI_FRAME_DIR_READ;
4440 	dcmd->timeout = 0;
4441 	dcmd->pad_0 = 0;
4442 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4443 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4444 
4445 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4446 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4447 
4448 	if ((instance->adapter_type != MFI_SERIES) &&
4449 	    !instance->mask_interrupts)
4450 		ret = megasas_issue_blocked_cmd(instance, cmd,
4451 			MFI_IO_TIMEOUT_SECS);
4452 	else
4453 		ret = megasas_issue_polled(instance, cmd);
4454 
4455 	switch (ret) {
4456 	case DCMD_FAILED:
4457 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4458 			"failed/not supported by firmware\n");
4459 
4460 		if (instance->adapter_type != MFI_SERIES)
4461 			megaraid_sas_kill_hba(instance);
4462 		else
4463 			instance->pd_list_not_supported = 1;
4464 		break;
4465 	case DCMD_TIMEOUT:
4466 
4467 		switch (dcmd_timeout_ocr_possible(instance)) {
4468 		case INITIATE_OCR:
4469 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4470 			/*
4471 			 * DCMD failed from AEN path.
4472 			 * AEN path already hold reset_mutex to avoid PCI access
4473 			 * while OCR is in progress.
4474 			 */
4475 			mutex_unlock(&instance->reset_mutex);
4476 			megasas_reset_fusion(instance->host,
4477 						MFI_IO_TIMEOUT_OCR);
4478 			mutex_lock(&instance->reset_mutex);
4479 			break;
4480 		case KILL_ADAPTER:
4481 			megaraid_sas_kill_hba(instance);
4482 			break;
4483 		case IGNORE_TIMEOUT:
			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4485 				__func__, __LINE__);
4486 			break;
4487 		}
4488 
4489 		break;
4490 
4491 	case DCMD_SUCCESS:
4492 		pd_addr = ci->addr;
4493 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4494 			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4495 				 __func__, le32_to_cpu(ci->count));
4496 
4497 		if ((le32_to_cpu(ci->count) >
4498 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4499 			break;
4500 
4501 		memset(instance->local_pd_list, 0,
4502 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4503 
4504 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4505 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4506 					le16_to_cpu(pd_addr->deviceId);
4507 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4508 					pd_addr->scsiDevType;
4509 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4510 					MR_PD_STATE_SYSTEM;
4511 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4512 				dev_info(&instance->pdev->dev,
4513 					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4514 					 pd_index, le16_to_cpu(pd_addr->deviceId),
4515 					 pd_addr->scsiDevType);
4516 			pd_addr++;
4517 		}
4518 
4519 		memcpy(instance->pd_list, instance->local_pd_list,
4520 			sizeof(instance->pd_list));
4521 		break;
4522 
4523 	}
4524 
4525 	if (ret != DCMD_TIMEOUT)
4526 		megasas_return_cmd(instance, cmd);
4527 
4528 	return ret;
4529 }
4530 
4531 /*
4532  * megasas_get_ld_list_info -	Returns FW's ld_list structure
4533  * @instance:				Adapter soft state
4534  * @ld_list:				ld_list structure
4535  *
4536  * Issues an internal command (DCMD) to get the FW's controller PD
4537  * list structure.  This information is mainly used to find out SYSTEM
4538  * supported by the FW.
4539  */
4540 static int
4541 megasas_get_ld_list(struct megasas_instance *instance)
4542 {
4543 	int ret = 0, ld_index = 0, ids = 0;
4544 	struct megasas_cmd *cmd;
4545 	struct megasas_dcmd_frame *dcmd;
4546 	struct MR_LD_LIST *ci;
4547 	dma_addr_t ci_h = 0;
4548 	u32 ld_count;
4549 
4550 	ci = instance->ld_list_buf;
4551 	ci_h = instance->ld_list_buf_h;
4552 
4553 	cmd = megasas_get_cmd(instance);
4554 
4555 	if (!cmd) {
4556 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4557 		return -ENOMEM;
4558 	}
4559 
4560 	dcmd = &cmd->frame->dcmd;
4561 
4562 	memset(ci, 0, sizeof(*ci));
4563 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4564 
4565 	if (instance->supportmax256vd)
4566 		dcmd->mbox.b[0] = 1;
4567 	dcmd->cmd = MFI_CMD_DCMD;
4568 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4569 	dcmd->sge_count = 1;
4570 	dcmd->flags = MFI_FRAME_DIR_READ;
4571 	dcmd->timeout = 0;
4572 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4573 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4574 	dcmd->pad_0  = 0;
4575 
4576 	megasas_set_dma_settings(instance, dcmd, ci_h,
4577 				 sizeof(struct MR_LD_LIST));
4578 
4579 	if ((instance->adapter_type != MFI_SERIES) &&
4580 	    !instance->mask_interrupts)
4581 		ret = megasas_issue_blocked_cmd(instance, cmd,
4582 			MFI_IO_TIMEOUT_SECS);
4583 	else
4584 		ret = megasas_issue_polled(instance, cmd);
4585 
4586 	ld_count = le32_to_cpu(ci->ldCount);
4587 
4588 	switch (ret) {
4589 	case DCMD_FAILED:
4590 		megaraid_sas_kill_hba(instance);
4591 		break;
4592 	case DCMD_TIMEOUT:
4593 
4594 		switch (dcmd_timeout_ocr_possible(instance)) {
4595 		case INITIATE_OCR:
4596 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4597 			/*
4598 			 * DCMD failed from AEN path.
4599 			 * AEN path already hold reset_mutex to avoid PCI access
4600 			 * while OCR is in progress.
4601 			 */
4602 			mutex_unlock(&instance->reset_mutex);
4603 			megasas_reset_fusion(instance->host,
4604 						MFI_IO_TIMEOUT_OCR);
4605 			mutex_lock(&instance->reset_mutex);
4606 			break;
4607 		case KILL_ADAPTER:
4608 			megaraid_sas_kill_hba(instance);
4609 			break;
4610 		case IGNORE_TIMEOUT:
4611 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4612 				__func__, __LINE__);
4613 			break;
4614 		}
4615 
4616 		break;
4617 
4618 	case DCMD_SUCCESS:
4619 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4620 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4621 				 __func__, ld_count);
4622 
4623 		if (ld_count > instance->fw_supported_vd_count)
4624 			break;
4625 
4626 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4627 
4628 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4629 			if (ci->ldList[ld_index].state != 0) {
4630 				ids = ci->ldList[ld_index].ref.targetId;
4631 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4632 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4633 					dev_info(&instance->pdev->dev,
4634 						 "LD%d: targetID: 0x%03x\n",
4635 						 ld_index, ids);
4636 			}
4637 		}
4638 
4639 		break;
4640 	}
4641 
4642 	if (ret != DCMD_TIMEOUT)
4643 		megasas_return_cmd(instance, cmd);
4644 
4645 	return ret;
4646 }
4647 
4648 /**
4649  * megasas_ld_list_query -	Returns FW's ld_list structure
4650  * @instance:				Adapter soft state
4651  * @ld_list:				ld_list structure
4652  *
4653  * Issues an internal command (DCMD) to get the FW's controller PD
4654  * list structure.  This information is mainly used to find out SYSTEM
4655  * supported by the FW.
4656  */
4657 static int
4658 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4659 {
4660 	int ret = 0, ld_index = 0, ids = 0;
4661 	struct megasas_cmd *cmd;
4662 	struct megasas_dcmd_frame *dcmd;
4663 	struct MR_LD_TARGETID_LIST *ci;
4664 	dma_addr_t ci_h = 0;
4665 	u32 tgtid_count;
4666 
4667 	ci = instance->ld_targetid_list_buf;
4668 	ci_h = instance->ld_targetid_list_buf_h;
4669 
4670 	cmd = megasas_get_cmd(instance);
4671 
4672 	if (!cmd) {
4673 		dev_warn(&instance->pdev->dev,
4674 		         "megasas_ld_list_query: Failed to get cmd\n");
4675 		return -ENOMEM;
4676 	}
4677 
4678 	dcmd = &cmd->frame->dcmd;
4679 
4680 	memset(ci, 0, sizeof(*ci));
4681 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4682 
4683 	dcmd->mbox.b[0] = query_type;
4684 	if (instance->supportmax256vd)
4685 		dcmd->mbox.b[2] = 1;
4686 
4687 	dcmd->cmd = MFI_CMD_DCMD;
4688 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4689 	dcmd->sge_count = 1;
4690 	dcmd->flags = MFI_FRAME_DIR_READ;
4691 	dcmd->timeout = 0;
4692 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4693 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4694 	dcmd->pad_0  = 0;
4695 
4696 	megasas_set_dma_settings(instance, dcmd, ci_h,
4697 				 sizeof(struct MR_LD_TARGETID_LIST));
4698 
4699 	if ((instance->adapter_type != MFI_SERIES) &&
4700 	    !instance->mask_interrupts)
4701 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4702 	else
4703 		ret = megasas_issue_polled(instance, cmd);
4704 
4705 	switch (ret) {
4706 	case DCMD_FAILED:
4707 		dev_info(&instance->pdev->dev,
4708 			"DCMD not supported by firmware - %s %d\n",
4709 				__func__, __LINE__);
4710 		ret = megasas_get_ld_list(instance);
4711 		break;
4712 	case DCMD_TIMEOUT:
4713 		switch (dcmd_timeout_ocr_possible(instance)) {
4714 		case INITIATE_OCR:
4715 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4716 			/*
4717 			 * DCMD failed from AEN path.
4718 			 * AEN path already hold reset_mutex to avoid PCI access
4719 			 * while OCR is in progress.
4720 			 */
4721 			mutex_unlock(&instance->reset_mutex);
4722 			megasas_reset_fusion(instance->host,
4723 						MFI_IO_TIMEOUT_OCR);
4724 			mutex_lock(&instance->reset_mutex);
4725 			break;
4726 		case KILL_ADAPTER:
4727 			megaraid_sas_kill_hba(instance);
4728 			break;
4729 		case IGNORE_TIMEOUT:
4730 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4731 				__func__, __LINE__);
4732 			break;
4733 		}
4734 
4735 		break;
4736 	case DCMD_SUCCESS:
4737 		tgtid_count = le32_to_cpu(ci->count);
4738 
4739 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4740 			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4741 				 __func__, tgtid_count);
4742 
		if (tgtid_count > instance->fw_supported_vd_count)
4744 			break;
4745 
4746 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4747 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4748 			ids = ci->targetId[ld_index];
4749 			instance->ld_ids[ids] = ci->targetId[ld_index];
4750 			if (megasas_dbg_lvl & LD_PD_DEBUG)
4751 				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4752 					 ld_index, ci->targetId[ld_index]);
4753 		}
4754 
4755 		break;
4756 	}
4757 
4758 	if (ret != DCMD_TIMEOUT)
4759 		megasas_return_cmd(instance, cmd);
4760 
4761 	return ret;
4762 }
4763 
4764 /**
4765  * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4766  * dcmd.mbox              - reserved
4767  * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4768  * Desc:    This DCMD will return the combined device list
4769  * Status:  MFI_STAT_OK - List returned successfully
4770  *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4771  *                                 disabled
4772  * @instance:			Adapter soft state
4773  * @is_probe:			Driver probe check
4774  * Return:			0 if DCMD succeeded
4775  *				 non-zero if failed
4776  */
4777 static int
4778 megasas_host_device_list_query(struct megasas_instance *instance,
4779 			       bool is_probe)
4780 {
4781 	int ret, i, target_id;
4782 	struct megasas_cmd *cmd;
4783 	struct megasas_dcmd_frame *dcmd;
4784 	struct MR_HOST_DEVICE_LIST *ci;
4785 	u32 count;
4786 	dma_addr_t ci_h;
4787 
4788 	ci = instance->host_device_list_buf;
4789 	ci_h = instance->host_device_list_buf_h;
4790 
4791 	cmd = megasas_get_cmd(instance);
4792 
4793 	if (!cmd) {
4794 		dev_warn(&instance->pdev->dev,
4795 			 "%s: failed to get cmd\n",
4796 			 __func__);
4797 		return -ENOMEM;
4798 	}
4799 
4800 	dcmd = &cmd->frame->dcmd;
4801 
4802 	memset(ci, 0, sizeof(*ci));
4803 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4804 
4805 	dcmd->mbox.b[0] = is_probe ? 0 : 1;
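	/*
	 * mbox byte 0: 0 for the initial probe-time query, 1 for later
	 * rescans; the exact FW-side interpretation is firmware-defined.
	 */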
4806 	dcmd->cmd = MFI_CMD_DCMD;
4807 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4808 	dcmd->sge_count = 1;
4809 	dcmd->flags = MFI_FRAME_DIR_READ;
4810 	dcmd->timeout = 0;
4811 	dcmd->pad_0 = 0;
4812 	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4813 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4814 
4815 	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4816 
4817 	if (!instance->mask_interrupts) {
4818 		ret = megasas_issue_blocked_cmd(instance, cmd,
4819 						MFI_IO_TIMEOUT_SECS);
4820 	} else {
4821 		ret = megasas_issue_polled(instance, cmd);
4822 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4823 	}
4824 
4825 	switch (ret) {
4826 	case DCMD_SUCCESS:
4827 		/* Fill the internal pd_list and ld_ids array based on
4828 		 * targetIds returned by FW
4829 		 */
4830 		count = le32_to_cpu(ci->count);
4831 
4832 		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4833 			break;
4834 
4835 		if (megasas_dbg_lvl & LD_PD_DEBUG)
4836 			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4837 				 __func__, count);
4838 
4839 		memset(instance->local_pd_list, 0,
4840 		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4841 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4842 		for (i = 0; i < count; i++) {
4843 			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4844 			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4845 				instance->local_pd_list[target_id].tid = target_id;
4846 				instance->local_pd_list[target_id].driveType =
4847 						ci->host_device_list[i].scsi_type;
4848 				instance->local_pd_list[target_id].driveState =
4849 						MR_PD_STATE_SYSTEM;
4850 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4851 					dev_info(&instance->pdev->dev,
4852 						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4853 						 i, target_id, ci->host_device_list[i].scsi_type);
4854 			} else {
4855 				instance->ld_ids[target_id] = target_id;
4856 				if (megasas_dbg_lvl & LD_PD_DEBUG)
4857 					dev_info(&instance->pdev->dev,
4858 						 "Device %d: LD targetID: 0x%03x\n",
4859 						 i, target_id);
4860 			}
4861 		}
4862 
4863 		memcpy(instance->pd_list, instance->local_pd_list,
4864 		       sizeof(instance->pd_list));
4865 		break;
4866 
4867 	case DCMD_TIMEOUT:
4868 		switch (dcmd_timeout_ocr_possible(instance)) {
4869 		case INITIATE_OCR:
4870 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4871 			mutex_unlock(&instance->reset_mutex);
4872 			megasas_reset_fusion(instance->host,
4873 				MFI_IO_TIMEOUT_OCR);
4874 			mutex_lock(&instance->reset_mutex);
4875 			break;
4876 		case KILL_ADAPTER:
4877 			megaraid_sas_kill_hba(instance);
4878 			break;
4879 		case IGNORE_TIMEOUT:
4880 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4881 				 __func__, __LINE__);
4882 			break;
4883 		}
4884 		break;
4885 	case DCMD_FAILED:
4886 		dev_err(&instance->pdev->dev,
4887 			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4888 			__func__);
4889 		break;
4890 	}
4891 
4892 	if (ret != DCMD_TIMEOUT)
4893 		megasas_return_cmd(instance, cmd);
4894 
4895 	return ret;
4896 }
4897 
4898 /*
4899  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4900  * instance			 : Controller's instance
4901 */
4902 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4903 {
4904 	struct fusion_context *fusion;
4905 	u32 ventura_map_sz = 0;
4906 
4907 	fusion = instance->ctrl_context;
	/* MFI-based controllers have no fusion context; nothing to update */
4909 	if (!fusion)
4910 		return;
4911 
4912 	instance->supportmax256vd =
4913 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
	/* Below is an additional check to address future FW enhancements */
4915 	if (instance->ctrl_info_buf->max_lds > 64)
4916 		instance->supportmax256vd = 1;
4917 
4918 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4919 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4920 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4921 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4922 	if (instance->supportmax256vd) {
4923 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4924 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4925 	} else {
4926 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4927 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4928 	}
4929 
4930 	dev_info(&instance->pdev->dev,
4931 		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
4932 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
4933 		instance->ctrl_info_buf->max_lds);
4934 
4935 	if (instance->max_raid_mapsize) {
4936 		ventura_map_sz = instance->max_raid_mapsize *
4937 						MR_MIN_MAP_SIZE; /* 64k */
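		/*
		 * e.g. a max_raid_mapsize of 4 would give a 4 * 64K = 256K
		 * RAID map (illustrative value only).
		 */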
4938 		fusion->current_map_sz = ventura_map_sz;
4939 		fusion->max_map_sz = ventura_map_sz;
4940 	} else {
4941 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4942 					(sizeof(struct MR_LD_SPAN_MAP) *
4943 					(instance->fw_supported_vd_count - 1));
4944 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4945 
4946 		fusion->max_map_sz =
4947 			max(fusion->old_map_sz, fusion->new_map_sz);
4948 
4949 		if (instance->supportmax256vd)
4950 			fusion->current_map_sz = fusion->new_map_sz;
4951 		else
4952 			fusion->current_map_sz = fusion->old_map_sz;
4953 	}
4954 	/* irrespective of FW raid maps, driver raid map is constant */
4955 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4956 }
4957 
4958 /*
4959  * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
4960  * dcmd.hdr.length            - number of bytes to read
4961  * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
4962  * Desc:			 Fill in snapdump properties
4963  * Status:			 MFI_STAT_OK- Command successful
4964  */
4965 void megasas_get_snapdump_properties(struct megasas_instance *instance)
4966 {
4967 	int ret = 0;
4968 	struct megasas_cmd *cmd;
4969 	struct megasas_dcmd_frame *dcmd;
4970 	struct MR_SNAPDUMP_PROPERTIES *ci;
4971 	dma_addr_t ci_h = 0;
4972 
4973 	ci = instance->snapdump_prop;
4974 	ci_h = instance->snapdump_prop_h;
4975 
4976 	if (!ci)
4977 		return;
4978 
4979 	cmd = megasas_get_cmd(instance);
4980 
4981 	if (!cmd) {
4982 		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
4983 		return;
4984 	}
4985 
4986 	dcmd = &cmd->frame->dcmd;
4987 
4988 	memset(ci, 0, sizeof(*ci));
4989 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4990 
4991 	dcmd->cmd = MFI_CMD_DCMD;
4992 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4993 	dcmd->sge_count = 1;
4994 	dcmd->flags = MFI_FRAME_DIR_READ;
4995 	dcmd->timeout = 0;
4996 	dcmd->pad_0 = 0;
4997 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
4998 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
4999 
5000 	megasas_set_dma_settings(instance, dcmd, ci_h,
5001 				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5002 
5003 	if (!instance->mask_interrupts) {
5004 		ret = megasas_issue_blocked_cmd(instance, cmd,
5005 						MFI_IO_TIMEOUT_SECS);
5006 	} else {
5007 		ret = megasas_issue_polled(instance, cmd);
5008 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5009 	}
5010 
5011 	switch (ret) {
5012 	case DCMD_SUCCESS:
5013 		instance->snapdump_wait_time =
5014 			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5015 				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5016 		break;
5017 
5018 	case DCMD_TIMEOUT:
5019 		switch (dcmd_timeout_ocr_possible(instance)) {
5020 		case INITIATE_OCR:
5021 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5022 			mutex_unlock(&instance->reset_mutex);
5023 			megasas_reset_fusion(instance->host,
5024 				MFI_IO_TIMEOUT_OCR);
5025 			mutex_lock(&instance->reset_mutex);
5026 			break;
5027 		case KILL_ADAPTER:
5028 			megaraid_sas_kill_hba(instance);
5029 			break;
5030 		case IGNORE_TIMEOUT:
5031 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5032 				__func__, __LINE__);
5033 			break;
5034 		}
5035 	}
5036 
5037 	if (ret != DCMD_TIMEOUT)
5038 		megasas_return_cmd(instance, cmd);
5039 }
5040 
5041 /**
 * megasas_get_ctrl_info -	Returns FW's controller structure
5043  * @instance:				Adapter soft state
5044  *
5045  * Issues an internal command (DCMD) to get the FW's controller structure.
5046  * This information is mainly used to find out the maximum IO transfer per
5047  * command supported by the FW.
5048  */
5049 int
5050 megasas_get_ctrl_info(struct megasas_instance *instance)
5051 {
5052 	int ret = 0;
5053 	struct megasas_cmd *cmd;
5054 	struct megasas_dcmd_frame *dcmd;
5055 	struct megasas_ctrl_info *ci;
5056 	dma_addr_t ci_h = 0;
5057 
5058 	ci = instance->ctrl_info_buf;
5059 	ci_h = instance->ctrl_info_buf_h;
5060 
5061 	cmd = megasas_get_cmd(instance);
5062 
5063 	if (!cmd) {
5064 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5065 		return -ENOMEM;
5066 	}
5067 
5068 	dcmd = &cmd->frame->dcmd;
5069 
5070 	memset(ci, 0, sizeof(*ci));
5071 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5072 
5073 	dcmd->cmd = MFI_CMD_DCMD;
5074 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5075 	dcmd->sge_count = 1;
5076 	dcmd->flags = MFI_FRAME_DIR_READ;
5077 	dcmd->timeout = 0;
5078 	dcmd->pad_0 = 0;
5079 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5080 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5081 	dcmd->mbox.b[0] = 1;
5082 
5083 	megasas_set_dma_settings(instance, dcmd, ci_h,
5084 				 sizeof(struct megasas_ctrl_info));
5085 
5086 	if ((instance->adapter_type != MFI_SERIES) &&
5087 	    !instance->mask_interrupts) {
5088 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5089 	} else {
5090 		ret = megasas_issue_polled(instance, cmd);
5091 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5092 	}
5093 
5094 	switch (ret) {
5095 	case DCMD_SUCCESS:
5096 		/* Save required controller information in
5097 		 * CPU endianness format.
5098 		 */
5099 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5100 		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5101 		le32_to_cpus((u32 *)&ci->adapterOperations2);
5102 		le32_to_cpus((u32 *)&ci->adapterOperations3);
5103 		le16_to_cpus((u16 *)&ci->adapter_operations4);
5104 		le32_to_cpus((u32 *)&ci->adapter_operations5);
5105 
		/* Update the latest Ext VD info.
		 * From the init path, store the current firmware details.
		 * From the OCR path, detect any firmware property changes
		 * (e.g. after a firmware upgrade without a system reboot).
		 */
5111 		megasas_update_ext_vd_details(instance);
5112 		instance->support_seqnum_jbod_fp =
5113 			ci->adapterOperations3.useSeqNumJbodFP;
5114 		instance->support_morethan256jbod =
5115 			ci->adapter_operations4.support_pd_map_target_id;
5116 		instance->support_nvme_passthru =
5117 			ci->adapter_operations4.support_nvme_passthru;
5118 		instance->support_pci_lane_margining =
5119 			ci->adapter_operations5.support_pci_lane_margining;
5120 		instance->task_abort_tmo = ci->TaskAbortTO;
5121 		instance->max_reset_tmo = ci->MaxResetTO;
5122 
		/* Check whether controller is iMR or MR */
5124 		instance->is_imr = (ci->memory_size ? 0 : 1);
5125 
5126 		instance->snapdump_wait_time =
5127 			(ci->properties.on_off_properties2.enable_snap_dump ?
5128 			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5129 
5130 		instance->enable_fw_dev_list =
5131 			ci->properties.on_off_properties2.enable_fw_dev_list;
5132 
5133 		dev_info(&instance->pdev->dev,
5134 			"controller type\t: %s(%dMB)\n",
5135 			instance->is_imr ? "iMR" : "MR",
5136 			le16_to_cpu(ci->memory_size));
5137 
5138 		instance->disableOnlineCtrlReset =
5139 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5140 		instance->secure_jbod_support =
5141 			ci->adapterOperations3.supportSecurityonJBOD;
5142 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5143 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5144 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5145 			instance->secure_jbod_support ? "Yes" : "No");
5146 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5147 			 instance->support_nvme_passthru ? "Yes" : "No");
5148 		dev_info(&instance->pdev->dev,
5149 			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5150 			 instance->task_abort_tmo, instance->max_reset_tmo);
5151 		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5152 			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5153 		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5154 			 instance->support_pci_lane_margining ? "Yes" : "No");
5155 
5156 		break;
5157 
5158 	case DCMD_TIMEOUT:
5159 		switch (dcmd_timeout_ocr_possible(instance)) {
5160 		case INITIATE_OCR:
5161 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5162 			mutex_unlock(&instance->reset_mutex);
5163 			megasas_reset_fusion(instance->host,
5164 				MFI_IO_TIMEOUT_OCR);
5165 			mutex_lock(&instance->reset_mutex);
5166 			break;
5167 		case KILL_ADAPTER:
5168 			megaraid_sas_kill_hba(instance);
5169 			break;
5170 		case IGNORE_TIMEOUT:
5171 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5172 				__func__, __LINE__);
5173 			break;
5174 		}
5175 		break;
5176 	case DCMD_FAILED:
5177 		megaraid_sas_kill_hba(instance);
5178 		break;
5179 
5180 	}
5181 
5182 	if (ret != DCMD_TIMEOUT)
5183 		megasas_return_cmd(instance, cmd);
5184 
5185 	return ret;
5186 }
5187 
5188 /*
5189  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5190  *					to firmware
5191  *
5192  * @instance:				Adapter soft state
5193  * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
5194 					MR_CRASH_BUF_TURN_OFF = 0
5195 					MR_CRASH_BUF_TURN_ON = 1
5196  * @return 0 on success non-zero on failure.
5197  * Issues an internal command (DCMD) to set parameters for crash dump feature.
5198  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5199  * that driver supports crash dump feature. This DCMD will be sent only if
5200  * crash dump feature is supported by the FW.
5201  *
5202  */
5203 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5204 	u8 crash_buf_state)
5205 {
5206 	int ret = 0;
5207 	struct megasas_cmd *cmd;
5208 	struct megasas_dcmd_frame *dcmd;
5209 
5210 	cmd = megasas_get_cmd(instance);
5211 
5212 	if (!cmd) {
5213 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5214 		return -ENOMEM;
5215 	}
5216 
5217 
5218 	dcmd = &cmd->frame->dcmd;
5219 
5220 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5221 	dcmd->mbox.b[0] = crash_buf_state;
5222 	dcmd->cmd = MFI_CMD_DCMD;
5223 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5224 	dcmd->sge_count = 1;
5225 	dcmd->flags = MFI_FRAME_DIR_NONE;
5226 	dcmd->timeout = 0;
5227 	dcmd->pad_0 = 0;
5228 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5229 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5230 
5231 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5232 				 CRASH_DMA_BUF_SIZE);
5233 
5234 	if ((instance->adapter_type != MFI_SERIES) &&
5235 	    !instance->mask_interrupts)
5236 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5237 	else
5238 		ret = megasas_issue_polled(instance, cmd);
5239 
5240 	if (ret == DCMD_TIMEOUT) {
5241 		switch (dcmd_timeout_ocr_possible(instance)) {
5242 		case INITIATE_OCR:
5243 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5244 			megasas_reset_fusion(instance->host,
5245 					MFI_IO_TIMEOUT_OCR);
5246 			break;
5247 		case KILL_ADAPTER:
5248 			megaraid_sas_kill_hba(instance);
5249 			break;
5250 		case IGNORE_TIMEOUT:
5251 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5252 				__func__, __LINE__);
5253 			break;
5254 		}
5255 	} else
5256 		megasas_return_cmd(instance, cmd);
5257 
5258 	return ret;
5259 }
5260 
5261 /**
5262  * megasas_issue_init_mfi -	Initializes the FW
5263  * @instance:		Adapter soft state
5264  *
5265  * Issues the INIT MFI cmd
5266  */
5267 static int
5268 megasas_issue_init_mfi(struct megasas_instance *instance)
5269 {
5270 	__le32 context;
5271 	struct megasas_cmd *cmd;
5272 	struct megasas_init_frame *init_frame;
5273 	struct megasas_init_queue_info *initq_info;
5274 	dma_addr_t init_frame_h;
5275 	dma_addr_t initq_info_h;
5276 
5277 	/*
5278 	 * Prepare a init frame. Note the init frame points to queue info
5279 	 * structure. Each frame has SGL allocated after first 64 bytes. For
5280 	 * this frame - since we don't need any SGL - we use SGL's space as
5281 	 * queue info structure
5282 	 *
5283 	 * We will not get a NULL command below. We just created the pool.
5284 	 */
5285 	cmd = megasas_get_cmd(instance);
5286 
5287 	init_frame = (struct megasas_init_frame *)cmd->frame;
5288 	initq_info = (struct megasas_init_queue_info *)
5289 		((unsigned long)init_frame + 64);
5290 
5291 	init_frame_h = cmd->frame_phys_addr;
5292 	initq_info_h = init_frame_h + 64;
5293 
5294 	context = init_frame->context;
5295 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5296 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5297 	init_frame->context = context;
5298 
5299 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5300 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5301 
5302 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5303 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5304 
5305 	init_frame->cmd = MFI_CMD_INIT;
5306 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5307 	init_frame->queue_info_new_phys_addr_lo =
5308 		cpu_to_le32(lower_32_bits(initq_info_h));
5309 	init_frame->queue_info_new_phys_addr_hi =
5310 		cpu_to_le32(upper_32_bits(initq_info_h));
5311 
5312 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5313 
5314 	/*
5315 	 * disable the intr before firing the init frame to FW
5316 	 */
5317 	instance->instancet->disable_intr(instance);
5318 
5319 	/*
5320 	 * Issue the init frame in polled mode
5321 	 */
5322 
5323 	if (megasas_issue_polled(instance, cmd)) {
5324 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5325 		megasas_return_cmd(instance, cmd);
5326 		goto fail_fw_init;
5327 	}
5328 
5329 	megasas_return_cmd(instance, cmd);
5330 
5331 	return 0;
5332 
5333 fail_fw_init:
5334 	return -EINVAL;
5335 }
5336 
5337 static u32
5338 megasas_init_adapter_mfi(struct megasas_instance *instance)
5339 {
5340 	u32 context_sz;
5341 	u32 reply_q_sz;
5342 
5343 	/*
5344 	 * Get various operational parameters from status register
5345 	 */
5346 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5347 	/*
5348 	 * Reduce the max supported cmds by 1. This is to ensure that the
5349 	 * reply_q_sz (1 more than the max cmd that driver may send)
5350 	 * does not exceed max cmds that the FW can support
5351 	 */
5352 	instance->max_fw_cmds = instance->max_fw_cmds-1;
5353 	instance->max_mfi_cmds = instance->max_fw_cmds;
5354 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5355 					0x10;
5356 	/*
5357 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5358 	 * are reserved for IOCTL + driver's internal DCMDs.
5359 	 */
5360 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5361 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5362 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5363 			MEGASAS_SKINNY_INT_CMDS);
5364 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5365 	} else {
5366 		instance->max_scsi_cmds = (instance->max_fw_cmds -
5367 			MEGASAS_INT_CMDS);
5368 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5369 	}
5370 
5371 	instance->cur_can_queue = instance->max_scsi_cmds;
5372 	/*
5373 	 * Create a pool of commands
5374 	 */
5375 	if (megasas_alloc_cmds(instance))
5376 		goto fail_alloc_cmds;
5377 
5378 	/*
5379 	 * Allocate memory for reply queue. Length of reply queue should
5380 	 * be _one_ more than the maximum commands handled by the firmware.
5381 	 *
	 * Note: When FW completes commands, it places corresponding context
5383 	 * values in this circular reply queue. This circular queue is a fairly
5384 	 * typical producer-consumer queue. FW is the producer (of completed
5385 	 * commands) and the driver is the consumer.
5386 	 */
5387 	context_sz = sizeof(u32);
5388 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
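	/*
	 * Illustrative sizing (hypothetical FW value): if the FW reports
	 * 1008 commands, max_fw_cmds was reduced to 1007 above, so
	 * reply_q_sz = 4 * (1007 + 1) = 4032 bytes.
	 */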
5389 
5390 	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5391 			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5392 
5393 	if (!instance->reply_queue) {
5394 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5395 		goto fail_reply_queue;
5396 	}
5397 
5398 	if (megasas_issue_init_mfi(instance))
5399 		goto fail_fw_init;
5400 
5401 	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5403 			"Fail from %s %d\n", instance->unique_id,
5404 			__func__, __LINE__);
5405 		goto fail_fw_init;
5406 	}
5407 
5408 	instance->fw_support_ieee = 0;
5409 	instance->fw_support_ieee =
5410 		(instance->instancet->read_fw_status_reg(instance) &
5411 		0x04000000);
5412 
5413 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5414 			instance->fw_support_ieee);
5415 
5416 	if (instance->fw_support_ieee)
5417 		instance->flag_ieee = 1;
5418 
5419 	return 0;
5420 
5421 fail_fw_init:
5422 
5423 	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5424 			    instance->reply_queue, instance->reply_queue_h);
5425 fail_reply_queue:
5426 	megasas_free_cmds(instance);
5427 
5428 fail_alloc_cmds:
5429 	return 1;
5430 }
5431 
5432 static
5433 void megasas_setup_irq_poll(struct megasas_instance *instance)
5434 {
5435 	struct megasas_irq_context *irq_ctx;
5436 	u32 count, i;
5437 
5438 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5439 
5440 	/* Initialize IRQ poll */
5441 	for (i = 0; i < count; i++) {
5442 		irq_ctx = &instance->irq_context[i];
5443 		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5444 		irq_ctx->irq_poll_scheduled = false;
5445 		irq_poll_init(&irq_ctx->irqpoll,
5446 			      instance->threshold_reply_count,
5447 			      megasas_irqpoll);
5448 	}
5449 }
5450 
5451 /*
5452  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5453  * @instance:				Adapter soft state
5454  *
5455  * Do not enable interrupt, only setup ISRs.
5456  *
5457  * Return 0 on success.
5458  */
5459 static int
5460 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5461 {
5462 	struct pci_dev *pdev;
5463 
5464 	pdev = instance->pdev;
5465 	instance->irq_context[0].instance = instance;
5466 	instance->irq_context[0].MSIxIndex = 0;
5467 	if (request_irq(pci_irq_vector(pdev, 0),
5468 			instance->instancet->service_isr, IRQF_SHARED,
5469 			"megasas", &instance->irq_context[0])) {
5470 		dev_err(&instance->pdev->dev,
5471 				"Failed to register IRQ from %s %d\n",
5472 				__func__, __LINE__);
5473 		return -1;
5474 	}
5475 	instance->perf_mode = MR_LATENCY_PERF_MODE;
5476 	instance->low_latency_index_start = 0;
5477 	return 0;
5478 }
5479 
5480 /**
5481  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5482  * @instance:				Adapter soft state
5483  * @is_probe:				Driver probe check
5484  *
5485  * Do not enable interrupt, only setup ISRs.
5486  *
5487  * Return 0 on success.
5488  */
5489 static int
5490 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5491 {
5492 	int i, j;
5493 	struct pci_dev *pdev;
5494 
5495 	pdev = instance->pdev;
5496 
5497 	/* Try MSI-x */
5498 	for (i = 0; i < instance->msix_vectors; i++) {
5499 		instance->irq_context[i].instance = instance;
5500 		instance->irq_context[i].MSIxIndex = i;
5501 		if (request_irq(pci_irq_vector(pdev, i),
5502 			instance->instancet->service_isr, 0, "megasas",
5503 			&instance->irq_context[i])) {
5504 			dev_err(&instance->pdev->dev,
5505 				"Failed to register IRQ for vector %d.\n", i);
5506 			for (j = 0; j < i; j++)
5507 				free_irq(pci_irq_vector(pdev, j),
5508 					 &instance->irq_context[j]);
5509 			/* Retry irq register for IO_APIC*/
5510 			instance->msix_vectors = 0;
5511 			instance->msix_load_balance = false;
5512 			if (is_probe) {
5513 				pci_free_irq_vectors(instance->pdev);
5514 				return megasas_setup_irqs_ioapic(instance);
5515 			} else {
5516 				return -1;
5517 			}
5518 		}
5519 	}
5520 
5521 	return 0;
5522 }
5523 
5524 /*
5525  * megasas_destroy_irqs-		unregister interrupts.
5526  * @instance:				Adapter soft state
5527  * return:				void
5528  */
5529 static void
5530 megasas_destroy_irqs(struct megasas_instance *instance) {
5531 
5532 	int i;
5533 	int count;
5534 	struct megasas_irq_context *irq_ctx;
5535 
5536 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5537 	if (instance->adapter_type != MFI_SERIES) {
5538 		for (i = 0; i < count; i++) {
5539 			irq_ctx = &instance->irq_context[i];
5540 			irq_poll_disable(&irq_ctx->irqpoll);
5541 		}
5542 	}
5543 
5544 	if (instance->msix_vectors)
5545 		for (i = 0; i < instance->msix_vectors; i++) {
5546 			free_irq(pci_irq_vector(instance->pdev, i),
5547 				 &instance->irq_context[i]);
5548 		}
5549 	else
5550 		free_irq(pci_irq_vector(instance->pdev, 0),
5551 			 &instance->irq_context[0]);
5552 }
5553 
5554 /**
5555  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5556  * @instance:				Adapter soft state
5557  * @is_probe:				Driver probe check
5558  *
5559  * Return 0 on success.
5560  */
5561 void
5562 megasas_setup_jbod_map(struct megasas_instance *instance)
5563 {
5564 	int i;
5565 	struct fusion_context *fusion = instance->ctrl_context;
5566 	u32 pd_seq_map_sz;
5567 
5568 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5569 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5570 
5571 	instance->use_seqnum_jbod_fp =
5572 		instance->support_seqnum_jbod_fp;
5573 	if (reset_devices || !fusion ||
5574 		!instance->support_seqnum_jbod_fp) {
5575 		dev_info(&instance->pdev->dev,
5576 			"JBOD sequence map is disabled %s %d\n",
5577 			__func__, __LINE__);
5578 		instance->use_seqnum_jbod_fp = false;
5579 		return;
5580 	}
5581 
5582 	if (fusion->pd_seq_sync[0])
5583 		goto skip_alloc;
5584 
5585 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5586 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5587 			(&instance->pdev->dev, pd_seq_map_sz,
5588 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5589 		if (!fusion->pd_seq_sync[i]) {
5590 			dev_err(&instance->pdev->dev,
5591 				"Failed to allocate memory from %s %d\n",
5592 				__func__, __LINE__);
5593 			if (i == 1) {
5594 				dma_free_coherent(&instance->pdev->dev,
5595 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5596 					fusion->pd_seq_phys[0]);
5597 				fusion->pd_seq_sync[0] = NULL;
5598 			}
5599 			instance->use_seqnum_jbod_fp = false;
5600 			return;
5601 		}
5602 	}
5603 
5604 skip_alloc:
5605 	if (!megasas_sync_pd_seq_num(instance, false) &&
5606 		!megasas_sync_pd_seq_num(instance, true))
5607 		instance->use_seqnum_jbod_fp = true;
5608 	else
5609 		instance->use_seqnum_jbod_fp = false;
5610 }
5611 
5612 static void megasas_setup_reply_map(struct megasas_instance *instance)
5613 {
5614 	const struct cpumask *mask;
5615 	unsigned int queue, cpu, low_latency_index_start;
5616 
5617 	low_latency_index_start = instance->low_latency_index_start;
5618 
5619 	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5620 		mask = pci_irq_get_affinity(instance->pdev, queue);
5621 		if (!mask)
5622 			goto fallback;
5623 
5624 		for_each_cpu(cpu, mask)
5625 			instance->reply_map[cpu] = queue;
5626 	}
5627 	return;
5628 
5629 fallback:
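	/*
	 * Round-robin the remaining queues across all possible CPUs.
	 * Illustrative example (hypothetical vector count): with 5 MSI-X
	 * vectors and low_latency_index_start = 1, CPUs map to queues
	 * 1, 2, 3, 4, 1, 2, ... in CPU id order.
	 */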
5630 	queue = low_latency_index_start;
5631 	for_each_possible_cpu(cpu) {
5632 		instance->reply_map[cpu] = queue;
5633 		if (queue == (instance->msix_vectors - 1))
5634 			queue = low_latency_index_start;
5635 		else
5636 			queue++;
5637 	}
5638 }
5639 
5640 /**
5641  * megasas_get_device_list -	Get the PD and LD device list from FW.
5642  * @instance:			Adapter soft state
5643  * @return:			Success or failure
5644  *
5645  * Issue DCMDs to Firmware to get the PD and LD list.
5646  * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5647  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5648  */
5649 static
5650 int megasas_get_device_list(struct megasas_instance *instance)
5651 {
5652 	memset(instance->pd_list, 0,
5653 	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5654 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5655 
5656 	if (instance->enable_fw_dev_list) {
5657 		if (megasas_host_device_list_query(instance, true))
5658 			return FAILED;
5659 	} else {
5660 		if (megasas_get_pd_list(instance) < 0) {
5661 			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5662 			return FAILED;
5663 		}
5664 
5665 		if (megasas_ld_list_query(instance,
5666 					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5667 			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5668 			return FAILED;
5669 		}
5670 	}
5671 
5672 	return SUCCESS;
5673 }
5674 
5675 /**
5676  * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5677  * @instance:					Adapter soft state
5678  * return:					void
5679  */
5680 static inline void
5681 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5682 {
5683 	int i;
5684 	int local_numa_node;
5685 
5686 	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5687 		local_numa_node = dev_to_node(&instance->pdev->dev);
5688 
5689 		for (i = 0; i < instance->low_latency_index_start; i++)
5690 			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5691 				cpumask_of_node(local_numa_node));
5692 	}
5693 }
5694 
5695 static int
5696 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5697 {
5698 	int i, irq_flags;
5699 	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5700 	struct irq_affinity *descp = &desc;
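	/*
	 * .pre_vectors keeps the first low_latency_index_start vectors out
	 * of the automatic affinity spreading; in balanced mode these are
	 * the high IOPS queues, which megasas_set_high_iops_queue_affinity_hint()
	 * later pins to the controller's local NUMA node.
	 */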
5701 
5702 	irq_flags = PCI_IRQ_MSIX;
5703 
5704 	if (instance->smp_affinity_enable)
5705 		irq_flags |= PCI_IRQ_AFFINITY;
5706 	else
5707 		descp = NULL;
5708 
5709 	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5710 		instance->low_latency_index_start,
5711 		instance->msix_vectors, irq_flags, descp);
5712 
5713 	return i;
5714 }
5715 
5716 /**
5717  * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5718  * @instance:			Adapter soft state
5719  * return:			void
5720  */
5721 static void
5722 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5723 {
5724 	int i;
5725 	unsigned int num_msix_req;
5726 
5727 	i = __megasas_alloc_irq_vectors(instance);
5728 
5729 	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5730 	    (i != instance->msix_vectors)) {
5731 		if (instance->msix_vectors)
5732 			pci_free_irq_vectors(instance->pdev);
5733 		/* Disable Balanced IOPS mode and try realloc vectors */
5734 		instance->perf_mode = MR_LATENCY_PERF_MODE;
5735 		instance->low_latency_index_start = 1;
5736 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5737 
5738 		instance->msix_vectors = min(num_msix_req,
5739 				instance->msix_vectors);
5740 
5741 		i = __megasas_alloc_irq_vectors(instance);
5742 
5743 	}
5744 
5745 	dev_info(&instance->pdev->dev,
5746 		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5747 
5748 	if (i > 0)
5749 		instance->msix_vectors = i;
5750 	else
5751 		instance->msix_vectors = 0;
5752 
5753 	if (instance->smp_affinity_enable)
5754 		megasas_set_high_iops_queue_affinity_hint(instance);
5755 }
5756 
5757 /**
5758  * megasas_init_fw -	Initializes the FW
5759  * @instance:		Adapter soft state
5760  *
5761  * This is the main function for initializing firmware
5762  */
5763 
5764 static int megasas_init_fw(struct megasas_instance *instance)
5765 {
5766 	u32 max_sectors_1;
5767 	u32 max_sectors_2, tmp_sectors, msix_enable;
5768 	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5769 	resource_size_t base_addr;
5770 	void *base_addr_phys;
5771 	struct megasas_ctrl_info *ctrl_info = NULL;
5772 	unsigned long bar_list;
5773 	int i, j, loop;
5774 	struct IOV_111 *iovPtr;
5775 	struct fusion_context *fusion;
5776 	bool intr_coalescing;
5777 	unsigned int num_msix_req;
5778 	u16 lnksta, speed;
5779 
5780 	fusion = instance->ctrl_context;
5781 
5782 	/* Find first memory bar */
5783 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5784 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5785 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5786 					 "megasas: LSI")) {
5787 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5788 		return -EBUSY;
5789 	}
5790 
5791 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5792 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5793 
5794 	if (!instance->reg_set) {
5795 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5796 		goto fail_ioremap;
5797 	}
5798 
5799 	base_addr_phys = &base_addr;
5800 	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5801 		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5802 		   instance->bar, base_addr_phys, instance->reg_set);
5803 
5804 	if (instance->adapter_type != MFI_SERIES)
5805 		instance->instancet = &megasas_instance_template_fusion;
5806 	else {
5807 		switch (instance->pdev->device) {
5808 		case PCI_DEVICE_ID_LSI_SAS1078R:
5809 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5810 			instance->instancet = &megasas_instance_template_ppc;
5811 			break;
5812 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5813 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5814 			instance->instancet = &megasas_instance_template_gen2;
5815 			break;
5816 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5817 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5818 			instance->instancet = &megasas_instance_template_skinny;
5819 			break;
5820 		case PCI_DEVICE_ID_LSI_SAS1064R:
5821 		case PCI_DEVICE_ID_DELL_PERC5:
5822 		default:
5823 			instance->instancet = &megasas_instance_template_xscale;
5824 			instance->pd_list_not_supported = 1;
5825 			break;
5826 		}
5827 	}
5828 
5829 	if (megasas_transition_to_ready(instance, 0)) {
5830 		dev_info(&instance->pdev->dev,
5831 			 "Failed to transition controller to ready from %s!\n",
5832 			 __func__);
5833 		if (instance->adapter_type != MFI_SERIES) {
5834 			status_reg = instance->instancet->read_fw_status_reg(
5835 					instance);
5836 			if (status_reg & MFI_RESET_ADAPTER) {
5837 				if (megasas_adp_reset_wait_for_ready
5838 					(instance, true, 0) == FAILED)
5839 					goto fail_ready_state;
5840 			} else {
5841 				goto fail_ready_state;
5842 			}
5843 		} else {
5844 			atomic_set(&instance->fw_reset_no_pci_access, 1);
5845 			instance->instancet->adp_reset
5846 				(instance, instance->reg_set);
5847 			atomic_set(&instance->fw_reset_no_pci_access, 0);
5848 
5849 			/*waiting for about 30 second before retry*/
5850 			ssleep(30);
5851 
5852 			if (megasas_transition_to_ready(instance, 0))
5853 				goto fail_ready_state;
5854 		}
5855 
5856 		dev_info(&instance->pdev->dev,
5857 			 "FW restarted successfully from %s!\n",
5858 			 __func__);
5859 	}
5860 
5861 	megasas_init_ctrl_params(instance);
5862 
5863 	if (megasas_set_dma_mask(instance))
5864 		goto fail_ready_state;
5865 
5866 	if (megasas_alloc_ctrl_mem(instance))
5867 		goto fail_alloc_dma_buf;
5868 
5869 	if (megasas_alloc_ctrl_dma_buffers(instance))
5870 		goto fail_alloc_dma_buf;
5871 
5872 	fusion = instance->ctrl_context;
5873 
5874 	if (instance->adapter_type >= VENTURA_SERIES) {
5875 		scratch_pad_2 =
5876 			megasas_readl(instance,
5877 				      &instance->reg_set->outbound_scratch_pad_2);
5878 		instance->max_raid_mapsize = ((scratch_pad_2 >>
5879 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5880 			MR_MAX_RAID_MAP_SIZE_MASK);
5881 	}
5882 
5883 	switch (instance->adapter_type) {
5884 	case VENTURA_SERIES:
5885 		fusion->pcie_bw_limitation = true;
5886 		break;
5887 	case AERO_SERIES:
5888 		fusion->r56_div_offload = true;
5889 		break;
5890 	default:
5891 		break;
5892 	}
5893 
5894 	/* Check if MSI-X is supported while in ready state */
5895 	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5896 		       0x4000000) >> 0x1a;
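	/* Bit 26 (0x4000000) of the FW status register advertises MSI-X support. */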
5897 	if (msix_enable && !msix_disable) {
5898 
5899 		scratch_pad_1 = megasas_readl
5900 			(instance, &instance->reg_set->outbound_scratch_pad_1);
5901 		/* Check max MSI-X vectors */
5902 		if (fusion) {
5903 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5904 				/* Thunderbolt Series*/
5905 				instance->msix_vectors = (scratch_pad_1
5906 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5907 			} else {
5908 				instance->msix_vectors = ((scratch_pad_1
5909 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5910 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5911 
5912 				/*
5913 				 * For Invader series, > 8 MSI-x vectors
5914 				 * supported by FW/HW implies combined
5915 				 * reply queue mode is enabled.
5916 				 * For Ventura series, > 16 MSI-x vectors
5917 				 * supported by FW/HW implies combined
5918 				 * reply queue mode is enabled.
5919 				 */
5920 				switch (instance->adapter_type) {
5921 				case INVADER_SERIES:
5922 					if (instance->msix_vectors > 8)
5923 						instance->msix_combined = true;
5924 					break;
5925 				case AERO_SERIES:
5926 				case VENTURA_SERIES:
5927 					if (instance->msix_vectors > 16)
5928 						instance->msix_combined = true;
5929 					break;
5930 				}
5931 
5932 				if (rdpq_enable)
5933 					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
5934 								1 : 0;
5935 
5936 				if (!instance->msix_combined) {
5937 					instance->msix_load_balance = true;
5938 					instance->smp_affinity_enable = false;
5939 				}
5940 
5941 				/* Save 1-15 reply post index address to local memory
5942 				 * Index 0 is already saved from reg offset
5943 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5944 				 */
5945 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5946 					instance->reply_post_host_index_addr[loop] =
5947 						(u32 __iomem *)
5948 						((u8 __iomem *)instance->reg_set +
5949 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5950 						+ (loop * 0x10));
5951 				}
5952 			}
5953 
5954 			dev_info(&instance->pdev->dev,
5955 				 "firmware supports msix\t: (%d)",
5956 				 instance->msix_vectors);
5957 			if (msix_vectors)
5958 				instance->msix_vectors = min(msix_vectors,
5959 					instance->msix_vectors);
5960 		} else /* MFI adapters */
5961 			instance->msix_vectors = 1;
5962 
5963 
5964 		/*
5965 		 * For Aero (if some conditions are met), driver will configure a
5966 		 * few additional reply queues with interrupt coalescing enabled.
5967 		 * These queues with interrupt coalescing enabled are called
5968 		 * High IOPS queues and rest of reply queues (based on number of
5969 		 * logical CPUs) are termed as Low latency queues.
5970 		 *
5971 		 * Total Number of reply queues = High IOPS queues + low latency queues
5972 		 *
5973 		 * For rest of fusion adapters, 1 additional reply queue will be
5974 		 * reserved for management commands, rest of reply queues
5975 		 * (based on number of logical CPUs) will be used for IOs and
5976 		 * referenced as IO queues.
5977 		 * Total Number of reply queues = 1 + IO queues
5978 		 *
5979 		 * MFI adapters supports single MSI-x so single reply queue
5980 		 * will be used for IO and management commands.
5981 		 */
5982 
5983 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
5984 								true : false;
5985 		if (intr_coalescing &&
5986 			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
5987 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
5988 			instance->perf_mode = MR_BALANCED_PERF_MODE;
5989 		else
5990 			instance->perf_mode = MR_LATENCY_PERF_MODE;
5991 
5992 
5993 		if (instance->adapter_type == AERO_SERIES) {
5994 			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
5995 			speed = lnksta & PCI_EXP_LNKSTA_CLS;
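			/*
			 * PCI_EXP_LNKSTA_CLS encodes the current link speed:
			 * 0x1 = 2.5 GT/s, 0x2 = 5 GT/s, 0x3 = 8 GT/s,
			 * 0x4 = 16 GT/s. The check below treats anything
			 * below 0x4 (slower than Gen4) as a slow link.
			 */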
5996 
5997 			/*
5998 			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
5999 			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6000 			 */
6001 			if (speed < 0x4) {
6002 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6003 				fusion->pcie_bw_limitation = true;
6004 			}
6005 
6006 			/*
6007 			 * Performance mode settings provided through module parameter-perf_mode will
			 * take effect only for:
6009 			 * 1. Aero family of adapters.
6010 			 * 2. When user sets module parameter- perf_mode in range of 0-2.
6011 			 */
6012 			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6013 				(perf_mode <= MR_LATENCY_PERF_MODE))
6014 				instance->perf_mode = perf_mode;
6015 			/*
6016 			 * If intr coalescing is not supported by controller FW, then IOPS
6017 			 * and Balanced modes are not feasible.
6018 			 */
6019 			if (!intr_coalescing)
6020 				instance->perf_mode = MR_LATENCY_PERF_MODE;
6021 
6022 		}
6023 
6024 		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6025 			instance->low_latency_index_start =
6026 				MR_HIGH_IOPS_QUEUE_COUNT;
6027 		else
6028 			instance->low_latency_index_start = 1;
6029 
6030 		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6031 
6032 		instance->msix_vectors = min(num_msix_req,
6033 				instance->msix_vectors);
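		/*
		 * Worked example (hypothetical counts): 16 online CPUs in
		 * latency mode (low_latency_index_start = 1) request
		 * 16 + 1 = 17 vectors, which is then capped at whatever the
		 * FW advertised above.
		 */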
6034 
6035 		megasas_alloc_irq_vectors(instance);
6036 		if (!instance->msix_vectors)
6037 			instance->msix_load_balance = false;
6038 	}
6039 	/*
6040 	 * MSI-X host index 0 is common for all adapter.
6041 	 * It is used for all MPT based Adapters.
6042 	 */
6043 	if (instance->msix_combined) {
6044 		instance->reply_post_host_index_addr[0] =
6045 				(u32 *)((u8 *)instance->reg_set +
6046 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6047 	} else {
6048 		instance->reply_post_host_index_addr[0] =
6049 			(u32 *)((u8 *)instance->reg_set +
6050 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6051 	}
6052 
6053 	if (!instance->msix_vectors) {
6054 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6055 		if (i < 0)
6056 			goto fail_init_adapter;
6057 	}
6058 
6059 	megasas_setup_reply_map(instance);
6060 
6061 	dev_info(&instance->pdev->dev,
6062 		"current msix/online cpus\t: (%d/%d)\n",
6063 		instance->msix_vectors, (unsigned int)num_online_cpus());
6064 	dev_info(&instance->pdev->dev,
6065 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6066 
6067 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6068 		(unsigned long)instance);
6069 
6070 	/*
6071 	 * Below are default value for legacy Firmware.
6072 	 * non-fusion based controllers
6073 	 */
6074 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6075 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6076 	/* Get operational params, sge flags, send init cmd to controller */
6077 	if (instance->instancet->init_adapter(instance))
6078 		goto fail_init_adapter;
6079 
6080 	if (instance->adapter_type >= VENTURA_SERIES) {
6081 		scratch_pad_3 =
6082 			megasas_readl(instance,
6083 				      &instance->reg_set->outbound_scratch_pad_3);
6084 		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6085 			MR_DEFAULT_NVME_PAGE_SHIFT)
6086 			instance->nvme_page_size =
6087 				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6088 
6089 		dev_info(&instance->pdev->dev,
6090 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6091 	}
6092 
6093 	if (instance->msix_vectors ?
6094 		megasas_setup_irqs_msix(instance, 1) :
6095 		megasas_setup_irqs_ioapic(instance))
6096 		goto fail_init_adapter;
6097 
6098 	if (instance->adapter_type != MFI_SERIES)
6099 		megasas_setup_irq_poll(instance);
6100 
6101 	instance->instancet->enable_intr(instance);
6102 
6103 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6104 
6105 	megasas_setup_jbod_map(instance);
6106 
6107 	if (megasas_get_device_list(instance) != SUCCESS) {
6108 		dev_err(&instance->pdev->dev,
6109 			"%s: megasas_get_device_list failed\n",
6110 			__func__);
6111 		goto fail_get_ld_pd_list;
6112 	}
6113 
6114 	/* stream detection initialization */
6115 	if (instance->adapter_type >= VENTURA_SERIES) {
6116 		fusion->stream_detect_by_ld =
6117 			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6118 				sizeof(struct LD_STREAM_DETECT *),
6119 				GFP_KERNEL);
6120 		if (!fusion->stream_detect_by_ld) {
6121 			dev_err(&instance->pdev->dev,
6122 				"unable to allocate stream detection for pool of LDs\n");
6123 			goto fail_get_ld_pd_list;
6124 		}
6125 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6126 			fusion->stream_detect_by_ld[i] =
6127 				kzalloc(sizeof(struct LD_STREAM_DETECT),
6128 				GFP_KERNEL);
6129 			if (!fusion->stream_detect_by_ld[i]) {
6130 				dev_err(&instance->pdev->dev,
6131 					"unable to allocate stream detect by LD\n ");
6132 				for (j = 0; j < i; ++j)
6133 					kfree(fusion->stream_detect_by_ld[j]);
6134 				kfree(fusion->stream_detect_by_ld);
6135 				fusion->stream_detect_by_ld = NULL;
6136 				goto fail_get_ld_pd_list;
6137 			}
6138 			fusion->stream_detect_by_ld[i]->mru_bit_map
6139 				= MR_STREAM_BITMAP;
6140 		}
6141 	}
6142 
6143 	/*
6144 	 * Compute the max allowed sectors per IO: The controller info has two
6145 	 * limits on max sectors. Driver should use the minimum of these two.
6146 	 *
6147 	 * 1 << stripe_sz_ops.min = max sectors per strip
6148 	 *
6149 	 * Note that older firmwares ( < FW ver 30) didn't report information
6150 	 * to calculate max_sectors_1. So the number ended up as zero always.
6151 	 */
6152 	tmp_sectors = 0;
6153 	ctrl_info = instance->ctrl_info_buf;
6154 
6155 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6156 		le16_to_cpu(ctrl_info->max_strips_per_io);
6157 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6158 
6159 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
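	/*
	 * Worked example (hypothetical controller values): a 64 KB strip
	 * (stripe_sz_ops.min = 7, i.e. 128 sectors) with max_strips_per_io
	 * of 42 gives max_sectors_1 = 128 * 42 = 5376, which is then
	 * compared against the FW's max_request_size.
	 */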
6160 
6161 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6162 	instance->passive = ctrl_info->cluster.passive;
6163 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6164 	instance->UnevenSpanSupport =
6165 		ctrl_info->adapterOperations2.supportUnevenSpans;
6166 	if (instance->UnevenSpanSupport) {
6167 		struct fusion_context *fusion = instance->ctrl_context;
6168 		if (MR_ValidateMapInfo(instance, instance->map_id))
6169 			fusion->fast_path_io = 1;
6170 		else
6171 			fusion->fast_path_io = 0;
6172 
6173 	}
6174 	if (ctrl_info->host_interface.SRIOV) {
6175 		instance->requestorId = ctrl_info->iov.requestorId;
6176 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6177 			if (!ctrl_info->adapterOperations2.activePassive)
6178 			    instance->PlasmaFW111 = 1;
6179 
6180 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6181 			    instance->PlasmaFW111 ? "1.11" : "new");
6182 
6183 			if (instance->PlasmaFW111) {
6184 			    iovPtr = (struct IOV_111 *)
6185 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6186 			    instance->requestorId = iovPtr->requestorId;
6187 			}
6188 		}
6189 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6190 			instance->requestorId);
6191 	}
6192 
6193 	instance->crash_dump_fw_support =
6194 		ctrl_info->adapterOperations3.supportCrashDump;
6195 	instance->crash_dump_drv_support =
6196 		(instance->crash_dump_fw_support &&
6197 		instance->crash_dump_buf);
6198 	if (instance->crash_dump_drv_support)
6199 		megasas_set_crash_dump_params(instance,
			MR_CRASH_BUF_TURN_OFF);
	else {
6203 		if (instance->crash_dump_buf)
6204 			dma_free_coherent(&instance->pdev->dev,
6205 				CRASH_DMA_BUF_SIZE,
6206 				instance->crash_dump_buf,
6207 				instance->crash_dump_h);
6208 		instance->crash_dump_buf = NULL;
6209 	}
6210 
6211 	if (instance->snapdump_wait_time) {
6212 		megasas_get_snapdump_properties(instance);
6213 		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6214 			 instance->snapdump_wait_time);
6215 	}
6216 
6217 	dev_info(&instance->pdev->dev,
6218 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6219 		le16_to_cpu(ctrl_info->pci.vendor_id),
6220 		le16_to_cpu(ctrl_info->pci.device_id),
6221 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6222 		le16_to_cpu(ctrl_info->pci.sub_device_id));
6223 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6224 		instance->UnevenSpanSupport ? "yes" : "no");
6225 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6226 		instance->crash_dump_drv_support ? "yes" : "no");
6227 	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6228 		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6229 
6230 	instance->max_sectors_per_req = instance->max_num_sge *
6231 						SGE_BUFFER_SIZE / 512;
6232 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6233 		instance->max_sectors_per_req = tmp_sectors;
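	/*
	 * Illustrative example (assuming SGE_BUFFER_SIZE of 4096 and a
	 * hypothetical max_num_sge of 128): 128 * 4096 / 512 = 1024
	 * sectors (512 KB), unless the controller limit above is smaller.
	 */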
6234 
6235 	/* Check for valid throttlequeuedepth module parameter */
6236 	if (throttlequeuedepth &&
6237 			throttlequeuedepth <= instance->max_scsi_cmds)
6238 		instance->throttlequeuedepth = throttlequeuedepth;
6239 	else
6240 		instance->throttlequeuedepth =
6241 				MEGASAS_THROTTLE_QUEUE_DEPTH;
6242 
6243 	if ((resetwaittime < 1) ||
6244 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6245 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6246 
6247 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6248 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6249 
6250 	/* Launch SR-IOV heartbeat timer */
6251 	if (instance->requestorId) {
6252 		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6253 			megasas_start_timer(instance);
6254 		} else {
6255 			instance->skip_heartbeat_timer_del = 1;
6256 			goto fail_get_ld_pd_list;
6257 		}
6258 	}
6259 
6260 	/*
6261 	 * Create and start watchdog thread which will monitor
6262 	 * controller state every 1 sec and trigger OCR when
6263 	 * it enters fault state
6264 	 */
6265 	if (instance->adapter_type != MFI_SERIES)
6266 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6267 			goto fail_start_watchdog;
6268 
6269 	return 0;
6270 
6271 fail_start_watchdog:
6272 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6273 		del_timer_sync(&instance->sriov_heartbeat_timer);
6274 fail_get_ld_pd_list:
6275 	instance->instancet->disable_intr(instance);
6276 	megasas_destroy_irqs(instance);
6277 fail_init_adapter:
6278 	if (instance->msix_vectors)
6279 		pci_free_irq_vectors(instance->pdev);
6280 	instance->msix_vectors = 0;
6281 fail_alloc_dma_buf:
6282 	megasas_free_ctrl_dma_buffers(instance);
6283 	megasas_free_ctrl_mem(instance);
6284 fail_ready_state:
6285 	iounmap(instance->reg_set);
6286 
6287 fail_ioremap:
6288 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6289 
6290 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6291 		__func__, __LINE__);
6292 	return -EINVAL;
6293 }
6294 
6295 /**
6296  * megasas_release_mfi -	Reverses the FW initialization
6297  * @instance:			Adapter soft state
6298  */
6299 static void megasas_release_mfi(struct megasas_instance *instance)
6300 {
6301 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6302 
6303 	if (instance->reply_queue)
6304 		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6305 			    instance->reply_queue, instance->reply_queue_h);
6306 
6307 	megasas_free_cmds(instance);
6308 
6309 	iounmap(instance->reg_set);
6310 
6311 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6312 }
6313 
6314 /**
6315  * megasas_get_seq_num -	Gets latest event sequence numbers
6316  * @instance:			Adapter soft state
6317  * @eli:			FW event log sequence numbers information
6318  *
6319  * FW maintains a log of all events in a non-volatile area. Upper layers would
6320  * usually find out the latest sequence number of the events, the seq number at
6321  * the boot etc. They would "read" all the events below the latest seq number
6322  * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 * number), they would subscribe to AEN (asynchronous event notification) and
6324  * wait for the events to happen.
6325  */
6326 static int
6327 megasas_get_seq_num(struct megasas_instance *instance,
6328 		    struct megasas_evt_log_info *eli)
6329 {
6330 	struct megasas_cmd *cmd;
6331 	struct megasas_dcmd_frame *dcmd;
6332 	struct megasas_evt_log_info *el_info;
6333 	dma_addr_t el_info_h = 0;
6334 	int ret;
6335 
6336 	cmd = megasas_get_cmd(instance);
6337 
6338 	if (!cmd) {
6339 		return -ENOMEM;
6340 	}
6341 
6342 	dcmd = &cmd->frame->dcmd;
6343 	el_info = dma_alloc_coherent(&instance->pdev->dev,
6344 				     sizeof(struct megasas_evt_log_info),
6345 				     &el_info_h, GFP_KERNEL);
6346 	if (!el_info) {
6347 		megasas_return_cmd(instance, cmd);
6348 		return -ENOMEM;
6349 	}
6350 
6351 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6352 
6353 	dcmd->cmd = MFI_CMD_DCMD;
6354 	dcmd->cmd_status = 0x0;
6355 	dcmd->sge_count = 1;
6356 	dcmd->flags = MFI_FRAME_DIR_READ;
6357 	dcmd->timeout = 0;
6358 	dcmd->pad_0 = 0;
6359 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6360 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6361 
6362 	megasas_set_dma_settings(instance, dcmd, el_info_h,
6363 				 sizeof(struct megasas_evt_log_info));
6364 
6365 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6366 	if (ret != DCMD_SUCCESS) {
6367 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6368 			__func__, __LINE__);
6369 		goto dcmd_failed;
6370 	}
6371 
6372 	/*
	 * Copy the data back into the caller's buffer
6374 	 */
6375 	eli->newest_seq_num = el_info->newest_seq_num;
6376 	eli->oldest_seq_num = el_info->oldest_seq_num;
6377 	eli->clear_seq_num = el_info->clear_seq_num;
6378 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6379 	eli->boot_seq_num = el_info->boot_seq_num;
6380 
6381 dcmd_failed:
6382 	dma_free_coherent(&instance->pdev->dev,
6383 			sizeof(struct megasas_evt_log_info),
6384 			el_info, el_info_h);
6385 
6386 	megasas_return_cmd(instance, cmd);
6387 
6388 	return ret;
6389 }
6390 
6391 /**
6392  * megasas_register_aen -	Registers for asynchronous event notification
6393  * @instance:			Adapter soft state
6394  * @seq_num:			The starting sequence number
 * @class_locale_word:		Class/locale of the event
 *
 * This function subscribes for AEN for events beyond @seq_num. It requests
 * to be notified if and only if the event matches @class_locale_word
6399  */
6400 static int
6401 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6402 		     u32 class_locale_word)
6403 {
6404 	int ret_val;
6405 	struct megasas_cmd *cmd;
6406 	struct megasas_dcmd_frame *dcmd;
6407 	union megasas_evt_class_locale curr_aen;
6408 	union megasas_evt_class_locale prev_aen;
6409 
6410 	/*
6411 	 * If there an AEN pending already (aen_cmd), check if the
6412 	 * class_locale of that pending AEN is inclusive of the new
6413 	 * AEN request we currently have. If it is, then we don't have
6414 	 * to do anything. In other words, whichever events the current
6415 	 * AEN request is subscribing to, have already been subscribed
6416 	 * to.
6417 	 *
6418 	 * If the old_cmd is _not_ inclusive, then we have to abort
6419 	 * that command, form a class_locale that is superset of both
6420 	 * old and current and re-issue to the FW
6421 	 */
6422 
6423 	curr_aen.word = class_locale_word;
6424 
6425 	if (instance->aen_cmd) {
6426 
6427 		prev_aen.word =
6428 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6429 
6430 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6431 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
			dev_info(&instance->pdev->dev,
				 "%s %d out of range class %d sent by application\n",
6434 				 __func__, __LINE__, curr_aen.members.class);
6435 			return 0;
6436 		}
6437 
6438 		/*
6439 		 * A class whose enum value is smaller is inclusive of all
6440 		 * higher values. If a PROGRESS (= -1) was previously
6441 		 * registered, then a new registration requests for higher
6442 		 * classes need not be sent to FW. They are automatically
6443 		 * included.
6444 		 *
6445 		 * Locale numbers don't have such hierarchy. They are bitmap
6446 		 * values
6447 		 */
6448 		if ((prev_aen.members.class <= curr_aen.members.class) &&
6449 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6450 		      curr_aen.members.locale)) {
6451 			/*
6452 			 * Previously issued event registration includes
6453 			 * current request. Nothing to do.
6454 			 */
6455 			return 0;
6456 		} else {
6457 			curr_aen.members.locale |= prev_aen.members.locale;
6458 
6459 			if (prev_aen.members.class < curr_aen.members.class)
6460 				curr_aen.members.class = prev_aen.members.class;
6461 
6462 			instance->aen_cmd->abort_aen = 1;
6463 			ret_val = megasas_issue_blocked_abort_cmd(instance,
6464 								  instance->
6465 								  aen_cmd, 30);
6466 
6467 			if (ret_val) {
6468 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6469 				       "previous AEN command\n");
6470 				return ret_val;
6471 			}
6472 		}
6473 	}
6474 
6475 	cmd = megasas_get_cmd(instance);
6476 
6477 	if (!cmd)
6478 		return -ENOMEM;
6479 
6480 	dcmd = &cmd->frame->dcmd;
6481 
6482 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6483 
6484 	/*
6485 	 * Prepare DCMD for aen registration
6486 	 */
6487 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6488 
6489 	dcmd->cmd = MFI_CMD_DCMD;
6490 	dcmd->cmd_status = 0x0;
6491 	dcmd->sge_count = 1;
6492 	dcmd->flags = MFI_FRAME_DIR_READ;
6493 	dcmd->timeout = 0;
6494 	dcmd->pad_0 = 0;
6495 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6496 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6497 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6498 	instance->last_seq_num = seq_num;
6499 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6500 
6501 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6502 				 sizeof(struct megasas_evt_detail));
6503 
6504 	if (instance->aen_cmd != NULL) {
6505 		megasas_return_cmd(instance, cmd);
6506 		return 0;
6507 	}
6508 
6509 	/*
6510 	 * Store reference to the cmd used to register for AEN. When an
6511 	 * application wants us to register for AEN, we have to abort this
6512 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6513 	 */
6514 	instance->aen_cmd = cmd;
6515 
6516 	/*
6517 	 * Issue the aen registration frame
6518 	 */
6519 	instance->instancet->issue_dcmd(instance, cmd);
6520 
6521 	return 0;
6522 }
6523 
/* megasas_get_target_prop - Send DCMD with below details to firmware.
 *
 * This DCMD fetches a few properties of an LD/system PD defined in
 * MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
 *
6531  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6532  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6533  *                       0 = system PD, 1 = LD.
6534  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6535  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6536  *
6537  * @instance:		Adapter soft state
6538  * @sdev:		OS provided scsi device
6539  *
6540  * Returns 0 on success non-zero on failure.
6541  */
6542 int
6543 megasas_get_target_prop(struct megasas_instance *instance,
6544 			struct scsi_device *sdev)
6545 {
6546 	int ret;
6547 	struct megasas_cmd *cmd;
6548 	struct megasas_dcmd_frame *dcmd;
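	/*
	 * The driver exposes system PDs on channels 0-1 and LDs on
	 * channels 2-3, with up to MEGASAS_MAX_DEV_PER_CHANNEL devices per
	 * channel, so the firmware target id is derived from the channel
	 * parity and the SCSI id.
	 */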
	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
			sdev->id;
6550 
6551 	cmd = megasas_get_cmd(instance);
6552 
6553 	if (!cmd) {
6554 		dev_err(&instance->pdev->dev,
6555 			"Failed to get cmd %s\n", __func__);
6556 		return -ENOMEM;
6557 	}
6558 
6559 	dcmd = &cmd->frame->dcmd;
6560 
6561 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6562 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6563 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6564 
6565 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6566 	dcmd->cmd = MFI_CMD_DCMD;
6567 	dcmd->cmd_status = 0xFF;
6568 	dcmd->sge_count = 1;
6569 	dcmd->flags = MFI_FRAME_DIR_READ;
6570 	dcmd->timeout = 0;
6571 	dcmd->pad_0 = 0;
6572 	dcmd->data_xfer_len =
6573 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6574 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6575 
6576 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6577 				 sizeof(struct MR_TARGET_PROPERTIES));
6578 
6579 	if ((instance->adapter_type != MFI_SERIES) &&
6580 	    !instance->mask_interrupts)
6581 		ret = megasas_issue_blocked_cmd(instance,
6582 						cmd, MFI_IO_TIMEOUT_SECS);
6583 	else
6584 		ret = megasas_issue_polled(instance, cmd);
6585 
6586 	switch (ret) {
6587 	case DCMD_TIMEOUT:
6588 		switch (dcmd_timeout_ocr_possible(instance)) {
6589 		case INITIATE_OCR:
6590 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6591 			mutex_unlock(&instance->reset_mutex);
6592 			megasas_reset_fusion(instance->host,
6593 					     MFI_IO_TIMEOUT_OCR);
6594 			mutex_lock(&instance->reset_mutex);
6595 			break;
6596 		case KILL_ADAPTER:
6597 			megaraid_sas_kill_hba(instance);
6598 			break;
6599 		case IGNORE_TIMEOUT:
6600 			dev_info(&instance->pdev->dev,
6601 				 "Ignore DCMD timeout: %s %d\n",
6602 				 __func__, __LINE__);
6603 			break;
6604 		}
6605 		break;
6606 
6607 	default:
6608 		megasas_return_cmd(instance, cmd);
6609 	}
6610 	if (ret != DCMD_SUCCESS)
6611 		dev_err(&instance->pdev->dev,
6612 			"return from %s %d return value %d\n",
6613 			__func__, __LINE__, ret);
6614 
6615 	return ret;
6616 }
6617 
6618 /**
6619  * megasas_start_aen -	Subscribes to AEN during driver load time
6620  * @instance:		Adapter soft state
6621  */
6622 static int megasas_start_aen(struct megasas_instance *instance)
6623 {
6624 	struct megasas_evt_log_info eli;
6625 	union megasas_evt_class_locale class_locale;
6626 
6627 	/*
6628 	 * Get the latest sequence number from FW
6629 	 */
6630 	memset(&eli, 0, sizeof(eli));
6631 
6632 	if (megasas_get_seq_num(instance, &eli))
6633 		return -1;
6634 
6635 	/*
6636 	 * Register AEN with FW for latest sequence number plus 1
6637 	 */
6638 	class_locale.members.reserved = 0;
6639 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6640 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6641 
6642 	return megasas_register_aen(instance,
6643 			le32_to_cpu(eli.newest_seq_num) + 1,
6644 			class_locale.word);
6645 }
6646 
6647 /**
6648  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6649  * @instance:		Adapter soft state
6650  */
6651 static int megasas_io_attach(struct megasas_instance *instance)
6652 {
6653 	struct Scsi_Host *host = instance->host;
6654 
6655 	/*
6656 	 * Export parameters required by SCSI mid-layer
6657 	 */
6658 	host->unique_id = instance->unique_id;
6659 	host->can_queue = instance->max_scsi_cmds;
6660 	host->this_id = instance->init_id;
6661 	host->sg_tablesize = instance->max_num_sge;
6662 
6663 	if (instance->fw_support_ieee)
6664 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6665 
6666 	/*
6667 	 * Check if the module parameter value for max_sectors can be used
6668 	 */
6669 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6670 		instance->max_sectors_per_req = max_sectors;
6671 	else {
6672 		if (max_sectors) {
6673 			if (((instance->pdev->device ==
6674 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6675 				(instance->pdev->device ==
6676 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6677 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6678 				instance->max_sectors_per_req = max_sectors;
6679 			} else {
				dev_info(&instance->pdev->dev,
					 "max_sectors should be > 0 "
					 "and <= %d (or < 1MB for GEN2 controller)\n",
					 instance->max_sectors_per_req);
6683 			}
6684 		}
6685 	}
6686 
6687 	host->max_sectors = instance->max_sectors_per_req;
6688 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6689 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6690 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6691 	host->max_lun = MEGASAS_MAX_LUN;
6692 	host->max_cmd_len = 16;
6693 
6694 	/*
6695 	 * Notify the mid-layer about the new controller
6696 	 */
6697 	if (scsi_add_host(host, &instance->pdev->dev)) {
6698 		dev_err(&instance->pdev->dev,
6699 			"Failed to add host from %s %d\n",
6700 			__func__, __LINE__);
6701 		return -ENODEV;
6702 	}
6703 
6704 	return 0;
6705 }
6706 
6707 /**
6708  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6709  *
6710  * @instance:		Adapter soft state
6711  * Description:
6712  *
6713  * For Ventura, driver/FW will operate in 63bit DMA addresses.
6714  *
6715  * For invader-
6716  *	By default, driver/FW will operate in 32bit DMA addresses
6717  *	for consistent DMA mapping but if 32 bit consistent
6718  *	DMA mask fails, driver will try with 63 bit consistent
6719  *	mask provided FW is true 63bit DMA capable
6720  *
6721  * For older controllers(Thunderbolt and MFI based adapters)-
6722  *	driver/FW will operate in 32 bit consistent DMA addresses.
6723  */
6724 static int
6725 megasas_set_dma_mask(struct megasas_instance *instance)
6726 {
6727 	u64 consistent_mask;
6728 	struct pci_dev *pdev;
6729 	u32 scratch_pad_1;
6730 
6731 	pdev = instance->pdev;
6732 	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6733 				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6734 
6735 	if (IS_DMA64) {
6736 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6737 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6738 			goto fail_set_dma_mask;
6739 
6740 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6741 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6742 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6743 			/*
6744 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6745 			 * for FW capable of handling 64 bit DMA.
6746 			 */
6747 			scratch_pad_1 = megasas_readl
6748 				(instance, &instance->reg_set->outbound_scratch_pad_1);
6749 
6750 			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6751 				goto fail_set_dma_mask;
6752 			else if (dma_set_mask_and_coherent(&pdev->dev,
6753 							   DMA_BIT_MASK(63)))
6754 				goto fail_set_dma_mask;
6755 		}
6756 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6757 		goto fail_set_dma_mask;
6758 
6759 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6760 		instance->consistent_mask_64bit = false;
6761 	else
6762 		instance->consistent_mask_64bit = true;
6763 
6764 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6765 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6766 		 (instance->consistent_mask_64bit ? "63" : "32"));
6767 
6768 	return 0;
6769 
6770 fail_set_dma_mask:
6771 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6772 	return -1;
6773 
6774 }
6775 
6776 /*
6777  * megasas_set_adapter_type -	Set adapter type.
6778  *				Supported controllers can be divided in
6779  *				different categories-
6780  *					enum MR_ADAPTER_TYPE {
6781  *						MFI_SERIES = 1,
6782  *						THUNDERBOLT_SERIES = 2,
6783  *						INVADER_SERIES = 3,
6784  *						VENTURA_SERIES = 4,
6785  *						AERO_SERIES = 5,
6786  *					};
6787  * @instance:			Adapter soft state
6788  * return:			void
6789  */
6790 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6791 {
6792 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6793 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6794 		instance->adapter_type = MFI_SERIES;
6795 	} else {
6796 		switch (instance->pdev->device) {
6797 		case PCI_DEVICE_ID_LSI_AERO_10E1:
6798 		case PCI_DEVICE_ID_LSI_AERO_10E2:
6799 		case PCI_DEVICE_ID_LSI_AERO_10E5:
6800 		case PCI_DEVICE_ID_LSI_AERO_10E6:
6801 			instance->adapter_type = AERO_SERIES;
6802 			break;
6803 		case PCI_DEVICE_ID_LSI_VENTURA:
6804 		case PCI_DEVICE_ID_LSI_CRUSADER:
6805 		case PCI_DEVICE_ID_LSI_HARPOON:
6806 		case PCI_DEVICE_ID_LSI_TOMCAT:
6807 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6808 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6809 			instance->adapter_type = VENTURA_SERIES;
6810 			break;
6811 		case PCI_DEVICE_ID_LSI_FUSION:
6812 		case PCI_DEVICE_ID_LSI_PLASMA:
6813 			instance->adapter_type = THUNDERBOLT_SERIES;
6814 			break;
6815 		case PCI_DEVICE_ID_LSI_INVADER:
6816 		case PCI_DEVICE_ID_LSI_INTRUDER:
6817 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6818 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6819 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6820 		case PCI_DEVICE_ID_LSI_FURY:
6821 			instance->adapter_type = INVADER_SERIES;
6822 			break;
6823 		default: /* For all other supported controllers */
6824 			instance->adapter_type = MFI_SERIES;
6825 			break;
6826 		}
6827 	}
6828 }
6829 
6830 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6831 {
6832 	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6833 			sizeof(u32), &instance->producer_h, GFP_KERNEL);
6834 	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6835 			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6836 
6837 	if (!instance->producer || !instance->consumer) {
6838 		dev_err(&instance->pdev->dev,
6839 			"Failed to allocate memory for producer, consumer\n");
6840 		return -1;
6841 	}
6842 
6843 	*instance->producer = 0;
6844 	*instance->consumer = 0;
6845 	return 0;
6846 }
6847 
6848 /**
6849  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6850  *				structures which are not common across MFI
6851  *				adapters and fusion adapters.
6852  *				For MFI based adapters, allocate producer and
6853  *				consumer buffers. For fusion adapters, allocate
6854  *				memory for fusion context.
6855  * @instance:			Adapter soft state
6856  * return:			0 for SUCCESS
6857  */
6858 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6859 {
6860 	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6861 				      GFP_KERNEL);
6862 	if (!instance->reply_map)
6863 		return -ENOMEM;
6864 
6865 	switch (instance->adapter_type) {
6866 	case MFI_SERIES:
6867 		if (megasas_alloc_mfi_ctrl_mem(instance))
6868 			goto fail;
6869 		break;
6870 	case AERO_SERIES:
6871 	case VENTURA_SERIES:
6872 	case THUNDERBOLT_SERIES:
6873 	case INVADER_SERIES:
6874 		if (megasas_alloc_fusion_context(instance))
6875 			goto fail;
6876 		break;
6877 	}
6878 
6879 	return 0;
6880  fail:
6881 	kfree(instance->reply_map);
6882 	instance->reply_map = NULL;
6883 	return -ENOMEM;
6884 }
6885 
6886 /*
6887  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6888  *				producer, consumer buffers for MFI adapters
6889  *
6890  * @instance -			Adapter soft instance
6891  *
6892  */
6893 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6894 {
6895 	kfree(instance->reply_map);
6896 	if (instance->adapter_type == MFI_SERIES) {
6897 		if (instance->producer)
6898 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6899 					    instance->producer,
6900 					    instance->producer_h);
6901 		if (instance->consumer)
6902 			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6903 					    instance->consumer,
6904 					    instance->consumer_h);
6905 	} else {
6906 		megasas_free_fusion_context(instance);
6907 	}
6908 }
6909 
6910 /**
6911  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6912  *					driver load time
6913  *
 * @instance:				Adapter soft state
 * return:				0 for SUCCESS
6916  */
6917 static inline
6918 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6919 {
6920 	struct pci_dev *pdev = instance->pdev;
6921 	struct fusion_context *fusion = instance->ctrl_context;
6922 
6923 	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
6924 			sizeof(struct megasas_evt_detail),
6925 			&instance->evt_detail_h, GFP_KERNEL);
6926 
6927 	if (!instance->evt_detail) {
6928 		dev_err(&instance->pdev->dev,
6929 			"Failed to allocate event detail buffer\n");
6930 		return -ENOMEM;
6931 	}
6932 
6933 	if (fusion) {
6934 		fusion->ioc_init_request =
6935 			dma_alloc_coherent(&pdev->dev,
6936 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
6937 					   &fusion->ioc_init_request_phys,
6938 					   GFP_KERNEL);
6939 
6940 		if (!fusion->ioc_init_request) {
6941 			dev_err(&pdev->dev,
6942 				"Failed to allocate PD list buffer\n");
6943 			return -ENOMEM;
6944 		}
6945 
6946 		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
6947 				sizeof(struct MR_SNAPDUMP_PROPERTIES),
6948 				&instance->snapdump_prop_h, GFP_KERNEL);
6949 
6950 		if (!instance->snapdump_prop)
6951 			dev_err(&pdev->dev,
6952 				"Failed to allocate snapdump properties buffer\n");
6953 
6954 		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
6955 							HOST_DEVICE_LIST_SZ,
6956 							&instance->host_device_list_buf_h,
6957 							GFP_KERNEL);
6958 
6959 		if (!instance->host_device_list_buf) {
6960 			dev_err(&pdev->dev,
6961 				"Failed to allocate targetid list buffer\n");
6962 			return -ENOMEM;
6963 		}
6964 
6965 	}
6966 
6967 	instance->pd_list_buf =
6968 		dma_alloc_coherent(&pdev->dev,
6969 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6970 				     &instance->pd_list_buf_h, GFP_KERNEL);
6971 
6972 	if (!instance->pd_list_buf) {
6973 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6974 		return -ENOMEM;
6975 	}
6976 
6977 	instance->ctrl_info_buf =
6978 		dma_alloc_coherent(&pdev->dev,
6979 				     sizeof(struct megasas_ctrl_info),
6980 				     &instance->ctrl_info_buf_h, GFP_KERNEL);
6981 
6982 	if (!instance->ctrl_info_buf) {
6983 		dev_err(&pdev->dev,
6984 			"Failed to allocate controller info buffer\n");
6985 		return -ENOMEM;
6986 	}
6987 
6988 	instance->ld_list_buf =
6989 		dma_alloc_coherent(&pdev->dev,
6990 				     sizeof(struct MR_LD_LIST),
6991 				     &instance->ld_list_buf_h, GFP_KERNEL);
6992 
6993 	if (!instance->ld_list_buf) {
6994 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6995 		return -ENOMEM;
6996 	}
6997 
6998 	instance->ld_targetid_list_buf =
6999 		dma_alloc_coherent(&pdev->dev,
7000 				sizeof(struct MR_LD_TARGETID_LIST),
7001 				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7002 
7003 	if (!instance->ld_targetid_list_buf) {
7004 		dev_err(&pdev->dev,
7005 			"Failed to allocate LD targetid list buffer\n");
7006 		return -ENOMEM;
7007 	}
7008 
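	/*
	 * The buffers below are optional and are skipped in the kdump
	 * kernel to keep the memory footprint small; allocation failures
	 * here are logged but do not fail the load.
	 */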
7009 	if (!reset_devices) {
7010 		instance->system_info_buf =
7011 			dma_alloc_coherent(&pdev->dev,
7012 					sizeof(struct MR_DRV_SYSTEM_INFO),
7013 					&instance->system_info_h, GFP_KERNEL);
7014 		instance->pd_info =
7015 			dma_alloc_coherent(&pdev->dev,
7016 					sizeof(struct MR_PD_INFO),
7017 					&instance->pd_info_h, GFP_KERNEL);
7018 		instance->tgt_prop =
7019 			dma_alloc_coherent(&pdev->dev,
7020 					sizeof(struct MR_TARGET_PROPERTIES),
7021 					&instance->tgt_prop_h, GFP_KERNEL);
7022 		instance->crash_dump_buf =
7023 			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7024 					&instance->crash_dump_h, GFP_KERNEL);
7025 
7026 		if (!instance->system_info_buf)
7027 			dev_err(&instance->pdev->dev,
7028 				"Failed to allocate system info buffer\n");
7029 
7030 		if (!instance->pd_info)
7031 			dev_err(&instance->pdev->dev,
7032 				"Failed to allocate pd_info buffer\n");
7033 
7034 		if (!instance->tgt_prop)
7035 			dev_err(&instance->pdev->dev,
7036 				"Failed to allocate tgt_prop buffer\n");
7037 
7038 		if (!instance->crash_dump_buf)
7039 			dev_err(&instance->pdev->dev,
7040 				"Failed to allocate crash dump buffer\n");
7041 	}
7042 
7043 	return 0;
7044 }
7045 
7046 /*
7047  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7048  *					during driver load time
7049  *
 * @instance:				Adapter soft state
7052  */
7053 static inline
7054 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7055 {
7056 	struct pci_dev *pdev = instance->pdev;
7057 	struct fusion_context *fusion = instance->ctrl_context;
7058 
7059 	if (instance->evt_detail)
7060 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7061 				    instance->evt_detail,
7062 				    instance->evt_detail_h);
7063 
7064 	if (fusion && fusion->ioc_init_request)
7065 		dma_free_coherent(&pdev->dev,
7066 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7067 				  fusion->ioc_init_request,
7068 				  fusion->ioc_init_request_phys);
7069 
7070 	if (instance->pd_list_buf)
7071 		dma_free_coherent(&pdev->dev,
7072 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7073 				    instance->pd_list_buf,
7074 				    instance->pd_list_buf_h);
7075 
7076 	if (instance->ld_list_buf)
7077 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7078 				    instance->ld_list_buf,
7079 				    instance->ld_list_buf_h);
7080 
7081 	if (instance->ld_targetid_list_buf)
7082 		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7083 				    instance->ld_targetid_list_buf,
7084 				    instance->ld_targetid_list_buf_h);
7085 
7086 	if (instance->ctrl_info_buf)
7087 		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7088 				    instance->ctrl_info_buf,
7089 				    instance->ctrl_info_buf_h);
7090 
7091 	if (instance->system_info_buf)
7092 		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7093 				    instance->system_info_buf,
7094 				    instance->system_info_h);
7095 
7096 	if (instance->pd_info)
7097 		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7098 				    instance->pd_info, instance->pd_info_h);
7099 
7100 	if (instance->tgt_prop)
7101 		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7102 				    instance->tgt_prop, instance->tgt_prop_h);
7103 
7104 	if (instance->crash_dump_buf)
7105 		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7106 				    instance->crash_dump_buf,
7107 				    instance->crash_dump_h);
7108 
7109 	if (instance->snapdump_prop)
7110 		dma_free_coherent(&pdev->dev,
7111 				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7112 				  instance->snapdump_prop,
7113 				  instance->snapdump_prop_h);
7114 
7115 	if (instance->host_device_list_buf)
7116 		dma_free_coherent(&pdev->dev,
7117 				  HOST_DEVICE_LIST_SZ,
7118 				  instance->host_device_list_buf,
				  instance->host_device_list_buf_h);
}
7122 
7123 /*
7124  * megasas_init_ctrl_params -		Initialize controller's instance
7125  *					parameters before FW init
 * @instance:				Adapter soft state
 * return:				void
7128  */
7129 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7130 {
7131 	instance->fw_crash_state = UNAVAILABLE;
7132 
7133 	megasas_poll_wait_aen = 0;
7134 	instance->issuepend_done = 1;
7135 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7136 
7137 	/*
7138 	 * Initialize locks and queues
7139 	 */
7140 	INIT_LIST_HEAD(&instance->cmd_pool);
7141 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7142 
7143 	atomic_set(&instance->fw_outstanding, 0);
7144 	atomic64_set(&instance->total_io_count, 0);
7145 
7146 	init_waitqueue_head(&instance->int_cmd_wait_q);
7147 	init_waitqueue_head(&instance->abort_cmd_wait_q);
7148 
7149 	spin_lock_init(&instance->crashdump_lock);
7150 	spin_lock_init(&instance->mfi_pool_lock);
7151 	spin_lock_init(&instance->hba_lock);
7152 	spin_lock_init(&instance->stream_lock);
7153 	spin_lock_init(&instance->completion_lock);
7154 
7155 	mutex_init(&instance->reset_mutex);
7156 
7157 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7158 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7159 		instance->flag_ieee = 1;
7160 
7161 	megasas_dbg_lvl = 0;
7162 	instance->flag = 0;
7163 	instance->unload = 1;
7164 	instance->last_time = 0;
7165 	instance->disableOnlineCtrlReset = 1;
7166 	instance->UnevenSpanSupport = 0;
7167 	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7168 	instance->msix_load_balance = false;
7169 
7170 	if (instance->adapter_type != MFI_SERIES)
7171 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7172 	else
7173 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7174 }
7175 
7176 /**
7177  * megasas_probe_one -	PCI hotplug entry point
7178  * @pdev:		PCI device structure
7179  * @id:			PCI ids of supported hotplugged adapter
7180  */
7181 static int megasas_probe_one(struct pci_dev *pdev,
7182 			     const struct pci_device_id *id)
7183 {
7184 	int rval, pos;
7185 	struct Scsi_Host *host;
7186 	struct megasas_instance *instance;
7187 	u16 control = 0;
7188 
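	/*
	 * Aero adapters report their security mode through the PCI device
	 * ID: non-secure variants are rejected outright, while the
	 * configurable secure variants only get an informational message.
	 */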
7189 	switch (pdev->device) {
7190 	case PCI_DEVICE_ID_LSI_AERO_10E0:
7191 	case PCI_DEVICE_ID_LSI_AERO_10E3:
7192 	case PCI_DEVICE_ID_LSI_AERO_10E4:
7193 	case PCI_DEVICE_ID_LSI_AERO_10E7:
7194 		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7195 		return 1;
7196 	case PCI_DEVICE_ID_LSI_AERO_10E1:
7197 	case PCI_DEVICE_ID_LSI_AERO_10E5:
7198 		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7199 		break;
7200 	}
7201 
7202 	/* Reset MSI-X in the kdump kernel */
7203 	if (reset_devices) {
7204 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7205 		if (pos) {
7206 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7207 					     &control);
7208 			if (control & PCI_MSIX_FLAGS_ENABLE) {
7209 				dev_info(&pdev->dev, "resetting MSI-X\n");
7210 				pci_write_config_word(pdev,
7211 						      pos + PCI_MSIX_FLAGS,
7212 						      control &
7213 						      ~PCI_MSIX_FLAGS_ENABLE);
7214 			}
7215 		}
7216 	}
7217 
7218 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7220 	 */
7221 	rval = pci_enable_device_mem(pdev);
7222 
7223 	if (rval) {
7224 		return rval;
7225 	}
7226 
7227 	pci_set_master(pdev);
7228 
7229 	host = scsi_host_alloc(&megasas_template,
7230 			       sizeof(struct megasas_instance));
7231 
7232 	if (!host) {
7233 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7234 		goto fail_alloc_instance;
7235 	}
7236 
7237 	instance = (struct megasas_instance *)host->hostdata;
7238 	memset(instance, 0, sizeof(*instance));
7239 	atomic_set(&instance->fw_reset_no_pci_access, 0);
7240 
7241 	/*
7242 	 * Initialize PCI related and misc parameters
7243 	 */
7244 	instance->pdev = pdev;
7245 	instance->host = host;
7246 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7247 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7248 
7249 	megasas_set_adapter_type(instance);
7250 
7251 	/*
7252 	 * Initialize MFI Firmware
7253 	 */
7254 	if (megasas_init_fw(instance))
7255 		goto fail_init_mfi;
7256 
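	/*
	 * requestorId is non-zero when running as an SR-IOV VF. Allocate
	 * the buffer used to query LD/VF affiliation from firmware; the
	 * PlasmaFW111 variant uses the older, smaller layout.
	 */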
7257 	if (instance->requestorId) {
7258 		if (instance->PlasmaFW111) {
7259 			instance->vf_affiliation_111 =
7260 				dma_alloc_coherent(&pdev->dev,
7261 					sizeof(struct MR_LD_VF_AFFILIATION_111),
7262 					&instance->vf_affiliation_111_h,
7263 					GFP_KERNEL);
7264 			if (!instance->vf_affiliation_111)
7265 				dev_warn(&pdev->dev, "Can't allocate "
7266 				       "memory for VF affiliation buffer\n");
7267 		} else {
7268 			instance->vf_affiliation =
7269 				dma_alloc_coherent(&pdev->dev,
7270 					(MAX_LOGICAL_DRIVES + 1) *
7271 					sizeof(struct MR_LD_VF_AFFILIATION),
7272 					&instance->vf_affiliation_h,
7273 					GFP_KERNEL);
7274 			if (!instance->vf_affiliation)
7275 				dev_warn(&pdev->dev, "Can't allocate "
7276 				       "memory for VF affiliation buffer\n");
7277 		}
7278 	}
7279 
7280 	/*
7281 	 * Store instance in PCI softstate
7282 	 */
7283 	pci_set_drvdata(pdev, instance);
7284 
7285 	/*
7286 	 * Add this controller to megasas_mgmt_info structure so that it
7287 	 * can be exported to management applications
7288 	 */
7289 	megasas_mgmt_info.count++;
7290 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7291 	megasas_mgmt_info.max_index++;
7292 
7293 	/*
7294 	 * Register with SCSI mid-layer
7295 	 */
7296 	if (megasas_io_attach(instance))
7297 		goto fail_io_attach;
7298 
7299 	instance->unload = 0;
7300 	/*
7301 	 * Trigger SCSI to scan our drives
7302 	 */
7303 	if (!instance->enable_fw_dev_list ||
7304 	    (instance->host_device_list_buf->count > 0))
7305 		scsi_scan_host(host);
7306 
7307 	/*
7308 	 * Initiate AEN (Asynchronous Event Notification)
7309 	 */
7310 	if (megasas_start_aen(instance)) {
7311 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7312 		goto fail_start_aen;
7313 	}
7314 
7315 	megasas_setup_debugfs(instance);
7316 
7317 	/* Get current SR-IOV LD/VF affiliation */
7318 	if (instance->requestorId)
7319 		megasas_get_ld_vf_affiliation(instance, 1);
7320 
7321 	return 0;
7322 
7323 fail_start_aen:
7324 fail_io_attach:
7325 	megasas_mgmt_info.count--;
7326 	megasas_mgmt_info.max_index--;
7327 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7328 
7329 	instance->instancet->disable_intr(instance);
7330 	megasas_destroy_irqs(instance);
7331 
7332 	if (instance->adapter_type != MFI_SERIES)
7333 		megasas_release_fusion(instance);
7334 	else
7335 		megasas_release_mfi(instance);
7336 	if (instance->msix_vectors)
7337 		pci_free_irq_vectors(instance->pdev);
7338 fail_init_mfi:
7339 	scsi_host_put(host);
7340 fail_alloc_instance:
7341 	pci_disable_device(pdev);
7342 
7343 	return -ENODEV;
7344 }
7345 
7346 /**
7347  * megasas_flush_cache -	Requests FW to flush all its caches
7348  * @instance:			Adapter soft state
7349  */
7350 static void megasas_flush_cache(struct megasas_instance *instance)
7351 {
7352 	struct megasas_cmd *cmd;
7353 	struct megasas_dcmd_frame *dcmd;
7354 
7355 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7356 		return;
7357 
7358 	cmd = megasas_get_cmd(instance);
7359 
7360 	if (!cmd)
7361 		return;
7362 
7363 	dcmd = &cmd->frame->dcmd;
7364 
7365 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7366 
7367 	dcmd->cmd = MFI_CMD_DCMD;
7368 	dcmd->cmd_status = 0x0;
7369 	dcmd->sge_count = 0;
7370 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7371 	dcmd->timeout = 0;
7372 	dcmd->pad_0 = 0;
7373 	dcmd->data_xfer_len = 0;
7374 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
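	/* Request a flush of both the controller cache and the disk caches */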
7375 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7376 
7377 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7378 			!= DCMD_SUCCESS) {
7379 		dev_err(&instance->pdev->dev,
7380 			"return from %s %d\n", __func__, __LINE__);
7381 		return;
7382 	}
7383 
7384 	megasas_return_cmd(instance, cmd);
7385 }
7386 
7387 /**
 * megasas_shutdown_controller -	Instructs FW to shut down the controller
7389  * @instance:				Adapter soft state
7390  * @opcode:				Shutdown/Hibernate
7391  */
7392 static void megasas_shutdown_controller(struct megasas_instance *instance,
7393 					u32 opcode)
7394 {
7395 	struct megasas_cmd *cmd;
7396 	struct megasas_dcmd_frame *dcmd;
7397 
7398 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7399 		return;
7400 
7401 	cmd = megasas_get_cmd(instance);
7402 
7403 	if (!cmd)
7404 		return;
7405 
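	/*
	 * Abort any outstanding internal commands (AEN, RAID map update,
	 * JBOD sequence) before asking firmware to shut down or hibernate.
	 */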
7406 	if (instance->aen_cmd)
7407 		megasas_issue_blocked_abort_cmd(instance,
7408 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7409 	if (instance->map_update_cmd)
7410 		megasas_issue_blocked_abort_cmd(instance,
7411 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7412 	if (instance->jbod_seq_cmd)
7413 		megasas_issue_blocked_abort_cmd(instance,
7414 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7415 
7416 	dcmd = &cmd->frame->dcmd;
7417 
7418 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7419 
7420 	dcmd->cmd = MFI_CMD_DCMD;
7421 	dcmd->cmd_status = 0x0;
7422 	dcmd->sge_count = 0;
7423 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7424 	dcmd->timeout = 0;
7425 	dcmd->pad_0 = 0;
7426 	dcmd->data_xfer_len = 0;
7427 	dcmd->opcode = cpu_to_le32(opcode);
7428 
7429 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7430 			!= DCMD_SUCCESS) {
7431 		dev_err(&instance->pdev->dev,
7432 			"return from %s %d\n", __func__, __LINE__);
7433 		return;
7434 	}
7435 
7436 	megasas_return_cmd(instance, cmd);
7437 }
7438 
7439 #ifdef CONFIG_PM
7440 /**
7441  * megasas_suspend -	driver suspend entry point
7442  * @pdev:		PCI device structure
7443  * @state:		PCI power state to suspend routine
7444  */
7445 static int
7446 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7447 {
7448 	struct megasas_instance *instance;
7449 
7450 	instance = pci_get_drvdata(pdev);
7451 
7452 	if (!instance)
7453 		return 0;
7454 
7455 	instance->unload = 1;
7456 
7457 	dev_info(&pdev->dev, "%s is called\n", __func__);
7458 
7459 	/* Shutdown SR-IOV heartbeat timer */
7460 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7461 		del_timer_sync(&instance->sriov_heartbeat_timer);
7462 
7463 	/* Stop the FW fault detection watchdog */
7464 	if (instance->adapter_type != MFI_SERIES)
7465 		megasas_fusion_stop_watchdog(instance);
7466 
7467 	megasas_flush_cache(instance);
7468 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7469 
	/* cancel the delayed work if it is still queued */
7471 	if (instance->ev != NULL) {
7472 		struct megasas_aen_event *ev = instance->ev;
7473 		cancel_delayed_work_sync(&ev->hotplug_work);
7474 		instance->ev = NULL;
7475 	}
7476 
7477 	tasklet_kill(&instance->isr_tasklet);
7478 
7479 	pci_set_drvdata(instance->pdev, instance);
7480 	instance->instancet->disable_intr(instance);
7481 
7482 	megasas_destroy_irqs(instance);
7483 
7484 	if (instance->msix_vectors)
7485 		pci_free_irq_vectors(instance->pdev);
7486 
7487 	pci_save_state(pdev);
7488 	pci_disable_device(pdev);
7489 
7490 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7491 
7492 	return 0;
7493 }
7494 
7495 /**
 * megasas_resume -	driver resume entry point
7497  * @pdev:               PCI device structure
7498  */
7499 static int
7500 megasas_resume(struct pci_dev *pdev)
7501 {
7502 	int rval;
7503 	struct Scsi_Host *host;
7504 	struct megasas_instance *instance;
7505 	int irq_flags = PCI_IRQ_LEGACY;
7506 
7507 	instance = pci_get_drvdata(pdev);
7508 
7509 	if (!instance)
7510 		return 0;
7511 
7512 	host = instance->host;
7513 	pci_set_power_state(pdev, PCI_D0);
7514 	pci_enable_wake(pdev, PCI_D0, 0);
7515 	pci_restore_state(pdev);
7516 
7517 	dev_info(&pdev->dev, "%s is called\n", __func__);
7518 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
7520 	 */
7521 	rval = pci_enable_device_mem(pdev);
7522 
7523 	if (rval) {
7524 		dev_err(&pdev->dev, "Enable device failed\n");
7525 		return rval;
7526 	}
7527 
7528 	pci_set_master(pdev);
7529 
7530 	/*
7531 	 * We expect the FW state to be READY
7532 	 */
7533 	if (megasas_transition_to_ready(instance, 0))
7534 		goto fail_ready_state;
7535 
7536 	if (megasas_set_dma_mask(instance))
7537 		goto fail_set_dma_mask;
7538 
7539 	/*
7540 	 * Initialize MFI Firmware
7541 	 */
7542 
7543 	atomic_set(&instance->fw_outstanding, 0);
7544 	atomic_set(&instance->ldio_outstanding, 0);
7545 
7546 	/* Now re-enable MSI-X */
7547 	if (instance->msix_vectors) {
7548 		irq_flags = PCI_IRQ_MSIX;
7549 		if (instance->smp_affinity_enable)
7550 			irq_flags |= PCI_IRQ_AFFINITY;
7551 	}
7552 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
7553 				     instance->msix_vectors ?
7554 				     instance->msix_vectors : 1, irq_flags);
7555 	if (rval < 0)
7556 		goto fail_reenable_msix;
7557 
7558 	megasas_setup_reply_map(instance);
7559 
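	/*
	 * Fusion adapters must re-run IOC INIT after resume; MFI adapters
	 * only reset the producer/consumer indices and re-issue INIT MFI.
	 */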
7560 	if (instance->adapter_type != MFI_SERIES) {
7561 		megasas_reset_reply_desc(instance);
7562 		if (megasas_ioc_init_fusion(instance)) {
7563 			megasas_free_cmds(instance);
7564 			megasas_free_cmds_fusion(instance);
7565 			goto fail_init_mfi;
7566 		}
7567 		if (!megasas_get_map_info(instance))
7568 			megasas_sync_map_info(instance);
7569 	} else {
7570 		*instance->producer = 0;
7571 		*instance->consumer = 0;
7572 		if (megasas_issue_init_mfi(instance))
7573 			goto fail_init_mfi;
7574 	}
7575 
7576 	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7577 		goto fail_init_mfi;
7578 
7579 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7580 		     (unsigned long)instance);
7581 
7582 	if (instance->msix_vectors ?
7583 			megasas_setup_irqs_msix(instance, 0) :
7584 			megasas_setup_irqs_ioapic(instance))
7585 		goto fail_init_mfi;
7586 
7587 	if (instance->adapter_type != MFI_SERIES)
7588 		megasas_setup_irq_poll(instance);
7589 
7590 	/* Re-launch SR-IOV heartbeat timer */
7591 	if (instance->requestorId) {
7592 		if (!megasas_sriov_start_heartbeat(instance, 0))
7593 			megasas_start_timer(instance);
7594 		else {
7595 			instance->skip_heartbeat_timer_del = 1;
7596 			goto fail_init_mfi;
7597 		}
7598 	}
7599 
7600 	instance->instancet->enable_intr(instance);
7601 	megasas_setup_jbod_map(instance);
7602 	instance->unload = 0;
7603 
7604 	/*
7605 	 * Initiate AEN (Asynchronous Event Notification)
7606 	 */
7607 	if (megasas_start_aen(instance))
7608 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7609 
7610 	/* Re-launch FW fault watchdog */
7611 	if (instance->adapter_type != MFI_SERIES)
7612 		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7613 			goto fail_start_watchdog;
7614 
7615 	return 0;
7616 
7617 fail_start_watchdog:
7618 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7619 		del_timer_sync(&instance->sriov_heartbeat_timer);
7620 fail_init_mfi:
7621 	megasas_free_ctrl_dma_buffers(instance);
7622 	megasas_free_ctrl_mem(instance);
7623 	scsi_host_put(host);
7624 
7625 fail_reenable_msix:
7626 fail_set_dma_mask:
7627 fail_ready_state:
7628 
7629 	pci_disable_device(pdev);
7630 
7631 	return -ENODEV;
7632 }
7633 #else
7634 #define megasas_suspend	NULL
7635 #define megasas_resume	NULL
7636 #endif
7637 
7638 static inline int
7639 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7640 {
7641 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7642 	int i;
7643 	u8 adp_state;
7644 
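	/*
	 * Poll adprecovery once per second, for up to twice the reset wait
	 * time, until the HBA is operational or hits a critical error.
	 */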
7645 	for (i = 0; i < wait_time; i++) {
7646 		adp_state = atomic_read(&instance->adprecovery);
7647 		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7648 		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7649 			break;
7650 
7651 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7652 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7653 
7654 		msleep(1000);
7655 	}
7656 
7657 	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7658 		dev_info(&instance->pdev->dev,
7659 			 "%s HBA failed to become operational, adp_state %d\n",
7660 			 __func__, adp_state);
7661 		return 1;
7662 	}
7663 
7664 	return 0;
7665 }
7666 
7667 /**
 * megasas_detach_one -	PCI hot unplug entry point
7669  * @pdev:		PCI device structure
7670  */
7671 static void megasas_detach_one(struct pci_dev *pdev)
7672 {
7673 	int i;
7674 	struct Scsi_Host *host;
7675 	struct megasas_instance *instance;
7676 	struct fusion_context *fusion;
7677 	u32 pd_seq_map_sz;
7678 
7679 	instance = pci_get_drvdata(pdev);
7680 
7681 	if (!instance)
7682 		return;
7683 
7684 	host = instance->host;
7685 	fusion = instance->ctrl_context;
7686 
7687 	/* Shutdown SR-IOV heartbeat timer */
7688 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7689 		del_timer_sync(&instance->sriov_heartbeat_timer);
7690 
7691 	/* Stop the FW fault detection watchdog */
7692 	if (instance->adapter_type != MFI_SERIES)
7693 		megasas_fusion_stop_watchdog(instance);
7694 
7695 	if (instance->fw_crash_state != UNAVAILABLE)
7696 		megasas_free_host_crash_buffer(instance);
7697 	scsi_remove_host(instance->host);
7698 	instance->unload = 1;
7699 
7700 	if (megasas_wait_for_adapter_operational(instance))
7701 		goto skip_firing_dcmds;
7702 
7703 	megasas_flush_cache(instance);
7704 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7705 
7706 skip_firing_dcmds:
	/* cancel the delayed work if it is still queued */
7708 	if (instance->ev != NULL) {
7709 		struct megasas_aen_event *ev = instance->ev;
7710 		cancel_delayed_work_sync(&ev->hotplug_work);
7711 		instance->ev = NULL;
7712 	}
7713 
7714 	/* cancel all wait events */
7715 	wake_up_all(&instance->int_cmd_wait_q);
7716 
7717 	tasklet_kill(&instance->isr_tasklet);
7718 
7719 	/*
7720 	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array
7722 	 */
7723 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7724 		if (megasas_mgmt_info.instance[i] == instance) {
7725 			megasas_mgmt_info.count--;
7726 			megasas_mgmt_info.instance[i] = NULL;
7727 
7728 			break;
7729 		}
7730 	}
7731 
7732 	instance->instancet->disable_intr(instance);
7733 
7734 	megasas_destroy_irqs(instance);
7735 
7736 	if (instance->msix_vectors)
7737 		pci_free_irq_vectors(instance->pdev);
7738 
7739 	if (instance->adapter_type >= VENTURA_SERIES) {
7740 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7741 			kfree(fusion->stream_detect_by_ld[i]);
7742 		kfree(fusion->stream_detect_by_ld);
7743 		fusion->stream_detect_by_ld = NULL;
7744 	}
7745 
7746 
7747 	if (instance->adapter_type != MFI_SERIES) {
7748 		megasas_release_fusion(instance);
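		/*
		 * The sync structure already embeds one MR_PD_CFG_SEQ entry,
		 * so only MAX_PHYSICAL_DEVICES - 1 extra entries are added.
		 */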
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
			 (MAX_PHYSICAL_DEVICES - 1));
7752 		for (i = 0; i < 2 ; i++) {
7753 			if (fusion->ld_map[i])
7754 				dma_free_coherent(&instance->pdev->dev,
7755 						  fusion->max_map_sz,
7756 						  fusion->ld_map[i],
7757 						  fusion->ld_map_phys[i]);
7758 			if (fusion->ld_drv_map[i]) {
7759 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7760 					vfree(fusion->ld_drv_map[i]);
7761 				else
7762 					free_pages((ulong)fusion->ld_drv_map[i],
7763 						   fusion->drv_map_pages);
7764 			}
7765 
7766 			if (fusion->pd_seq_sync[i])
7767 				dma_free_coherent(&instance->pdev->dev,
7768 					pd_seq_map_sz,
7769 					fusion->pd_seq_sync[i],
7770 					fusion->pd_seq_phys[i]);
7771 		}
7772 	} else {
7773 		megasas_release_mfi(instance);
7774 	}
7775 
7776 	if (instance->vf_affiliation)
7777 		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7778 				    sizeof(struct MR_LD_VF_AFFILIATION),
7779 				    instance->vf_affiliation,
7780 				    instance->vf_affiliation_h);
7781 
7782 	if (instance->vf_affiliation_111)
7783 		dma_free_coherent(&pdev->dev,
7784 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
7785 				    instance->vf_affiliation_111,
7786 				    instance->vf_affiliation_111_h);
7787 
7788 	if (instance->hb_host_mem)
7789 		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7790 				    instance->hb_host_mem,
7791 				    instance->hb_host_mem_h);
7792 
7793 	megasas_free_ctrl_dma_buffers(instance);
7794 
7795 	megasas_free_ctrl_mem(instance);
7796 
7797 	megasas_destroy_debugfs(instance);
7798 
7799 	scsi_host_put(host);
7800 
7801 	pci_disable_device(pdev);
7802 }
7803 
7804 /**
7805  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
7807  */
7808 static void megasas_shutdown(struct pci_dev *pdev)
7809 {
7810 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7811 
7812 	if (!instance)
7813 		return;
7814 
7815 	instance->unload = 1;
7816 
7817 	if (megasas_wait_for_adapter_operational(instance))
7818 		goto skip_firing_dcmds;
7819 
7820 	megasas_flush_cache(instance);
7821 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7822 
7823 skip_firing_dcmds:
7824 	instance->instancet->disable_intr(instance);
7825 	megasas_destroy_irqs(instance);
7826 
7827 	if (instance->msix_vectors)
7828 		pci_free_irq_vectors(instance->pdev);
7829 }
7830 
7831 /**
7832  * megasas_mgmt_open -	char node "open" entry point
7833  */
7834 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7835 {
7836 	/*
7837 	 * Allow only those users with admin rights
7838 	 */
7839 	if (!capable(CAP_SYS_ADMIN))
7840 		return -EACCES;
7841 
7842 	return 0;
7843 }
7844 
7845 /**
7846  * megasas_mgmt_fasync -	Async notifier registration from applications
7847  *
7848  * This function adds the calling process to a driver global queue. When an
7849  * event occurs, SIGIO will be sent to all processes in this queue.
7850  */
7851 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7852 {
7853 	int rc;
7854 
7855 	mutex_lock(&megasas_async_queue_mutex);
7856 
7857 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7858 
7859 	mutex_unlock(&megasas_async_queue_mutex);
7860 
7861 	if (rc >= 0) {
		/* For a sanity check when we get an ioctl */
7863 		filep->private_data = filep;
7864 		return 0;
7865 	}
7866 
7867 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7868 
7869 	return rc;
7870 }
7871 
7872 /**
 * megasas_mgmt_poll -	char node "poll" entry point
 */
7875 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7876 {
7877 	__poll_t mask;
7878 	unsigned long flags;
7879 
7880 	poll_wait(file, &megasas_poll_wait, wait);
7881 	spin_lock_irqsave(&poll_aen_lock, flags);
7882 	if (megasas_poll_wait_aen)
7883 		mask = (EPOLLIN | EPOLLRDNORM);
7884 	else
7885 		mask = 0;
7886 	megasas_poll_wait_aen = 0;
7887 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7888 	return mask;
7889 }
7890 
7891 /*
7892  * megasas_set_crash_dump_params_ioctl:
7893  *		Send CRASH_DUMP_MODE DCMD to all controllers
7894  * @cmd:	MFI command frame
7895  */
7896 
7897 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7898 {
7899 	struct megasas_instance *local_instance;
7900 	int i, error = 0;
7901 	int crash_support;
7902 
7903 	crash_support = cmd->frame->dcmd.mbox.w[0];
7904 
7905 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7906 		local_instance = megasas_mgmt_info.instance[i];
7907 		if (local_instance && local_instance->crash_dump_drv_support) {
7908 			if ((atomic_read(&local_instance->adprecovery) ==
7909 				MEGASAS_HBA_OPERATIONAL) &&
7910 				!megasas_set_crash_dump_params(local_instance,
7911 					crash_support)) {
7912 				local_instance->crash_dump_app_support =
7913 					crash_support;
7914 				dev_info(&local_instance->pdev->dev,
7915 					"Application firmware crash "
7916 					"dump mode set success\n");
7917 				error = 0;
7918 			} else {
7919 				dev_info(&local_instance->pdev->dev,
7920 					"Application firmware crash "
7921 					"dump mode set failed\n");
7922 				error = -1;
7923 			}
7924 		}
7925 	}
7926 	return error;
7927 }
7928 
7929 /**
7930  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
7931  * @instance:			Adapter soft state
7932  * @argp:			User's ioctl packet
7933  */
7934 static int
7935 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7936 		      struct megasas_iocpacket __user * user_ioc,
7937 		      struct megasas_iocpacket *ioc)
7938 {
7939 	struct megasas_sge64 *kern_sge64 = NULL;
7940 	struct megasas_sge32 *kern_sge32 = NULL;
7941 	struct megasas_cmd *cmd;
7942 	void *kbuff_arr[MAX_IOCTL_SGE];
7943 	dma_addr_t buf_handle = 0;
7944 	int error = 0, i;
7945 	void *sense = NULL;
7946 	dma_addr_t sense_handle;
7947 	unsigned long *sense_ptr;
7948 	u32 opcode = 0;
7949 
7950 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
7951 
7952 	if (ioc->sge_count > MAX_IOCTL_SGE) {
7953 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
7954 		       ioc->sge_count, MAX_IOCTL_SGE);
7955 		return -EINVAL;
7956 	}
7957 
7958 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7959 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7960 	    !instance->support_nvme_passthru) ||
7961 	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
7962 	    !instance->support_pci_lane_margining)) {
7963 		dev_err(&instance->pdev->dev,
7964 			"Received invalid ioctl command 0x%x\n",
7965 			ioc->frame.hdr.cmd);
7966 		return -ENOTSUPP;
7967 	}
7968 
7969 	cmd = megasas_get_cmd(instance);
7970 	if (!cmd) {
7971 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7972 		return -ENOMEM;
7973 	}
7974 
7975 	/*
7976 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
7977 	 * frames into our cmd's frames. cmd->frame's context will get
7978 	 * overwritten when we copy from user's frames. So set that value
7979 	 * alone separately
7980 	 */
7981 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7982 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7983 	cmd->frame->hdr.pad_0 = 0;
7984 
7985 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7986 
7987 	if (instance->consistent_mask_64bit)
7988 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7989 				       MFI_FRAME_SENSE64));
7990 	else
7991 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
7992 					       MFI_FRAME_SENSE64));
7993 
7994 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7995 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7996 
7997 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7998 		mutex_lock(&instance->reset_mutex);
7999 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8000 			megasas_return_cmd(instance, cmd);
8001 			mutex_unlock(&instance->reset_mutex);
8002 			return -1;
8003 		}
8004 		mutex_unlock(&instance->reset_mutex);
8005 	}
8006 
8007 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8008 		error = megasas_set_crash_dump_params_ioctl(cmd);
8009 		megasas_return_cmd(instance, cmd);
8010 		return error;
8011 	}
8012 
8013 	/*
8014 	 * The management interface between applications and the fw uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes,
	 * etc. are accomplished through different kinds of MFI frames. The
	 * driver needs to care only about substituting user buffers with
	 * kernel buffers in SGLs. The location of the SGL is embedded in the
8019 	 * struct iocpacket itself.
8020 	 */
8021 	if (instance->consistent_mask_64bit)
8022 		kern_sge64 = (struct megasas_sge64 *)
8023 			((unsigned long)cmd->frame + ioc->sgl_off);
8024 	else
8025 		kern_sge32 = (struct megasas_sge32 *)
8026 			((unsigned long)cmd->frame + ioc->sgl_off);
8027 
8028 	/*
8029 	 * For each user buffer, create a mirror buffer and copy in
8030 	 */
8031 	for (i = 0; i < ioc->sge_count; i++) {
8032 		if (!ioc->sgl[i].iov_len)
8033 			continue;
8034 
8035 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8036 						    ioc->sgl[i].iov_len,
8037 						    &buf_handle, GFP_KERNEL);
8038 		if (!kbuff_arr[i]) {
8039 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8040 			       "kernel SGL buffer for IOCTL\n");
8041 			error = -ENOMEM;
8042 			goto out;
8043 		}
8044 
8045 		/*
		 * Program the DMA address as a 64-bit or 32-bit SGE,
		 * depending on the controller's consistent DMA mask
8048 		 */
8049 		if (instance->consistent_mask_64bit) {
8050 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8051 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8052 		} else {
8053 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8054 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8055 		}
8056 
8057 		/*
8058 		 * We created a kernel buffer corresponding to the
8059 		 * user buffer. Now copy in from the user buffer
8060 		 */
8061 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8062 				   (u32) (ioc->sgl[i].iov_len))) {
8063 			error = -EFAULT;
8064 			goto out;
8065 		}
8066 	}
8067 
8068 	if (ioc->sense_len) {
8069 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8070 					     &sense_handle, GFP_KERNEL);
8071 		if (!sense) {
8072 			error = -ENOMEM;
8073 			goto out;
8074 		}
8075 
8076 		sense_ptr =
8077 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8078 		if (instance->consistent_mask_64bit)
8079 			*sense_ptr = cpu_to_le64(sense_handle);
8080 		else
8081 			*sense_ptr = cpu_to_le32(sense_handle);
8082 	}
8083 
8084 	/*
8085 	 * Set the sync_cmd flag so that the ISR knows not to complete this
8086 	 * cmd to the SCSI mid-layer
8087 	 */
8088 	cmd->sync_cmd = 1;
8089 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
8090 		cmd->sync_cmd = 0;
8091 		dev_err(&instance->pdev->dev,
8092 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8093 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8094 			cmd->cmd_status_drv);
8095 		return -EBUSY;
8096 	}
8097 
8098 	cmd->sync_cmd = 0;
8099 
8100 	if (instance->unload == 1) {
8101 		dev_info(&instance->pdev->dev, "Driver unload is in progress "
8102 			"don't submit data to application\n");
8103 		goto out;
8104 	}
8105 	/*
8106 	 * copy out the kernel buffers to user buffers
8107 	 */
8108 	for (i = 0; i < ioc->sge_count; i++) {
8109 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8110 				 ioc->sgl[i].iov_len)) {
8111 			error = -EFAULT;
8112 			goto out;
8113 		}
8114 	}
8115 
8116 	/*
8117 	 * copy out the sense
8118 	 */
8119 	if (ioc->sense_len) {
8120 		/*
8121 		 * sense_ptr points to the location that has the user
8122 		 * sense buffer address
8123 		 */
8124 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8125 				ioc->sense_off);
8126 
8127 		if (copy_to_user((void __user *)((unsigned long)
8128 				 get_unaligned((unsigned long *)sense_ptr)),
8129 				 sense, ioc->sense_len)) {
8130 			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8131 					"sense data\n");
8132 			error = -EFAULT;
8133 			goto out;
8134 		}
8135 	}
8136 
8137 	/*
8138 	 * copy the status codes returned by the fw
8139 	 */
8140 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8141 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8142 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8143 		error = -EFAULT;
8144 	}
8145 
8146 out:
8147 	if (sense) {
8148 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8149 				    sense, sense_handle);
8150 	}
8151 
8152 	for (i = 0; i < ioc->sge_count; i++) {
8153 		if (kbuff_arr[i]) {
8154 			if (instance->consistent_mask_64bit)
8155 				dma_free_coherent(&instance->pdev->dev,
8156 					le32_to_cpu(kern_sge64[i].length),
8157 					kbuff_arr[i],
8158 					le64_to_cpu(kern_sge64[i].phys_addr));
8159 			else
8160 				dma_free_coherent(&instance->pdev->dev,
8161 					le32_to_cpu(kern_sge32[i].length),
8162 					kbuff_arr[i],
8163 					le32_to_cpu(kern_sge32[i].phys_addr));
8164 			kbuff_arr[i] = NULL;
8165 		}
8166 	}
8167 
8168 	megasas_return_cmd(instance, cmd);
8169 	return error;
8170 }
8171 
8172 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8173 {
8174 	struct megasas_iocpacket __user *user_ioc =
8175 	    (struct megasas_iocpacket __user *)arg;
8176 	struct megasas_iocpacket *ioc;
8177 	struct megasas_instance *instance;
8178 	int error;
8179 
8180 	ioc = memdup_user(user_ioc, sizeof(*ioc));
8181 	if (IS_ERR(ioc))
8182 		return PTR_ERR(ioc);
8183 
8184 	instance = megasas_lookup_instance(ioc->host_no);
8185 	if (!instance) {
8186 		error = -ENODEV;
8187 		goto out_kfree_ioc;
8188 	}
8189 
8190 	/* Block ioctls in VF mode */
8191 	if (instance->requestorId && !allow_vf_ioctls) {
8192 		error = -ENODEV;
8193 		goto out_kfree_ioc;
8194 	}
8195 
8196 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8197 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8198 		error = -ENODEV;
8199 		goto out_kfree_ioc;
8200 	}
8201 
8202 	if (instance->unload == 1) {
8203 		error = -ENODEV;
8204 		goto out_kfree_ioc;
8205 	}
8206 
8207 	if (down_interruptible(&instance->ioctl_sem)) {
8208 		error = -ERESTARTSYS;
8209 		goto out_kfree_ioc;
8210 	}
8211 
8212 	if  (megasas_wait_for_adapter_operational(instance)) {
8213 		error = -ENODEV;
8214 		goto out_up;
8215 	}
8216 
8217 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8218 out_up:
8219 	up(&instance->ioctl_sem);
8220 
8221 out_kfree_ioc:
8222 	kfree(ioc);
8223 	return error;
8224 }
8225 
8226 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8227 {
8228 	struct megasas_instance *instance;
8229 	struct megasas_aen aen;
8230 	int error;
8231 
8232 	if (file->private_data != file) {
8233 		printk(KERN_DEBUG "megasas: fasync_helper was not "
8234 		       "called first\n");
8235 		return -EINVAL;
8236 	}
8237 
8238 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8239 		return -EFAULT;
8240 
8241 	instance = megasas_lookup_instance(aen.host_no);
8242 
8243 	if (!instance)
8244 		return -ENODEV;
8245 
8246 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8247 		return -ENODEV;
8248 	}
8249 
8250 	if (instance->unload == 1) {
8251 		return -ENODEV;
8252 	}
8253 
8254 	if  (megasas_wait_for_adapter_operational(instance))
8255 		return -ENODEV;
8256 
8257 	mutex_lock(&instance->reset_mutex);
8258 	error = megasas_register_aen(instance, aen.seq_num,
8259 				     aen.class_locale_word);
8260 	mutex_unlock(&instance->reset_mutex);
8261 	return error;
8262 }
8263 
8264 /**
8265  * megasas_mgmt_ioctl -	char node ioctl entry point
8266  */
8267 static long
8268 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8269 {
8270 	switch (cmd) {
8271 	case MEGASAS_IOC_FIRMWARE:
8272 		return megasas_mgmt_ioctl_fw(file, arg);
8273 
8274 	case MEGASAS_IOC_GET_AEN:
8275 		return megasas_mgmt_ioctl_aen(file, arg);
8276 	}
8277 
8278 	return -ENOTTY;
8279 }
8280 
8281 #ifdef CONFIG_COMPAT
8282 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8283 {
8284 	struct compat_megasas_iocpacket __user *cioc =
8285 	    (struct compat_megasas_iocpacket __user *)arg;
8286 	struct megasas_iocpacket __user *ioc =
8287 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8288 	int i;
8289 	int error = 0;
8290 	compat_uptr_t ptr;
8291 	u32 local_sense_off;
8292 	u32 local_sense_len;
8293 	u32 user_sense_off;
8294 
8295 	if (clear_user(ioc, sizeof(*ioc)))
8296 		return -EFAULT;
8297 
8298 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8299 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8300 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8301 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8302 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8303 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8304 		return -EFAULT;
8305 
8306 	/*
8307 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8308 	 * sense_len is not null, so prepare the 64bit value under
8309 	 * the same condition.
8310 	 */
8311 	if (get_user(local_sense_off, &ioc->sense_off) ||
8312 		get_user(local_sense_len, &ioc->sense_len) ||
8313 		get_user(user_sense_off, &cioc->sense_off))
8314 		return -EFAULT;
8315 
8316 	if (local_sense_off != user_sense_off)
8317 		return -EINVAL;
8318 
8319 	if (local_sense_len) {
8320 		void __user **sense_ioc_ptr =
8321 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8322 		compat_uptr_t *sense_cioc_ptr =
8323 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8324 		if (get_user(ptr, sense_cioc_ptr) ||
8325 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8326 			return -EFAULT;
8327 	}
8328 
8329 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8330 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8331 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8332 		    copy_in_user(&ioc->sgl[i].iov_len,
8333 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8334 			return -EFAULT;
8335 	}
8336 
8337 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8338 
8339 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8340 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8341 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8342 		return -EFAULT;
8343 	}
8344 	return error;
8345 }
8346 
8347 static long
8348 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8349 			  unsigned long arg)
8350 {
8351 	switch (cmd) {
8352 	case MEGASAS_IOC_FIRMWARE32:
8353 		return megasas_mgmt_compat_ioctl_fw(file, arg);
8354 	case MEGASAS_IOC_GET_AEN:
8355 		return megasas_mgmt_ioctl_aen(file, arg);
8356 	}
8357 
8358 	return -ENOTTY;
8359 }
8360 #endif
8361 
8362 /*
8363  * File operations structure for management interface
8364  */
8365 static const struct file_operations megasas_mgmt_fops = {
8366 	.owner = THIS_MODULE,
8367 	.open = megasas_mgmt_open,
8368 	.fasync = megasas_mgmt_fasync,
8369 	.unlocked_ioctl = megasas_mgmt_ioctl,
8370 	.poll = megasas_mgmt_poll,
8371 #ifdef CONFIG_COMPAT
8372 	.compat_ioctl = megasas_mgmt_compat_ioctl,
8373 #endif
8374 	.llseek = noop_llseek,
8375 };
8376 
8377 /*
8378  * PCI hotplug support registration structure
8379  */
static struct pci_driver megasas_pci_driver = {
8382 	.name = "megaraid_sas",
8383 	.id_table = megasas_pci_table,
8384 	.probe = megasas_probe_one,
8385 	.remove = megasas_detach_one,
8386 	.suspend = megasas_suspend,
8387 	.resume = megasas_resume,
8388 	.shutdown = megasas_shutdown,
8389 };
8390 
8391 /*
8392  * Sysfs driver attributes
8393  */
8394 static ssize_t version_show(struct device_driver *dd, char *buf)
8395 {
8396 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8397 			MEGASAS_VERSION);
8398 }
8399 static DRIVER_ATTR_RO(version);
8400 
8401 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8402 {
8403 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8404 		MEGASAS_RELDATE);
8405 }
8406 static DRIVER_ATTR_RO(release_date);
8407 
8408 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8409 {
8410 	return sprintf(buf, "%u\n", support_poll_for_event);
8411 }
8412 static DRIVER_ATTR_RO(support_poll_for_event);
8413 
8414 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8415 {
8416 	return sprintf(buf, "%u\n", support_device_change);
8417 }
8418 static DRIVER_ATTR_RO(support_device_change);
8419 
8420 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8421 {
8422 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8423 }
8424 
8425 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8426 			     size_t count)
8427 {
8428 	int retval = count;
8429 
8430 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8431 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8432 		retval = -EINVAL;
8433 	}
8434 	return retval;
8435 }
8436 static DRIVER_ATTR_RW(dbg_lvl);
8437 
8438 static ssize_t
8439 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8440 {
8441 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8442 }
8443 
8444 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8445 
8446 static ssize_t
8447 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8448 {
8449 	return sprintf(buf, "%u\n", support_pci_lane_margining);
8450 }
8451 
8452 static DRIVER_ATTR_RO(support_pci_lane_margining);
8453 
8454 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8455 {
8456 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8457 	scsi_remove_device(sdev);
8458 	scsi_device_put(sdev);
8459 }
8460 
8461 /**
8462  * megasas_update_device_list -	Update the PD and LD device list from FW
8463  *				after an AEN event notification
8464  * @instance:			Adapter soft state
8465  * @event_type:			Indicates type of event (PD or LD event)
8466  *
8467  * @return:			Success or failure
8468  *
8469  * Issue DCMDs to Firmware to update the internal device list in driver.
 * Based on FW support, the driver sends either the HOST_DEVICE_LIST or a combination
8471  * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8472  */
8473 static
8474 int megasas_update_device_list(struct megasas_instance *instance,
8475 			       int event_type)
8476 {
8477 	int dcmd_ret = DCMD_SUCCESS;
8478 
8479 	if (instance->enable_fw_dev_list) {
8480 		dcmd_ret = megasas_host_device_list_query(instance, false);
8481 		if (dcmd_ret != DCMD_SUCCESS)
8482 			goto out;
8483 	} else {
8484 		if (event_type & SCAN_PD_CHANNEL) {
8485 			dcmd_ret = megasas_get_pd_list(instance);
8486 
8487 			if (dcmd_ret != DCMD_SUCCESS)
8488 				goto out;
8489 		}
8490 
8491 		if (event_type & SCAN_VD_CHANNEL) {
8492 			if (!instance->requestorId ||
8493 			    (instance->requestorId &&
8494 			     megasas_get_ld_vf_affiliation(instance, 0))) {
8495 				dcmd_ret = megasas_ld_list_query(instance,
8496 						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8497 				if (dcmd_ret != DCMD_SUCCESS)
8498 					goto out;
8499 			}
8500 		}
8501 	}
8502 
8503 out:
8504 	return dcmd_ret;
8505 }
8506 
8507 /**
8508  * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8509  *				after an AEN event notification
8510  * @instance:			Adapter soft state
8511  * @scan_type:			Indicates type of devices (PD/LD) to add
8512  * @return			void
8513  */
8514 static
8515 void megasas_add_remove_devices(struct megasas_instance *instance,
8516 				int scan_type)
8517 {
8518 	int i, j;
8519 	u16 pd_index = 0;
8520 	u16 ld_index = 0;
8521 	u16 channel = 0, id = 0;
8522 	struct Scsi_Host *host;
8523 	struct scsi_device *sdev1;
8524 	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8525 	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8526 
8527 	host = instance->host;
8528 
8529 	if (instance->enable_fw_dev_list) {
8530 		targetid_list = instance->host_device_list_buf;
8531 		for (i = 0; i < targetid_list->count; i++) {
8532 			targetid_entry = &targetid_list->host_device_list[i];
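			/*
			 * Firmware reports a flat target ID; fold it into the
			 * driver's channel/id scheme: system PDs land on the
			 * PD channels, everything else on the VD channels
			 * after MEGASAS_MAX_PD_CHANNELS.
			 */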
8533 			if (targetid_entry->flags.u.bits.is_sys_pd) {
8534 				channel = le16_to_cpu(targetid_entry->target_id) /
8535 						MEGASAS_MAX_DEV_PER_CHANNEL;
8536 				id = le16_to_cpu(targetid_entry->target_id) %
8537 						MEGASAS_MAX_DEV_PER_CHANNEL;
8538 			} else {
8539 				channel = MEGASAS_MAX_PD_CHANNELS +
8540 					  (le16_to_cpu(targetid_entry->target_id) /
8541 					   MEGASAS_MAX_DEV_PER_CHANNEL);
8542 				id = le16_to_cpu(targetid_entry->target_id) %
8543 						MEGASAS_MAX_DEV_PER_CHANNEL;
8544 			}
8545 			sdev1 = scsi_device_lookup(host, channel, id, 0);
8546 			if (!sdev1) {
8547 				scsi_add_device(host, channel, id, 0);
8548 			} else {
8549 				scsi_device_put(sdev1);
8550 			}
8551 		}
8552 	}
8553 
8554 	if (scan_type & SCAN_PD_CHANNEL) {
8555 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8556 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8557 				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8558 				sdev1 = scsi_device_lookup(host, i, j, 0);
8559 				if (instance->pd_list[pd_index].driveState ==
8560 							MR_PD_STATE_SYSTEM) {
8561 					if (!sdev1)
8562 						scsi_add_device(host, i, j, 0);
8563 					else
8564 						scsi_device_put(sdev1);
8565 				} else {
8566 					if (sdev1)
8567 						megasas_remove_scsi_device(sdev1);
8568 				}
8569 			}
8570 		}
8571 	}
8572 
8573 	if (scan_type & SCAN_VD_CHANNEL) {
8574 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8575 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8576 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8577 				sdev1 = scsi_device_lookup(host,
8578 						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8579 				if (instance->ld_ids[ld_index] != 0xff) {
8580 					if (!sdev1)
8581 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8582 					else
8583 						scsi_device_put(sdev1);
8584 				} else {
8585 					if (sdev1)
8586 						megasas_remove_scsi_device(sdev1);
8587 				}
8588 			}
8589 		}
8590 	}
8591 
8592 }
8593 
8594 static void
8595 megasas_aen_polling(struct work_struct *work)
8596 {
8597 	struct megasas_aen_event *ev =
8598 		container_of(work, struct megasas_aen_event, hotplug_work.work);
8599 	struct megasas_instance *instance = ev->instance;
8600 	union megasas_evt_class_locale class_locale;
8601 	int event_type = 0;
8602 	u32 seq_num;
8603 	int error;
8604 	u8  dcmd_ret = DCMD_SUCCESS;
8605 
8606 	if (!instance) {
8607 		printk(KERN_ERR "invalid instance!\n");
8608 		kfree(ev);
8609 		return;
8610 	}
8611 
8612 	/* Don't run the event workqueue thread if OCR is running */
8613 	mutex_lock(&instance->reset_mutex);
8614 
8615 	instance->ev = NULL;
8616 	if (instance->evt_detail) {
8617 		megasas_decode_evt(instance);
8618 
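		/* Map the firmware event code to the channel(s) to rescan */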
8619 		switch (le32_to_cpu(instance->evt_detail->code)) {
8620 
8621 		case MR_EVT_PD_INSERTED:
8622 		case MR_EVT_PD_REMOVED:
8623 			event_type = SCAN_PD_CHANNEL;
8624 			break;
8625 
8626 		case MR_EVT_LD_OFFLINE:
8627 		case MR_EVT_CFG_CLEARED:
8628 		case MR_EVT_LD_DELETED:
8629 		case MR_EVT_LD_CREATED:
8630 			event_type = SCAN_VD_CHANNEL;
8631 			break;
8632 
8633 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8634 		case MR_EVT_FOREIGN_CFG_IMPORTED:
8635 		case MR_EVT_LD_STATE_CHANGE:
8636 			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8637 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8638 				instance->host->host_no);
8639 			break;
8640 
8641 		case MR_EVT_CTRL_PROP_CHANGED:
8642 			dcmd_ret = megasas_get_ctrl_info(instance);
8643 			if (dcmd_ret == DCMD_SUCCESS &&
8644 			    instance->snapdump_wait_time) {
8645 				megasas_get_snapdump_properties(instance);
8646 				dev_info(&instance->pdev->dev,
8647 					 "Snap dump wait time\t: %d\n",
8648 					 instance->snapdump_wait_time);
8649 			}
8650 			break;
8651 		default:
8652 			event_type = 0;
8653 			break;
8654 		}
8655 	} else {
8656 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8657 		mutex_unlock(&instance->reset_mutex);
8658 		kfree(ev);
8659 		return;
8660 	}
8661 
8662 	if (event_type)
8663 		dcmd_ret = megasas_update_device_list(instance, event_type);
8664 
8665 	mutex_unlock(&instance->reset_mutex);
8666 
8667 	if (event_type && dcmd_ret == DCMD_SUCCESS)
8668 		megasas_add_remove_devices(instance, event_type);
8669 
8670 	if (dcmd_ret == DCMD_SUCCESS)
8671 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8672 	else
8673 		seq_num = instance->last_seq_num;
8674 
8675 	/* Register AEN with FW for latest sequence number plus 1 */
8676 	class_locale.members.reserved = 0;
8677 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
8678 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
8679 
8680 	if (instance->aen_cmd != NULL) {
8681 		kfree(ev);
8682 		return;
8683 	}
8684 
8685 	mutex_lock(&instance->reset_mutex);
8686 	error = megasas_register_aen(instance, seq_num,
8687 					class_locale.word);
8688 	if (error)
8689 		dev_err(&instance->pdev->dev,
8690 			"register aen failed error %x\n", error);
8691 
8692 	mutex_unlock(&instance->reset_mutex);
8693 	kfree(ev);
8694 }
8695 
8696 /**
8697  * megasas_init - Driver load entry point
8698  */
8699 static int __init megasas_init(void)
8700 {
8701 	int rval;
8702 
8703 	/*
	 * When booted in a kdump kernel, minimize the memory footprint by
	 * disabling a few features
8706 	 */
8707 	if (reset_devices) {
8708 		msix_vectors = 1;
8709 		rdpq_enable = 0;
8710 		dual_qdepth_disable = 1;
8711 	}
8712 
8713 	/*
8714 	 * Announce driver version and other information
8715 	 */
8716 	pr_info("megasas: %s\n", MEGASAS_VERSION);
8717 
8718 	spin_lock_init(&poll_aen_lock);
8719 
8720 	support_poll_for_event = 2;
8721 	support_device_change = 1;
8722 	support_nvme_encapsulation = true;
8723 	support_pci_lane_margining = true;
8724 
8725 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
8726 
8727 	/*
8728 	 * Register character device node
8729 	 */
8730 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
8731 
8732 	if (rval < 0) {
8733 		printk(KERN_DEBUG "megasas: failed to open device node\n");
8734 		return rval;
8735 	}
8736 
8737 	megasas_mgmt_majorno = rval;
8738 
8739 	megasas_init_debugfs();
8740 
8741 	/*
8742 	 * Register ourselves as PCI hotplug module
8743 	 */
8744 	rval = pci_register_driver(&megasas_pci_driver);
8745 
8746 	if (rval) {
8747 		printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
8748 		goto err_pcidrv;
8749 	}
8750 
8751 	rval = driver_create_file(&megasas_pci_driver.driver,
8752 				  &driver_attr_version);
8753 	if (rval)
8754 		goto err_dcf_attr_ver;
8755 
8756 	rval = driver_create_file(&megasas_pci_driver.driver,
8757 				  &driver_attr_release_date);
8758 	if (rval)
8759 		goto err_dcf_rel_date;
8760 
8761 	rval = driver_create_file(&megasas_pci_driver.driver,
8762 				&driver_attr_support_poll_for_event);
8763 	if (rval)
8764 		goto err_dcf_support_poll_for_event;
8765 
8766 	rval = driver_create_file(&megasas_pci_driver.driver,
8767 				  &driver_attr_dbg_lvl);
8768 	if (rval)
8769 		goto err_dcf_dbg_lvl;
8770 	rval = driver_create_file(&megasas_pci_driver.driver,
8771 				&driver_attr_support_device_change);
8772 	if (rval)
8773 		goto err_dcf_support_device_change;
8774 
8775 	rval = driver_create_file(&megasas_pci_driver.driver,
8776 				  &driver_attr_support_nvme_encapsulation);
8777 	if (rval)
8778 		goto err_dcf_support_nvme_encapsulation;
8779 
8780 	rval = driver_create_file(&megasas_pci_driver.driver,
8781 				  &driver_attr_support_pci_lane_margining);
8782 	if (rval)
8783 		goto err_dcf_support_pci_lane_margining;
8784 
8785 	return rval;
8786 
8787 err_dcf_support_pci_lane_margining:
8788 	driver_remove_file(&megasas_pci_driver.driver,
8789 			   &driver_attr_support_nvme_encapsulation);
8790 
8791 err_dcf_support_nvme_encapsulation:
8792 	driver_remove_file(&megasas_pci_driver.driver,
8793 			   &driver_attr_support_device_change);
8794 
8795 err_dcf_support_device_change:
8796 	driver_remove_file(&megasas_pci_driver.driver,
8797 			   &driver_attr_dbg_lvl);
8798 err_dcf_dbg_lvl:
8799 	driver_remove_file(&megasas_pci_driver.driver,
8800 			&driver_attr_support_poll_for_event);
8801 err_dcf_support_poll_for_event:
8802 	driver_remove_file(&megasas_pci_driver.driver,
8803 			   &driver_attr_release_date);
8804 err_dcf_rel_date:
8805 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8806 err_dcf_attr_ver:
8807 	pci_unregister_driver(&megasas_pci_driver);
8808 err_pcidrv:
8809 	megasas_exit_debugfs();
8810 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8811 	return rval;
8812 }
8813 
8814 /**
8815  * megasas_exit - Driver unload entry point
8816  */
8817 static void __exit megasas_exit(void)
8818 {
8819 	driver_remove_file(&megasas_pci_driver.driver,
8820 			   &driver_attr_dbg_lvl);
8821 	driver_remove_file(&megasas_pci_driver.driver,
8822 			&driver_attr_support_poll_for_event);
8823 	driver_remove_file(&megasas_pci_driver.driver,
8824 			&driver_attr_support_device_change);
8825 	driver_remove_file(&megasas_pci_driver.driver,
8826 			   &driver_attr_release_date);
8827 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8828 	driver_remove_file(&megasas_pci_driver.driver,
8829 			   &driver_attr_support_nvme_encapsulation);
8830 	driver_remove_file(&megasas_pci_driver.driver,
8831 			   &driver_attr_support_pci_lane_margining);
8832 
8833 	pci_unregister_driver(&megasas_pci_driver);
8834 	megasas_exit_debugfs();
8835 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8836 }
8837 
8838 module_init(megasas_init);
8839 module_exit(megasas_exit);
8840