1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2003-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  Authors: Avago Technologies
21  *           Sreenivas Bagalkote
22  *           Sumant Patro
23  *           Bo Yang
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53 
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_tcq.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61 
62 /*
63  * Number of sectors per IO command
64  * Will be set in megasas_init_mfi if user does not provide
65  */
66 static unsigned int max_sectors;
67 module_param_named(max_sectors, max_sectors, int, 0);
68 MODULE_PARM_DESC(max_sectors,
69 	"Maximum number of sectors per IO command");
70 
71 static int msix_disable;
72 module_param(msix_disable, int, S_IRUGO);
73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
74 
75 static unsigned int msix_vectors;
76 module_param(msix_vectors, int, S_IRUGO);
77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
78 
79 static int allow_vf_ioctls;
80 module_param(allow_vf_ioctls, int, S_IRUGO);
81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
82 
83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
84 module_param(throttlequeuedepth, int, S_IRUGO);
85 MODULE_PARM_DESC(throttlequeuedepth,
86 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
87 
88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89 module_param(resetwaittime, int, S_IRUGO);
90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
91 		 "before resetting adapter. Default: 180");
92 
93 int smp_affinity_enable = 1;
94 module_param(smp_affinity_enable, int, S_IRUGO);
95 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
96 
97 int rdpq_enable = 1;
98 module_param(rdpq_enable, int, S_IRUGO);
99 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
100 
101 unsigned int dual_qdepth_disable;
102 module_param(dual_qdepth_disable, int, S_IRUGO);
103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
104 
105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
106 module_param(scmd_timeout, int, S_IRUGO);
107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
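
/*
 * Illustrative only (not part of the driver): the parameters above are
 * normally supplied at module load time or on the kernel command line.
 * A minimal sketch, assuming the module is loaded as "megaraid_sas" and
 * using example values:
 *
 *	modprobe megaraid_sas msix_disable=1 max_sectors=128 scmd_timeout=60
 *
 * or, for a built-in driver, on the kernel command line:
 *
 *	megaraid_sas.rdpq_enable=0 megaraid_sas.throttlequeuedepth=32
 */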
108 
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(MEGASAS_VERSION);
111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
113 
114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115 static int megasas_get_pd_list(struct megasas_instance *instance);
116 static int megasas_ld_list_query(struct megasas_instance *instance,
117 				 u8 query_type);
118 static int megasas_issue_init_mfi(struct megasas_instance *instance);
119 static int megasas_register_aen(struct megasas_instance *instance,
120 				u32 seq_num, u32 class_locale_word);
121 static void megasas_get_pd_info(struct megasas_instance *instance,
122 				struct scsi_device *sdev);
123 static int megasas_get_target_prop(struct megasas_instance *instance,
124 				   struct scsi_device *sdev);
125 /*
126  * PCI ID table for all supported controllers
127  */
128 static struct pci_device_id megasas_pci_table[] = {
129 
130 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
131 	/* xscale IOP */
132 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
133 	/* ppc IOP */
134 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
135 	/* ppc IOP */
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
137 	/* gen2*/
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
139 	/* gen2*/
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
141 	/* skinny*/
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
143 	/* skinny*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
145 	/* xscale IOP, vega */
146 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
147 	/* xscale IOP */
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
149 	/* Fusion */
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
151 	/* Plasma */
152 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
153 	/* Invader */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
155 	/* Fury */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
157 	/* Intruder */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
159 	/* Intruder 24 port*/
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
162 	/* VENTURA */
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
168 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
169 	{}
170 };
171 
172 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
173 
174 static int megasas_mgmt_majorno;
175 struct megasas_mgmt_info megasas_mgmt_info;
176 static struct fasync_struct *megasas_async_queue;
177 static DEFINE_MUTEX(megasas_async_queue_mutex);
178 
179 static int megasas_poll_wait_aen;
180 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
181 static u32 support_poll_for_event;
182 u32 megasas_dbg_lvl;
183 static u32 support_device_change;
184 static bool support_nvme_encapsulation;
185 
186 /* define lock for aen poll */
187 spinlock_t poll_aen_lock;
188 
189 void
190 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
191 		     u8 alt_status);
192 static u32
193 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
194 static int
195 megasas_adp_reset_gen2(struct megasas_instance *instance,
196 		       struct megasas_register_set __iomem *reg_set);
197 static irqreturn_t megasas_isr(int irq, void *devp);
198 static u32
199 megasas_init_adapter_mfi(struct megasas_instance *instance);
200 u32
201 megasas_build_and_issue_cmd(struct megasas_instance *instance,
202 			    struct scsi_cmnd *scmd);
203 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
204 int
205 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
206 	int seconds);
207 void megasas_fusion_ocr_wq(struct work_struct *work);
208 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
209 					 int initial);
210 static int
211 megasas_set_dma_mask(struct megasas_instance *instance);
212 static int
213 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
214 static inline void
215 megasas_free_ctrl_mem(struct megasas_instance *instance);
216 static inline int
217 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
218 static inline void
219 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
220 static inline void
221 megasas_init_ctrl_params(struct megasas_instance *instance);
222 
223 /**
224  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
225  * @instance:			Adapter soft state
226  * @dcmd:			DCMD frame inside MFI command
227  * @dma_addr:			DMA address of buffer to be passed to FW
228  * @dma_len:			Length of DMA buffer to be passed to FW
229  * @return:			void
230  */
231 void megasas_set_dma_settings(struct megasas_instance *instance,
232 			      struct megasas_dcmd_frame *dcmd,
233 			      dma_addr_t dma_addr, u32 dma_len)
234 {
235 	if (instance->consistent_mask_64bit) {
236 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
237 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
238 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
239 
240 	} else {
241 		dcmd->sgl.sge32[0].phys_addr =
242 				cpu_to_le32(lower_32_bits(dma_addr));
243 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
244 		dcmd->flags = cpu_to_le16(dcmd->flags);
245 	}
246 }
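
/*
 * Illustrative sketch (not part of the driver): a typical DCMD caller fills
 * in the frame and lets megasas_set_dma_settings() choose the 32-bit vs
 * 64-bit SGE layout based on instance->consistent_mask_64bit. The buffer
 * variables below are hypothetical placeholders.
 *
 *	dcmd->sge_count = 1;
 *	dcmd->data_xfer_len = cpu_to_le32(buf_len);
 *	megasas_set_dma_settings(instance, dcmd, buf_dma_addr, buf_len);
 */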
247 
248 void
249 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
250 {
251 	instance->instancet->fire_cmd(instance,
252 		cmd->frame_phys_addr, 0, instance->reg_set);
253 	return;
254 }
255 
256 /**
257  * megasas_get_cmd -	Get a command from the free pool
258  * @instance:		Adapter soft state
259  *
260  * Returns a free command from the pool
261  */
262 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
263 						  *instance)
264 {
265 	unsigned long flags;
266 	struct megasas_cmd *cmd = NULL;
267 
268 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
269 
270 	if (!list_empty(&instance->cmd_pool)) {
271 		cmd = list_entry((&instance->cmd_pool)->next,
272 				 struct megasas_cmd, list);
273 		list_del_init(&cmd->list);
274 	} else {
275 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
276 	}
277 
278 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
279 	return cmd;
280 }
281 
282 /**
283  * megasas_return_cmd -	Return a cmd to free command pool
284  * @instance:		Adapter soft state
285  * @cmd:		Command packet to be returned to free command pool
286  */
287 void
288 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
289 {
290 	unsigned long flags;
291 	u32 blk_tags;
292 	struct megasas_cmd_fusion *cmd_fusion;
293 	struct fusion_context *fusion = instance->ctrl_context;
294 
295 	/* This flag is used only for fusion adapter.
296 	 * Wait for Interrupt for Polled mode DCMD
297 	 */
298 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
299 		return;
300 
301 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
302 
303 	if (fusion) {
304 		blk_tags = instance->max_scsi_cmds + cmd->index;
305 		cmd_fusion = fusion->cmd_list[blk_tags];
306 		megasas_return_cmd_fusion(instance, cmd_fusion);
307 	}
308 	cmd->scmd = NULL;
309 	cmd->frame_count = 0;
310 	cmd->flags = 0;
311 	memset(cmd->frame, 0, instance->mfi_frame_size);
312 	cmd->frame->io.context = cpu_to_le32(cmd->index);
313 	if (!fusion && reset_devices)
314 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
315 	list_add(&cmd->list, (&instance->cmd_pool)->next);
316 
317 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
318 
319 }
320 
321 static const char *
322 format_timestamp(uint32_t timestamp)
323 {
324 	static char buffer[32];
325 
326 	if ((timestamp & 0xff000000) == 0xff000000)
327 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
328 		0x00ffffff);
329 	else
330 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
331 	return buffer;
332 }
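
/*
 * Example outputs of format_timestamp(), derived from the code above: a
 * value whose top byte is 0xff is relative to boot, so 0xff00003c yields
 * "boot + 60s", while an absolute value such as 0x00000e10 yields "3600s".
 */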
333 
334 static const char *
335 format_class(int8_t class)
336 {
337 	static char buffer[6];
338 
339 	switch (class) {
340 	case MFI_EVT_CLASS_DEBUG:
341 		return "debug";
342 	case MFI_EVT_CLASS_PROGRESS:
343 		return "progress";
344 	case MFI_EVT_CLASS_INFO:
345 		return "info";
346 	case MFI_EVT_CLASS_WARNING:
347 		return "WARN";
348 	case MFI_EVT_CLASS_CRITICAL:
349 		return "CRIT";
350 	case MFI_EVT_CLASS_FATAL:
351 		return "FATAL";
352 	case MFI_EVT_CLASS_DEAD:
353 		return "DEAD";
354 	default:
355 		snprintf(buffer, sizeof(buffer), "%d", class);
356 		return buffer;
357 	}
358 }
359 
360 /**
361  * megasas_decode_evt -	Decode FW AEN event and print critical event
362  *				for information.
363  * @instance:			Adapter soft state
364  */
365 static void
366 megasas_decode_evt(struct megasas_instance *instance)
367 {
368 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
369 	union megasas_evt_class_locale class_locale;
370 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
371 
372 	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
373 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
374 			le32_to_cpu(evt_detail->seq_num),
375 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
376 			(class_locale.members.locale),
377 			format_class(class_locale.members.class),
378 			evt_detail->description);
379 }
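
/*
 * Example (derived from the format string above): a critical event is
 * logged roughly as
 *
 *	123 (boot + 60s/0x0020/CRIT) - <event description from firmware>
 *
 * i.e. <seq_num> (<timestamp>/<locale in hex>/<class>) - <description>.
 */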
380 
381 /**
382 *	The following functions are defined for xscale
383 *	(deviceid : 1064R, PERC5) controllers
384 */
385 
386 /**
387  * megasas_enable_intr_xscale -	Enables interrupts
388  * @instance:			Adapter soft state
389  */
390 static inline void
391 megasas_enable_intr_xscale(struct megasas_instance *instance)
392 {
393 	struct megasas_register_set __iomem *regs;
394 
395 	regs = instance->reg_set;
396 	writel(0, &(regs)->outbound_intr_mask);
397 
398 	/* Dummy readl to force pci flush */
399 	readl(&regs->outbound_intr_mask);
400 }
401 
402 /**
403  * megasas_disable_intr_xscale -	Disables interrupt
404  * @instance:			Adapter soft state
405  */
406 static inline void
407 megasas_disable_intr_xscale(struct megasas_instance *instance)
408 {
409 	struct megasas_register_set __iomem *regs;
410 	u32 mask = 0x1f;
411 
412 	regs = instance->reg_set;
413 	writel(mask, &regs->outbound_intr_mask);
414 	/* Dummy readl to force pci flush */
415 	readl(&regs->outbound_intr_mask);
416 }
417 
418 /**
419  * megasas_read_fw_status_reg_xscale - returns the current FW status value
420  * @regs:			MFI register set
421  */
422 static u32
423 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
424 {
425 	return readl(&(regs)->outbound_msg_0);
426 }
427 /**
428  * megasas_clear_intr_xscale -	Check & clear interrupt
429  * @regs:				MFI register set
430  */
431 static int
432 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
433 {
434 	u32 status;
435 	u32 mfiStatus = 0;
436 
437 	/*
438 	 * Check if it is our interrupt
439 	 */
440 	status = readl(&regs->outbound_intr_status);
441 
442 	if (status & MFI_OB_INTR_STATUS_MASK)
443 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
444 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
445 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
446 
447 	/*
448 	 * Clear the interrupt by writing back the same value
449 	 */
450 	if (mfiStatus)
451 		writel(status, &regs->outbound_intr_status);
452 
453 	/* Dummy readl to force pci flush */
454 	readl(&regs->outbound_intr_status);
455 
456 	return mfiStatus;
457 }
458 
459 /**
460  * megasas_fire_cmd_xscale -	Sends command to the FW
461  * @frame_phys_addr :		Physical address of cmd
462  * @frame_count :		Number of frames for the command
463  * @regs :			MFI register set
464  */
465 static inline void
466 megasas_fire_cmd_xscale(struct megasas_instance *instance,
467 		dma_addr_t frame_phys_addr,
468 		u32 frame_count,
469 		struct megasas_register_set __iomem *regs)
470 {
471 	unsigned long flags;
472 
473 	spin_lock_irqsave(&instance->hba_lock, flags);
474 	writel((frame_phys_addr >> 3)|(frame_count),
475 	       &(regs)->inbound_queue_port);
476 	spin_unlock_irqrestore(&instance->hba_lock, flags);
477 }
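
/*
 * Worked example for the register write above (illustrative values,
 * assuming 64-byte aligned MFI frames): the frame address is shifted right
 * by 3 and the frame count is OR-ed into the freed low bits. For
 * frame_phys_addr = 0xf0001000 and frame_count = 2:
 *
 *	(0xf0001000 >> 3) | 2 = 0x1e000200 | 2 = 0x1e000202
 */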
478 
479 /**
480  * megasas_adp_reset_xscale -  For controller reset
481  * @regs:                              MFI register set
482  */
483 static int
484 megasas_adp_reset_xscale(struct megasas_instance *instance,
485 	struct megasas_register_set __iomem *regs)
486 {
487 	u32 i;
488 	u32 pcidata;
489 
490 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
491 
492 	for (i = 0; i < 3; i++)
493 		msleep(1000); /* sleep for 3 secs */
494 	pcidata  = 0;
495 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
496 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
497 	if (pcidata & 0x2) {
498 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
499 		pcidata &= ~0x2;
500 		pci_write_config_dword(instance->pdev,
501 				MFI_1068_PCSR_OFFSET, pcidata);
502 
503 		for (i = 0; i < 2; i++)
504 			msleep(1000); /* need to wait 2 secs again */
505 
506 		pcidata  = 0;
507 		pci_read_config_dword(instance->pdev,
508 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
509 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
510 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
511 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
512 			pcidata = 0;
513 			pci_write_config_dword(instance->pdev,
514 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
515 		}
516 	}
517 	return 0;
518 }
519 
520 /**
521  * megasas_check_reset_xscale -	For controller reset check
522  * @regs:				MFI register set
523  */
524 static int
525 megasas_check_reset_xscale(struct megasas_instance *instance,
526 		struct megasas_register_set __iomem *regs)
527 {
528 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
529 	    (le32_to_cpu(*instance->consumer) ==
530 		MEGASAS_ADPRESET_INPROG_SIGN))
531 		return 1;
532 	return 0;
533 }
534 
535 static struct megasas_instance_template megasas_instance_template_xscale = {
536 
537 	.fire_cmd = megasas_fire_cmd_xscale,
538 	.enable_intr = megasas_enable_intr_xscale,
539 	.disable_intr = megasas_disable_intr_xscale,
540 	.clear_intr = megasas_clear_intr_xscale,
541 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
542 	.adp_reset = megasas_adp_reset_xscale,
543 	.check_reset = megasas_check_reset_xscale,
544 	.service_isr = megasas_isr,
545 	.tasklet = megasas_complete_cmd_dpc,
546 	.init_adapter = megasas_init_adapter_mfi,
547 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
548 	.issue_dcmd = megasas_issue_dcmd,
549 };
550 
551 /**
552 *	This is the end of set of functions & definitions specific
553 *	to xscale (deviceid : 1064R, PERC5) controllers
554 */
555 
556 /**
557 *	The following functions are defined for ppc (deviceid : 0x60)
558 *	controllers
559 */
560 
561 /**
562  * megasas_enable_intr_ppc -	Enables interrupts
563  * @instance:			Adapter soft state
564  */
565 static inline void
566 megasas_enable_intr_ppc(struct megasas_instance *instance)
567 {
568 	struct megasas_register_set __iomem *regs;
569 
570 	regs = instance->reg_set;
571 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
572 
573 	writel(~0x80000000, &(regs)->outbound_intr_mask);
574 
575 	/* Dummy readl to force pci flush */
576 	readl(&regs->outbound_intr_mask);
577 }
578 
579 /**
580  * megasas_disable_intr_ppc -	Disable interrupt
581  * @instance:			Adapter soft state
582  */
583 static inline void
584 megasas_disable_intr_ppc(struct megasas_instance *instance)
585 {
586 	struct megasas_register_set __iomem *regs;
587 	u32 mask = 0xFFFFFFFF;
588 
589 	regs = instance->reg_set;
590 	writel(mask, &regs->outbound_intr_mask);
591 	/* Dummy readl to force pci flush */
592 	readl(&regs->outbound_intr_mask);
593 }
594 
595 /**
596  * megasas_read_fw_status_reg_ppc - returns the current FW status value
597  * @regs:			MFI register set
598  */
599 static u32
600 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
601 {
602 	return readl(&(regs)->outbound_scratch_pad);
603 }
604 
605 /**
606  * megasas_clear_intr_ppc -	Check & clear interrupt
607  * @regs:				MFI register set
608  */
609 static int
610 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
611 {
612 	u32 status, mfiStatus = 0;
613 
614 	/*
615 	 * Check if it is our interrupt
616 	 */
617 	status = readl(&regs->outbound_intr_status);
618 
619 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
620 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
621 
622 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
623 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
624 
625 	/*
626 	 * Clear the interrupt by writing back the same value
627 	 */
628 	writel(status, &regs->outbound_doorbell_clear);
629 
630 	/* Dummy readl to force pci flush */
631 	readl(&regs->outbound_doorbell_clear);
632 
633 	return mfiStatus;
634 }
635 
636 /**
637  * megasas_fire_cmd_ppc -	Sends command to the FW
638  * @frame_phys_addr :		Physical address of cmd
639  * @frame_count :		Number of frames for the command
640  * @regs :			MFI register set
641  */
642 static inline void
643 megasas_fire_cmd_ppc(struct megasas_instance *instance,
644 		dma_addr_t frame_phys_addr,
645 		u32 frame_count,
646 		struct megasas_register_set __iomem *regs)
647 {
648 	unsigned long flags;
649 
650 	spin_lock_irqsave(&instance->hba_lock, flags);
651 	writel((frame_phys_addr | (frame_count<<1))|1,
652 			&(regs)->inbound_queue_port);
653 	spin_unlock_irqrestore(&instance->hba_lock, flags);
654 }
655 
656 /**
657  * megasas_check_reset_ppc -	For controller reset check
658  * @regs:				MFI register set
659  */
660 static int
661 megasas_check_reset_ppc(struct megasas_instance *instance,
662 			struct megasas_register_set __iomem *regs)
663 {
664 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
665 		return 1;
666 
667 	return 0;
668 }
669 
670 static struct megasas_instance_template megasas_instance_template_ppc = {
671 
672 	.fire_cmd = megasas_fire_cmd_ppc,
673 	.enable_intr = megasas_enable_intr_ppc,
674 	.disable_intr = megasas_disable_intr_ppc,
675 	.clear_intr = megasas_clear_intr_ppc,
676 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
677 	.adp_reset = megasas_adp_reset_xscale,
678 	.check_reset = megasas_check_reset_ppc,
679 	.service_isr = megasas_isr,
680 	.tasklet = megasas_complete_cmd_dpc,
681 	.init_adapter = megasas_init_adapter_mfi,
682 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
683 	.issue_dcmd = megasas_issue_dcmd,
684 };
685 
686 /**
687  * megasas_enable_intr_skinny -	Enables interrupts
688  * @instance:			Adapter soft state
689  */
690 static inline void
691 megasas_enable_intr_skinny(struct megasas_instance *instance)
692 {
693 	struct megasas_register_set __iomem *regs;
694 
695 	regs = instance->reg_set;
696 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
697 
698 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
699 
700 	/* Dummy readl to force pci flush */
701 	readl(&regs->outbound_intr_mask);
702 }
703 
704 /**
705  * megasas_disable_intr_skinny -	Disables interrupt
706  * @instance:			Adapter soft state
707  */
708 static inline void
709 megasas_disable_intr_skinny(struct megasas_instance *instance)
710 {
711 	struct megasas_register_set __iomem *regs;
712 	u32 mask = 0xFFFFFFFF;
713 
714 	regs = instance->reg_set;
715 	writel(mask, &regs->outbound_intr_mask);
716 	/* Dummy readl to force pci flush */
717 	readl(&regs->outbound_intr_mask);
718 }
719 
720 /**
721  * megasas_read_fw_status_reg_skinny - returns the current FW status value
722  * @regs:			MFI register set
723  */
724 static u32
725 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
726 {
727 	return readl(&(regs)->outbound_scratch_pad);
728 }
729 
730 /**
731  * megasas_clear_intr_skinny -	Check & clear interrupt
732  * @regs:				MFI register set
733  */
734 static int
735 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
736 {
737 	u32 status;
738 	u32 mfiStatus = 0;
739 
740 	/*
741 	 * Check if it is our interrupt
742 	 */
743 	status = readl(&regs->outbound_intr_status);
744 
745 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
746 		return 0;
747 	}
748 
749 	/*
750 	 * Check if the firmware is in a FAULT state
751 	 */
752 	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
753 	    MFI_STATE_FAULT) {
754 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
755 	} else
756 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
757 
758 	/*
759 	 * Clear the interrupt by writing back the same value
760 	 */
761 	writel(status, &regs->outbound_intr_status);
762 
763 	/*
764 	 * dummy read to flush PCI
765 	 */
766 	readl(&regs->outbound_intr_status);
767 
768 	return mfiStatus;
769 }
770 
771 /**
772  * megasas_fire_cmd_skinny -	Sends command to the FW
773  * @frame_phys_addr :		Physical address of cmd
774  * @frame_count :		Number of frames for the command
775  * @regs :			MFI register set
776  */
777 static inline void
778 megasas_fire_cmd_skinny(struct megasas_instance *instance,
779 			dma_addr_t frame_phys_addr,
780 			u32 frame_count,
781 			struct megasas_register_set __iomem *regs)
782 {
783 	unsigned long flags;
784 
785 	spin_lock_irqsave(&instance->hba_lock, flags);
786 	writel(upper_32_bits(frame_phys_addr),
787 	       &(regs)->inbound_high_queue_port);
788 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
789 	       &(regs)->inbound_low_queue_port);
790 	mmiowb();
791 	spin_unlock_irqrestore(&instance->hba_lock, flags);
792 }
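
/*
 * Illustrative note: unlike the xscale/ppc/gen2 variants, the skinny
 * controllers take a full 64-bit frame address split across two registers.
 * For example, frame_phys_addr = 0x1f0001000 with frame_count = 2 is posted
 * as 0x00000001 to inbound_high_queue_port and
 * (0xf0001000 | (2 << 1)) | 1 = 0xf0001005 to inbound_low_queue_port.
 */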
793 
794 /**
795  * megasas_check_reset_skinny -	For controller reset check
796  * @regs:				MFI register set
797  */
798 static int
799 megasas_check_reset_skinny(struct megasas_instance *instance,
800 				struct megasas_register_set __iomem *regs)
801 {
802 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
803 		return 1;
804 
805 	return 0;
806 }
807 
808 static struct megasas_instance_template megasas_instance_template_skinny = {
809 
810 	.fire_cmd = megasas_fire_cmd_skinny,
811 	.enable_intr = megasas_enable_intr_skinny,
812 	.disable_intr = megasas_disable_intr_skinny,
813 	.clear_intr = megasas_clear_intr_skinny,
814 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
815 	.adp_reset = megasas_adp_reset_gen2,
816 	.check_reset = megasas_check_reset_skinny,
817 	.service_isr = megasas_isr,
818 	.tasklet = megasas_complete_cmd_dpc,
819 	.init_adapter = megasas_init_adapter_mfi,
820 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
821 	.issue_dcmd = megasas_issue_dcmd,
822 };
823 
824 
825 /**
826 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
827 *	controllers
828 */
829 
830 /**
831  * megasas_enable_intr_gen2 -  Enables interrupts
832  * @instance:			Adapter soft state
833  */
834 static inline void
835 megasas_enable_intr_gen2(struct megasas_instance *instance)
836 {
837 	struct megasas_register_set __iomem *regs;
838 
839 	regs = instance->reg_set;
840 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
841 
842 	/* write ~0x00000005 (4 | 1) to the intr mask */
843 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
844 
845 	/* Dummy readl to force pci flush */
846 	readl(&regs->outbound_intr_mask);
847 }
848 
849 /**
850  * megasas_disable_intr_gen2 - Disables interrupt
851  * @instance:			Adapter soft state
852  */
853 static inline void
854 megasas_disable_intr_gen2(struct megasas_instance *instance)
855 {
856 	struct megasas_register_set __iomem *regs;
857 	u32 mask = 0xFFFFFFFF;
858 
859 	regs = instance->reg_set;
860 	writel(mask, &regs->outbound_intr_mask);
861 	/* Dummy readl to force pci flush */
862 	readl(&regs->outbound_intr_mask);
863 }
864 
865 /**
866  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
867  * @regs:                      MFI register set
868  */
869 static u32
870 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
871 {
872 	return readl(&(regs)->outbound_scratch_pad);
873 }
874 
875 /**
876  * megasas_clear_intr_gen2 -      Check & clear interrupt
877  * @regs:                              MFI register set
878  */
879 static int
880 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
881 {
882 	u32 status;
883 	u32 mfiStatus = 0;
884 
885 	/*
886 	 * Check if it is our interrupt
887 	 */
888 	status = readl(&regs->outbound_intr_status);
889 
890 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
891 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
892 	}
893 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
894 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
895 	}
896 
897 	/*
898 	 * Clear the interrupt by writing back the same value
899 	 */
900 	if (mfiStatus)
901 		writel(status, &regs->outbound_doorbell_clear);
902 
903 	/* Dummy readl to force pci flush */
904 	readl(&regs->outbound_intr_status);
905 
906 	return mfiStatus;
907 }
908 /**
909  * megasas_fire_cmd_gen2 -     Sends command to the FW
910  * @frame_phys_addr :          Physical address of cmd
911  * @frame_count :              Number of frames for the command
912  * @regs :                     MFI register set
913  */
914 static inline void
915 megasas_fire_cmd_gen2(struct megasas_instance *instance,
916 			dma_addr_t frame_phys_addr,
917 			u32 frame_count,
918 			struct megasas_register_set __iomem *regs)
919 {
920 	unsigned long flags;
921 
922 	spin_lock_irqsave(&instance->hba_lock, flags);
923 	writel((frame_phys_addr | (frame_count<<1))|1,
924 			&(regs)->inbound_queue_port);
925 	spin_unlock_irqrestore(&instance->hba_lock, flags);
926 }
927 
928 /**
929  * megasas_adp_reset_gen2 -	For controller reset
930  * @regs:				MFI register set
931  */
932 static int
933 megasas_adp_reset_gen2(struct megasas_instance *instance,
934 			struct megasas_register_set __iomem *reg_set)
935 {
936 	u32 retry = 0 ;
937 	u32 HostDiag;
938 	u32 __iomem *seq_offset = &reg_set->seq_offset;
939 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
940 
941 	if (instance->instancet == &megasas_instance_template_skinny) {
942 		seq_offset = &reg_set->fusion_seq_offset;
943 		hostdiag_offset = &reg_set->fusion_host_diag;
944 	}
945 
946 	writel(0, seq_offset);
947 	writel(4, seq_offset);
948 	writel(0xb, seq_offset);
949 	writel(2, seq_offset);
950 	writel(7, seq_offset);
951 	writel(0xd, seq_offset);
952 
953 	msleep(1000);
954 
955 	HostDiag = (u32)readl(hostdiag_offset);
956 
957 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
958 		msleep(100);
959 		HostDiag = (u32)readl(hostdiag_offset);
960 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
961 					retry, HostDiag);
962 
963 		if (retry++ >= 100)
964 			return 1;
965 
966 	}
967 
968 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
969 
970 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
971 
972 	ssleep(10);
973 
974 	HostDiag = (u32)readl(hostdiag_offset);
975 	while (HostDiag & DIAG_RESET_ADAPTER) {
976 		msleep(100);
977 		HostDiag = (u32)readl(hostdiag_offset);
978 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
979 				retry, HostDiag);
980 
981 		if (retry++ >= 1000)
982 			return 1;
983 
984 	}
985 	return 0;
986 }
987 
988 /**
989  * megasas_check_reset_gen2 -	For controller reset check
990  * @regs:				MFI register set
991  */
992 static int
993 megasas_check_reset_gen2(struct megasas_instance *instance,
994 		struct megasas_register_set __iomem *regs)
995 {
996 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
997 		return 1;
998 
999 	return 0;
1000 }
1001 
1002 static struct megasas_instance_template megasas_instance_template_gen2 = {
1003 
1004 	.fire_cmd = megasas_fire_cmd_gen2,
1005 	.enable_intr = megasas_enable_intr_gen2,
1006 	.disable_intr = megasas_disable_intr_gen2,
1007 	.clear_intr = megasas_clear_intr_gen2,
1008 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1009 	.adp_reset = megasas_adp_reset_gen2,
1010 	.check_reset = megasas_check_reset_gen2,
1011 	.service_isr = megasas_isr,
1012 	.tasklet = megasas_complete_cmd_dpc,
1013 	.init_adapter = megasas_init_adapter_mfi,
1014 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1015 	.issue_dcmd = megasas_issue_dcmd,
1016 };
1017 
1018 /**
1019 *	This is the end of set of functions & definitions
1020 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1021 */
1022 
1023 /*
1024  * Template added for TB (Fusion)
1025  */
1026 extern struct megasas_instance_template megasas_instance_template_fusion;
1027 
1028 /**
1029  * megasas_issue_polled -	Issues a polling command
1030  * @instance:			Adapter soft state
1031  * @cmd:			Command packet to be issued
1032  *
1033  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1034  */
1035 int
1036 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1037 {
1038 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1039 
1040 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1041 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1042 
1043 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1044 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1045 			__func__, __LINE__);
1046 		return DCMD_NOT_FIRED;
1047 	}
1048 
1049 	instance->instancet->issue_dcmd(instance, cmd);
1050 
1051 	return wait_and_poll(instance, cmd, instance->requestorId ?
1052 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1053 }
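
/*
 * Illustrative sketch (not part of the driver): a typical polled DCMD
 * sequence. The opcode and buffer below are hypothetical placeholders;
 * real callers use a specific MR_DCMD_* opcode and a DMA-coherent buffer.
 *
 *	cmd = megasas_get_cmd(instance);
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->sge_count = 1;
 *	dcmd->opcode = cpu_to_le32(some_dcmd_opcode);
 *	dcmd->data_xfer_len = cpu_to_le32(buf_len);
 *	megasas_set_dma_settings(instance, dcmd, buf_dma_addr, buf_len);
 *	ret = megasas_issue_polled(instance, cmd);
 *	megasas_return_cmd(instance, cmd);
 */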
1054 
1055 /**
1056  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1057  * @instance:			Adapter soft state
1058  * @cmd:			Command to be issued
1059  * @timeout:			Timeout in seconds
1060  *
1061  * This function waits on an event for the command to be returned from ISR.
1062  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1063  * Used to issue ioctl commands.
1064  */
1065 int
1066 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1067 			  struct megasas_cmd *cmd, int timeout)
1068 {
1069 	int ret = 0;
1070 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1071 
1072 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1073 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1074 			__func__, __LINE__);
1075 		return DCMD_NOT_FIRED;
1076 	}
1077 
1078 	instance->instancet->issue_dcmd(instance, cmd);
1079 
1080 	if (timeout) {
1081 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1082 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1083 		if (!ret) {
1084 			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1085 				__func__, __LINE__);
1086 			return DCMD_TIMEOUT;
1087 		}
1088 	} else
1089 		wait_event(instance->int_cmd_wait_q,
1090 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1091 
1092 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1093 		DCMD_SUCCESS : DCMD_FAILED;
1094 }
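
/*
 * Note on return values (as implemented above): DCMD_NOT_FIRED when the HBA
 * is in a critical error state, DCMD_TIMEOUT when the wait expires, and
 * otherwise DCMD_SUCCESS or DCMD_FAILED depending on cmd->cmd_status_drv.
 * Callers generally treat anything other than DCMD_SUCCESS as a failure.
 */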
1095 
1096 /**
1097  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1098  * @instance:				Adapter soft state
1099  * @cmd_to_abort:			Previously issued cmd to be aborted
1100  * @timeout:				Timeout in seconds
1101  *
1102  * MFI firmware can abort previously issued AEN command (automatic event
1103  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1104  * cmd and waits for return status.
1105  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1106  */
1107 static int
1108 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1109 				struct megasas_cmd *cmd_to_abort, int timeout)
1110 {
1111 	struct megasas_cmd *cmd;
1112 	struct megasas_abort_frame *abort_fr;
1113 	int ret = 0;
1114 
1115 	cmd = megasas_get_cmd(instance);
1116 
1117 	if (!cmd)
1118 		return -1;
1119 
1120 	abort_fr = &cmd->frame->abort;
1121 
1122 	/*
1123 	 * Prepare and issue the abort frame
1124 	 */
1125 	abort_fr->cmd = MFI_CMD_ABORT;
1126 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1127 	abort_fr->flags = cpu_to_le16(0);
1128 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1129 	abort_fr->abort_mfi_phys_addr_lo =
1130 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1131 	abort_fr->abort_mfi_phys_addr_hi =
1132 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1133 
1134 	cmd->sync_cmd = 1;
1135 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1136 
1137 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1138 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1139 			__func__, __LINE__);
1140 		return DCMD_NOT_FIRED;
1141 	}
1142 
1143 	instance->instancet->issue_dcmd(instance, cmd);
1144 
1145 	if (timeout) {
1146 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1147 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1148 		if (!ret) {
1149 			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1150 				__func__, __LINE__);
1151 			return DCMD_TIMEOUT;
1152 		}
1153 	} else
1154 		wait_event(instance->abort_cmd_wait_q,
1155 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1156 
1157 	cmd->sync_cmd = 0;
1158 
1159 	megasas_return_cmd(instance, cmd);
1160 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1161 		DCMD_SUCCESS : DCMD_FAILED;
1162 }
1163 
1164 /**
1165  * megasas_make_sgl32 -	Prepares 32-bit SGL
1166  * @instance:		Adapter soft state
1167  * @scp:		SCSI command from the mid-layer
1168  * @mfi_sgl:		SGL to be filled in
1169  *
1170  * If successful, this function returns the number of SG elements. Otherwise,
1171  * it returns -1.
1172  */
1173 static int
1174 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1175 		   union megasas_sgl *mfi_sgl)
1176 {
1177 	int i;
1178 	int sge_count;
1179 	struct scatterlist *os_sgl;
1180 
1181 	sge_count = scsi_dma_map(scp);
1182 	BUG_ON(sge_count < 0);
1183 
1184 	if (sge_count) {
1185 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1186 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1187 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1188 		}
1189 	}
1190 	return sge_count;
1191 }
1192 
1193 /**
1194  * megasas_make_sgl64 -	Prepares 64-bit SGL
1195  * @instance:		Adapter soft state
1196  * @scp:		SCSI command from the mid-layer
1197  * @mfi_sgl:		SGL to be filled in
1198  *
1199  * If successful, this function returns the number of SG elements. Otherwise,
1200  * it returns -1.
1201  */
1202 static int
1203 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1204 		   union megasas_sgl *mfi_sgl)
1205 {
1206 	int i;
1207 	int sge_count;
1208 	struct scatterlist *os_sgl;
1209 
1210 	sge_count = scsi_dma_map(scp);
1211 	BUG_ON(sge_count < 0);
1212 
1213 	if (sge_count) {
1214 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1215 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1216 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1217 		}
1218 	}
1219 	return sge_count;
1220 }
1221 
1222 /**
1223  * megasas_make_sgl_skinny - Prepares IEEE SGL
1224  * @instance:           Adapter soft state
1225  * @scp:                SCSI command from the mid-layer
1226  * @mfi_sgl:            SGL to be filled in
1227  *
1228  * If successful, this function returns the number of SG elements. Otherwise,
1229  * it returns -1.
1230  */
1231 static int
1232 megasas_make_sgl_skinny(struct megasas_instance *instance,
1233 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1234 {
1235 	int i;
1236 	int sge_count;
1237 	struct scatterlist *os_sgl;
1238 
1239 	sge_count = scsi_dma_map(scp);
1240 
1241 	if (sge_count) {
1242 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1243 			mfi_sgl->sge_skinny[i].length =
1244 				cpu_to_le32(sg_dma_len(os_sgl));
1245 			mfi_sgl->sge_skinny[i].phys_addr =
1246 				cpu_to_le64(sg_dma_address(os_sgl));
1247 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1248 		}
1249 	}
1250 	return sge_count;
1251 }
1252 
1253 /**
1254  * megasas_get_frame_count - Computes the number of frames
1255  * @frame_type		: type of frame - io or pthru frame
1256  * @sge_count		: number of sg elements
1257  *
1258  * Returns the number of frames required for number of sge's (sge_count)
1259  */
1260 
1261 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1262 			u8 sge_count, u8 frame_type)
1263 {
1264 	int num_cnt;
1265 	int sge_bytes;
1266 	u32 sge_sz;
1267 	u32 frame_count = 0;
1268 
1269 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1270 	    sizeof(struct megasas_sge32);
1271 
1272 	if (instance->flag_ieee) {
1273 		sge_sz = sizeof(struct megasas_sge_skinny);
1274 	}
1275 
1276 	/*
1277 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1278 	 * 3 SGEs for 32-bit SGLs for ldio &
1279 	 * 1 SGEs for 64-bit SGLs and
1280 	 * 2 SGEs for 32-bit SGLs for pthru frame
1281 	 */
1282 	if (unlikely(frame_type == PTHRU_FRAME)) {
1283 		if (instance->flag_ieee == 1) {
1284 			num_cnt = sge_count - 1;
1285 		} else if (IS_DMA64)
1286 			num_cnt = sge_count - 1;
1287 		else
1288 			num_cnt = sge_count - 2;
1289 	} else {
1290 		if (instance->flag_ieee == 1) {
1291 			num_cnt = sge_count - 1;
1292 		} else if (IS_DMA64)
1293 			num_cnt = sge_count - 2;
1294 		else
1295 			num_cnt = sge_count - 3;
1296 	}
1297 
1298 	if (num_cnt > 0) {
1299 		sge_bytes = sge_sz * num_cnt;
1300 
1301 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1302 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1303 	}
1304 	/* Main frame */
1305 	frame_count += 1;
1306 
1307 	if (frame_count > 7)
1308 		frame_count = 8;
1309 	return frame_count;
1310 }
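
/*
 * Worked example (illustrative, assuming 64-byte MFI frames and a 16-byte
 * IEEE/skinny SGE): an LDIO frame with flag_ieee set and sge_count = 10
 * gives num_cnt = 9, sge_bytes = 9 * 16 = 144, so 144/64 rounded up = 3
 * extra frames, plus the main frame = 4 total (capped at 8).
 */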
1311 
1312 /**
1313  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1314  * @instance:		Adapter soft state
1315  * @scp:		SCSI command
1316  * @cmd:		Command to be prepared in
1317  *
1318  * This function prepares CDB commands. These are typically pass-through
1319  * commands to the devices.
1320  */
1321 static int
1322 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1323 		   struct megasas_cmd *cmd)
1324 {
1325 	u32 is_logical;
1326 	u32 device_id;
1327 	u16 flags = 0;
1328 	struct megasas_pthru_frame *pthru;
1329 
1330 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1331 	device_id = MEGASAS_DEV_INDEX(scp);
1332 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1333 
1334 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1335 		flags = MFI_FRAME_DIR_WRITE;
1336 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1337 		flags = MFI_FRAME_DIR_READ;
1338 	else if (scp->sc_data_direction == PCI_DMA_NONE)
1339 		flags = MFI_FRAME_DIR_NONE;
1340 
1341 	if (instance->flag_ieee == 1) {
1342 		flags |= MFI_FRAME_IEEE;
1343 	}
1344 
1345 	/*
1346 	 * Prepare the DCDB frame
1347 	 */
1348 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1349 	pthru->cmd_status = 0x0;
1350 	pthru->scsi_status = 0x0;
1351 	pthru->target_id = device_id;
1352 	pthru->lun = scp->device->lun;
1353 	pthru->cdb_len = scp->cmd_len;
1354 	pthru->timeout = 0;
1355 	pthru->pad_0 = 0;
1356 	pthru->flags = cpu_to_le16(flags);
1357 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1358 
1359 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1360 
1361 	/*
1362 	 * If the command is for the tape device, set the
1363 	 * pthru timeout to the os layer timeout value.
1364 	 */
1365 	if (scp->device->type == TYPE_TAPE) {
1366 		if ((scp->request->timeout / HZ) > 0xFFFF)
1367 			pthru->timeout = cpu_to_le16(0xFFFF);
1368 		else
1369 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1370 	}
1371 
1372 	/*
1373 	 * Construct SGL
1374 	 */
1375 	if (instance->flag_ieee == 1) {
1376 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1377 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1378 						      &pthru->sgl);
1379 	} else if (IS_DMA64) {
1380 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1381 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1382 						      &pthru->sgl);
1383 	} else
1384 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1385 						      &pthru->sgl);
1386 
1387 	if (pthru->sge_count > instance->max_num_sge) {
1388 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1389 			pthru->sge_count);
1390 		return 0;
1391 	}
1392 
1393 	/*
1394 	 * Sense info specific
1395 	 */
1396 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1397 	pthru->sense_buf_phys_addr_hi =
1398 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1399 	pthru->sense_buf_phys_addr_lo =
1400 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1401 
1402 	/*
1403 	 * Compute the total number of frames this command consumes. FW uses
1404 	 * this number to pull sufficient number of frames from host memory.
1405 	 */
1406 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1407 							PTHRU_FRAME);
1408 
1409 	return cmd->frame_count;
1410 }
1411 
1412 /**
1413  * megasas_build_ldio -	Prepares IOs to logical devices
1414  * @instance:		Adapter soft state
1415  * @scp:		SCSI command
1416  * @cmd:		Command to be prepared
1417  *
1418  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1419  */
1420 static int
1421 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1422 		   struct megasas_cmd *cmd)
1423 {
1424 	u32 device_id;
1425 	u8 sc = scp->cmnd[0];
1426 	u16 flags = 0;
1427 	struct megasas_io_frame *ldio;
1428 
1429 	device_id = MEGASAS_DEV_INDEX(scp);
1430 	ldio = (struct megasas_io_frame *)cmd->frame;
1431 
1432 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1433 		flags = MFI_FRAME_DIR_WRITE;
1434 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1435 		flags = MFI_FRAME_DIR_READ;
1436 
1437 	if (instance->flag_ieee == 1) {
1438 		flags |= MFI_FRAME_IEEE;
1439 	}
1440 
1441 	/*
1442 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1443 	 */
1444 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1445 	ldio->cmd_status = 0x0;
1446 	ldio->scsi_status = 0x0;
1447 	ldio->target_id = device_id;
1448 	ldio->timeout = 0;
1449 	ldio->reserved_0 = 0;
1450 	ldio->pad_0 = 0;
1451 	ldio->flags = cpu_to_le16(flags);
1452 	ldio->start_lba_hi = 0;
1453 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1454 
1455 	/*
1456 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1457 	 */
1458 	if (scp->cmd_len == 6) {
1459 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1460 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1461 						 ((u32) scp->cmnd[2] << 8) |
1462 						 (u32) scp->cmnd[3]);
1463 
1464 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1465 	}
1466 
1467 	/*
1468 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1469 	 */
1470 	else if (scp->cmd_len == 10) {
1471 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1472 					      ((u32) scp->cmnd[7] << 8));
1473 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1474 						 ((u32) scp->cmnd[3] << 16) |
1475 						 ((u32) scp->cmnd[4] << 8) |
1476 						 (u32) scp->cmnd[5]);
1477 	}
1478 
1479 	/*
1480 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1481 	 */
1482 	else if (scp->cmd_len == 12) {
1483 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1484 					      ((u32) scp->cmnd[7] << 16) |
1485 					      ((u32) scp->cmnd[8] << 8) |
1486 					      (u32) scp->cmnd[9]);
1487 
1488 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1489 						 ((u32) scp->cmnd[3] << 16) |
1490 						 ((u32) scp->cmnd[4] << 8) |
1491 						 (u32) scp->cmnd[5]);
1492 	}
1493 
1494 	/*
1495 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1496 	 */
1497 	else if (scp->cmd_len == 16) {
1498 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1499 					      ((u32) scp->cmnd[11] << 16) |
1500 					      ((u32) scp->cmnd[12] << 8) |
1501 					      (u32) scp->cmnd[13]);
1502 
1503 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1504 						 ((u32) scp->cmnd[7] << 16) |
1505 						 ((u32) scp->cmnd[8] << 8) |
1506 						 (u32) scp->cmnd[9]);
1507 
1508 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1509 						 ((u32) scp->cmnd[3] << 16) |
1510 						 ((u32) scp->cmnd[4] << 8) |
1511 						 (u32) scp->cmnd[5]);
1512 
1513 	}
1514 
1515 	/*
1516 	 * Construct SGL
1517 	 */
1518 	if (instance->flag_ieee) {
1519 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1520 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1521 					      &ldio->sgl);
1522 	} else if (IS_DMA64) {
1523 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1524 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1525 	} else
1526 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1527 
1528 	if (ldio->sge_count > instance->max_num_sge) {
1529 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1530 			ldio->sge_count);
1531 		return 0;
1532 	}
1533 
1534 	/*
1535 	 * Sense info specific
1536 	 */
1537 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1538 	ldio->sense_buf_phys_addr_hi = 0;
1539 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1540 
1541 	/*
1542 	 * Compute the total number of frames this command consumes. FW uses
1543 	 * this number to pull sufficient number of frames from host memory.
1544 	 */
1545 	cmd->frame_count = megasas_get_frame_count(instance,
1546 			ldio->sge_count, IO_FRAME);
1547 
1548 	return cmd->frame_count;
1549 }
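
/*
 * Worked example for the CDB decoding above (illustrative): a READ(10) CDB
 * of 28 00 00 00 10 00 00 00 08 00 yields
 *
 *	start_lba_lo = (0x00 << 24) | (0x00 << 16) | (0x10 << 8) | 0x00 = 0x1000
 *	lba_count    = 0x08 | (0x00 << 8) = 8
 *
 * i.e. an 8-block read starting at LBA 4096, with start_lba_hi left at 0.
 */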
1550 
1551 /**
1552  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1553  *				and whether it's RW or non RW
1554  * @scmd:			SCSI command
1555  *
1556  */
1557 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1558 {
1559 	int ret;
1560 
1561 	switch (cmd->cmnd[0]) {
1562 	case READ_10:
1563 	case WRITE_10:
1564 	case READ_12:
1565 	case WRITE_12:
1566 	case READ_6:
1567 	case WRITE_6:
1568 	case READ_16:
1569 	case WRITE_16:
1570 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1571 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1572 		break;
1573 	default:
1574 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1575 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1576 	}
1577 	return ret;
1578 }
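
/*
 * Examples (derived from the switch above): READ(10) to a logical drive is
 * READ_WRITE_LDIO, READ(10) to a system PD is READ_WRITE_SYSPDIO, and
 * non-RW commands such as INQUIRY or TEST UNIT READY fall into the
 * corresponding NON_READ_WRITE_* category.
 */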
1579 
1580  /**
1581  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1582  *					in FW
1583  * @instance:				Adapter soft state
1584  */
1585 static inline void
1586 megasas_dump_pending_frames(struct megasas_instance *instance)
1587 {
1588 	struct megasas_cmd *cmd;
1589 	int i,n;
1590 	union megasas_sgl *mfi_sgl;
1591 	struct megasas_io_frame *ldio;
1592 	struct megasas_pthru_frame *pthru;
1593 	u32 sgcount;
1594 	u16 max_cmd = instance->max_fw_cmds;
1595 
1596 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1597 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1598 	if (IS_DMA64)
1599 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1600 	else
1601 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1602 
1603 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1604 	for (i = 0; i < max_cmd; i++) {
1605 		cmd = instance->cmd_list[i];
1606 		if (!cmd->scmd)
1607 			continue;
1608 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1609 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1610 			ldio = (struct megasas_io_frame *)cmd->frame;
1611 			mfi_sgl = &ldio->sgl;
1612 			sgcount = ldio->sge_count;
1613 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1614 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1615 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1616 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1617 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1618 		} else {
1619 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1620 			mfi_sgl = &pthru->sgl;
1621 			sgcount = pthru->sge_count;
1622 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1623 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1624 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1625 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1626 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1627 		}
1628 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1629 			for (n = 0; n < sgcount; n++) {
1630 				if (IS_DMA64)
1631 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1632 						le32_to_cpu(mfi_sgl->sge64[n].length),
1633 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1634 				else
1635 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1636 						le32_to_cpu(mfi_sgl->sge32[n].length),
1637 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1638 			}
1639 		}
1640 	} /*for max_cmd*/
1641 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1642 	for (i = 0; i < max_cmd; i++) {
1643 
1644 		cmd = instance->cmd_list[i];
1645 
1646 		if (cmd->sync_cmd == 1)
1647 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1648 	}
1649 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1650 }
1651 
1652 u32
1653 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1654 			    struct scsi_cmnd *scmd)
1655 {
1656 	struct megasas_cmd *cmd;
1657 	u32 frame_count;
1658 
1659 	cmd = megasas_get_cmd(instance);
1660 	if (!cmd)
1661 		return SCSI_MLQUEUE_HOST_BUSY;
1662 
1663 	/*
1664 	 * Logical drive command
1665 	 */
1666 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1667 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1668 	else
1669 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1670 
1671 	if (!frame_count)
1672 		goto out_return_cmd;
1673 
1674 	cmd->scmd = scmd;
1675 	scmd->SCp.ptr = (char *)cmd;
1676 
1677 	/*
1678 	 * Issue the command to the FW
1679 	 */
1680 	atomic_inc(&instance->fw_outstanding);
1681 
1682 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1683 				cmd->frame_count-1, instance->reg_set);
1684 
1685 	return 0;
1686 out_return_cmd:
1687 	megasas_return_cmd(instance, cmd);
1688 	return SCSI_MLQUEUE_HOST_BUSY;
1689 }
1690 
1691 
1692 /**
1693  * megasas_queue_command -	Queue entry point
1694  * @scmd:			SCSI command to be queued
1695  * @done:			Callback entry point
1696  */
1697 static int
1698 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1699 {
1700 	struct megasas_instance *instance;
1701 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1702 
1703 	instance = (struct megasas_instance *)
1704 	    scmd->device->host->hostdata;
1705 
1706 	if (instance->unload == 1) {
1707 		scmd->result = DID_NO_CONNECT << 16;
1708 		scmd->scsi_done(scmd);
1709 		return 0;
1710 	}
1711 
1712 	if (instance->issuepend_done == 0)
1713 		return SCSI_MLQUEUE_HOST_BUSY;
1714 
1715 
1716 	/* Check for an mpio path and adjust behavior */
1717 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1718 		if (megasas_check_mpio_paths(instance, scmd) ==
1719 		    (DID_REQUEUE << 16)) {
1720 			return SCSI_MLQUEUE_HOST_BUSY;
1721 		} else {
1722 			scmd->result = DID_NO_CONNECT << 16;
1723 			scmd->scsi_done(scmd);
1724 			return 0;
1725 		}
1726 	}
1727 
1728 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1729 		scmd->result = DID_NO_CONNECT << 16;
1730 		scmd->scsi_done(scmd);
1731 		return 0;
1732 	}
1733 
1734 	mr_device_priv_data = scmd->device->hostdata;
1735 	if (!mr_device_priv_data) {
1736 		scmd->result = DID_NO_CONNECT << 16;
1737 		scmd->scsi_done(scmd);
1738 		return 0;
1739 	}
1740 
1741 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1742 		return SCSI_MLQUEUE_HOST_BUSY;
1743 
1744 	if (mr_device_priv_data->tm_busy)
1745 		return SCSI_MLQUEUE_DEVICE_BUSY;
1746 
1747 
1748 	scmd->result = 0;
1749 
1750 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1751 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1752 		scmd->device->lun)) {
1753 		scmd->result = DID_BAD_TARGET << 16;
1754 		goto out_done;
1755 	}
1756 
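	/*
	 * If the firmware does not advertise SYNCHRONIZE_CACHE support for
	 * logical drives, complete the command here with success instead of
	 * sending it down to the controller.
	 */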
1757 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1758 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1759 	    (!instance->fw_sync_cache_support)) {
1760 		scmd->result = DID_OK << 16;
1761 		goto out_done;
1762 	}
1763 
1764 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1765 
1766  out_done:
1767 	scmd->scsi_done(scmd);
1768 	return 0;
1769 }
1770 
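/* Find the adapter instance that owns the SCSI host with number @host_no */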
1771 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1772 {
1773 	int i;
1774 
1775 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1776 
1777 		if ((megasas_mgmt_info.instance[i]) &&
1778 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1779 			return megasas_mgmt_info.instance[i];
1780 	}
1781 
1782 	return NULL;
1783 }
1784 
1785 /*
1786  * megasas_set_dynamic_target_properties -
1787  * Device properties set by the driver may not be static and need to be
1788  * updated after an OCR.
1789  *
1790  * set tm_capable.
1791  * set dma alignment (only for EEDP protection enabled VDs).
1792  *
1793  * @sdev: OS provided scsi device
1794  *
1795  * Returns void
1796  */
1797 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1798 {
1799 	u16 pd_index = 0, ld;
1800 	u32 device_id;
1801 	struct megasas_instance *instance;
1802 	struct fusion_context *fusion;
1803 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1804 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1805 	struct MR_LD_RAID *raid;
1806 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1807 
1808 	instance = megasas_lookup_instance(sdev->host->host_no);
1809 	fusion = instance->ctrl_context;
1810 	mr_device_priv_data = sdev->hostdata;
1811 
1812 	if (!fusion || !mr_device_priv_data)
1813 		return;
1814 
1815 	if (MEGASAS_IS_LOGICAL(sdev)) {
1816 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1817 					+ sdev->id;
1818 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1819 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1820 		if (ld >= instance->fw_supported_vd_count)
1821 			return;
1822 		raid = MR_LdRaidGet(ld, local_map_ptr);
1823 
1824 		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1825 			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1826 
1827 		mr_device_priv_data->is_tm_capable =
1828 			raid->capability.tmCapable;
1829 	} else if (instance->use_seqnum_jbod_fp) {
1830 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1831 			sdev->id;
1832 		pd_sync = (void *)fusion->pd_seq_sync
1833 				[(instance->pd_seq_map_id - 1) & 1];
1834 		mr_device_priv_data->is_tm_capable =
1835 			pd_sync->seq[pd_index].capability.tmCapable;
1836 	}
1837 }
1838 
1839 /*
1840  * megasas_set_nvme_device_properties -
1841  * set nomerges=2
1842  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1843  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1844  *
1845  * MR firmware provides the value in KB. The caller of this function
1846  * converts KB into bytes.
1847  *
1848  * e.g. MDTS=5 means 2^5 * NVMe page size. With a 4K page size the MR
1849  * firmware reports 128, i.e. (32 * 4K) = 128K.
1850  *
1851  * @sdev:				scsi device
1852  * @max_io_size:				maximum io transfer size
1853  *
1854  */
1855 static inline void
1856 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1857 {
1858 	struct megasas_instance *instance;
1859 	u32 mr_nvme_pg_size;
1860 
1861 	instance = (struct megasas_instance *)sdev->host->hostdata;
1862 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1863 				MR_DEFAULT_NVME_PAGE_SIZE);
1864 
1865 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1866 
1867 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1868 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1869 }
1870 
1871 
1872 /*
1873  * megasas_set_static_target_properties -
1874  * Device properties set by the driver are static and do not need to be
1875  * updated after an OCR.
1876  *
1877  * set io timeout
1878  * set device queue depth
1879  * set nvme device properties. see - megasas_set_nvme_device_properties
1880  *
1881  * @sdev:				scsi device
1882  * @is_target_prop:			true, if FW provided target properties.
1883  */
1884 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1885 						 bool is_target_prop)
1886 {
1887 	u16	target_index = 0;
1888 	u8 interface_type;
1889 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1890 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1891 	u32 tgt_device_qd;
1892 	struct megasas_instance *instance;
1893 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1894 
1895 	instance = megasas_lookup_instance(sdev->host->host_no);
1896 	mr_device_priv_data = sdev->hostdata;
1897 	interface_type  = mr_device_priv_data->interface_type;
1898 
1899 	/*
1900 	 * The RAID firmware may require extended timeouts.
1901 	 */
1902 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1903 
1904 	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1905 
1906 	switch (interface_type) {
1907 	case SAS_PD:
1908 		device_qd = MEGASAS_SAS_QD;
1909 		break;
1910 	case SATA_PD:
1911 		device_qd = MEGASAS_SATA_QD;
1912 		break;
1913 	case NVME_PD:
1914 		device_qd = MEGASAS_NVME_QD;
1915 		break;
1916 	}
1917 
1918 	if (is_target_prop) {
1919 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1920 		if (tgt_device_qd &&
1921 		    (tgt_device_qd <= instance->host->can_queue))
1922 			device_qd = tgt_device_qd;
1923 
1924 		/* max_io_size_kb will be non-zero for
1925 		 * NVMe based VDs and SysPDs.
1926 		 */
1927 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1928 	}
1929 
1930 	if (instance->nvme_page_size && max_io_size_kb)
1931 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1932 
1933 	scsi_change_queue_depth(sdev, device_qd);
1934 
1935 }
1936 
1937 
1938 static int megasas_slave_configure(struct scsi_device *sdev)
1939 {
1940 	u16 pd_index = 0;
1941 	struct megasas_instance *instance;
1942 	int ret_target_prop = DCMD_FAILED;
1943 	bool is_target_prop = false;
1944 
1945 	instance = megasas_lookup_instance(sdev->host->host_no);
1946 	if (instance->pd_list_not_supported) {
1947 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1948 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1949 				sdev->id;
1950 			if (instance->pd_list[pd_index].driveState !=
1951 				MR_PD_STATE_SYSTEM)
1952 				return -ENXIO;
1953 		}
1954 	}
1955 
1956 	mutex_lock(&instance->reset_mutex);
1957 	/* Send DCMD to Firmware and cache the information */
1958 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1959 		megasas_get_pd_info(instance, sdev);
1960 
1961 	/* Some Ventura firmware may not have instance->nvme_page_size set.
1962 	 * In that case do not send MR_DCMD_DRV_GET_TARGET_PROP.
1963 	 */
1964 	if ((instance->tgt_prop) && (instance->nvme_page_size))
1965 		ret_target_prop = megasas_get_target_prop(instance, sdev);
1966 
1967 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1968 	megasas_set_static_target_properties(sdev, is_target_prop);
1969 
1970 	mutex_unlock(&instance->reset_mutex);
1971 
1972 	/* This sdev property may change post OCR */
1973 	megasas_set_dynamic_target_properties(sdev);
1974 
1975 	return 0;
1976 }
1977 
1978 static int megasas_slave_alloc(struct scsi_device *sdev)
1979 {
1980 	u16 pd_index = 0;
1981 	struct megasas_instance *instance;
1982 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1983 
1984 	instance = megasas_lookup_instance(sdev->host->host_no);
1985 	if (!MEGASAS_IS_LOGICAL(sdev)) {
1986 		/*
1987 		 * Open the OS scan to the SYSTEM PD
1988 		 */
1989 		pd_index =
1990 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1991 			sdev->id;
1992 		if ((instance->pd_list_not_supported ||
1993 			instance->pd_list[pd_index].driveState ==
1994 			MR_PD_STATE_SYSTEM)) {
1995 			goto scan_target;
1996 		}
1997 		return -ENXIO;
1998 	}
1999 
2000 scan_target:
2001 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2002 					GFP_KERNEL);
2003 	if (!mr_device_priv_data)
2004 		return -ENOMEM;
2005 	sdev->hostdata = mr_device_priv_data;
2006 
2007 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2008 		   instance->r1_ldio_hint_default);
2009 	return 0;
2010 }
2011 
2012 static void megasas_slave_destroy(struct scsi_device *sdev)
2013 {
2014 	kfree(sdev->hostdata);
2015 	sdev->hostdata = NULL;
2016 }
2017 
2018 /*
2019  * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2020  *                                       kill adapter
2021  * @instance:				Adapter soft state
2022  *
2023  */
2024 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2025 {
2026 	int i;
2027 	struct megasas_cmd *cmd_mfi;
2028 	struct megasas_cmd_fusion *cmd_fusion;
2029 	struct fusion_context *fusion = instance->ctrl_context;
2030 
2031 	/* Find all outstanding ioctls */
2032 	if (fusion) {
2033 		for (i = 0; i < instance->max_fw_cmds; i++) {
2034 			cmd_fusion = fusion->cmd_list[i];
2035 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2036 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2037 				if (cmd_mfi->sync_cmd &&
2038 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2039 					cmd_mfi->frame->hdr.cmd_status =
2040 							MFI_STAT_WRONG_STATE;
2041 					megasas_complete_cmd(instance,
2042 							     cmd_mfi, DID_OK);
2043 				}
2044 			}
2045 		}
2046 	} else {
2047 		for (i = 0; i < instance->max_fw_cmds; i++) {
2048 			cmd_mfi = instance->cmd_list[i];
2049 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2050 				MFI_CMD_ABORT)
2051 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2052 		}
2053 	}
2054 }
2055 
2056 
2057 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2058 {
2059 	/* Set critical error to block I/O & ioctls in case caller didn't */
2060 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2061 	/* Wait 1 second to ensure that IOs or ioctls being built have been posted */
2062 	msleep(1000);
2063 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2064 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2065 		(instance->adapter_type != MFI_SERIES)) {
2066 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2067 		/* Flush */
2068 		readl(&instance->reg_set->doorbell);
2069 		if (instance->requestorId && instance->peerIsPresent)
2070 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2071 	} else {
2072 		writel(MFI_STOP_ADP,
2073 			&instance->reg_set->inbound_doorbell);
2074 	}
2075 	/* Complete outstanding ioctls when adapter is killed */
2076 	megasas_complete_outstanding_ioctls(instance);
2077 }
2078 
2079 /**
2080  * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2081  *					restored to max value
2082  * @instance:			Adapter soft state
2083  *
2084  */
2085 void
2086 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2087 {
2088 	unsigned long flags;
2089 
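	/*
	 * Restore the full can_queue only after the adapter has been
	 * throttled for at least 5 seconds and the number of outstanding
	 * IOs has dropped to or below the throttle queue depth.
	 */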
2090 	if (instance->flag & MEGASAS_FW_BUSY
2091 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2092 	    && atomic_read(&instance->fw_outstanding) <
2093 	    instance->throttlequeuedepth + 1) {
2094 
2095 		spin_lock_irqsave(instance->host->host_lock, flags);
2096 		instance->flag &= ~MEGASAS_FW_BUSY;
2097 
2098 		instance->host->can_queue = instance->cur_can_queue;
2099 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2100 	}
2101 }
2102 
2103 /**
2104  * megasas_complete_cmd_dpc -	Completes commands posted by the FW
2105  * @instance_addr:			Address of adapter soft state
2106  *
2107  * Tasklet to complete cmds
2108  */
2109 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2110 {
2111 	u32 producer;
2112 	u32 consumer;
2113 	u32 context;
2114 	struct megasas_cmd *cmd;
2115 	struct megasas_instance *instance =
2116 				(struct megasas_instance *)instance_addr;
2117 	unsigned long flags;
2118 
2119 	/* If we have already declared the adapter dead, do not complete cmds */
2120 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2121 		return;
2122 
2123 	spin_lock_irqsave(&instance->completion_lock, flags);
2124 
2125 	producer = le32_to_cpu(*instance->producer);
2126 	consumer = le32_to_cpu(*instance->consumer);
2127 
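	/*
	 * Walk the reply ring from consumer to producer. Each entry holds
	 * the context (command index) of a frame the FW has completed; the
	 * ring has max_fw_cmds + 1 slots, hence the wrap point below.
	 */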
2128 	while (consumer != producer) {
2129 		context = le32_to_cpu(instance->reply_queue[consumer]);
2130 		if (context >= instance->max_fw_cmds) {
2131 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2132 				context);
2133 			BUG();
2134 		}
2135 
2136 		cmd = instance->cmd_list[context];
2137 
2138 		megasas_complete_cmd(instance, cmd, DID_OK);
2139 
2140 		consumer++;
2141 		if (consumer == (instance->max_fw_cmds + 1)) {
2142 			consumer = 0;
2143 		}
2144 	}
2145 
2146 	*instance->consumer = cpu_to_le32(producer);
2147 
2148 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2149 
2150 	/*
2151 	 * Check if we can restore can_queue
2152 	 */
2153 	megasas_check_and_restore_queue_depth(instance);
2154 }
2155 
2156 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2157 
2158 /**
2159  * megasas_start_timer - Initializes the SR-IOV heartbeat timer object
2160  * @instance:		Adapter soft state
2161  *
2162  */
2163 void megasas_start_timer(struct megasas_instance *instance)
2164 {
2165 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2166 
2167 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2168 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2169 	add_timer(timer);
2170 }
2171 
2172 static void
2173 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2174 
2175 static void
2176 process_fw_state_change_wq(struct work_struct *work);
2177 
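/*
 * megasas_do_ocr -	Kick off an online controller reset (OCR)
 * @instance:		Adapter soft state
 *
 * Disables interrupts, marks the adapter as being in the internal reset
 * state machine, defers the currently pending internal commands and then
 * runs the FW state change work handler directly.
 */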
2178 void megasas_do_ocr(struct megasas_instance *instance)
2179 {
2180 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2181 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2182 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2183 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2184 	}
2185 	instance->instancet->disable_intr(instance);
2186 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2187 	instance->issuepend_done = 0;
2188 
2189 	atomic_set(&instance->fw_outstanding, 0);
2190 	megasas_internal_reset_defer_cmds(instance);
2191 	process_fw_state_change_wq(&instance->work_init);
2192 }
2193 
2194 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2195 					    int initial)
2196 {
2197 	struct megasas_cmd *cmd;
2198 	struct megasas_dcmd_frame *dcmd;
2199 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2200 	dma_addr_t new_affiliation_111_h;
2201 	int ld, retval = 0;
2202 	u8 thisVf;
2203 
2204 	cmd = megasas_get_cmd(instance);
2205 
2206 	if (!cmd) {
2207 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2208 		       "Failed to get cmd for scsi%d\n",
2209 			instance->host->host_no);
2210 		return -ENOMEM;
2211 	}
2212 
2213 	dcmd = &cmd->frame->dcmd;
2214 
2215 	if (!instance->vf_affiliation_111) {
2216 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2217 		       "affiliation for scsi%d\n", instance->host->host_no);
2218 		megasas_return_cmd(instance, cmd);
2219 		return -ENOMEM;
2220 	}
2221 
2222 	if (initial)
2223 		memset(instance->vf_affiliation_111, 0,
2224 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2225 	else {
2226 		new_affiliation_111 =
2227 			pci_alloc_consistent(instance->pdev,
2228 					     sizeof(struct MR_LD_VF_AFFILIATION_111),
2229 					     &new_affiliation_111_h);
2230 		if (!new_affiliation_111) {
2231 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2232 			       "memory for new affiliation for scsi%d\n",
2233 			       instance->host->host_no);
2234 			megasas_return_cmd(instance, cmd);
2235 			return -ENOMEM;
2236 		}
2237 		memset(new_affiliation_111, 0,
2238 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2239 	}
2240 
2241 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2242 
2243 	dcmd->cmd = MFI_CMD_DCMD;
2244 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2245 	dcmd->sge_count = 1;
2246 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2247 	dcmd->timeout = 0;
2248 	dcmd->pad_0 = 0;
2249 	dcmd->data_xfer_len =
2250 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2251 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2252 
2253 	if (initial)
2254 		dcmd->sgl.sge32[0].phys_addr =
2255 			cpu_to_le32(instance->vf_affiliation_111_h);
2256 	else
2257 		dcmd->sgl.sge32[0].phys_addr =
2258 			cpu_to_le32(new_affiliation_111_h);
2259 
2260 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2261 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2262 
2263 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2264 	       "scsi%d\n", instance->host->host_no);
2265 
2266 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2267 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2268 		       " failed with status 0x%x for scsi%d\n",
2269 		       dcmd->cmd_status, instance->host->host_no);
2270 		retval = 1; /* Do a scan if we couldn't get affiliation */
2271 		goto out;
2272 	}
2273 
2274 	if (!initial) {
2275 		thisVf = new_affiliation_111->thisVf;
2276 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2277 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2278 			    new_affiliation_111->map[ld].policy[thisVf]) {
2279 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2280 				       "Got new LD/VF affiliation for scsi%d\n",
2281 				       instance->host->host_no);
2282 				memcpy(instance->vf_affiliation_111,
2283 				       new_affiliation_111,
2284 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2285 				retval = 1;
2286 				goto out;
2287 			}
2288 	}
2289 out:
2290 	if (new_affiliation_111) {
2291 		pci_free_consistent(instance->pdev,
2292 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2293 				    new_affiliation_111,
2294 				    new_affiliation_111_h);
2295 	}
2296 
2297 	megasas_return_cmd(instance, cmd);
2298 
2299 	return retval;
2300 }
2301 
2302 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2303 					    int initial)
2304 {
2305 	struct megasas_cmd *cmd;
2306 	struct megasas_dcmd_frame *dcmd;
2307 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2308 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2309 	dma_addr_t new_affiliation_h;
2310 	int i, j, retval = 0, found = 0, doscan = 0;
2311 	u8 thisVf;
2312 
2313 	cmd = megasas_get_cmd(instance);
2314 
2315 	if (!cmd) {
2316 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2317 		       "Failed to get cmd for scsi%d\n",
2318 		       instance->host->host_no);
2319 		return -ENOMEM;
2320 	}
2321 
2322 	dcmd = &cmd->frame->dcmd;
2323 
2324 	if (!instance->vf_affiliation) {
2325 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2326 		       "affiliation for scsi%d\n", instance->host->host_no);
2327 		megasas_return_cmd(instance, cmd);
2328 		return -ENOMEM;
2329 	}
2330 
2331 	if (initial)
2332 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2333 		       sizeof(struct MR_LD_VF_AFFILIATION));
2334 	else {
2335 		new_affiliation =
2336 			pci_alloc_consistent(instance->pdev,
2337 					     (MAX_LOGICAL_DRIVES + 1) *
2338 					     sizeof(struct MR_LD_VF_AFFILIATION),
2339 					     &new_affiliation_h);
2340 		if (!new_affiliation) {
2341 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2342 			       "memory for new affiliation for scsi%d\n",
2343 			       instance->host->host_no);
2344 			megasas_return_cmd(instance, cmd);
2345 			return -ENOMEM;
2346 		}
2347 		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2348 		       sizeof(struct MR_LD_VF_AFFILIATION));
2349 	}
2350 
2351 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2352 
2353 	dcmd->cmd = MFI_CMD_DCMD;
2354 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2355 	dcmd->sge_count = 1;
2356 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2357 	dcmd->timeout = 0;
2358 	dcmd->pad_0 = 0;
2359 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2360 		sizeof(struct MR_LD_VF_AFFILIATION));
2361 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2362 
2363 	if (initial)
2364 		dcmd->sgl.sge32[0].phys_addr =
2365 			cpu_to_le32(instance->vf_affiliation_h);
2366 	else
2367 		dcmd->sgl.sge32[0].phys_addr =
2368 			cpu_to_le32(new_affiliation_h);
2369 
2370 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2371 		sizeof(struct MR_LD_VF_AFFILIATION));
2372 
2373 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2374 	       "scsi%d\n", instance->host->host_no);
2375 
2376 
2377 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2378 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2379 		       " failed with status 0x%x for scsi%d\n",
2380 		       dcmd->cmd_status, instance->host->host_no);
2381 		retval = 1; /* Do a scan if we couldn't get affiliation */
2382 		goto out;
2383 	}
2384 
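	/*
	 * Compare the new affiliation map with the saved one in both
	 * directions: a changed policy for this VF, or an LD that becomes
	 * visible to or disappears from this VF, requires a rescan.
	 */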
2385 	if (!initial) {
2386 		if (!new_affiliation->ldCount) {
2387 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2388 			       "affiliation for passive path for scsi%d\n",
2389 			       instance->host->host_no);
2390 			retval = 1;
2391 			goto out;
2392 		}
2393 		newmap = new_affiliation->map;
2394 		savedmap = instance->vf_affiliation->map;
2395 		thisVf = new_affiliation->thisVf;
2396 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2397 			found = 0;
2398 			for (j = 0; j < instance->vf_affiliation->ldCount;
2399 			     j++) {
2400 				if (newmap->ref.targetId ==
2401 				    savedmap->ref.targetId) {
2402 					found = 1;
2403 					if (newmap->policy[thisVf] !=
2404 					    savedmap->policy[thisVf]) {
2405 						doscan = 1;
2406 						goto out;
2407 					}
2408 				}
2409 				savedmap = (struct MR_LD_VF_MAP *)
2410 					((unsigned char *)savedmap +
2411 					 savedmap->size);
2412 			}
2413 			if (!found && newmap->policy[thisVf] !=
2414 			    MR_LD_ACCESS_HIDDEN) {
2415 				doscan = 1;
2416 				goto out;
2417 			}
2418 			newmap = (struct MR_LD_VF_MAP *)
2419 				((unsigned char *)newmap + newmap->size);
2420 		}
2421 
2422 		newmap = new_affiliation->map;
2423 		savedmap = instance->vf_affiliation->map;
2424 
2425 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2426 			found = 0;
2427 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2428 				if (savedmap->ref.targetId ==
2429 				    newmap->ref.targetId) {
2430 					found = 1;
2431 					if (savedmap->policy[thisVf] !=
2432 					    newmap->policy[thisVf]) {
2433 						doscan = 1;
2434 						goto out;
2435 					}
2436 				}
2437 				newmap = (struct MR_LD_VF_MAP *)
2438 					((unsigned char *)newmap +
2439 					 newmap->size);
2440 			}
2441 			if (!found && savedmap->policy[thisVf] !=
2442 			    MR_LD_ACCESS_HIDDEN) {
2443 				doscan = 1;
2444 				goto out;
2445 			}
2446 			savedmap = (struct MR_LD_VF_MAP *)
2447 				((unsigned char *)savedmap +
2448 				 savedmap->size);
2449 		}
2450 	}
2451 out:
2452 	if (doscan) {
2453 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2454 		       "affiliation for scsi%d\n", instance->host->host_no);
2455 		memcpy(instance->vf_affiliation, new_affiliation,
2456 		       new_affiliation->size);
2457 		retval = 1;
2458 	}
2459 
2460 	if (new_affiliation)
2461 		pci_free_consistent(instance->pdev,
2462 				    (MAX_LOGICAL_DRIVES + 1) *
2463 				    sizeof(struct MR_LD_VF_AFFILIATION),
2464 				    new_affiliation, new_affiliation_h);
2465 	megasas_return_cmd(instance, cmd);
2466 
2467 	return retval;
2468 }
2469 
2470 /* This function will get the current SR-IOV LD/VF affiliation */
2471 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2472 	int initial)
2473 {
2474 	int retval;
2475 
2476 	if (instance->PlasmaFW111)
2477 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2478 	else
2479 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2480 	return retval;
2481 }
2482 
2483 /* This function will tell FW to start the SR-IOV heartbeat */
2484 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2485 					 int initial)
2486 {
2487 	struct megasas_cmd *cmd;
2488 	struct megasas_dcmd_frame *dcmd;
2489 	int retval = 0;
2490 
2491 	cmd = megasas_get_cmd(instance);
2492 
2493 	if (!cmd) {
2494 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2495 		       "Failed to get cmd for scsi%d\n",
2496 		       instance->host->host_no);
2497 		return -ENOMEM;
2498 	}
2499 
2500 	dcmd = &cmd->frame->dcmd;
2501 
2502 	if (initial) {
2503 		instance->hb_host_mem =
2504 			pci_zalloc_consistent(instance->pdev,
2505 					      sizeof(struct MR_CTRL_HB_HOST_MEM),
2506 					      &instance->hb_host_mem_h);
2507 		if (!instance->hb_host_mem) {
2508 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2509 			       " memory for heartbeat host memory for scsi%d\n",
2510 			       instance->host->host_no);
2511 			retval = -ENOMEM;
2512 			goto out;
2513 		}
2514 	}
2515 
2516 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2517 
2518 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2519 	dcmd->cmd = MFI_CMD_DCMD;
2520 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2521 	dcmd->sge_count = 1;
2522 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2523 	dcmd->timeout = 0;
2524 	dcmd->pad_0 = 0;
2525 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2526 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2527 
2528 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2529 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2530 
2531 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2532 	       instance->host->host_no);
2533 
2534 	if ((instance->adapter_type != MFI_SERIES) &&
2535 	    !instance->mask_interrupts)
2536 		retval = megasas_issue_blocked_cmd(instance, cmd,
2537 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2538 	else
2539 		retval = megasas_issue_polled(instance, cmd);
2540 
2541 	if (retval) {
2542 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2543 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2544 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2545 			"timed out" : "failed", instance->host->host_no);
2546 		retval = 1;
2547 	}
2548 
2549 out:
2550 	megasas_return_cmd(instance, cmd);
2551 
2552 	return retval;
2553 }
2554 
2555 /* Handler for SR-IOV heartbeat */
2556 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2557 {
2558 	struct megasas_instance *instance =
2559 		from_timer(instance, t, sriov_heartbeat_timer);
2560 
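	/*
	 * The FW side of the heartbeat advances fwCounter while healthy.
	 * Mirror it into driverCounter and re-arm the timer; if it has not
	 * advanced since the last check, schedule the recovery work.
	 */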
2561 	if (instance->hb_host_mem->HB.fwCounter !=
2562 	    instance->hb_host_mem->HB.driverCounter) {
2563 		instance->hb_host_mem->HB.driverCounter =
2564 			instance->hb_host_mem->HB.fwCounter;
2565 		mod_timer(&instance->sriov_heartbeat_timer,
2566 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2567 	} else {
2568 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2569 		       "completed for scsi%d\n", instance->host->host_no);
2570 		schedule_work(&instance->work_init);
2571 	}
2572 }
2573 
2574 /**
2575  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2576  * @instance:				Adapter soft state
2577  *
2578  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2579  * complete all its outstanding commands. Returns error if one or more IOs
2580  * are pending after this time period. It also marks the controller dead.
2581  */
2582 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2583 {
2584 	int i, sl, outstanding;
2585 	u32 reset_index;
2586 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2587 	unsigned long flags;
2588 	struct list_head clist_local;
2589 	struct megasas_cmd *reset_cmd;
2590 	u32 fw_state;
2591 
2592 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2593 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2594 		__func__, __LINE__);
2595 		return FAILED;
2596 	}
2597 
2598 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2599 
2600 		INIT_LIST_HEAD(&clist_local);
2601 		spin_lock_irqsave(&instance->hba_lock, flags);
2602 		list_splice_init(&instance->internal_reset_pending_q,
2603 				&clist_local);
2604 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2605 
2606 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2607 		for (i = 0; i < wait_time; i++) {
2608 			msleep(1000);
2609 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2610 				break;
2611 		}
2612 
2613 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2614 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2615 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2616 			return FAILED;
2617 		}
2618 
2619 		reset_index = 0;
2620 		while (!list_empty(&clist_local)) {
2621 			reset_cmd = list_entry((&clist_local)->next,
2622 						struct megasas_cmd, list);
2623 			list_del_init(&reset_cmd->list);
2624 			if (reset_cmd->scmd) {
2625 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2626 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2627 					reset_index, reset_cmd,
2628 					reset_cmd->scmd->cmnd[0]);
2629 
2630 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2631 				megasas_return_cmd(instance, reset_cmd);
2632 			} else if (reset_cmd->sync_cmd) {
2633 				dev_notice(&instance->pdev->dev, "%p synch cmd "
2634 						"on the reset queue\n",
2635 						reset_cmd);
2636 
2637 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2638 				instance->instancet->fire_cmd(instance,
2639 						reset_cmd->frame_phys_addr,
2640 						0, instance->reg_set);
2641 			} else {
2642 				dev_notice(&instance->pdev->dev, "%p unexpected "
2643 					"cmd on the reset list\n",
2644 					reset_cmd);
2645 			}
2646 			reset_index++;
2647 		}
2648 
2649 		return SUCCESS;
2650 	}
2651 
2652 	for (i = 0; i < resetwaittime; i++) {
2653 		outstanding = atomic_read(&instance->fw_outstanding);
2654 
2655 		if (!outstanding)
2656 			break;
2657 
2658 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2659 			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2660 			       "commands to complete\n", i, outstanding);
2661 			/*
2662 			 * Call the cmd completion routine so that cmds are
2663 			 * completed directly without depending on the ISR.
2664 			 */
2665 			megasas_complete_cmd_dpc((unsigned long)instance);
2666 		}
2667 
2668 		msleep(1000);
2669 	}
2670 
2671 	i = 0;
2672 	outstanding = atomic_read(&instance->fw_outstanding);
2673 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2674 
2675 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2676 		goto no_outstanding;
2677 
2678 	if (instance->disableOnlineCtrlReset)
2679 		goto kill_hba_and_failed;
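	/*
	 * Attempt OCR up to three times; if the FW is still faulted or IOs
	 * remain outstanding after that, kill the adapter.
	 */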
2680 	do {
2681 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2682 			dev_info(&instance->pdev->dev,
2683 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2684 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2685 			if (i == 3)
2686 				goto kill_hba_and_failed;
2687 			megasas_do_ocr(instance);
2688 
2689 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2690 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2691 				__func__, __LINE__);
2692 				return FAILED;
2693 			}
2694 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2695 				__func__, __LINE__);
2696 
2697 			for (sl = 0; sl < 10; sl++)
2698 				msleep(500);
2699 
2700 			outstanding = atomic_read(&instance->fw_outstanding);
2701 
2702 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2703 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2704 				goto no_outstanding;
2705 		}
2706 		i++;
2707 	} while (i <= 3);
2708 
2709 no_outstanding:
2710 
2711 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2712 		__func__, __LINE__);
2713 	return SUCCESS;
2714 
2715 kill_hba_and_failed:
2716 
2717 	/* Reset not supported, kill adapter */
2718 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2719 		" disableOnlineCtrlReset %d fw_outstanding %d\n",
2720 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2721 		atomic_read(&instance->fw_outstanding));
2722 	megasas_dump_pending_frames(instance);
2723 	megaraid_sas_kill_hba(instance);
2724 
2725 	return FAILED;
2726 }
2727 
2728 /**
2729  * megasas_generic_reset -	Generic reset routine
2730  * @scmd:			Mid-layer SCSI command
2731  *
2732  * This routine implements a generic reset handler for device, bus and host
2733  * reset requests. Device, bus and host specific reset handlers can use this
2734  * function after they do their specific tasks.
2735  */
2736 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2737 {
2738 	int ret_val;
2739 	struct megasas_instance *instance;
2740 
2741 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2742 
2743 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2744 		 scmd->cmnd[0], scmd->retries);
2745 
2746 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2747 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2748 		return FAILED;
2749 	}
2750 
2751 	ret_val = megasas_wait_for_outstanding(instance);
2752 	if (ret_val == SUCCESS)
2753 		dev_notice(&instance->pdev->dev, "reset successful\n");
2754 	else
2755 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2756 
2757 	return ret_val;
2758 }
2759 
2760 /**
2761  * megasas_reset_timer - quiesce the adapter if required
2762  * @scmd:		scsi cmnd
2763  *
2764  * Sets the FW busy flag and reduces the host->can_queue if the
2765  * cmd has not been completed within the timeout period.
2766  */
2767 static enum
2768 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2769 {
2770 	struct megasas_instance *instance;
2771 	unsigned long flags;
2772 
2773 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2774 				(scmd_timeout * 2) * HZ)) {
2775 		return BLK_EH_NOT_HANDLED;
2776 	}
2777 
2778 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2779 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2780 		/* A cmd timed out: treat the FW as busy and throttle IO */
2781 		spin_lock_irqsave(instance->host->host_lock, flags);
2782 
2783 		instance->host->can_queue = instance->throttlequeuedepth;
2784 		instance->last_time = jiffies;
2785 		instance->flag |= MEGASAS_FW_BUSY;
2786 
2787 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2788 	}
2789 	return BLK_EH_RESET_TIMER;
2790 }
2791 
2792 /**
2793  * megasas_dump_frame -	Dump the MPT/MFI frame at @mpi_request (@sz bytes)
2794  */
2795 static inline void
2796 megasas_dump_frame(void *mpi_request, int sz)
2797 {
2798 	int i;
2799 	__le32 *mfp = (__le32 *)mpi_request;
2800 
2801 	printk(KERN_INFO "IO request frame:\n\t");
2802 	for (i = 0; i < sz / sizeof(__le32); i++) {
2803 		if (i && ((i % 8) == 0))
2804 			printk("\n\t");
2805 		printk("%08x ", le32_to_cpu(mfp[i]));
2806 	}
2807 	printk("\n");
2808 }
2809 
2810 /**
2811  * megasas_reset_bus_host -	Bus & host reset handler entry point
2812  */
2813 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2814 {
2815 	int ret;
2816 	struct megasas_instance *instance;
2817 
2818 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2819 
2820 	scmd_printk(KERN_INFO, scmd,
2821 		"Controller reset is requested due to IO timeout\n"
2822 		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
2823 		" SCSI host busy: %d\t FW outstanding: %d\n",
2824 		scmd, scmd->device->host->shost_state,
2825 		atomic_read((atomic_t *)&scmd->device->host->host_busy),
2826 		atomic_read(&instance->fw_outstanding));
2827 
2828 	/*
2829 	 * First wait for all commands to complete
2830 	 */
2831 	if (instance->adapter_type == MFI_SERIES) {
2832 		ret = megasas_generic_reset(scmd);
2833 	} else {
2834 		struct megasas_cmd_fusion *cmd;
2835 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2836 		if (cmd)
2837 			megasas_dump_frame(cmd->io_request,
2838 				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2839 		ret = megasas_reset_fusion(scmd->device->host,
2840 				SCSIIO_TIMEOUT_OCR);
2841 	}
2842 
2843 	return ret;
2844 }
2845 
2846 /**
2847  * megasas_task_abort - Issues task abort request to firmware
2848  *			(supported only for fusion adapters)
2849  * @scmd:		SCSI command pointer
2850  */
2851 static int megasas_task_abort(struct scsi_cmnd *scmd)
2852 {
2853 	int ret;
2854 	struct megasas_instance *instance;
2855 
2856 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2857 
2858 	if (instance->adapter_type != MFI_SERIES)
2859 		ret = megasas_task_abort_fusion(scmd);
2860 	else {
2861 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2862 		ret = FAILED;
2863 	}
2864 
2865 	return ret;
2866 }
2867 
2868 /**
2869  * megasas_reset_target:  Issues target reset request to firmware
2870  *                        (supported only for fusion adapters)
2871  * @scmd:                 SCSI command pointer
2872  */
2873 static int megasas_reset_target(struct scsi_cmnd *scmd)
2874 {
2875 	int ret;
2876 	struct megasas_instance *instance;
2877 
2878 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2879 
2880 	if (instance->adapter_type != MFI_SERIES)
2881 		ret = megasas_reset_target_fusion(scmd);
2882 	else {
2883 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2884 		ret = FAILED;
2885 	}
2886 
2887 	return ret;
2888 }
2889 
2890 /**
2891  * megasas_bios_param - Returns disk geometry for a disk
2892  * @sdev:		device handle
2893  * @bdev:		block device
2894  * @capacity:		drive capacity
2895  * @geom:		geometry parameters
2896  */
2897 static int
2898 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2899 		 sector_t capacity, int geom[])
2900 {
2901 	int heads;
2902 	int sectors;
2903 	sector_t cylinders;
2904 	unsigned long tmp;
2905 
2906 	/* Default heads (64) & sectors (32) */
2907 	heads = 64;
2908 	sectors = 32;
2909 
2910 	tmp = heads * sectors;
2911 	cylinders = capacity;
2912 
2913 	sector_div(cylinders, tmp);
2914 
2915 	/*
2916 	 * Handle extended translation size for logical drives > 1Gb
2917 	 */
2918 
2919 	if (capacity >= 0x200000) {
2920 		heads = 255;
2921 		sectors = 63;
2922 		tmp = heads*sectors;
2923 		cylinders = capacity;
2924 		sector_div(cylinders, tmp);
2925 	}
2926 
2927 	geom[0] = heads;
2928 	geom[1] = sectors;
2929 	geom[2] = cylinders;
2930 
2931 	return 0;
2932 }
2933 
2934 static void megasas_aen_polling(struct work_struct *work);
2935 
2936 /**
2937  * megasas_service_aen -	Processes an event notification
2938  * @instance:			Adapter soft state
2939  * @cmd:			AEN command completed by the ISR
2940  *
2941  * For AEN, driver sends a command down to FW that is held by the FW till an
2942  * event occurs. When an event of interest occurs, FW completes the command
2943  * that it was previously holding.
2944  *
2945  * This routine sends a SIGIO signal to processes that have registered with
2946  * the driver for AEN.
2947  */
2948 static void
2949 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2950 {
2951 	unsigned long flags;
2952 
2953 	/*
2954 	 * Don't signal the app if this is just an aborted, previously registered AEN
2955 	 */
2956 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
2957 		spin_lock_irqsave(&poll_aen_lock, flags);
2958 		megasas_poll_wait_aen = 1;
2959 		spin_unlock_irqrestore(&poll_aen_lock, flags);
2960 		wake_up(&megasas_poll_wait);
2961 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2962 	}
2963 	else
2964 		cmd->abort_aen = 0;
2965 
2966 	instance->aen_cmd = NULL;
2967 
2968 	megasas_return_cmd(instance, cmd);
2969 
2970 	if ((instance->unload == 0) &&
2971 		((instance->issuepend_done == 1))) {
2972 		struct megasas_aen_event *ev;
2973 
2974 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2975 		if (!ev) {
2976 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2977 		} else {
2978 			ev->instance = instance;
2979 			instance->ev = ev;
2980 			INIT_DELAYED_WORK(&ev->hotplug_work,
2981 					  megasas_aen_polling);
2982 			schedule_delayed_work(&ev->hotplug_work, 0);
2983 		}
2984 	}
2985 }
2986 
2987 static ssize_t
2988 megasas_fw_crash_buffer_store(struct device *cdev,
2989 	struct device_attribute *attr, const char *buf, size_t count)
2990 {
2991 	struct Scsi_Host *shost = class_to_shost(cdev);
2992 	struct megasas_instance *instance =
2993 		(struct megasas_instance *) shost->hostdata;
2994 	int val = 0;
2995 	unsigned long flags;
2996 
2997 	if (kstrtoint(buf, 0, &val) != 0)
2998 		return -EINVAL;
2999 
3000 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3001 	instance->fw_crash_buffer_offset = val;
3002 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3003 	return strlen(buf);
3004 }
3005 
3006 static ssize_t
3007 megasas_fw_crash_buffer_show(struct device *cdev,
3008 	struct device_attribute *attr, char *buf)
3009 {
3010 	struct Scsi_Host *shost = class_to_shost(cdev);
3011 	struct megasas_instance *instance =
3012 		(struct megasas_instance *) shost->hostdata;
3013 	u32 size;
3014 	unsigned long buff_addr;
3015 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3016 	unsigned long src_addr;
3017 	unsigned long flags;
3018 	u32 buff_offset;
3019 
3020 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3021 	buff_offset = instance->fw_crash_buffer_offset;
3022 	if (!instance->crash_dump_buf &&
3023 		!((instance->fw_crash_state == AVAILABLE) ||
3024 		(instance->fw_crash_state == COPYING))) {
3025 		dev_err(&instance->pdev->dev,
3026 			"Firmware crash dump is not available\n");
3027 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3028 		return -EINVAL;
3029 	}
3030 
3031 	buff_addr = (unsigned long) buf;
3032 
3033 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3034 		dev_err(&instance->pdev->dev,
3035 			"Firmware crash dump offset is out of range\n");
3036 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3037 		return 0;
3038 	}
3039 
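	/*
	 * The crash dump is held in CRASH_DMA_BUF_SIZE chunks; copy at most
	 * one sysfs page, starting at the requested offset within the
	 * corresponding chunk.
	 */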
3040 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3041 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3042 
3043 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3044 		(buff_offset % dmachunk);
3045 	memcpy(buf, (void *)src_addr, size);
3046 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3047 
3048 	return size;
3049 }
3050 
3051 static ssize_t
3052 megasas_fw_crash_buffer_size_show(struct device *cdev,
3053 	struct device_attribute *attr, char *buf)
3054 {
3055 	struct Scsi_Host *shost = class_to_shost(cdev);
3056 	struct megasas_instance *instance =
3057 		(struct megasas_instance *) shost->hostdata;
3058 
3059 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3060 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3061 }
3062 
3063 static ssize_t
3064 megasas_fw_crash_state_store(struct device *cdev,
3065 	struct device_attribute *attr, const char *buf, size_t count)
3066 {
3067 	struct Scsi_Host *shost = class_to_shost(cdev);
3068 	struct megasas_instance *instance =
3069 		(struct megasas_instance *) shost->hostdata;
3070 	int val = 0;
3071 	unsigned long flags;
3072 
3073 	if (kstrtoint(buf, 0, &val) != 0)
3074 		return -EINVAL;
3075 
3076 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3077 		dev_err(&instance->pdev->dev, "application attempted to set an "
3078 			"invalid firmware crash state\n");
3079 		return -EINVAL;
3080 	}
3081 
3082 	instance->fw_crash_state = val;
3083 
3084 	if ((val == COPIED) || (val == COPY_ERROR)) {
3085 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3086 		megasas_free_host_crash_buffer(instance);
3087 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3088 		if (val == COPY_ERROR)
3089 			dev_info(&instance->pdev->dev, "application failed to "
3090 				"copy Firmware crash dump\n");
3091 		else
3092 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3093 				"copied successfully\n");
3094 	}
3095 	return strlen(buf);
3096 }
3097 
3098 static ssize_t
3099 megasas_fw_crash_state_show(struct device *cdev,
3100 	struct device_attribute *attr, char *buf)
3101 {
3102 	struct Scsi_Host *shost = class_to_shost(cdev);
3103 	struct megasas_instance *instance =
3104 		(struct megasas_instance *) shost->hostdata;
3105 
3106 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3107 }
3108 
3109 static ssize_t
3110 megasas_page_size_show(struct device *cdev,
3111 	struct device_attribute *attr, char *buf)
3112 {
3113 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3114 }
3115 
3116 static ssize_t
3117 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3118 	char *buf)
3119 {
3120 	struct Scsi_Host *shost = class_to_shost(cdev);
3121 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3122 
3123 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3124 }
3125 
3126 static ssize_t
3127 megasas_fw_cmds_outstanding_show(struct device *cdev,
3128 				 struct device_attribute *attr, char *buf)
3129 {
3130 	struct Scsi_Host *shost = class_to_shost(cdev);
3131 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3132 
3133 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3134 }
3135 
3136 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3137 	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3138 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3139 	megasas_fw_crash_buffer_size_show, NULL);
3140 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3141 	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3142 static DEVICE_ATTR(page_size, S_IRUGO,
3143 	megasas_page_size_show, NULL);
3144 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3145 	megasas_ldio_outstanding_show, NULL);
3146 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
3147 	megasas_fw_cmds_outstanding_show, NULL);
3148 
3149 struct device_attribute *megaraid_host_attrs[] = {
3150 	&dev_attr_fw_crash_buffer_size,
3151 	&dev_attr_fw_crash_buffer,
3152 	&dev_attr_fw_crash_state,
3153 	&dev_attr_page_size,
3154 	&dev_attr_ldio_outstanding,
3155 	&dev_attr_fw_cmds_outstanding,
3156 	NULL,
3157 };
3158 
3159 /*
3160  * SCSI host template for the megaraid_sas driver
3161  */
3162 static struct scsi_host_template megasas_template = {
3163 
3164 	.module = THIS_MODULE,
3165 	.name = "Avago SAS based MegaRAID driver",
3166 	.proc_name = "megaraid_sas",
3167 	.slave_configure = megasas_slave_configure,
3168 	.slave_alloc = megasas_slave_alloc,
3169 	.slave_destroy = megasas_slave_destroy,
3170 	.queuecommand = megasas_queue_command,
3171 	.eh_target_reset_handler = megasas_reset_target,
3172 	.eh_abort_handler = megasas_task_abort,
3173 	.eh_host_reset_handler = megasas_reset_bus_host,
3174 	.eh_timed_out = megasas_reset_timer,
3175 	.shost_attrs = megaraid_host_attrs,
3176 	.bios_param = megasas_bios_param,
3177 	.use_clustering = ENABLE_CLUSTERING,
3178 	.change_queue_depth = scsi_change_queue_depth,
3179 	.no_write_same = 1,
3180 };
3181 
3182 /**
3183  * megasas_complete_int_cmd -	Completes an internal command
3184  * @instance:			Adapter soft state
3185  * @cmd:			Command to be completed
3186  *
3187  * The megasas_issue_blocked_cmd() function waits for a command to complete
3188  * after it issues a command. This function wakes up that waiting routine by
3189  * calling wake_up() on the wait queue.
3190  */
3191 static void
3192 megasas_complete_int_cmd(struct megasas_instance *instance,
3193 			 struct megasas_cmd *cmd)
3194 {
3195 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3196 	wake_up(&instance->int_cmd_wait_q);
3197 }
3198 
3199 /**
3200  * megasas_complete_abort -	Completes aborting a command
3201  * @instance:			Adapter soft state
3202  * @cmd:			Cmd that was issued to abort another cmd
3203  *
3204  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3205  * after it issues an abort on a previously issued command. This function
3206  * wakes up all functions waiting on the same wait queue.
3207  */
3208 static void
3209 megasas_complete_abort(struct megasas_instance *instance,
3210 		       struct megasas_cmd *cmd)
3211 {
3212 	if (cmd->sync_cmd) {
3213 		cmd->sync_cmd = 0;
3214 		cmd->cmd_status_drv = 0;
3215 		wake_up(&instance->abort_cmd_wait_q);
3216 	}
3217 }
3218 
3219 /**
3220  * megasas_complete_cmd -	Completes a command
3221  * @instance:			Adapter soft state
3222  * @cmd:			Command to be completed
3223  * @alt_status:			If non-zero, use this value as status to
3224  *				SCSI mid-layer instead of the value returned
3225  *				by the FW. This should be used if caller wants
3226  *				an alternate status (as in the case of aborted
3227  *				commands)
3228  */
3229 void
3230 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3231 		     u8 alt_status)
3232 {
3233 	int exception = 0;
3234 	struct megasas_header *hdr = &cmd->frame->hdr;
3235 	unsigned long flags;
3236 	struct fusion_context *fusion = instance->ctrl_context;
3237 	u32 opcode, status;
3238 
3239 	/* flag for the retry reset */
3240 	cmd->retry_for_fw_reset = 0;
3241 
3242 	if (cmd->scmd)
3243 		cmd->scmd->SCp.ptr = NULL;
3244 
3245 	switch (hdr->cmd) {
3246 	case MFI_CMD_INVALID:
3247 		/* Some older 1068 controller FW may keep a pended
3248 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3249 		   when booting the kdump kernel.  Ignore this command to
3250 		   prevent a kernel panic on shutdown of the kdump kernel. */
3251 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3252 		       "completed\n");
3253 		dev_warn(&instance->pdev->dev, "If you have a controller "
3254 		       "other than PERC5, please upgrade your firmware\n");
3255 		break;
3256 	case MFI_CMD_PD_SCSI_IO:
3257 	case MFI_CMD_LD_SCSI_IO:
3258 
3259 		/*
3260 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3261 		 * issued either through an IO path or an IOCTL path. If it
3262 		 * was via IOCTL, we will send it to internal completion.
3263 		 */
3264 		if (cmd->sync_cmd) {
3265 			cmd->sync_cmd = 0;
3266 			megasas_complete_int_cmd(instance, cmd);
3267 			break;
3268 		}
3269 
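		/* fall through - regular PD/LD SCSI IO is completed like LD read/write */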
3270 	case MFI_CMD_LD_READ:
3271 	case MFI_CMD_LD_WRITE:
3272 
3273 		if (alt_status) {
3274 			cmd->scmd->result = alt_status << 16;
3275 			exception = 1;
3276 		}
3277 
3278 		if (exception) {
3279 
3280 			atomic_dec(&instance->fw_outstanding);
3281 
3282 			scsi_dma_unmap(cmd->scmd);
3283 			cmd->scmd->scsi_done(cmd->scmd);
3284 			megasas_return_cmd(instance, cmd);
3285 
3286 			break;
3287 		}
3288 
3289 		switch (hdr->cmd_status) {
3290 
3291 		case MFI_STAT_OK:
3292 			cmd->scmd->result = DID_OK << 16;
3293 			break;
3294 
3295 		case MFI_STAT_SCSI_IO_FAILED:
3296 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3297 			cmd->scmd->result =
3298 			    (DID_ERROR << 16) | hdr->scsi_status;
3299 			break;
3300 
3301 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3302 
3303 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3304 
3305 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3306 				memset(cmd->scmd->sense_buffer, 0,
3307 				       SCSI_SENSE_BUFFERSIZE);
3308 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3309 				       hdr->sense_len);
3310 
3311 				cmd->scmd->result |= DRIVER_SENSE << 24;
3312 			}
3313 
3314 			break;
3315 
3316 		case MFI_STAT_LD_OFFLINE:
3317 		case MFI_STAT_DEVICE_NOT_FOUND:
3318 			cmd->scmd->result = DID_BAD_TARGET << 16;
3319 			break;
3320 
3321 		default:
3322 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3323 			       hdr->cmd_status);
3324 			cmd->scmd->result = DID_ERROR << 16;
3325 			break;
3326 		}
3327 
3328 		atomic_dec(&instance->fw_outstanding);
3329 
3330 		scsi_dma_unmap(cmd->scmd);
3331 		cmd->scmd->scsi_done(cmd->scmd);
3332 		megasas_return_cmd(instance, cmd);
3333 
3334 		break;
3335 
3336 	case MFI_CMD_SMP:
3337 	case MFI_CMD_STP:
3338 	case MFI_CMD_NVME:
3339 		megasas_complete_int_cmd(instance, cmd);
3340 		break;
3341 
3342 	case MFI_CMD_DCMD:
3343 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3344 		/* Check for LD map update */
3345 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3346 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3347 			fusion->fast_path_io = 0;
3348 			spin_lock_irqsave(instance->host->host_lock, flags);
3349 			status = cmd->frame->hdr.cmd_status;
3350 			instance->map_update_cmd = NULL;
3351 			if (status != MFI_STAT_OK) {
3352 				if (status != MFI_STAT_NOT_FOUND)
3353 					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3354 					       cmd->frame->hdr.cmd_status);
3355 				else {
3356 					megasas_return_cmd(instance, cmd);
3357 					spin_unlock_irqrestore(
3358 						instance->host->host_lock,
3359 						flags);
3360 					break;
3361 				}
3362 			}
3363 
3364 			megasas_return_cmd(instance, cmd);
3365 
3366 			/*
3367 			 * Set fast path IO to ZERO.
3368 			 * Validate Map will set proper value.
3369 			 * Meanwhile all IOs will go as LD IO.
3370 			 */
3371 			if (status == MFI_STAT_OK &&
3372 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3373 				instance->map_id++;
3374 				fusion->fast_path_io = 1;
3375 			} else {
3376 				fusion->fast_path_io = 0;
3377 			}
3378 
3379 			megasas_sync_map_info(instance);
3380 			spin_unlock_irqrestore(instance->host->host_lock,
3381 					       flags);
3382 			break;
3383 		}
3384 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3385 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3386 			spin_lock_irqsave(&poll_aen_lock, flags);
3387 			megasas_poll_wait_aen = 0;
3388 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3389 		}
3390 
3391 		/* FW has an updated PD sequence */
3392 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3393 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3394 
3395 			spin_lock_irqsave(instance->host->host_lock, flags);
3396 			status = cmd->frame->hdr.cmd_status;
3397 			instance->jbod_seq_cmd = NULL;
3398 			megasas_return_cmd(instance, cmd);
3399 
3400 			if (status == MFI_STAT_OK) {
3401 				instance->pd_seq_map_id++;
3402 				/* Re-register a pd sync seq num cmd */
3403 				if (megasas_sync_pd_seq_num(instance, true))
3404 					instance->use_seqnum_jbod_fp = false;
3405 			} else
3406 				instance->use_seqnum_jbod_fp = false;
3407 
3408 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3409 			break;
3410 		}
3411 
3412 		/*
3413 		 * See if we got an event notification
3414 		 */
3415 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3416 			megasas_service_aen(instance, cmd);
3417 		else
3418 			megasas_complete_int_cmd(instance, cmd);
3419 
3420 		break;
3421 
3422 	case MFI_CMD_ABORT:
3423 		/*
3424 		 * Cmd issued to abort another cmd returned
3425 		 */
3426 		megasas_complete_abort(instance, cmd);
3427 		break;
3428 
3429 	default:
3430 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3431 		       hdr->cmd);
3432 		megasas_complete_int_cmd(instance, cmd);
3433 		break;
3434 	}
3435 }
3436 
3437 /**
3438  * megasas_issue_pending_cmds_again -	issue all pending cmds
3439  *					in FW again because of the fw reset
3440  * @instance:				Adapter soft state
3441  */
3442 static inline void
3443 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3444 {
3445 	struct megasas_cmd *cmd;
3446 	struct list_head clist_local;
3447 	union megasas_evt_class_locale class_locale;
3448 	unsigned long flags;
3449 	u32 seq_num;
3450 
3451 	INIT_LIST_HEAD(&clist_local);
3452 	spin_lock_irqsave(&instance->hba_lock, flags);
3453 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3454 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3455 
3456 	while (!list_empty(&clist_local)) {
3457 		cmd = list_entry((&clist_local)->next,
3458 					struct megasas_cmd, list);
3459 		list_del_init(&cmd->list);
3460 
3461 		if (cmd->sync_cmd || cmd->scmd) {
3462 			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3463 				"detected to be pending during HBA reset\n",
3464 					cmd, cmd->scmd, cmd->sync_cmd);
3465 
3466 			cmd->retry_for_fw_reset++;
3467 
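			/*
			 * If the same command has survived three FW resets,
			 * assume the adapter cannot recover and kill the HBA.
			 */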
3468 			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
					"was tried multiple times during reset. "
					"Shutting down the HBA\n",
3472 					cmd, cmd->scmd, cmd->sync_cmd);
3473 				instance->instancet->disable_intr(instance);
3474 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3475 				megaraid_sas_kill_hba(instance);
3476 				return;
3477 			}
3478 		}
3479 
3480 		if (cmd->sync_cmd == 1) {
3481 			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev, "unexpected "
					"cmd attached to internal command!\n");
3484 			}
			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
						"on the internal reset queue, "
						"issue it again.\n", cmd);
3488 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3489 			instance->instancet->fire_cmd(instance,
3490 							cmd->frame_phys_addr,
3491 							0, instance->reg_set);
3492 		} else if (cmd->scmd) {
			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3494 			"detected on the internal queue, issue again.\n",
3495 			cmd, cmd->scmd->cmnd[0]);
3496 
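			/* Account for the re-issued SCSI command before firing it. */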
3497 			atomic_inc(&instance->fw_outstanding);
3498 			instance->instancet->fire_cmd(instance,
3499 					cmd->frame_phys_addr,
3500 					cmd->frame_count-1, instance->reg_set);
3501 		} else {
			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
				"internal reset defer list during re-issue!!\n",
3504 				cmd);
3505 		}
3506 	}
3507 
3508 	if (instance->aen_cmd) {
		dev_notice(&instance->pdev->dev, "returning the pending aen_cmd\n");
3510 		megasas_return_cmd(instance, instance->aen_cmd);
3511 
3512 		instance->aen_cmd = NULL;
3513 	}
3514 
3515 	/*
3516 	 * Initiate AEN (Asynchronous Event Notification)
3517 	 */
3518 	seq_num = instance->last_seq_num;
3519 	class_locale.members.reserved = 0;
3520 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3521 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3522 
3523 	megasas_register_aen(instance, seq_num, class_locale.word);
3524 }
3525 
/**
 * megasas_internal_reset_defer_cmds -	move internal-reset pending commands
 *					to a deferred queue
 * @instance:				Adapter soft state
 *
 * Commands pending at internal reset time are moved to a deferred queue.
 * This queue is flushed after the internal reset sequence completes
 * successfully. If the internal reset does not complete in time, the
 * kernel reset handler flushes these commands instead.
 */
3535 static void
3536 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3537 {
3538 	struct megasas_cmd *cmd;
3539 	int i;
3540 	u16 max_cmd = instance->max_fw_cmds;
3541 	u32 defer_index;
3542 	unsigned long flags;
3543 
3544 	defer_index = 0;
3545 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3546 	for (i = 0; i < max_cmd; i++) {
3547 		cmd = instance->cmd_list[i];
3548 		if (cmd->sync_cmd == 1 || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
					"onto the internal reset defer queue\n",
3551 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3552 
3553 			if (!list_empty(&cmd->list)) {
				dev_notice(&instance->pdev->dev, "ERROR while"
					" moving this cmd:%p, %d %p, it was"
					" found on another list\n",
3557 					cmd, cmd->sync_cmd, cmd->scmd);
3558 
3559 				list_del_init(&cmd->list);
3560 			}
3561 			defer_index++;
3562 			list_add_tail(&cmd->list,
3563 				&instance->internal_reset_pending_q);
3564 		}
3565 	}
3566 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3567 }
3568 
3569 
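/*
 * process_fw_state_change_wq -	work queue handler that drives the MFI
 *				adapter reset state machine
 * @work:			work_struct embedded in the adapter instance
 *
 * Scheduled when a FW fault is detected: resets the adapter, waits for it
 * to become ready, re-initializes the MFI interface and then re-issues the
 * commands that were deferred at reset time.
 */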
3570 static void
3571 process_fw_state_change_wq(struct work_struct *work)
3572 {
3573 	struct megasas_instance *instance =
3574 		container_of(work, struct megasas_instance, work_init);
3575 	u32 wait;
3576 	unsigned long flags;
3577 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return;
3582 	}
3583 
3584 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
					"state, restarting it...\n");
3587 
3588 		instance->instancet->disable_intr(instance);
3589 		atomic_set(&instance->fw_outstanding, 0);
3590 
3591 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3592 		instance->instancet->adp_reset(instance, instance->reg_set);
3593 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3594 
		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
					"initiating next stage...\n");
3597 
		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
					"state 2 starting...\n");
3600 
		/* wait for about 30 seconds before starting the second init */
3602 		for (wait = 0; wait < 30; wait++) {
3603 			msleep(1000);
3604 		}
3605 
3606 		if (megasas_transition_to_ready(instance, 1)) {
3607 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3608 
3609 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3610 			megaraid_sas_kill_hba(instance);
			return;
3612 		}
3613 
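		/*
		 * Reset the reply queue indices: the older xscale-based
		 * controllers resync the consumer to the current producer,
		 * while the other MFI controllers restart both at zero.
		 */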
3614 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3615 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3616 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3617 			) {
3618 			*instance->consumer = *instance->producer;
3619 		} else {
3620 			*instance->consumer = 0;
3621 			*instance->producer = 0;
3622 		}
3623 
3624 		megasas_issue_init_mfi(instance);
3625 
3626 		spin_lock_irqsave(&instance->hba_lock, flags);
3627 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3628 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3629 		instance->instancet->enable_intr(instance);
3630 
3631 		megasas_issue_pending_cmds_again(instance);
3632 		instance->issuepend_done = 1;
3633 	}
3634 }
3635 
3636 /**
3637  * megasas_deplete_reply_queue -	Processes all completed commands
3638  * @instance:				Adapter soft state
3639  * @alt_status:				Alternate status to be returned to
3640  *					SCSI mid-layer instead of the status
3641  *					returned by the FW
3642  * Note: this must be called with hba lock held
3643  */
3644 static int
3645 megasas_deplete_reply_queue(struct megasas_instance *instance,
3646 					u8 alt_status)
3647 {
3648 	u32 mfiStatus;
3649 	u32 fw_state;
3650 
3651 	if ((mfiStatus = instance->instancet->check_reset(instance,
3652 					instance->reg_set)) == 1) {
3653 		return IRQ_HANDLED;
3654 	}
3655 
3656 	if ((mfiStatus = instance->instancet->clear_intr(
3657 						instance->reg_set)
3658 						) == 0) {
3659 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3660 		if (!instance->msix_vectors)
3661 			return IRQ_NONE;
3662 	}
3663 
3664 	instance->mfiStatus = mfiStatus;
3665 
3666 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3667 		fw_state = instance->instancet->read_fw_status_reg(
3668 				instance->reg_set) & MFI_STATE_MASK;
3669 
3670 		if (fw_state != MFI_STATE_FAULT) {
3671 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3672 						fw_state);
3673 		}
3674 
3675 		if ((fw_state == MFI_STATE_FAULT) &&
3676 				(instance->disableOnlineCtrlReset == 0)) {
3677 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3678 
3679 			if ((instance->pdev->device ==
3680 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3681 				(instance->pdev->device ==
3682 					PCI_DEVICE_ID_DELL_PERC5) ||
3683 				(instance->pdev->device ==
3684 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3685 
3686 				*instance->consumer =
3687 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3688 			}
3689 
3690 
3691 			instance->instancet->disable_intr(instance);
3692 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3693 			instance->issuepend_done = 0;
3694 
3695 			atomic_set(&instance->fw_outstanding, 0);
3696 			megasas_internal_reset_defer_cmds(instance);
3697 
3698 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3699 					fw_state, atomic_read(&instance->adprecovery));
3700 
3701 			schedule_work(&instance->work_init);
3702 			return IRQ_HANDLED;
3703 
3704 		} else {
3705 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3706 				fw_state, instance->disableOnlineCtrlReset);
3707 		}
3708 	}
3709 
3710 	tasklet_schedule(&instance->isr_tasklet);
3711 	return IRQ_HANDLED;
3712 }
3713 /**
3714  * megasas_isr - isr entry point
3715  */
3716 static irqreturn_t megasas_isr(int irq, void *devp)
3717 {
3718 	struct megasas_irq_context *irq_context = devp;
3719 	struct megasas_instance *instance = irq_context->instance;
3720 	unsigned long flags;
3721 	irqreturn_t rc;
3722 
3723 	if (atomic_read(&instance->fw_reset_no_pci_access))
3724 		return IRQ_HANDLED;
3725 
3726 	spin_lock_irqsave(&instance->hba_lock, flags);
3727 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3728 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3729 
3730 	return rc;
3731 }
3732 
/**
 * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Set when invoked from the OCR path
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it simply waits for the FW to reach the ready state.
 */
3742 int
3743 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3744 {
3745 	int i;
3746 	u8 max_wait;
3747 	u32 fw_state;
3748 	u32 cur_state;
3749 	u32 abs_state, curr_abs_state;
3750 
3751 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3752 	fw_state = abs_state & MFI_STATE_MASK;
3753 
3754 	if (fw_state != MFI_STATE_READY)
3755 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3756 		       " state\n");
3757 
3758 	while (fw_state != MFI_STATE_READY) {
3759 
3760 		switch (fw_state) {
3761 
3762 		case MFI_STATE_FAULT:
3763 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3764 			if (ocr) {
3765 				max_wait = MEGASAS_RESET_WAIT_TIME;
3766 				cur_state = MFI_STATE_FAULT;
3767 				break;
3768 			} else
3769 				return -ENODEV;
3770 
3771 		case MFI_STATE_WAIT_HANDSHAKE:
3772 			/*
3773 			 * Set the CLR bit in inbound doorbell
3774 			 */
3775 			if ((instance->pdev->device ==
3776 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3777 				(instance->pdev->device ==
3778 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3779 				(instance->adapter_type != MFI_SERIES))
3780 				writel(
3781 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3782 				  &instance->reg_set->doorbell);
3783 			else
3784 				writel(
3785 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3786 					&instance->reg_set->inbound_doorbell);
3787 
3788 			max_wait = MEGASAS_RESET_WAIT_TIME;
3789 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
3790 			break;
3791 
3792 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3793 			if ((instance->pdev->device ==
3794 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3795 				(instance->pdev->device ==
3796 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3797 				(instance->adapter_type != MFI_SERIES))
3798 				writel(MFI_INIT_HOTPLUG,
3799 				       &instance->reg_set->doorbell);
3800 			else
3801 				writel(MFI_INIT_HOTPLUG,
3802 					&instance->reg_set->inbound_doorbell);
3803 
3804 			max_wait = MEGASAS_RESET_WAIT_TIME;
3805 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3806 			break;
3807 
3808 		case MFI_STATE_OPERATIONAL:
3809 			/*
3810 			 * Bring it to READY state; assuming max wait 10 secs
3811 			 */
3812 			instance->instancet->disable_intr(instance);
3813 			if ((instance->pdev->device ==
3814 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3815 				(instance->pdev->device ==
3816 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3817 				(instance->adapter_type != MFI_SERIES)) {
3818 				writel(MFI_RESET_FLAGS,
3819 					&instance->reg_set->doorbell);
3820 
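				/*
				 * Fusion adapters: poll the doorbell for up to
				 * ~10 seconds; bit 0 is expected to clear once
				 * the FW has acknowledged the reset request.
				 */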
3821 				if (instance->adapter_type != MFI_SERIES) {
3822 					for (i = 0; i < (10 * 1000); i += 20) {
3823 						if (readl(
3824 							    &instance->
3825 							    reg_set->
3826 							    doorbell) & 1)
3827 							msleep(20);
3828 						else
3829 							break;
3830 					}
3831 				}
3832 			} else
3833 				writel(MFI_RESET_FLAGS,
3834 					&instance->reg_set->inbound_doorbell);
3835 
3836 			max_wait = MEGASAS_RESET_WAIT_TIME;
3837 			cur_state = MFI_STATE_OPERATIONAL;
3838 			break;
3839 
3840 		case MFI_STATE_UNDEFINED:
3841 			/*
3842 			 * This state should not last for more than 2 seconds
3843 			 */
3844 			max_wait = MEGASAS_RESET_WAIT_TIME;
3845 			cur_state = MFI_STATE_UNDEFINED;
3846 			break;
3847 
3848 		case MFI_STATE_BB_INIT:
3849 			max_wait = MEGASAS_RESET_WAIT_TIME;
3850 			cur_state = MFI_STATE_BB_INIT;
3851 			break;
3852 
3853 		case MFI_STATE_FW_INIT:
3854 			max_wait = MEGASAS_RESET_WAIT_TIME;
3855 			cur_state = MFI_STATE_FW_INIT;
3856 			break;
3857 
3858 		case MFI_STATE_FW_INIT_2:
3859 			max_wait = MEGASAS_RESET_WAIT_TIME;
3860 			cur_state = MFI_STATE_FW_INIT_2;
3861 			break;
3862 
3863 		case MFI_STATE_DEVICE_SCAN:
3864 			max_wait = MEGASAS_RESET_WAIT_TIME;
3865 			cur_state = MFI_STATE_DEVICE_SCAN;
3866 			break;
3867 
3868 		case MFI_STATE_FLUSH_CACHE:
3869 			max_wait = MEGASAS_RESET_WAIT_TIME;
3870 			cur_state = MFI_STATE_FLUSH_CACHE;
3871 			break;
3872 
3873 		default:
3874 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3875 			       fw_state);
3876 			return -ENODEV;
3877 		}
3878 
3879 		/*
3880 		 * The cur_state should not last for more than max_wait secs
3881 		 */
3882 		for (i = 0; i < (max_wait * 1000); i++) {
3883 			curr_abs_state = instance->instancet->
3884 				read_fw_status_reg(instance->reg_set);
3885 
3886 			if (abs_state == curr_abs_state) {
3887 				msleep(1);
3888 			} else
3889 				break;
3890 		}
3891 
3892 		/*
3893 		 * Return error if fw_state hasn't changed after max_wait
3894 		 */
3895 		if (curr_abs_state == abs_state) {
3896 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3897 			       "in %d secs\n", fw_state, max_wait);
3898 			return -ENODEV;
3899 		}
3900 
3901 		abs_state = curr_abs_state;
3902 		fw_state = curr_abs_state & MFI_STATE_MASK;
3903 	}
3904 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3905 
3906 	return 0;
3907 }
3908 
3909 /**
3910  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
3911  * @instance:				Adapter soft state
3912  */
3913 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3914 {
3915 	int i;
3916 	u16 max_cmd = instance->max_mfi_cmds;
3917 	struct megasas_cmd *cmd;
3918 
3919 	if (!instance->frame_dma_pool)
3920 		return;
3921 
3922 	/*
3923 	 * Return all frames to pool
3924 	 */
3925 	for (i = 0; i < max_cmd; i++) {
3926 
3927 		cmd = instance->cmd_list[i];
3928 
3929 		if (cmd->frame)
3930 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
3931 				      cmd->frame_phys_addr);
3932 
3933 		if (cmd->sense)
3934 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
3935 				      cmd->sense_phys_addr);
3936 	}
3937 
3938 	/*
3939 	 * Now destroy the pool itself
3940 	 */
3941 	dma_pool_destroy(instance->frame_dma_pool);
3942 	dma_pool_destroy(instance->sense_dma_pool);
3943 
3944 	instance->frame_dma_pool = NULL;
3945 	instance->sense_dma_pool = NULL;
3946 }
3947 
3948 /**
3949  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
3950  * @instance:			Adapter soft state
3951  *
3952  * Each command packet has an embedded DMA memory buffer that is used for
3953  * filling MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet using
 * the DMA pool facility.
3956  */
3957 static int megasas_create_frame_pool(struct megasas_instance *instance)
3958 {
3959 	int i;
3960 	u16 max_cmd;
3961 	u32 sge_sz;
3962 	u32 frame_count;
3963 	struct megasas_cmd *cmd;
3964 
3965 	max_cmd = instance->max_mfi_cmds;
3966 
3967 	/*
3968 	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3969 	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3970 	 */
3971 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3972 	    sizeof(struct megasas_sge32);
3973 
3974 	if (instance->flag_ieee)
3975 		sge_sz = sizeof(struct megasas_sge_skinny);
3976 
	/*
	 * For MFI controllers:
	 * max_num_sge = 60
	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
	 * Total 960 bytes (15 MFI frames of 64 bytes each)
	 *
	 * Fusion adapters require only 3 extra frames:
	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
	 * Total 192 bytes (3 MFI frames of 64 bytes each)
	 */
3988 	frame_count = (instance->adapter_type == MFI_SERIES) ?
3989 			(15 + 1) : (3 + 1);
3990 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
	/*
	 * Use the DMA pool facility to allocate the frames
	 */
3994 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
3995 					&instance->pdev->dev,
3996 					instance->mfi_frame_size, 256, 0);
3997 
3998 	if (!instance->frame_dma_pool) {
3999 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4000 		return -ENOMEM;
4001 	}
4002 
4003 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4004 						   &instance->pdev->dev, 128,
4005 						   4, 0);
4006 
4007 	if (!instance->sense_dma_pool) {
4008 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4009 
4010 		dma_pool_destroy(instance->frame_dma_pool);
4011 		instance->frame_dma_pool = NULL;
4012 
4013 		return -ENOMEM;
4014 	}
4015 
4016 	/*
4017 	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By using cmd->index as the context instead of &cmd, we can always
	 * use a 32-bit context regardless of the architecture.
4020 	 */
4021 	for (i = 0; i < max_cmd; i++) {
4022 
4023 		cmd = instance->cmd_list[i];
4024 
4025 		cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
4026 					    GFP_KERNEL, &cmd->frame_phys_addr);
4027 
4028 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4029 					    GFP_KERNEL, &cmd->sense_phys_addr);
4030 
4031 		/*
4032 		 * megasas_teardown_frame_pool() takes care of freeing
4033 		 * whatever has been allocated
4034 		 */
4035 		if (!cmd->frame || !cmd->sense) {
4036 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4037 			megasas_teardown_frame_pool(instance);
4038 			return -ENOMEM;
4039 		}
4040 
4041 		memset(cmd->frame, 0, instance->mfi_frame_size);
4042 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4043 		cmd->frame->io.pad_0 = 0;
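		/*
		 * In kdump (reset_devices) kernels, pre-mark MFI frames as
		 * invalid until a real command is built in them, likely to
		 * keep the FW from acting on stale frame contents.
		 */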
4044 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4045 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4046 	}
4047 
4048 	return 0;
4049 }
4050 
4051 /**
4052  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4053  * @instance:		Adapter soft state
4054  */
4055 void megasas_free_cmds(struct megasas_instance *instance)
4056 {
4057 	int i;
4058 
4059 	/* First free the MFI frame pool */
4060 	megasas_teardown_frame_pool(instance);
4061 
4062 	/* Free all the commands in the cmd_list */
4063 	for (i = 0; i < instance->max_mfi_cmds; i++)
4064 
4065 		kfree(instance->cmd_list[i]);
4066 
4067 	/* Free the cmd_list buffer itself */
4068 	kfree(instance->cmd_list);
4069 	instance->cmd_list = NULL;
4070 
4071 	INIT_LIST_HEAD(&instance->cmd_pool);
4072 }
4073 
4074 /**
4075  * megasas_alloc_cmds -	Allocates the command packets
4076  * @instance:		Adapter soft state
4077  *
4078  * Each command that is issued to the FW, whether IO commands from the OS or
4079  * internal commands like IOCTLs, are wrapped in local data structure called
4080  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4081  * the FW.
4082  *
4083  * Each frame has a 32-bit field called context (tag). This context is used
4084  * to get back the megasas_cmd from the frame when a frame gets completed in
4085  * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32- and 64-bit
 * systems to the minimum. We always use 32-bit integers for the context. In
 * this driver, the 32-bit values are the indices into an array cmd_list.
4089  * This array is used only to look up the megasas_cmd given the context. The
4090  * free commands themselves are maintained in a linked list called cmd_pool.
4091  */
4092 int megasas_alloc_cmds(struct megasas_instance *instance)
4093 {
4094 	int i;
4095 	int j;
4096 	u16 max_cmd;
4097 	struct megasas_cmd *cmd;
4098 
4099 	max_cmd = instance->max_mfi_cmds;
4100 
4101 	/*
4102 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4103 	 * Allocate the dynamic array first and then allocate individual
4104 	 * commands.
4105 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL);
4107 
4108 	if (!instance->cmd_list) {
4109 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4110 		return -ENOMEM;
4111 	}
4112 
4115 	for (i = 0; i < max_cmd; i++) {
4116 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4117 						GFP_KERNEL);
4118 
4119 		if (!instance->cmd_list[i]) {
4120 
4121 			for (j = 0; j < i; j++)
4122 				kfree(instance->cmd_list[j]);
4123 
4124 			kfree(instance->cmd_list);
4125 			instance->cmd_list = NULL;
4126 
4127 			return -ENOMEM;
4128 		}
4129 	}
4130 
4131 	for (i = 0; i < max_cmd; i++) {
4132 		cmd = instance->cmd_list[i];
4133 		memset(cmd, 0, sizeof(struct megasas_cmd));
4134 		cmd->index = i;
4135 		cmd->scmd = NULL;
4136 		cmd->instance = instance;
4137 
4138 		list_add_tail(&cmd->list, &instance->cmd_pool);
4139 	}
4140 
4141 	/*
4142 	 * Create a frame pool and assign one frame to each cmd
4143 	 */
4144 	if (megasas_create_frame_pool(instance)) {
4145 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4146 		megasas_free_cmds(instance);
4147 	}
4148 
4149 	return 0;
4150 }
4151 
4152 /*
4153  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4154  * @instance:				Adapter soft state
4155  *
 * Returns INITIATE_OCR only for Fusion adapters, when driver load/unload is
 * not in progress and no OCR is already underway; otherwise returns
 * KILL_ADAPTER (MFI adapters) or IGNORE_TIMEOUT.
4158  */
4159 inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance)
{
4162 	if (instance->adapter_type == MFI_SERIES)
4163 		return KILL_ADAPTER;
4164 	else if (instance->unload ||
4165 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4166 		return IGNORE_TIMEOUT;
4167 	else
4168 		return INITIATE_OCR;
4169 }
4170 
4171 static void
4172 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4173 {
4174 	int ret;
4175 	struct megasas_cmd *cmd;
4176 	struct megasas_dcmd_frame *dcmd;
4177 
4178 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4179 	u16 device_id = 0;
4180 
4181 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4182 	cmd = megasas_get_cmd(instance);
4183 
4184 	if (!cmd) {
4185 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4186 		return;
4187 	}
4188 
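	/*
	 * Build a MR_DCMD_PD_GET_INFO DCMD: the target device ID goes in
	 * mbox.s[0] and the response is DMA'd into the pre-allocated
	 * instance->pd_info buffer.
	 */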
4189 	dcmd = &cmd->frame->dcmd;
4190 
4191 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4192 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4193 
4194 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4195 	dcmd->cmd = MFI_CMD_DCMD;
4196 	dcmd->cmd_status = 0xFF;
4197 	dcmd->sge_count = 1;
4198 	dcmd->flags = MFI_FRAME_DIR_READ;
4199 	dcmd->timeout = 0;
4200 	dcmd->pad_0 = 0;
4201 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4202 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4203 
4204 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4205 				 sizeof(struct MR_PD_INFO));
4206 
4207 	if ((instance->adapter_type != MFI_SERIES) &&
4208 	    !instance->mask_interrupts)
4209 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4210 	else
4211 		ret = megasas_issue_polled(instance, cmd);
4212 
4213 	switch (ret) {
4214 	case DCMD_SUCCESS:
4215 		mr_device_priv_data = sdev->hostdata;
4216 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4217 		mr_device_priv_data->interface_type =
4218 				instance->pd_info->state.ddf.pdType.intf;
4219 		break;
4220 
4221 	case DCMD_TIMEOUT:
4222 
4223 		switch (dcmd_timeout_ocr_possible(instance)) {
4224 		case INITIATE_OCR:
4225 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4226 			megasas_reset_fusion(instance->host,
4227 				MFI_IO_TIMEOUT_OCR);
4228 			break;
4229 		case KILL_ADAPTER:
4230 			megaraid_sas_kill_hba(instance);
4231 			break;
4232 		case IGNORE_TIMEOUT:
4233 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4234 				__func__, __LINE__);
4235 			break;
4236 		}
4237 
4238 		break;
4239 	}
4240 
4241 	if (ret != DCMD_TIMEOUT)
4242 		megasas_return_cmd(instance, cmd);
4243 
4244 	return;
4245 }
4246 /*
 * megasas_get_pd_list -	Fetches FW's pd_list structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out the
 * system PDs (JBODs) exposed to the host by the FW; the result is
 * cached in instance->pd_list.
4254  */
4255 static int
4256 megasas_get_pd_list(struct megasas_instance *instance)
4257 {
4258 	int ret = 0, pd_index = 0;
4259 	struct megasas_cmd *cmd;
4260 	struct megasas_dcmd_frame *dcmd;
4261 	struct MR_PD_LIST *ci;
4262 	struct MR_PD_ADDRESS *pd_addr;
4263 	dma_addr_t ci_h = 0;
4264 
4265 	if (instance->pd_list_not_supported) {
4266 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4267 		"not supported by firmware\n");
4268 		return ret;
4269 	}
4270 
4271 	ci = instance->pd_list_buf;
4272 	ci_h = instance->pd_list_buf_h;
4273 
4274 	cmd = megasas_get_cmd(instance);
4275 
4276 	if (!cmd) {
4277 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4278 		return -ENOMEM;
4279 	}
4280 
4281 	dcmd = &cmd->frame->dcmd;
4282 
4283 	memset(ci, 0, sizeof(*ci));
4284 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4285 
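	/* Query only the PDs that the FW exposes to the host. */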
4286 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4287 	dcmd->mbox.b[1] = 0;
4288 	dcmd->cmd = MFI_CMD_DCMD;
4289 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4290 	dcmd->sge_count = 1;
4291 	dcmd->flags = MFI_FRAME_DIR_READ;
4292 	dcmd->timeout = 0;
4293 	dcmd->pad_0 = 0;
4294 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4295 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4296 
4297 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4298 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4299 
4300 	if ((instance->adapter_type != MFI_SERIES) &&
4301 	    !instance->mask_interrupts)
4302 		ret = megasas_issue_blocked_cmd(instance, cmd,
4303 			MFI_IO_TIMEOUT_SECS);
4304 	else
4305 		ret = megasas_issue_polled(instance, cmd);
4306 
4307 	switch (ret) {
4308 	case DCMD_FAILED:
4309 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4310 			"failed/not supported by firmware\n");
4311 
4312 		if (instance->adapter_type != MFI_SERIES)
4313 			megaraid_sas_kill_hba(instance);
4314 		else
4315 			instance->pd_list_not_supported = 1;
4316 		break;
4317 	case DCMD_TIMEOUT:
4318 
4319 		switch (dcmd_timeout_ocr_possible(instance)) {
4320 		case INITIATE_OCR:
4321 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4322 			/*
4323 			 * DCMD failed from AEN path.
4324 			 * AEN path already hold reset_mutex to avoid PCI access
4325 			 * while OCR is in progress.
4326 			 */
4327 			mutex_unlock(&instance->reset_mutex);
4328 			megasas_reset_fusion(instance->host,
4329 						MFI_IO_TIMEOUT_OCR);
4330 			mutex_lock(&instance->reset_mutex);
4331 			break;
4332 		case KILL_ADAPTER:
4333 			megaraid_sas_kill_hba(instance);
4334 			break;
4335 		case IGNORE_TIMEOUT:
4336 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4337 				__func__, __LINE__);
4338 			break;
4339 		}
4340 
4341 		break;
4342 
4343 	case DCMD_SUCCESS:
4344 		pd_addr = ci->addr;
4345 
4346 		if ((le32_to_cpu(ci->count) >
4347 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4348 			break;
4349 
4350 		memset(instance->local_pd_list, 0,
4351 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4352 
4353 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4354 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4355 					le16_to_cpu(pd_addr->deviceId);
4356 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4357 					pd_addr->scsiDevType;
4358 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4359 					MR_PD_STATE_SYSTEM;
4360 			pd_addr++;
4361 		}
4362 
4363 		memcpy(instance->pd_list, instance->local_pd_list,
4364 			sizeof(instance->pd_list));
4365 		break;
4366 
4367 	}
4368 
4369 	if (ret != DCMD_TIMEOUT)
4370 		megasas_return_cmd(instance, cmd);
4371 
4372 	return ret;
4373 }
4374 
4375 /*
 * megasas_get_ld_list -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD
 * list structure.  This information is mainly used to find out the
 * LDs (logical drives) exposed by the FW; the target IDs of active
 * LDs are recorded in instance->ld_ids.
4383  */
4384 static int
4385 megasas_get_ld_list(struct megasas_instance *instance)
4386 {
4387 	int ret = 0, ld_index = 0, ids = 0;
4388 	struct megasas_cmd *cmd;
4389 	struct megasas_dcmd_frame *dcmd;
4390 	struct MR_LD_LIST *ci;
4391 	dma_addr_t ci_h = 0;
4392 	u32 ld_count;
4393 
4394 	ci = instance->ld_list_buf;
4395 	ci_h = instance->ld_list_buf_h;
4396 
4397 	cmd = megasas_get_cmd(instance);
4398 
4399 	if (!cmd) {
4400 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4401 		return -ENOMEM;
4402 	}
4403 
4404 	dcmd = &cmd->frame->dcmd;
4405 
4406 	memset(ci, 0, sizeof(*ci));
4407 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4408 
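	/*
	 * mbox byte 0 is set for extended (240 VD) firmware; this appears to
	 * request the extended LD list format from the FW.
	 */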
4409 	if (instance->supportmax256vd)
4410 		dcmd->mbox.b[0] = 1;
4411 	dcmd->cmd = MFI_CMD_DCMD;
4412 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4413 	dcmd->sge_count = 1;
4414 	dcmd->flags = MFI_FRAME_DIR_READ;
4415 	dcmd->timeout = 0;
4416 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4417 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4418 	dcmd->pad_0  = 0;
4419 
4420 	megasas_set_dma_settings(instance, dcmd, ci_h,
4421 				 sizeof(struct MR_LD_LIST));
4422 
4423 	if ((instance->adapter_type != MFI_SERIES) &&
4424 	    !instance->mask_interrupts)
4425 		ret = megasas_issue_blocked_cmd(instance, cmd,
4426 			MFI_IO_TIMEOUT_SECS);
4427 	else
4428 		ret = megasas_issue_polled(instance, cmd);
4429 
4430 	ld_count = le32_to_cpu(ci->ldCount);
4431 
4432 	switch (ret) {
4433 	case DCMD_FAILED:
4434 		megaraid_sas_kill_hba(instance);
4435 		break;
4436 	case DCMD_TIMEOUT:
4437 
4438 		switch (dcmd_timeout_ocr_possible(instance)) {
4439 		case INITIATE_OCR:
4440 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4441 			/*
4442 			 * DCMD failed from AEN path.
4443 			 * AEN path already hold reset_mutex to avoid PCI access
4444 			 * while OCR is in progress.
4445 			 */
4446 			mutex_unlock(&instance->reset_mutex);
4447 			megasas_reset_fusion(instance->host,
4448 						MFI_IO_TIMEOUT_OCR);
4449 			mutex_lock(&instance->reset_mutex);
4450 			break;
4451 		case KILL_ADAPTER:
4452 			megaraid_sas_kill_hba(instance);
4453 			break;
4454 		case IGNORE_TIMEOUT:
4455 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4456 				__func__, __LINE__);
4457 			break;
4458 		}
4459 
4460 		break;
4461 
4462 	case DCMD_SUCCESS:
4463 		if (ld_count > instance->fw_supported_vd_count)
4464 			break;
4465 
4466 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4467 
4468 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4469 			if (ci->ldList[ld_index].state != 0) {
4470 				ids = ci->ldList[ld_index].ref.targetId;
4471 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4472 			}
4473 		}
4474 
4475 		break;
4476 	}
4477 
4478 	if (ret != DCMD_TIMEOUT)
4479 		megasas_return_cmd(instance, cmd);
4480 
4481 	return ret;
4482 }
4483 
4484 /**
 * megasas_ld_list_query -	Queries the FW for its LD target ID list
 * @instance:				Adapter soft state
 * @query_type:				Type of LD list query to issue
 *
 * Issues an internal command (DCMD) to get the FW's LD target ID list.
 * This information is mainly used to find out the LDs exposed by the FW;
 * if the DCMD is not supported, megasas_get_ld_list() is used instead.
4492  */
4493 static int
4494 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4495 {
4496 	int ret = 0, ld_index = 0, ids = 0;
4497 	struct megasas_cmd *cmd;
4498 	struct megasas_dcmd_frame *dcmd;
4499 	struct MR_LD_TARGETID_LIST *ci;
4500 	dma_addr_t ci_h = 0;
4501 	u32 tgtid_count;
4502 
4503 	ci = instance->ld_targetid_list_buf;
4504 	ci_h = instance->ld_targetid_list_buf_h;
4505 
4506 	cmd = megasas_get_cmd(instance);
4507 
4508 	if (!cmd) {
4509 		dev_warn(&instance->pdev->dev,
4510 		         "megasas_ld_list_query: Failed to get cmd\n");
4511 		return -ENOMEM;
4512 	}
4513 
4514 	dcmd = &cmd->frame->dcmd;
4515 
4516 	memset(ci, 0, sizeof(*ci));
4517 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4518 
4519 	dcmd->mbox.b[0] = query_type;
4520 	if (instance->supportmax256vd)
4521 		dcmd->mbox.b[2] = 1;
4522 
4523 	dcmd->cmd = MFI_CMD_DCMD;
4524 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4525 	dcmd->sge_count = 1;
4526 	dcmd->flags = MFI_FRAME_DIR_READ;
4527 	dcmd->timeout = 0;
4528 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4529 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4530 	dcmd->pad_0  = 0;
4531 
4532 	megasas_set_dma_settings(instance, dcmd, ci_h,
4533 				 sizeof(struct MR_LD_TARGETID_LIST));
4534 
4535 	if ((instance->adapter_type != MFI_SERIES) &&
4536 	    !instance->mask_interrupts)
4537 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4538 	else
4539 		ret = megasas_issue_polled(instance, cmd);
4540 
4541 	switch (ret) {
4542 	case DCMD_FAILED:
4543 		dev_info(&instance->pdev->dev,
4544 			"DCMD not supported by firmware - %s %d\n",
4545 				__func__, __LINE__);
4546 		ret = megasas_get_ld_list(instance);
4547 		break;
4548 	case DCMD_TIMEOUT:
4549 		switch (dcmd_timeout_ocr_possible(instance)) {
4550 		case INITIATE_OCR:
4551 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4552 			/*
4553 			 * DCMD failed from AEN path.
4554 			 * AEN path already hold reset_mutex to avoid PCI access
4555 			 * while OCR is in progress.
4556 			 */
4557 			mutex_unlock(&instance->reset_mutex);
4558 			megasas_reset_fusion(instance->host,
4559 						MFI_IO_TIMEOUT_OCR);
4560 			mutex_lock(&instance->reset_mutex);
4561 			break;
4562 		case KILL_ADAPTER:
4563 			megaraid_sas_kill_hba(instance);
4564 			break;
4565 		case IGNORE_TIMEOUT:
4566 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4567 				__func__, __LINE__);
4568 			break;
4569 		}
4570 
4571 		break;
4572 	case DCMD_SUCCESS:
4573 		tgtid_count = le32_to_cpu(ci->count);
4574 
4575 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4576 			break;
4577 
4578 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4579 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4580 			ids = ci->targetId[ld_index];
4581 			instance->ld_ids[ids] = ci->targetId[ld_index];
4582 		}
4583 
4584 		break;
4585 	}
4586 
4587 	if (ret != DCMD_TIMEOUT)
4588 		megasas_return_cmd(instance, cmd);
4589 
4590 	return ret;
4591 }
4592 
4593 /*
 * megasas_update_ext_vd_details -	Update details w.r.t. Extended VDs
 * @instance:				Controller's soft state
 */
4597 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4598 {
4599 	struct fusion_context *fusion;
4600 	u32 ventura_map_sz = 0;
4601 
4602 	fusion = instance->ctrl_context;
	/* MFI based controllers do not support Extended VDs; nothing to do */
4604 	if (!fusion)
4605 		return;
4606 
4607 	instance->supportmax256vd =
4608 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4609 	/* Below is additional check to address future FW enhancement */
4610 	if (instance->ctrl_info_buf->max_lds > 64)
4611 		instance->supportmax256vd = 1;
4612 
4613 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4614 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4615 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4616 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4617 	if (instance->supportmax256vd) {
4618 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4619 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4620 	} else {
4621 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4622 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4623 	}
4624 
4625 	dev_info(&instance->pdev->dev,
4626 		"firmware type\t: %s\n",
4627 		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4628 		"Legacy(64 VD) firmware");
4629 
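	/*
	 * Ventura and newer controllers report the RAID map size directly
	 * (in multiples of MR_MIN_MAP_SIZE); older Fusion controllers choose
	 * between the legacy and the extended map layout based on whether
	 * extended VDs are supported.
	 */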
4630 	if (instance->max_raid_mapsize) {
4631 		ventura_map_sz = instance->max_raid_mapsize *
4632 						MR_MIN_MAP_SIZE; /* 64k */
4633 		fusion->current_map_sz = ventura_map_sz;
4634 		fusion->max_map_sz = ventura_map_sz;
4635 	} else {
4636 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4637 					(sizeof(struct MR_LD_SPAN_MAP) *
4638 					(instance->fw_supported_vd_count - 1));
4639 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4640 
4641 		fusion->max_map_sz =
4642 			max(fusion->old_map_sz, fusion->new_map_sz);
4643 
4644 		if (instance->supportmax256vd)
4645 			fusion->current_map_sz = fusion->new_map_sz;
4646 		else
4647 			fusion->current_map_sz = fusion->old_map_sz;
4648 	}
4649 	/* irrespective of FW raid maps, driver raid map is constant */
4650 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4651 }
4652 
4653 /**
 * megasas_get_ctrl_info -	Returns FW's controller structure
4655  * @instance:				Adapter soft state
4656  *
4657  * Issues an internal command (DCMD) to get the FW's controller structure.
4658  * This information is mainly used to find out the maximum IO transfer per
4659  * command supported by the FW.
4660  */
4661 int
4662 megasas_get_ctrl_info(struct megasas_instance *instance)
4663 {
4664 	int ret = 0;
4665 	struct megasas_cmd *cmd;
4666 	struct megasas_dcmd_frame *dcmd;
4667 	struct megasas_ctrl_info *ci;
4668 	dma_addr_t ci_h = 0;
4669 
4670 	ci = instance->ctrl_info_buf;
4671 	ci_h = instance->ctrl_info_buf_h;
4672 
4673 	cmd = megasas_get_cmd(instance);
4674 
4675 	if (!cmd) {
4676 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4677 		return -ENOMEM;
4678 	}
4679 
4680 	dcmd = &cmd->frame->dcmd;
4681 
4682 	memset(ci, 0, sizeof(*ci));
4683 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4684 
4685 	dcmd->cmd = MFI_CMD_DCMD;
4686 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4687 	dcmd->sge_count = 1;
4688 	dcmd->flags = MFI_FRAME_DIR_READ;
4689 	dcmd->timeout = 0;
4690 	dcmd->pad_0 = 0;
4691 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4692 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4693 	dcmd->mbox.b[0] = 1;
4694 
4695 	megasas_set_dma_settings(instance, dcmd, ci_h,
4696 				 sizeof(struct megasas_ctrl_info));
4697 
4698 	if ((instance->adapter_type != MFI_SERIES) &&
4699 	    !instance->mask_interrupts) {
4700 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4701 	} else {
4702 		ret = megasas_issue_polled(instance, cmd);
4703 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4704 	}
4705 
4706 	switch (ret) {
4707 	case DCMD_SUCCESS:
4708 		/* Save required controller information in
4709 		 * CPU endianness format.
4710 		 */
4711 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4712 		le32_to_cpus((u32 *)&ci->adapterOperations2);
4713 		le32_to_cpus((u32 *)&ci->adapterOperations3);
4714 		le16_to_cpus((u16 *)&ci->adapter_operations4);
4715 
4716 		/* Update the latest Ext VD info.
4717 		 * From Init path, store current firmware details.
		 * From OCR path, detect any firmware property changes in case
		 * of a firmware upgrade without a system reboot.
4720 		 */
4721 		megasas_update_ext_vd_details(instance);
4722 		instance->use_seqnum_jbod_fp =
4723 			ci->adapterOperations3.useSeqNumJbodFP;
4724 		instance->support_morethan256jbod =
4725 			ci->adapter_operations4.support_pd_map_target_id;
4726 		instance->support_nvme_passthru =
4727 			ci->adapter_operations4.support_nvme_passthru;
4728 
		/* Check whether controller is iMR or MR */
4730 		instance->is_imr = (ci->memory_size ? 0 : 1);
4731 		dev_info(&instance->pdev->dev,
4732 			"controller type\t: %s(%dMB)\n",
4733 			instance->is_imr ? "iMR" : "MR",
4734 			le16_to_cpu(ci->memory_size));
4735 
4736 		instance->disableOnlineCtrlReset =
4737 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
4738 		instance->secure_jbod_support =
4739 			ci->adapterOperations3.supportSecurityonJBOD;
4740 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4741 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4742 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4743 			instance->secure_jbod_support ? "Yes" : "No");
4744 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
4745 			 instance->support_nvme_passthru ? "Yes" : "No");
4746 		break;
4747 
4748 	case DCMD_TIMEOUT:
4749 		switch (dcmd_timeout_ocr_possible(instance)) {
4750 		case INITIATE_OCR:
4751 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4752 			megasas_reset_fusion(instance->host,
4753 				MFI_IO_TIMEOUT_OCR);
4754 			break;
4755 		case KILL_ADAPTER:
4756 			megaraid_sas_kill_hba(instance);
4757 			break;
4758 		case IGNORE_TIMEOUT:
4759 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4760 				__func__, __LINE__);
4761 			break;
		}
		break;
	case DCMD_FAILED:
4764 		megaraid_sas_kill_hba(instance);
4765 		break;
4766 
4767 	}
4768 
4769 	megasas_return_cmd(instance, cmd);
4770 
4771 
4772 	return ret;
4773 }
4774 
/*
 * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
 *					to firmware
 * @instance:				Adapter soft state
 * @crash_buf_state:			tell FW to turn the crash dump feature
 *					ON (MR_CRASH_BUF_TURN_ON = 1) or
 *					OFF (MR_CRASH_BUF_TURN_OFF = 0)
 *
 * Returns 0 on success, non-zero on failure.
 *
 * Issues an internal command (DCMD) to set parameters for crash dump feature.
 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
 * that driver supports crash dump feature. This DCMD will be sent only if
 * crash dump feature is supported by the FW.
 */
4790 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4791 	u8 crash_buf_state)
4792 {
4793 	int ret = 0;
4794 	struct megasas_cmd *cmd;
4795 	struct megasas_dcmd_frame *dcmd;
4796 
4797 	cmd = megasas_get_cmd(instance);
4798 
4799 	if (!cmd) {
4800 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4801 		return -ENOMEM;
4802 	}
4803 
4804 
4805 	dcmd = &cmd->frame->dcmd;
4806 
4807 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4808 	dcmd->mbox.b[0] = crash_buf_state;
4809 	dcmd->cmd = MFI_CMD_DCMD;
4810 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4811 	dcmd->sge_count = 1;
4812 	dcmd->flags = MFI_FRAME_DIR_NONE;
4813 	dcmd->timeout = 0;
4814 	dcmd->pad_0 = 0;
4815 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4816 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4817 
4818 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
4819 				 CRASH_DMA_BUF_SIZE);
4820 
4821 	if ((instance->adapter_type != MFI_SERIES) &&
4822 	    !instance->mask_interrupts)
4823 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4824 	else
4825 		ret = megasas_issue_polled(instance, cmd);
4826 
4827 	if (ret == DCMD_TIMEOUT) {
4828 		switch (dcmd_timeout_ocr_possible(instance)) {
4829 		case INITIATE_OCR:
4830 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4831 			megasas_reset_fusion(instance->host,
4832 					MFI_IO_TIMEOUT_OCR);
4833 			break;
4834 		case KILL_ADAPTER:
4835 			megaraid_sas_kill_hba(instance);
4836 			break;
4837 		case IGNORE_TIMEOUT:
4838 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4839 				__func__, __LINE__);
4840 			break;
4841 		}
4842 	} else
4843 		megasas_return_cmd(instance, cmd);
4844 
4845 	return ret;
4846 }
4847 
4848 /**
4849  * megasas_issue_init_mfi -	Initializes the FW
4850  * @instance:		Adapter soft state
4851  *
4852  * Issues the INIT MFI cmd
4853  */
4854 static int
4855 megasas_issue_init_mfi(struct megasas_instance *instance)
4856 {
4857 	__le32 context;
4858 	struct megasas_cmd *cmd;
4859 	struct megasas_init_frame *init_frame;
4860 	struct megasas_init_queue_info *initq_info;
4861 	dma_addr_t init_frame_h;
4862 	dma_addr_t initq_info_h;
4863 
4864 	/*
	 * Prepare an init frame. Note the init frame points to queue info
4866 	 * structure. Each frame has SGL allocated after first 64 bytes. For
4867 	 * this frame - since we don't need any SGL - we use SGL's space as
4868 	 * queue info structure
4869 	 *
4870 	 * We will not get a NULL command below. We just created the pool.
4871 	 */
4872 	cmd = megasas_get_cmd(instance);
4873 
4874 	init_frame = (struct megasas_init_frame *)cmd->frame;
4875 	initq_info = (struct megasas_init_queue_info *)
4876 		((unsigned long)init_frame + 64);
4877 
4878 	init_frame_h = cmd->frame_phys_addr;
4879 	initq_info_h = init_frame_h + 64;
4880 
4881 	context = init_frame->context;
4882 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4883 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4884 	init_frame->context = context;
4885 
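	/*
	 * Tell the FW where the reply queue lives and where the producer and
	 * consumer indices are kept; the reply queue holds one more entry
	 * than max_fw_cmds.
	 */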
4886 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4887 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4888 
4889 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4890 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4891 
4892 	init_frame->cmd = MFI_CMD_INIT;
4893 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4894 	init_frame->queue_info_new_phys_addr_lo =
4895 		cpu_to_le32(lower_32_bits(initq_info_h));
4896 	init_frame->queue_info_new_phys_addr_hi =
4897 		cpu_to_le32(upper_32_bits(initq_info_h));
4898 
4899 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4900 
4901 	/*
4902 	 * disable the intr before firing the init frame to FW
4903 	 */
4904 	instance->instancet->disable_intr(instance);
4905 
4906 	/*
4907 	 * Issue the init frame in polled mode
4908 	 */
4909 
4910 	if (megasas_issue_polled(instance, cmd)) {
4911 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4912 		megasas_return_cmd(instance, cmd);
4913 		goto fail_fw_init;
4914 	}
4915 
4916 	megasas_return_cmd(instance, cmd);
4917 
4918 	return 0;
4919 
4920 fail_fw_init:
4921 	return -EINVAL;
4922 }
4923 
4924 static u32
4925 megasas_init_adapter_mfi(struct megasas_instance *instance)
4926 {
4927 	struct megasas_register_set __iomem *reg_set;
4928 	u32 context_sz;
4929 	u32 reply_q_sz;
4930 
4931 	reg_set = instance->reg_set;
4932 
4933 	/*
4934 	 * Get various operational parameters from status register
4935 	 */
4936 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4937 	/*
4938 	 * Reduce the max supported cmds by 1. This is to ensure that the
4939 	 * reply_q_sz (1 more than the max cmd that driver may send)
4940 	 * does not exceed max cmds that the FW can support
4941 	 */
4942 	instance->max_fw_cmds = instance->max_fw_cmds-1;
4943 	instance->max_mfi_cmds = instance->max_fw_cmds;
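	/* Bits 16-23 of the status register advertise the max SGE count. */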
4944 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4945 					0x10;
4946 	/*
4947 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4948 	 * are reserved for IOCTL + driver's internal DCMDs.
4949 	 */
4950 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4951 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4952 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4953 			MEGASAS_SKINNY_INT_CMDS);
4954 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4955 	} else {
4956 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4957 			MEGASAS_INT_CMDS);
4958 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4959 	}
4960 
4961 	instance->cur_can_queue = instance->max_scsi_cmds;
4962 	/*
4963 	 * Create a pool of commands
4964 	 */
4965 	if (megasas_alloc_cmds(instance))
4966 		goto fail_alloc_cmds;
4967 
4968 	/*
4969 	 * Allocate memory for reply queue. Length of reply queue should
4970 	 * be _one_ more than the maximum commands handled by the firmware.
4971 	 *
	 * Note: When FW completes commands, it places corresponding context
4973 	 * values in this circular reply queue. This circular queue is a fairly
4974 	 * typical producer-consumer queue. FW is the producer (of completed
4975 	 * commands) and the driver is the consumer.
4976 	 */
4977 	context_sz = sizeof(u32);
4978 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4979 
4980 	instance->reply_queue = pci_alloc_consistent(instance->pdev,
4981 						     reply_q_sz,
4982 						     &instance->reply_queue_h);
4983 
4984 	if (!instance->reply_queue) {
4985 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4986 		goto fail_reply_queue;
4987 	}
4988 
4989 	if (megasas_issue_init_mfi(instance))
4990 		goto fail_fw_init;
4991 
4992 	if (megasas_get_ctrl_info(instance)) {
		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4994 			"Fail from %s %d\n", instance->unique_id,
4995 			__func__, __LINE__);
4996 		goto fail_fw_init;
4997 	}
4998 
4999 	instance->fw_support_ieee = 0;
5000 	instance->fw_support_ieee =
5001 		(instance->instancet->read_fw_status_reg(reg_set) &
5002 		0x04000000);
5003 
	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
5005 			instance->fw_support_ieee);
5006 
5007 	if (instance->fw_support_ieee)
5008 		instance->flag_ieee = 1;
5009 
5010 	return 0;
5011 
5012 fail_fw_init:
5013 
5014 	pci_free_consistent(instance->pdev, reply_q_sz,
5015 			    instance->reply_queue, instance->reply_queue_h);
5016 fail_reply_queue:
5017 	megasas_free_cmds(instance);
5018 
5019 fail_alloc_cmds:
5020 	return 1;
5021 }
5022 
5023 /*
5024  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5025  * @instance:				Adapter soft state
5026  *
 * Do not enable interrupts, only set up ISRs.
5028  *
5029  * Return 0 on success.
5030  */
5031 static int
5032 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5033 {
5034 	struct pci_dev *pdev;
5035 
5036 	pdev = instance->pdev;
5037 	instance->irq_context[0].instance = instance;
5038 	instance->irq_context[0].MSIxIndex = 0;
5039 	if (request_irq(pci_irq_vector(pdev, 0),
5040 			instance->instancet->service_isr, IRQF_SHARED,
5041 			"megasas", &instance->irq_context[0])) {
5042 		dev_err(&instance->pdev->dev,
5043 				"Failed to register IRQ from %s %d\n",
5044 				__func__, __LINE__);
5045 		return -1;
5046 	}
5047 	return 0;
5048 }
5049 
5050 /**
5051  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5052  * @instance:				Adapter soft state
5053  * @is_probe:				Driver probe check
5054  *
 * Do not enable interrupts, only set up ISRs.
5056  *
5057  * Return 0 on success.
5058  */
5059 static int
5060 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5061 {
5062 	int i, j;
5063 	struct pci_dev *pdev;
5064 
5065 	pdev = instance->pdev;
5066 
5067 	/* Try MSI-x */
5068 	for (i = 0; i < instance->msix_vectors; i++) {
5069 		instance->irq_context[i].instance = instance;
5070 		instance->irq_context[i].MSIxIndex = i;
5071 		if (request_irq(pci_irq_vector(pdev, i),
5072 			instance->instancet->service_isr, 0, "megasas",
5073 			&instance->irq_context[i])) {
5074 			dev_err(&instance->pdev->dev,
5075 				"Failed to register IRQ for vector %d.\n", i);
5076 			for (j = 0; j < i; j++)
5077 				free_irq(pci_irq_vector(pdev, j),
5078 					 &instance->irq_context[j]);
5079 			/* Retry irq register for IO_APIC*/
5080 			instance->msix_vectors = 0;
5081 			if (is_probe) {
5082 				pci_free_irq_vectors(instance->pdev);
5083 				return megasas_setup_irqs_ioapic(instance);
5084 			} else {
5085 				return -1;
5086 			}
5087 		}
5088 	}
5089 	return 0;
5090 }
5091 
5092 /*
 * megasas_destroy_irqs -		unregister interrupts.
5094  * @instance:				Adapter soft state
5095  * return:				void
5096  */
5097 static void
megasas_destroy_irqs(struct megasas_instance *instance)
{
5100 	int i;
5101 
5102 	if (instance->msix_vectors)
5103 		for (i = 0; i < instance->msix_vectors; i++) {
5104 			free_irq(pci_irq_vector(instance->pdev, i),
5105 				 &instance->irq_context[i]);
5106 		}
5107 	else
5108 		free_irq(pci_irq_vector(instance->pdev, 0),
5109 			 &instance->irq_context[0]);
5110 }
5111 
/**
 * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
 * @instance:				Adapter soft state
 *
 * Allocates the PD sequence-number sync buffers (if needed) and determines
 * whether the sequence-number based JBOD fast path can be used.
 */
5119 void
5120 megasas_setup_jbod_map(struct megasas_instance *instance)
5121 {
5122 	int i;
5123 	struct fusion_context *fusion = instance->ctrl_context;
5124 	u32 pd_seq_map_sz;
5125 
5126 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5127 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5128 
5129 	if (reset_devices || !fusion ||
5130 		!instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
5131 		dev_info(&instance->pdev->dev,
5132 			"Jbod map is not supported %s %d\n",
5133 			__func__, __LINE__);
5134 		instance->use_seqnum_jbod_fp = false;
5135 		return;
5136 	}
5137 
5138 	if (fusion->pd_seq_sync[0])
5139 		goto skip_alloc;
5140 
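	/*
	 * Allocate JBOD_MAPS_COUNT sequence-number buffers; see
	 * megasas_sync_pd_seq_num(), which uses one buffer for the immediate
	 * sync and the other for the pending sync registered with the FW.
	 */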
5141 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5142 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5143 			(&instance->pdev->dev, pd_seq_map_sz,
5144 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5145 		if (!fusion->pd_seq_sync[i]) {
5146 			dev_err(&instance->pdev->dev,
5147 				"Failed to allocate memory from %s %d\n",
5148 				__func__, __LINE__);
5149 			if (i == 1) {
5150 				dma_free_coherent(&instance->pdev->dev,
5151 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5152 					fusion->pd_seq_phys[0]);
5153 				fusion->pd_seq_sync[0] = NULL;
5154 			}
5155 			instance->use_seqnum_jbod_fp = false;
5156 			return;
5157 		}
5158 	}
5159 
5160 skip_alloc:
5161 	if (!megasas_sync_pd_seq_num(instance, false) &&
5162 		!megasas_sync_pd_seq_num(instance, true))
5163 		instance->use_seqnum_jbod_fp = true;
5164 	else
5165 		instance->use_seqnum_jbod_fp = false;
5166 }
5167 
5168 /**
5169  * megasas_init_fw -	Initializes the FW
5170  * @instance:		Adapter soft state
5171  *
5172  * This is the main function for initializing firmware
5173  */
5174 
5175 static int megasas_init_fw(struct megasas_instance *instance)
5176 {
5177 	u32 max_sectors_1;
5178 	u32 max_sectors_2, tmp_sectors, msix_enable;
5179 	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5180 	resource_size_t base_addr;
5181 	struct megasas_register_set __iomem *reg_set;
5182 	struct megasas_ctrl_info *ctrl_info = NULL;
5183 	unsigned long bar_list;
5184 	int i, j, loop, fw_msix_count = 0;
5185 	struct IOV_111 *iovPtr;
5186 	struct fusion_context *fusion;
5187 
5188 	fusion = instance->ctrl_context;
5189 
5190 	/* Find first memory bar */
5191 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5192 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5193 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5194 					 "megasas: LSI")) {
5195 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5196 		return -EBUSY;
5197 	}
5198 
5199 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5200 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5201 
5202 	if (!instance->reg_set) {
5203 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5204 		goto fail_ioremap;
5205 	}
5206 
5207 	reg_set = instance->reg_set;
5208 
5209 	if (instance->adapter_type != MFI_SERIES)
5210 		instance->instancet = &megasas_instance_template_fusion;
5211 	else {
5212 		switch (instance->pdev->device) {
5213 		case PCI_DEVICE_ID_LSI_SAS1078R:
5214 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5215 			instance->instancet = &megasas_instance_template_ppc;
5216 			break;
5217 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5218 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5219 			instance->instancet = &megasas_instance_template_gen2;
5220 			break;
5221 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5222 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5223 			instance->instancet = &megasas_instance_template_skinny;
5224 			break;
5225 		case PCI_DEVICE_ID_LSI_SAS1064R:
5226 		case PCI_DEVICE_ID_DELL_PERC5:
5227 		default:
5228 			instance->instancet = &megasas_instance_template_xscale;
5229 			instance->pd_list_not_supported = 1;
5230 			break;
5231 		}
5232 	}
5233 
5234 	if (megasas_transition_to_ready(instance, 0)) {
5235 		atomic_set(&instance->fw_reset_no_pci_access, 1);
5236 		instance->instancet->adp_reset
5237 			(instance, instance->reg_set);
5238 		atomic_set(&instance->fw_reset_no_pci_access, 0);
5239 		dev_info(&instance->pdev->dev,
5240 			"FW restarted successfully from %s!\n",
5241 			__func__);
5242 
		/* Wait for about 30 seconds before retrying */
5244 		ssleep(30);
5245 
5246 		if (megasas_transition_to_ready(instance, 0))
5247 			goto fail_ready_state;
5248 	}
5249 
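	/* Initialize locks, queues and default controller parameters before talking to FW */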
5250 	megasas_init_ctrl_params(instance);
5251 
5252 	if (megasas_set_dma_mask(instance))
5253 		goto fail_ready_state;
5254 
5255 	if (megasas_alloc_ctrl_mem(instance))
5256 		goto fail_alloc_dma_buf;
5257 
5258 	if (megasas_alloc_ctrl_dma_buffers(instance))
5259 		goto fail_alloc_dma_buf;
5260 
5261 	fusion = instance->ctrl_context;
5262 
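	/* Ventura firmware advertises the maximum supported RAID map size in scratch pad 3 */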
5263 	if (instance->adapter_type == VENTURA_SERIES) {
5264 		scratch_pad_3 =
5265 			readl(&instance->reg_set->outbound_scratch_pad_3);
5266 		instance->max_raid_mapsize = ((scratch_pad_3 >>
5267 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5268 			MR_MAX_RAID_MAP_SIZE_MASK);
5269 	}
5270 
5271 	/* Check if MSI-X is supported while in ready state */
5272 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5273 		       0x4000000) >> 0x1a;
5274 	if (msix_enable && !msix_disable) {
5275 		int irq_flags = PCI_IRQ_MSIX;
5276 
5277 		scratch_pad_2 = readl
5278 			(&instance->reg_set->outbound_scratch_pad_2);
5279 		/* Check max MSI-X vectors */
5280 		if (fusion) {
5281 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5282 				/* Thunderbolt Series*/
5283 				instance->msix_vectors = (scratch_pad_2
5284 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5285 				fw_msix_count = instance->msix_vectors;
			} else { /* Invader series supports more than 8 MSI-X vectors */
5287 				instance->msix_vectors = ((scratch_pad_2
5288 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5289 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5290 				if (instance->msix_vectors > 16)
5291 					instance->msix_combined = true;
5292 
5293 				if (rdpq_enable)
5294 					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5295 								1 : 0;
5296 				fw_msix_count = instance->msix_vectors;
				/* Save reply post host index addresses 1-15 to
				 * local memory. Index 0 is already saved from
				 * register offset MPI2_REPLY_POST_HOST_INDEX_OFFSET.
				 */
5301 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5302 					instance->reply_post_host_index_addr[loop] =
5303 						(u32 __iomem *)
5304 						((u8 __iomem *)instance->reg_set +
5305 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5306 						+ (loop * 0x10));
5307 				}
5308 			}
5309 			if (msix_vectors)
5310 				instance->msix_vectors = min(msix_vectors,
5311 					instance->msix_vectors);
5312 		} else /* MFI adapters */
5313 			instance->msix_vectors = 1;
5314 		/* Don't bother allocating more MSI-X vectors than cpus */
5315 		instance->msix_vectors = min(instance->msix_vectors,
5316 					     (unsigned int)num_online_cpus());
5317 		if (smp_affinity_enable)
5318 			irq_flags |= PCI_IRQ_AFFINITY;
5319 		i = pci_alloc_irq_vectors(instance->pdev, 1,
5320 					  instance->msix_vectors, irq_flags);
5321 		if (i > 0)
5322 			instance->msix_vectors = i;
5323 		else
5324 			instance->msix_vectors = 0;
5325 	}
	/*
	 * MSI-X host index 0 is common to all adapters.
	 * It is used for all MPT based adapters.
	 */
5330 	if (instance->msix_combined) {
5331 		instance->reply_post_host_index_addr[0] =
5332 				(u32 *)((u8 *)instance->reg_set +
5333 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5334 	} else {
5335 		instance->reply_post_host_index_addr[0] =
5336 			(u32 *)((u8 *)instance->reg_set +
5337 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5338 	}
5339 
5340 	if (!instance->msix_vectors) {
5341 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5342 		if (i < 0)
5343 			goto fail_setup_irqs;
5344 	}
5345 
5346 	dev_info(&instance->pdev->dev,
		"firmware supports msix\t: (%d)\n", fw_msix_count);
5348 	dev_info(&instance->pdev->dev,
5349 		"current msix/online cpus\t: (%d/%d)\n",
5350 		instance->msix_vectors, (unsigned int)num_online_cpus());
5351 	dev_info(&instance->pdev->dev,
5352 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5353 
5354 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5355 		(unsigned long)instance);
5356 
	/*
	 * Below are the default values for legacy firmware
	 * (non-fusion based controllers).
	 */
5361 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5362 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5363 	/* Get operational params, sge flags, send init cmd to controller */
5364 	if (instance->instancet->init_adapter(instance))
5365 		goto fail_init_adapter;
5366 
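	/*
	 * For Ventura, firmware reports the NVMe page shift in scratch pad 4;
	 * use it only if it is at least the default NVMe page shift.
	 */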
5367 	if (instance->adapter_type == VENTURA_SERIES) {
5368 		scratch_pad_4 =
5369 			readl(&instance->reg_set->outbound_scratch_pad_4);
5370 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5371 			MR_DEFAULT_NVME_PAGE_SHIFT)
5372 			instance->nvme_page_size =
5373 				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5374 
5375 		dev_info(&instance->pdev->dev,
5376 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5377 	}
5378 
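	/* Register interrupt handlers: MSI-X vectors when available, IO-APIC otherwise */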
5379 	if (instance->msix_vectors ?
5380 		megasas_setup_irqs_msix(instance, 1) :
5381 		megasas_setup_irqs_ioapic(instance))
5382 		goto fail_init_adapter;
5383 
5384 	instance->instancet->enable_intr(instance);
5385 
5386 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
5387 
5388 	megasas_setup_jbod_map(instance);
5389 
	/*
	 * For passthrough devices, the following call fetches the PD list.
	 */
5393 	memset(instance->pd_list, 0,
5394 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5395 	if (megasas_get_pd_list(instance) < 0) {
5396 		dev_err(&instance->pdev->dev, "failed to get PD list\n");
5397 		goto fail_get_ld_pd_list;
5398 	}
5399 
5400 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5401 
5402 	/* stream detection initialization */
5403 	if (instance->adapter_type == VENTURA_SERIES) {
5404 		fusion->stream_detect_by_ld =
5405 			kzalloc(sizeof(struct LD_STREAM_DETECT *)
5406 			* MAX_LOGICAL_DRIVES_EXT,
5407 			GFP_KERNEL);
5408 		if (!fusion->stream_detect_by_ld) {
5409 			dev_err(&instance->pdev->dev,
5410 				"unable to allocate stream detection for pool of LDs\n");
5411 			goto fail_get_ld_pd_list;
5412 		}
5413 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5414 			fusion->stream_detect_by_ld[i] =
5415 				kzalloc(sizeof(struct LD_STREAM_DETECT),
5416 				GFP_KERNEL);
5417 			if (!fusion->stream_detect_by_ld[i]) {
5418 				dev_err(&instance->pdev->dev,
					"unable to allocate stream detect by LD\n");
5420 				for (j = 0; j < i; ++j)
5421 					kfree(fusion->stream_detect_by_ld[j]);
5422 				kfree(fusion->stream_detect_by_ld);
5423 				fusion->stream_detect_by_ld = NULL;
5424 				goto fail_get_ld_pd_list;
5425 			}
5426 			fusion->stream_detect_by_ld[i]->mru_bit_map
5427 				= MR_STREAM_BITMAP;
5428 		}
5429 	}
5430 
5431 	if (megasas_ld_list_query(instance,
5432 				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5433 		goto fail_get_ld_pd_list;
5434 
5435 	/*
5436 	 * Compute the max allowed sectors per IO: The controller info has two
5437 	 * limits on max sectors. Driver should use the minimum of these two.
5438 	 *
5439 	 * 1 << stripe_sz_ops.min = max sectors per strip
5440 	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so it always ended up as zero.
5443 	 */
5444 	tmp_sectors = 0;
5445 	ctrl_info = instance->ctrl_info_buf;
5446 
5447 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5448 		le16_to_cpu(ctrl_info->max_strips_per_io);
5449 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5450 
5451 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5452 
5453 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5454 	instance->passive = ctrl_info->cluster.passive;
5455 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5456 	instance->UnevenSpanSupport =
5457 		ctrl_info->adapterOperations2.supportUnevenSpans;
5458 	if (instance->UnevenSpanSupport) {
5459 		struct fusion_context *fusion = instance->ctrl_context;
5460 		if (MR_ValidateMapInfo(instance, instance->map_id))
5461 			fusion->fast_path_io = 1;
5462 		else
5463 			fusion->fast_path_io = 0;
5464 
5465 	}
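	/* For SR-IOV capable firmware, record the VF requestor ID reported by the controller */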
5466 	if (ctrl_info->host_interface.SRIOV) {
5467 		instance->requestorId = ctrl_info->iov.requestorId;
5468 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5469 			if (!ctrl_info->adapterOperations2.activePassive)
5470 			    instance->PlasmaFW111 = 1;
5471 
5472 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5473 			    instance->PlasmaFW111 ? "1.11" : "new");
5474 
5475 			if (instance->PlasmaFW111) {
5476 			    iovPtr = (struct IOV_111 *)
5477 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
5478 			    instance->requestorId = iovPtr->requestorId;
5479 			}
5480 		}
5481 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5482 			instance->requestorId);
5483 	}
5484 
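	/*
	 * Firmware crash dump is enabled only when the FW advertises support
	 * and the driver managed to allocate a crash dump DMA buffer.
	 */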
5485 	instance->crash_dump_fw_support =
5486 		ctrl_info->adapterOperations3.supportCrashDump;
5487 	instance->crash_dump_drv_support =
5488 		(instance->crash_dump_fw_support &&
5489 		instance->crash_dump_buf);
5490 	if (instance->crash_dump_drv_support)
5491 		megasas_set_crash_dump_params(instance,
5492 			MR_CRASH_BUF_TURN_OFF);
	else {
5495 		if (instance->crash_dump_buf)
5496 			pci_free_consistent(instance->pdev,
5497 				CRASH_DMA_BUF_SIZE,
5498 				instance->crash_dump_buf,
5499 				instance->crash_dump_h);
5500 		instance->crash_dump_buf = NULL;
5501 	}
5502 
5504 	dev_info(&instance->pdev->dev,
5505 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5506 		le16_to_cpu(ctrl_info->pci.vendor_id),
5507 		le16_to_cpu(ctrl_info->pci.device_id),
5508 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5509 		le16_to_cpu(ctrl_info->pci.sub_device_id));
5510 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
5511 		instance->UnevenSpanSupport ? "yes" : "no");
5512 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
5513 		instance->crash_dump_drv_support ? "yes" : "no");
5514 	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
5515 		instance->use_seqnum_jbod_fp ? "yes" : "no");
5516 
5518 	instance->max_sectors_per_req = instance->max_num_sge *
5519 						SGE_BUFFER_SIZE / 512;
5520 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5521 		instance->max_sectors_per_req = tmp_sectors;
5522 
5523 	/* Check for valid throttlequeuedepth module parameter */
5524 	if (throttlequeuedepth &&
5525 			throttlequeuedepth <= instance->max_scsi_cmds)
5526 		instance->throttlequeuedepth = throttlequeuedepth;
5527 	else
5528 		instance->throttlequeuedepth =
5529 				MEGASAS_THROTTLE_QUEUE_DEPTH;
5530 
5531 	if ((resetwaittime < 1) ||
5532 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5533 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
5534 
5535 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5536 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5537 
5538 	/* Launch SR-IOV heartbeat timer */
5539 	if (instance->requestorId) {
5540 		if (!megasas_sriov_start_heartbeat(instance, 1))
5541 			megasas_start_timer(instance);
5542 		else
5543 			instance->skip_heartbeat_timer_del = 1;
5544 	}
5545 
5546 	return 0;
5547 
5548 fail_get_ld_pd_list:
5549 	instance->instancet->disable_intr(instance);
5550 fail_init_adapter:
5551 	megasas_destroy_irqs(instance);
5552 fail_setup_irqs:
5553 	if (instance->msix_vectors)
5554 		pci_free_irq_vectors(instance->pdev);
5555 	instance->msix_vectors = 0;
5556 fail_alloc_dma_buf:
5557 	megasas_free_ctrl_dma_buffers(instance);
5558 	megasas_free_ctrl_mem(instance);
5559 fail_ready_state:
5560 	iounmap(instance->reg_set);
5561 
5562 fail_ioremap:
5563 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5564 
5565 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5566 		__func__, __LINE__);
5567 	return -EINVAL;
5568 }
5569 
5570 /**
5571  * megasas_release_mfi -	Reverses the FW initialization
5572  * @instance:			Adapter soft state
5573  */
5574 static void megasas_release_mfi(struct megasas_instance *instance)
5575 {
5576 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5577 
5578 	if (instance->reply_queue)
5579 		pci_free_consistent(instance->pdev, reply_q_sz,
5580 			    instance->reply_queue, instance->reply_queue_h);
5581 
5582 	megasas_free_cmds(instance);
5583 
5584 	iounmap(instance->reg_set);
5585 
5586 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5587 }
5588 
5589 /**
5590  * megasas_get_seq_num -	Gets latest event sequence numbers
5591  * @instance:			Adapter soft state
5592  * @eli:			FW event log sequence numbers information
5593  *
 * FW maintains a log of all events in a non-volatile area. Upper layers would
 * usually find out the latest sequence number of the events, the sequence
 * number at boot, etc. They would "read" all the events below the latest
 * sequence number by issuing a direct FW command (DCMD). For future events
 * (beyond the latest sequence number), they would subscribe to AEN
 * (asynchronous event notification) and wait for the events to happen.
5600  */
5601 static int
5602 megasas_get_seq_num(struct megasas_instance *instance,
5603 		    struct megasas_evt_log_info *eli)
5604 {
5605 	struct megasas_cmd *cmd;
5606 	struct megasas_dcmd_frame *dcmd;
5607 	struct megasas_evt_log_info *el_info;
5608 	dma_addr_t el_info_h = 0;
5609 	int ret;
5610 
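	/*
	 * Issue MR_DCMD_CTRL_EVENT_GET_INFO as a blocked internal command and
	 * copy the returned sequence numbers into the caller's buffer.
	 */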
5611 	cmd = megasas_get_cmd(instance);
5612 
	if (!cmd)
		return -ENOMEM;
5616 
5617 	dcmd = &cmd->frame->dcmd;
5618 	el_info = pci_alloc_consistent(instance->pdev,
5619 				       sizeof(struct megasas_evt_log_info),
5620 				       &el_info_h);
5621 
5622 	if (!el_info) {
5623 		megasas_return_cmd(instance, cmd);
5624 		return -ENOMEM;
5625 	}
5626 
5627 	memset(el_info, 0, sizeof(*el_info));
5628 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5629 
5630 	dcmd->cmd = MFI_CMD_DCMD;
5631 	dcmd->cmd_status = 0x0;
5632 	dcmd->sge_count = 1;
5633 	dcmd->flags = MFI_FRAME_DIR_READ;
5634 	dcmd->timeout = 0;
5635 	dcmd->pad_0 = 0;
5636 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5637 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5638 
5639 	megasas_set_dma_settings(instance, dcmd, el_info_h,
5640 				 sizeof(struct megasas_evt_log_info));
5641 
5642 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5643 	if (ret != DCMD_SUCCESS) {
5644 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5645 			__func__, __LINE__);
5646 		goto dcmd_failed;
5647 	}
5648 
5649 	/*
5650 	 * Copy the data back into callers buffer
5651 	 */
5652 	eli->newest_seq_num = el_info->newest_seq_num;
5653 	eli->oldest_seq_num = el_info->oldest_seq_num;
5654 	eli->clear_seq_num = el_info->clear_seq_num;
5655 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
5656 	eli->boot_seq_num = el_info->boot_seq_num;
5657 
5658 dcmd_failed:
5659 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5660 			    el_info, el_info_h);
5661 
5662 	megasas_return_cmd(instance, cmd);
5663 
5664 	return ret;
5665 }
5666 
5667 /**
5668  * megasas_register_aen -	Registers for asynchronous event notification
5669  * @instance:			Adapter soft state
5670  * @seq_num:			The starting sequence number
5671  * @class_locale:		Class of the event
5672  *
5673  * This function subscribes for AEN for events beyond the @seq_num. It requests
5674  * to be notified if and only if the event is of type @class_locale
5675  */
5676 static int
5677 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5678 		     u32 class_locale_word)
5679 {
5680 	int ret_val;
5681 	struct megasas_cmd *cmd;
5682 	struct megasas_dcmd_frame *dcmd;
5683 	union megasas_evt_class_locale curr_aen;
5684 	union megasas_evt_class_locale prev_aen;
5685 
	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is a superset of both
	 * old and current, and re-issue it to the FW.
	 */
5698 
5699 	curr_aen.word = class_locale_word;
5700 
5701 	if (instance->aen_cmd) {
5702 
5703 		prev_aen.word =
5704 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5705 
5706 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
5707 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
5708 			dev_info(&instance->pdev->dev,
				 "%s %d out of range class %d sent by application\n",
5710 				 __func__, __LINE__, curr_aen.members.class);
5711 			return 0;
5712 		}
5713 
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such a hierarchy. They are
		 * bitmap values.
		 */
5724 		if ((prev_aen.members.class <= curr_aen.members.class) &&
5725 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
5726 		      curr_aen.members.locale)) {
5727 			/*
5728 			 * Previously issued event registration includes
5729 			 * current request. Nothing to do.
5730 			 */
5731 			return 0;
5732 		} else {
5733 			curr_aen.members.locale |= prev_aen.members.locale;
5734 
5735 			if (prev_aen.members.class < curr_aen.members.class)
5736 				curr_aen.members.class = prev_aen.members.class;
5737 
5738 			instance->aen_cmd->abort_aen = 1;
5739 			ret_val = megasas_issue_blocked_abort_cmd(instance,
5740 								  instance->
5741 								  aen_cmd, 30);
5742 
5743 			if (ret_val) {
5744 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5745 				       "previous AEN command\n");
5746 				return ret_val;
5747 			}
5748 		}
5749 	}
5750 
5751 	cmd = megasas_get_cmd(instance);
5752 
5753 	if (!cmd)
5754 		return -ENOMEM;
5755 
5756 	dcmd = &cmd->frame->dcmd;
5757 
5758 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5759 
5760 	/*
5761 	 * Prepare DCMD for aen registration
5762 	 */
5763 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5764 
5765 	dcmd->cmd = MFI_CMD_DCMD;
5766 	dcmd->cmd_status = 0x0;
5767 	dcmd->sge_count = 1;
5768 	dcmd->flags = MFI_FRAME_DIR_READ;
5769 	dcmd->timeout = 0;
5770 	dcmd->pad_0 = 0;
5771 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5772 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5773 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5774 	instance->last_seq_num = seq_num;
5775 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5776 
5777 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
5778 				 sizeof(struct megasas_evt_detail));
5779 
5780 	if (instance->aen_cmd != NULL) {
5781 		megasas_return_cmd(instance, cmd);
5782 		return 0;
5783 	}
5784 
5785 	/*
5786 	 * Store reference to the cmd used to register for AEN. When an
5787 	 * application wants us to register for AEN, we have to abort this
5788 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
5789 	 */
5790 	instance->aen_cmd = cmd;
5791 
5792 	/*
5793 	 * Issue the aen registration frame
5794 	 */
5795 	instance->instancet->issue_dcmd(instance, cmd);
5796 
5797 	return 0;
5798 }
5799 
/**
 * megasas_get_target_prop - Send a DCMD with the details below to firmware.
 *
 * This DCMD fetches a few properties of an LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
 *
 * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
 * dcmd.mbox.b[0]      - Whether the DCMD is fired for an LD or a system PD.
 *                       0 = system PD, 1 = LD.
 * dcmd.mbox.s[1]      - TargetID of the LD/system PD.
 * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
 *
 * @instance:		Adapter soft state
 * @sdev:		OS provided scsi device
 *
 * Returns 0 on success, non-zero on failure.
 */
5818 static int
5819 megasas_get_target_prop(struct megasas_instance *instance,
5820 			struct scsi_device *sdev)
5821 {
5822 	int ret;
5823 	struct megasas_cmd *cmd;
5824 	struct megasas_dcmd_frame *dcmd;
5825 	u16 targetId = (sdev->channel % 2) + sdev->id;
5826 
5827 	cmd = megasas_get_cmd(instance);
5828 
5829 	if (!cmd) {
5830 		dev_err(&instance->pdev->dev,
5831 			"Failed to get cmd %s\n", __func__);
5832 		return -ENOMEM;
5833 	}
5834 
5835 	dcmd = &cmd->frame->dcmd;
5836 
5837 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5838 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5839 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5840 
5841 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
5842 	dcmd->cmd = MFI_CMD_DCMD;
5843 	dcmd->cmd_status = 0xFF;
5844 	dcmd->sge_count = 1;
5845 	dcmd->flags = MFI_FRAME_DIR_READ;
5846 	dcmd->timeout = 0;
5847 	dcmd->pad_0 = 0;
5848 	dcmd->data_xfer_len =
5849 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5850 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5851 
5852 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
5853 				 sizeof(struct MR_TARGET_PROPERTIES));
5854 
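	/* Fusion adapters with interrupts enabled can block for completion; otherwise poll */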
5855 	if ((instance->adapter_type != MFI_SERIES) &&
5856 	    !instance->mask_interrupts)
5857 		ret = megasas_issue_blocked_cmd(instance,
5858 						cmd, MFI_IO_TIMEOUT_SECS);
5859 	else
5860 		ret = megasas_issue_polled(instance, cmd);
5861 
5862 	switch (ret) {
5863 	case DCMD_TIMEOUT:
5864 		switch (dcmd_timeout_ocr_possible(instance)) {
5865 		case INITIATE_OCR:
5866 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5867 			megasas_reset_fusion(instance->host,
5868 					     MFI_IO_TIMEOUT_OCR);
5869 			break;
5870 		case KILL_ADAPTER:
5871 			megaraid_sas_kill_hba(instance);
5872 			break;
5873 		case IGNORE_TIMEOUT:
5874 			dev_info(&instance->pdev->dev,
5875 				 "Ignore DCMD timeout: %s %d\n",
5876 				 __func__, __LINE__);
5877 			break;
5878 		}
5879 		break;
5880 
5881 	default:
5882 		megasas_return_cmd(instance, cmd);
5883 	}
5884 	if (ret != DCMD_SUCCESS)
5885 		dev_err(&instance->pdev->dev,
5886 			"return from %s %d return value %d\n",
5887 			__func__, __LINE__, ret);
5888 
5889 	return ret;
5890 }
5891 
5892 /**
5893  * megasas_start_aen -	Subscribes to AEN during driver load time
5894  * @instance:		Adapter soft state
5895  */
5896 static int megasas_start_aen(struct megasas_instance *instance)
5897 {
5898 	struct megasas_evt_log_info eli;
5899 	union megasas_evt_class_locale class_locale;
5900 
5901 	/*
5902 	 * Get the latest sequence number from FW
5903 	 */
5904 	memset(&eli, 0, sizeof(eli));
5905 
5906 	if (megasas_get_seq_num(instance, &eli))
5907 		return -1;
5908 
5909 	/*
5910 	 * Register AEN with FW for latest sequence number plus 1
5911 	 */
5912 	class_locale.members.reserved = 0;
5913 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
5914 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
5915 
5916 	return megasas_register_aen(instance,
5917 			le32_to_cpu(eli.newest_seq_num) + 1,
5918 			class_locale.word);
5919 }
5920 
5921 /**
5922  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
5923  * @instance:		Adapter soft state
5924  */
5925 static int megasas_io_attach(struct megasas_instance *instance)
5926 {
5927 	struct Scsi_Host *host = instance->host;
5928 
5929 	/*
5930 	 * Export parameters required by SCSI mid-layer
5931 	 */
5932 	host->unique_id = instance->unique_id;
5933 	host->can_queue = instance->max_scsi_cmds;
5934 	host->this_id = instance->init_id;
5935 	host->sg_tablesize = instance->max_num_sge;
5936 
5937 	if (instance->fw_support_ieee)
5938 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5939 
5940 	/*
5941 	 * Check if the module parameter value for max_sectors can be used
5942 	 */
5943 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
5944 		instance->max_sectors_per_req = max_sectors;
5945 	else {
5946 		if (max_sectors) {
5947 			if (((instance->pdev->device ==
5948 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5949 				(instance->pdev->device ==
5950 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5951 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
5952 				instance->max_sectors_per_req = max_sectors;
			} else {
				dev_info(&instance->pdev->dev,
					"max_sectors should be > 0 and <= %d (or < 1MB for GEN2 controller)\n",
					instance->max_sectors_per_req);
			}
5958 		}
5959 	}
5960 
5961 	host->max_sectors = instance->max_sectors_per_req;
5962 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5963 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5964 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5965 	host->max_lun = MEGASAS_MAX_LUN;
5966 	host->max_cmd_len = 16;
5967 
5968 	/*
5969 	 * Notify the mid-layer about the new controller
5970 	 */
5971 	if (scsi_add_host(host, &instance->pdev->dev)) {
5972 		dev_err(&instance->pdev->dev,
5973 			"Failed to add host from %s %d\n",
5974 			__func__, __LINE__);
5975 		return -ENODEV;
5976 	}
5977 
5978 	return 0;
5979 }
5980 
5981 /**
5982  * megasas_set_dma_mask -	Set DMA mask for supported controllers
5983  *
5984  * @instance:		Adapter soft state
5985  * Description:
5986  *
 * For Ventura, the driver/FW will operate with 64-bit DMA addresses.
 *
 * For Invader:
 *	By default, the driver/FW will operate with 32-bit DMA addresses
 *	for consistent DMA mapping, but if the 32-bit consistent DMA mask
 *	fails, the driver will try a 64-bit consistent mask, provided the
 *	FW is truly 64-bit DMA capable.
 *
 * For older controllers (Thunderbolt and MFI based adapters):
 *	the driver/FW will operate with 32-bit consistent DMA addresses.
5997  */
5998 static int
5999 megasas_set_dma_mask(struct megasas_instance *instance)
6000 {
6001 	u64 consistent_mask;
6002 	struct pci_dev *pdev;
6003 	u32 scratch_pad_2;
6004 
6005 	pdev = instance->pdev;
6006 	consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
6007 				DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
6008 
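	/* Try a 64-bit streaming mask first, falling back to 32-bit when the platform or FW cannot handle it */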
6009 	if (IS_DMA64) {
6010 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6011 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6012 			goto fail_set_dma_mask;
6013 
6014 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
6015 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6016 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6017 			/*
6018 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6019 			 * for FW capable of handling 64 bit DMA.
6020 			 */
6021 			scratch_pad_2 = readl
6022 				(&instance->reg_set->outbound_scratch_pad_2);
6023 
6024 			if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6025 				goto fail_set_dma_mask;
6026 			else if (dma_set_mask_and_coherent(&pdev->dev,
6027 							   DMA_BIT_MASK(64)))
6028 				goto fail_set_dma_mask;
6029 		}
6030 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6031 		goto fail_set_dma_mask;
6032 
6033 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6034 		instance->consistent_mask_64bit = false;
6035 	else
6036 		instance->consistent_mask_64bit = true;
6037 
6038 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6039 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
6040 		 (instance->consistent_mask_64bit ? "64" : "32"));
6041 
6042 	return 0;
6043 
6044 fail_set_dma_mask:
6045 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6046 	return -1;
6047 
6048 }
6049 
6050 /*
6051  * megasas_set_adapter_type -	Set adapter type.
 *				Supported controllers can be divided into
 *				4 categories: enum MR_ADAPTER_TYPE {
6054  *							MFI_SERIES = 1,
6055  *							THUNDERBOLT_SERIES = 2,
6056  *							INVADER_SERIES = 3,
6057  *							VENTURA_SERIES = 4,
6058  *						};
6059  * @instance:			Adapter soft state
6060  * return:			void
6061  */
6062 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6063 {
6064 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6065 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6066 		instance->adapter_type = MFI_SERIES;
6067 	} else {
6068 		switch (instance->pdev->device) {
6069 		case PCI_DEVICE_ID_LSI_VENTURA:
6070 		case PCI_DEVICE_ID_LSI_CRUSADER:
6071 		case PCI_DEVICE_ID_LSI_HARPOON:
6072 		case PCI_DEVICE_ID_LSI_TOMCAT:
6073 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6074 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6075 			instance->adapter_type = VENTURA_SERIES;
6076 			break;
6077 		case PCI_DEVICE_ID_LSI_FUSION:
6078 		case PCI_DEVICE_ID_LSI_PLASMA:
6079 			instance->adapter_type = THUNDERBOLT_SERIES;
6080 			break;
6081 		case PCI_DEVICE_ID_LSI_INVADER:
6082 		case PCI_DEVICE_ID_LSI_INTRUDER:
6083 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6084 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6085 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6086 		case PCI_DEVICE_ID_LSI_FURY:
6087 			instance->adapter_type = INVADER_SERIES;
6088 			break;
6089 		default: /* For all other supported controllers */
6090 			instance->adapter_type = MFI_SERIES;
6091 			break;
6092 		}
6093 	}
6094 }
6095 
6096 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6097 {
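	/* MFI adapters track the reply queue with a single producer/consumer index pair */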
6098 	instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6099 						  &instance->producer_h);
6100 	instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6101 						  &instance->consumer_h);
6102 
6103 	if (!instance->producer || !instance->consumer) {
6104 		dev_err(&instance->pdev->dev,
6105 			"Failed to allocate memory for producer, consumer\n");
6106 		return -1;
6107 	}
6108 
6109 	*instance->producer = 0;
6110 	*instance->consumer = 0;
6111 	return 0;
6112 }
6113 
6114 /**
6115  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6116  *				structures which are not common across MFI
6117  *				adapters and fusion adapters.
6118  *				For MFI based adapters, allocate producer and
6119  *				consumer buffers. For fusion adapters, allocate
6120  *				memory for fusion context.
6121  * @instance:			Adapter soft state
6122  * return:			0 for SUCCESS
6123  */
6124 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6125 {
6126 	switch (instance->adapter_type) {
6127 	case MFI_SERIES:
6128 		if (megasas_alloc_mfi_ctrl_mem(instance))
6129 			return -ENOMEM;
6130 		break;
6131 	case VENTURA_SERIES:
6132 	case THUNDERBOLT_SERIES:
6133 	case INVADER_SERIES:
6134 		if (megasas_alloc_fusion_context(instance))
6135 			return -ENOMEM;
6136 		break;
6137 	}
6138 
6139 	return 0;
6140 }
6141 
6142 /*
6143  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6144  *				producer, consumer buffers for MFI adapters
6145  *
 * @instance:			Adapter soft instance
6147  *
6148  */
6149 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6150 {
6151 	if (instance->adapter_type == MFI_SERIES) {
6152 		if (instance->producer)
6153 			pci_free_consistent(instance->pdev, sizeof(u32),
6154 					    instance->producer,
6155 					    instance->producer_h);
6156 		if (instance->consumer)
6157 			pci_free_consistent(instance->pdev, sizeof(u32),
6158 					    instance->consumer,
6159 					    instance->consumer_h);
6160 	} else {
6161 		megasas_free_fusion_context(instance);
6162 	}
6163 }
6164 
6165 /**
6166  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6167  *					driver load time
6168  *
 * @instance:				Adapter soft instance
 * @return:				0 for SUCCESS
6171  */
6172 static inline
6173 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6174 {
6175 	struct pci_dev *pdev = instance->pdev;
6176 	struct fusion_context *fusion = instance->ctrl_context;
6177 
6178 	instance->evt_detail =
6179 		pci_alloc_consistent(pdev,
6180 				     sizeof(struct megasas_evt_detail),
6181 				     &instance->evt_detail_h);
6182 
6183 	if (!instance->evt_detail) {
6184 		dev_err(&instance->pdev->dev,
6185 			"Failed to allocate event detail buffer\n");
6186 		return -ENOMEM;
6187 	}
6188 
6189 	if (fusion) {
6190 		fusion->ioc_init_request =
6191 			dma_alloc_coherent(&pdev->dev,
6192 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
6193 					   &fusion->ioc_init_request_phys,
6194 					   GFP_KERNEL);
6195 
6196 		if (!fusion->ioc_init_request) {
6197 			dev_err(&pdev->dev,
				"Failed to allocate ioc init request buffer\n");
6199 			return -ENOMEM;
6200 		}
6201 	}
6202 
6203 	instance->pd_list_buf =
6204 		pci_alloc_consistent(pdev,
6205 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6206 				     &instance->pd_list_buf_h);
6207 
6208 	if (!instance->pd_list_buf) {
6209 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6210 		return -ENOMEM;
6211 	}
6212 
6213 	instance->ctrl_info_buf =
6214 		pci_alloc_consistent(pdev,
6215 				     sizeof(struct megasas_ctrl_info),
6216 				     &instance->ctrl_info_buf_h);
6217 
6218 	if (!instance->ctrl_info_buf) {
6219 		dev_err(&pdev->dev,
6220 			"Failed to allocate controller info buffer\n");
6221 		return -ENOMEM;
6222 	}
6223 
6224 	instance->ld_list_buf =
6225 		pci_alloc_consistent(pdev,
6226 				     sizeof(struct MR_LD_LIST),
6227 				     &instance->ld_list_buf_h);
6228 
6229 	if (!instance->ld_list_buf) {
6230 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6231 		return -ENOMEM;
6232 	}
6233 
6234 	instance->ld_targetid_list_buf =
6235 		pci_alloc_consistent(pdev,
6236 				     sizeof(struct MR_LD_TARGETID_LIST),
6237 				     &instance->ld_targetid_list_buf_h);
6238 
6239 	if (!instance->ld_targetid_list_buf) {
6240 		dev_err(&pdev->dev,
6241 			"Failed to allocate LD targetid list buffer\n");
6242 		return -ENOMEM;
6243 	}
6244 
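	/*
	 * The buffers below are optional; they are skipped in the kdump
	 * (reset_devices) case and allocation failures are only logged.
	 */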
6245 	if (!reset_devices) {
6246 		instance->system_info_buf =
6247 			pci_alloc_consistent(pdev,
6248 					     sizeof(struct MR_DRV_SYSTEM_INFO),
6249 					     &instance->system_info_h);
6250 		instance->pd_info =
6251 			pci_alloc_consistent(pdev,
6252 					     sizeof(struct MR_PD_INFO),
6253 					     &instance->pd_info_h);
6254 		instance->tgt_prop =
6255 			pci_alloc_consistent(pdev,
6256 					     sizeof(struct MR_TARGET_PROPERTIES),
6257 					     &instance->tgt_prop_h);
6258 		instance->crash_dump_buf =
6259 			pci_alloc_consistent(pdev,
6260 					     CRASH_DMA_BUF_SIZE,
6261 					     &instance->crash_dump_h);
6262 
6263 		if (!instance->system_info_buf)
6264 			dev_err(&instance->pdev->dev,
6265 				"Failed to allocate system info buffer\n");
6266 
6267 		if (!instance->pd_info)
6268 			dev_err(&instance->pdev->dev,
6269 				"Failed to allocate pd_info buffer\n");
6270 
6271 		if (!instance->tgt_prop)
6272 			dev_err(&instance->pdev->dev,
6273 				"Failed to allocate tgt_prop buffer\n");
6274 
6275 		if (!instance->crash_dump_buf)
6276 			dev_err(&instance->pdev->dev,
6277 				"Failed to allocate crash dump buffer\n");
6278 	}
6279 
6280 	return 0;
6281 }
6282 
6283 /*
6284  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
6285  *					during driver load time
6286  *
 * @instance:				Adapter soft instance
6288  *
6289  */
6290 static inline
6291 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6292 {
6293 	struct pci_dev *pdev = instance->pdev;
6294 	struct fusion_context *fusion = instance->ctrl_context;
6295 
6296 	if (instance->evt_detail)
6297 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6298 				    instance->evt_detail,
6299 				    instance->evt_detail_h);
6300 
6301 	if (fusion && fusion->ioc_init_request)
6302 		dma_free_coherent(&pdev->dev,
6303 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
6304 				  fusion->ioc_init_request,
6305 				  fusion->ioc_init_request_phys);
6306 
6307 	if (instance->pd_list_buf)
6308 		pci_free_consistent(pdev,
6309 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6310 				    instance->pd_list_buf,
6311 				    instance->pd_list_buf_h);
6312 
6313 	if (instance->ld_list_buf)
6314 		pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
6315 				    instance->ld_list_buf,
6316 				    instance->ld_list_buf_h);
6317 
6318 	if (instance->ld_targetid_list_buf)
6319 		pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
6320 				    instance->ld_targetid_list_buf,
6321 				    instance->ld_targetid_list_buf_h);
6322 
6323 	if (instance->ctrl_info_buf)
6324 		pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
6325 				    instance->ctrl_info_buf,
6326 				    instance->ctrl_info_buf_h);
6327 
6328 	if (instance->system_info_buf)
6329 		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6330 				    instance->system_info_buf,
6331 				    instance->system_info_h);
6332 
6333 	if (instance->pd_info)
6334 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6335 				    instance->pd_info, instance->pd_info_h);
6336 
6337 	if (instance->tgt_prop)
6338 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6339 				    instance->tgt_prop, instance->tgt_prop_h);
6340 
6341 	if (instance->crash_dump_buf)
6342 		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6343 				    instance->crash_dump_buf,
6344 				    instance->crash_dump_h);
6345 }
6346 
6347 /*
6348  * megasas_init_ctrl_params -		Initialize controller's instance
6349  *					parameters before FW init
 * @instance:				Adapter soft instance
 * @return:				void
6352  */
6353 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6354 {
6355 	instance->fw_crash_state = UNAVAILABLE;
6356 
6357 	megasas_poll_wait_aen = 0;
6358 	instance->issuepend_done = 1;
6359 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6360 
6361 	/*
6362 	 * Initialize locks and queues
6363 	 */
6364 	INIT_LIST_HEAD(&instance->cmd_pool);
6365 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6366 
6367 	atomic_set(&instance->fw_outstanding, 0);
6368 
6369 	init_waitqueue_head(&instance->int_cmd_wait_q);
6370 	init_waitqueue_head(&instance->abort_cmd_wait_q);
6371 
6372 	spin_lock_init(&instance->crashdump_lock);
6373 	spin_lock_init(&instance->mfi_pool_lock);
6374 	spin_lock_init(&instance->hba_lock);
6375 	spin_lock_init(&instance->stream_lock);
6376 	spin_lock_init(&instance->completion_lock);
6377 
6378 	mutex_init(&instance->reset_mutex);
6379 
6380 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6381 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6382 		instance->flag_ieee = 1;
6383 
6384 	megasas_dbg_lvl = 0;
6385 	instance->flag = 0;
6386 	instance->unload = 1;
6387 	instance->last_time = 0;
6388 	instance->disableOnlineCtrlReset = 1;
6389 	instance->UnevenSpanSupport = 0;
6390 
6391 	if (instance->adapter_type != MFI_SERIES) {
6392 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6393 		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6394 	} else {
6395 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6396 	}
6397 }
6398 
6399 /**
6400  * megasas_probe_one -	PCI hotplug entry point
6401  * @pdev:		PCI device structure
6402  * @id:			PCI ids of supported hotplugged adapter
6403  */
6404 static int megasas_probe_one(struct pci_dev *pdev,
6405 			     const struct pci_device_id *id)
6406 {
6407 	int rval, pos;
6408 	struct Scsi_Host *host;
6409 	struct megasas_instance *instance;
6410 	u16 control = 0;
6411 
6412 	/* Reset MSI-X in the kdump kernel */
6413 	if (reset_devices) {
6414 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6415 		if (pos) {
6416 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6417 					     &control);
6418 			if (control & PCI_MSIX_FLAGS_ENABLE) {
6419 				dev_info(&pdev->dev, "resetting MSI-X\n");
6420 				pci_write_config_word(pdev,
6421 						      pos + PCI_MSIX_FLAGS,
6422 						      control &
6423 						      ~PCI_MSIX_FLAGS_ENABLE);
6424 			}
6425 		}
6426 	}
6427 
6428 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
6430 	 */
6431 	rval = pci_enable_device_mem(pdev);
6432 
	if (rval)
		return rval;
6436 
6437 	pci_set_master(pdev);
6438 
6439 	host = scsi_host_alloc(&megasas_template,
6440 			       sizeof(struct megasas_instance));
6441 
6442 	if (!host) {
6443 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6444 		goto fail_alloc_instance;
6445 	}
6446 
6447 	instance = (struct megasas_instance *)host->hostdata;
6448 	memset(instance, 0, sizeof(*instance));
6449 	atomic_set(&instance->fw_reset_no_pci_access, 0);
6450 
6451 	/*
6452 	 * Initialize PCI related and misc parameters
6453 	 */
6454 	instance->pdev = pdev;
6455 	instance->host = host;
6456 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6457 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6458 
6459 	megasas_set_adapter_type(instance);
6460 
6461 	/*
6462 	 * Initialize MFI Firmware
6463 	 */
6464 	if (megasas_init_fw(instance))
6465 		goto fail_init_mfi;
6466 
6467 	if (instance->requestorId) {
6468 		if (instance->PlasmaFW111) {
6469 			instance->vf_affiliation_111 =
6470 				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6471 						     &instance->vf_affiliation_111_h);
6472 			if (!instance->vf_affiliation_111)
6473 				dev_warn(&pdev->dev, "Can't allocate "
6474 				       "memory for VF affiliation buffer\n");
6475 		} else {
6476 			instance->vf_affiliation =
6477 				pci_alloc_consistent(pdev,
6478 						     (MAX_LOGICAL_DRIVES + 1) *
6479 						     sizeof(struct MR_LD_VF_AFFILIATION),
6480 						     &instance->vf_affiliation_h);
6481 			if (!instance->vf_affiliation)
6482 				dev_warn(&pdev->dev, "Can't allocate "
6483 				       "memory for VF affiliation buffer\n");
6484 		}
6485 	}
6486 
6487 	/*
6488 	 * Store instance in PCI softstate
6489 	 */
6490 	pci_set_drvdata(pdev, instance);
6491 
6492 	/*
6493 	 * Add this controller to megasas_mgmt_info structure so that it
6494 	 * can be exported to management applications
6495 	 */
6496 	megasas_mgmt_info.count++;
6497 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6498 	megasas_mgmt_info.max_index++;
6499 
6500 	/*
6501 	 * Register with SCSI mid-layer
6502 	 */
6503 	if (megasas_io_attach(instance))
6504 		goto fail_io_attach;
6505 
6506 	instance->unload = 0;
6507 	/*
6508 	 * Trigger SCSI to scan our drives
6509 	 */
6510 	scsi_scan_host(host);
6511 
6512 	/*
6513 	 * Initiate AEN (Asynchronous Event Notification)
6514 	 */
6515 	if (megasas_start_aen(instance)) {
6516 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6517 		goto fail_start_aen;
6518 	}
6519 
6520 	/* Get current SR-IOV LD/VF affiliation */
6521 	if (instance->requestorId)
6522 		megasas_get_ld_vf_affiliation(instance, 1);
6523 
6524 	return 0;
6525 
6526 fail_start_aen:
6527 fail_io_attach:
6528 	megasas_mgmt_info.count--;
6529 	megasas_mgmt_info.max_index--;
6530 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6531 
6532 	instance->instancet->disable_intr(instance);
6533 	megasas_destroy_irqs(instance);
6534 
6535 	if (instance->adapter_type != MFI_SERIES)
6536 		megasas_release_fusion(instance);
6537 	else
6538 		megasas_release_mfi(instance);
6539 	if (instance->msix_vectors)
6540 		pci_free_irq_vectors(instance->pdev);
6541 fail_init_mfi:
6542 	scsi_host_put(host);
6543 
6544 fail_alloc_instance:
6545 	pci_disable_device(pdev);
6546 
6547 	return -ENODEV;
6548 }
6549 
6550 /**
6551  * megasas_flush_cache -	Requests FW to flush all its caches
6552  * @instance:			Adapter soft state
6553  */
6554 static void megasas_flush_cache(struct megasas_instance *instance)
6555 {
6556 	struct megasas_cmd *cmd;
6557 	struct megasas_dcmd_frame *dcmd;
6558 
6559 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6560 		return;
6561 
6562 	cmd = megasas_get_cmd(instance);
6563 
6564 	if (!cmd)
6565 		return;
6566 
6567 	dcmd = &cmd->frame->dcmd;
6568 
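	/* Build a MR_DCMD_CTRL_CACHE_FLUSH DCMD with no data transfer */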
6569 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6570 
6571 	dcmd->cmd = MFI_CMD_DCMD;
6572 	dcmd->cmd_status = 0x0;
6573 	dcmd->sge_count = 0;
6574 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6575 	dcmd->timeout = 0;
6576 	dcmd->pad_0 = 0;
6577 	dcmd->data_xfer_len = 0;
6578 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6579 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6580 
6581 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6582 			!= DCMD_SUCCESS) {
6583 		dev_err(&instance->pdev->dev,
6584 			"return from %s %d\n", __func__, __LINE__);
6585 		return;
6586 	}
6587 
6588 	megasas_return_cmd(instance, cmd);
6589 }
6590 
6591 /**
6592  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
6593  * @instance:				Adapter soft state
6594  * @opcode:				Shutdown/Hibernate
6595  */
6596 static void megasas_shutdown_controller(struct megasas_instance *instance,
6597 					u32 opcode)
6598 {
6599 	struct megasas_cmd *cmd;
6600 	struct megasas_dcmd_frame *dcmd;
6601 
6602 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6603 		return;
6604 
6605 	cmd = megasas_get_cmd(instance);
6606 
6607 	if (!cmd)
6608 		return;
6609 
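	/* Abort any outstanding AEN, map update and JBOD sequence DCMDs before shutdown */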
6610 	if (instance->aen_cmd)
6611 		megasas_issue_blocked_abort_cmd(instance,
6612 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6613 	if (instance->map_update_cmd)
6614 		megasas_issue_blocked_abort_cmd(instance,
6615 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6616 	if (instance->jbod_seq_cmd)
6617 		megasas_issue_blocked_abort_cmd(instance,
6618 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6619 
6620 	dcmd = &cmd->frame->dcmd;
6621 
6622 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6623 
6624 	dcmd->cmd = MFI_CMD_DCMD;
6625 	dcmd->cmd_status = 0x0;
6626 	dcmd->sge_count = 0;
6627 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6628 	dcmd->timeout = 0;
6629 	dcmd->pad_0 = 0;
6630 	dcmd->data_xfer_len = 0;
6631 	dcmd->opcode = cpu_to_le32(opcode);
6632 
6633 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6634 			!= DCMD_SUCCESS) {
6635 		dev_err(&instance->pdev->dev,
6636 			"return from %s %d\n", __func__, __LINE__);
6637 		return;
6638 	}
6639 
6640 	megasas_return_cmd(instance, cmd);
6641 }
6642 
6643 #ifdef CONFIG_PM
6644 /**
6645  * megasas_suspend -	driver suspend entry point
6646  * @pdev:		PCI device structure
6647  * @state:		PCI power state to suspend routine
6648  */
6649 static int
6650 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6651 {
6652 	struct Scsi_Host *host;
6653 	struct megasas_instance *instance;
6654 
6655 	instance = pci_get_drvdata(pdev);
6656 	host = instance->host;
6657 	instance->unload = 1;
6658 
6659 	/* Shutdown SR-IOV heartbeat timer */
6660 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6661 		del_timer_sync(&instance->sriov_heartbeat_timer);
6662 
6663 	megasas_flush_cache(instance);
6664 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6665 
6666 	/* cancel the delayed work if this work still in queue */
6667 	if (instance->ev != NULL) {
6668 		struct megasas_aen_event *ev = instance->ev;
6669 		cancel_delayed_work_sync(&ev->hotplug_work);
6670 		instance->ev = NULL;
6671 	}
6672 
6673 	tasklet_kill(&instance->isr_tasklet);
6674 
6675 	pci_set_drvdata(instance->pdev, instance);
6676 	instance->instancet->disable_intr(instance);
6677 
6678 	megasas_destroy_irqs(instance);
6679 
6680 	if (instance->msix_vectors)
6681 		pci_free_irq_vectors(instance->pdev);
6682 
6683 	pci_save_state(pdev);
6684 	pci_disable_device(pdev);
6685 
6686 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
6687 
6688 	return 0;
6689 }
6690 
6691 /**
6692  * megasas_resume-      driver resume entry point
6693  * @pdev:               PCI device structure
6694  */
6695 static int
6696 megasas_resume(struct pci_dev *pdev)
6697 {
6698 	int rval;
6699 	struct Scsi_Host *host;
6700 	struct megasas_instance *instance;
6701 	int irq_flags = PCI_IRQ_LEGACY;
6702 
6703 	instance = pci_get_drvdata(pdev);
6704 	host = instance->host;
6705 	pci_set_power_state(pdev, PCI_D0);
6706 	pci_enable_wake(pdev, PCI_D0, 0);
6707 	pci_restore_state(pdev);
6708 
6709 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
6711 	 */
6712 	rval = pci_enable_device_mem(pdev);
6713 
6714 	if (rval) {
6715 		dev_err(&pdev->dev, "Enable device failed\n");
6716 		return rval;
6717 	}
6718 
6719 	pci_set_master(pdev);
6720 
6721 	/*
6722 	 * We expect the FW state to be READY
6723 	 */
6724 	if (megasas_transition_to_ready(instance, 0))
6725 		goto fail_ready_state;
6726 
6727 	if (megasas_set_dma_mask(instance))
6728 		goto fail_set_dma_mask;
6729 
6730 	/*
6731 	 * Initialize MFI Firmware
6732 	 */
6733 
6734 	atomic_set(&instance->fw_outstanding, 0);
6735 	atomic_set(&instance->ldio_outstanding, 0);
6736 
6737 	/* Now re-enable MSI-X */
6738 	if (instance->msix_vectors) {
6739 		irq_flags = PCI_IRQ_MSIX;
6740 		if (smp_affinity_enable)
6741 			irq_flags |= PCI_IRQ_AFFINITY;
6742 	}
6743 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
6744 				     instance->msix_vectors ?
6745 				     instance->msix_vectors : 1, irq_flags);
6746 	if (rval < 0)
6747 		goto fail_reenable_msix;
6748 
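	/* Re-initialize the FW interface: IOC INIT for fusion adapters, INIT MFI for legacy */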
6749 	if (instance->adapter_type != MFI_SERIES) {
6750 		megasas_reset_reply_desc(instance);
6751 		if (megasas_ioc_init_fusion(instance)) {
6752 			megasas_free_cmds(instance);
6753 			megasas_free_cmds_fusion(instance);
6754 			goto fail_init_mfi;
6755 		}
6756 		if (!megasas_get_map_info(instance))
6757 			megasas_sync_map_info(instance);
6758 	} else {
6759 		*instance->producer = 0;
6760 		*instance->consumer = 0;
6761 		if (megasas_issue_init_mfi(instance))
6762 			goto fail_init_mfi;
6763 	}
6764 
6765 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6766 		     (unsigned long)instance);
6767 
6768 	if (instance->msix_vectors ?
6769 			megasas_setup_irqs_msix(instance, 0) :
6770 			megasas_setup_irqs_ioapic(instance))
6771 		goto fail_init_mfi;
6772 
6773 	/* Re-launch SR-IOV heartbeat timer */
6774 	if (instance->requestorId) {
6775 		if (!megasas_sriov_start_heartbeat(instance, 0))
6776 			megasas_start_timer(instance);
6777 		else {
6778 			instance->skip_heartbeat_timer_del = 1;
6779 			goto fail_init_mfi;
6780 		}
6781 	}
6782 
6783 	instance->instancet->enable_intr(instance);
6784 	megasas_setup_jbod_map(instance);
6785 	instance->unload = 0;
6786 
6787 	/*
6788 	 * Initiate AEN (Asynchronous Event Notification)
6789 	 */
6790 	if (megasas_start_aen(instance))
6791 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
6792 
6793 	return 0;
6794 
6795 fail_init_mfi:
6796 	megasas_free_ctrl_dma_buffers(instance);
6797 	megasas_free_ctrl_mem(instance);
6798 	scsi_host_put(host);
6799 
6800 fail_reenable_msix:
6801 fail_set_dma_mask:
6802 fail_ready_state:
6803 
6804 	pci_disable_device(pdev);
6805 
6806 	return -ENODEV;
6807 }
6808 #else
6809 #define megasas_suspend	NULL
6810 #define megasas_resume	NULL
6811 #endif
6812 
6813 static inline int
6814 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6815 {
6816 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6817 	int i;
6818 
6819 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6820 		return 1;
6821 
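	/* Poll the recovery state once a second until the HBA is operational or the wait expires */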
6822 	for (i = 0; i < wait_time; i++) {
6823 		if (atomic_read(&instance->adprecovery)	== MEGASAS_HBA_OPERATIONAL)
6824 			break;
6825 
6826 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6827 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6828 
6829 		msleep(1000);
6830 	}
6831 
6832 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6833 		dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6834 			__func__);
6835 		return 1;
6836 	}
6837 
6838 	return 0;
6839 }
6840 
6841 /**
 * megasas_detach_one -	PCI hot unplug entry point
6843  * @pdev:		PCI device structure
6844  */
6845 static void megasas_detach_one(struct pci_dev *pdev)
6846 {
6847 	int i;
6848 	struct Scsi_Host *host;
6849 	struct megasas_instance *instance;
6850 	struct fusion_context *fusion;
6851 	u32 pd_seq_map_sz;
6852 
6853 	instance = pci_get_drvdata(pdev);
6854 	host = instance->host;
6855 	fusion = instance->ctrl_context;
6856 
6857 	/* Shutdown SR-IOV heartbeat timer */
6858 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6859 		del_timer_sync(&instance->sriov_heartbeat_timer);
6860 
6861 	if (instance->fw_crash_state != UNAVAILABLE)
6862 		megasas_free_host_crash_buffer(instance);
6863 	scsi_remove_host(instance->host);
6864 	instance->unload = 1;
6865 
6866 	if (megasas_wait_for_adapter_operational(instance))
6867 		goto skip_firing_dcmds;
6868 
6869 	megasas_flush_cache(instance);
6870 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6871 
6872 skip_firing_dcmds:
6873 	/* cancel the delayed work if this work still in queue*/
6874 	if (instance->ev != NULL) {
6875 		struct megasas_aen_event *ev = instance->ev;
6876 		cancel_delayed_work_sync(&ev->hotplug_work);
6877 		instance->ev = NULL;
6878 	}
6879 
6880 	/* cancel all wait events */
6881 	wake_up_all(&instance->int_cmd_wait_q);
6882 
6883 	tasklet_kill(&instance->isr_tasklet);
6884 
6885 	/*
6886 	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array.
6888 	 */
6889 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6890 		if (megasas_mgmt_info.instance[i] == instance) {
6891 			megasas_mgmt_info.count--;
6892 			megasas_mgmt_info.instance[i] = NULL;
6893 
6894 			break;
6895 		}
6896 	}
6897 
6898 	instance->instancet->disable_intr(instance);
6899 
6900 	megasas_destroy_irqs(instance);
6901 
6902 	if (instance->msix_vectors)
6903 		pci_free_irq_vectors(instance->pdev);
6904 
6905 	if (instance->adapter_type == VENTURA_SERIES) {
6906 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6907 			kfree(fusion->stream_detect_by_ld[i]);
6908 		kfree(fusion->stream_detect_by_ld);
6909 		fusion->stream_detect_by_ld = NULL;
6910 	}
6911 
6912 
6913 	if (instance->adapter_type != MFI_SERIES) {
6914 		megasas_release_fusion(instance);
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
				(MAX_PHYSICAL_DEVICES - 1));
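		/* Free both copies of the RAID maps, driver maps and JBOD sequence sync buffers */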
6918 		for (i = 0; i < 2 ; i++) {
6919 			if (fusion->ld_map[i])
6920 				dma_free_coherent(&instance->pdev->dev,
6921 						  fusion->max_map_sz,
6922 						  fusion->ld_map[i],
6923 						  fusion->ld_map_phys[i]);
6924 			if (fusion->ld_drv_map[i]) {
6925 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
6926 					vfree(fusion->ld_drv_map[i]);
6927 				else
6928 					free_pages((ulong)fusion->ld_drv_map[i],
6929 						   fusion->drv_map_pages);
6930 			}
6931 
6932 			if (fusion->pd_seq_sync[i])
6933 				dma_free_coherent(&instance->pdev->dev,
6934 					pd_seq_map_sz,
6935 					fusion->pd_seq_sync[i],
6936 					fusion->pd_seq_phys[i]);
6937 		}
6938 	} else {
6939 		megasas_release_mfi(instance);
6940 	}
6941 
6942 	if (instance->vf_affiliation)
6943 		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6944 				    sizeof(struct MR_LD_VF_AFFILIATION),
6945 				    instance->vf_affiliation,
6946 				    instance->vf_affiliation_h);
6947 
6948 	if (instance->vf_affiliation_111)
6949 		pci_free_consistent(pdev,
6950 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
6951 				    instance->vf_affiliation_111,
6952 				    instance->vf_affiliation_111_h);
6953 
6954 	if (instance->hb_host_mem)
6955 		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6956 				    instance->hb_host_mem,
6957 				    instance->hb_host_mem_h);
6958 
6959 	megasas_free_ctrl_dma_buffers(instance);
6960 
6961 	megasas_free_ctrl_mem(instance);
6962 
6963 	scsi_host_put(host);
6964 
6965 	pci_disable_device(pdev);
6966 }
6967 
6968 /**
6969  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
6971  */
6972 static void megasas_shutdown(struct pci_dev *pdev)
6973 {
6974 	struct megasas_instance *instance = pci_get_drvdata(pdev);
6975 
6976 	instance->unload = 1;
6977 
6978 	if (megasas_wait_for_adapter_operational(instance))
6979 		goto skip_firing_dcmds;
6980 
6981 	megasas_flush_cache(instance);
6982 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6983 
6984 skip_firing_dcmds:
6985 	instance->instancet->disable_intr(instance);
6986 	megasas_destroy_irqs(instance);
6987 
6988 	if (instance->msix_vectors)
6989 		pci_free_irq_vectors(instance->pdev);
6990 }
6991 
6992 /**
6993  * megasas_mgmt_open -	char node "open" entry point
6994  */
6995 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6996 {
6997 	/*
6998 	 * Allow only those users with admin rights
6999 	 */
7000 	if (!capable(CAP_SYS_ADMIN))
7001 		return -EACCES;
7002 
7003 	return 0;
7004 }
7005 
7006 /**
7007  * megasas_mgmt_fasync -	Async notifier registration from applications
7008  *
7009  * This function adds the calling process to a driver global queue. When an
7010  * event occurs, SIGIO will be sent to all processes in this queue.
7011  */
7012 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7013 {
7014 	int rc;
7015 
7016 	mutex_lock(&megasas_async_queue_mutex);
7017 
7018 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7019 
7020 	mutex_unlock(&megasas_async_queue_mutex);
7021 
7022 	if (rc >= 0) {
		/* Used as a sanity check when we later receive an ioctl */
7024 		filep->private_data = filep;
7025 		return 0;
7026 	}
7027 
7028 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7029 
7030 	return rc;
7031 }
7032 
/**
 * megasas_mgmt_poll -	char node "poll" entry point
 */
7036 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7037 {
7038 	__poll_t mask;
7039 	unsigned long flags;
7040 
7041 	poll_wait(file, &megasas_poll_wait, wait);
7042 	spin_lock_irqsave(&poll_aen_lock, flags);
7043 	if (megasas_poll_wait_aen)
7044 		mask = (EPOLLIN | EPOLLRDNORM);
7045 	else
7046 		mask = 0;
7047 	megasas_poll_wait_aen = 0;
7048 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7049 	return mask;
7050 }
7051 
/**
 * megasas_set_crash_dump_params_ioctl -	Send CRASH_DUMP_MODE DCMD to all controllers
 * @cmd:	MFI command frame
 */
7058 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7059 {
7060 	struct megasas_instance *local_instance;
7061 	int i, error = 0;
7062 	int crash_support;
7063 
7064 	crash_support = cmd->frame->dcmd.mbox.w[0];
7065 
7066 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7067 		local_instance = megasas_mgmt_info.instance[i];
7068 		if (local_instance && local_instance->crash_dump_drv_support) {
7069 			if ((atomic_read(&local_instance->adprecovery) ==
7070 				MEGASAS_HBA_OPERATIONAL) &&
7071 				!megasas_set_crash_dump_params(local_instance,
7072 					crash_support)) {
7073 				local_instance->crash_dump_app_support =
7074 					crash_support;
7075 				dev_info(&local_instance->pdev->dev,
7076 					"Application firmware crash "
					"dump mode set successfully\n");
7078 				error = 0;
7079 			} else {
7080 				dev_info(&local_instance->pdev->dev,
7081 					"Application firmware crash "
7082 					"dump mode set failed\n");
7083 				error = -1;
7084 			}
7085 		}
7086 	}
7087 	return error;
7088 }
7089 
7090 /**
7091  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
7092  * @instance:			Adapter soft state
7093  * @argp:			User's ioctl packet
7094  */
7095 static int
7096 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7097 		      struct megasas_iocpacket __user * user_ioc,
7098 		      struct megasas_iocpacket *ioc)
7099 {
7100 	struct megasas_sge64 *kern_sge64 = NULL;
7101 	struct megasas_sge32 *kern_sge32 = NULL;
7102 	struct megasas_cmd *cmd;
7103 	void *kbuff_arr[MAX_IOCTL_SGE];
7104 	dma_addr_t buf_handle = 0;
7105 	int error = 0, i;
7106 	void *sense = NULL;
7107 	dma_addr_t sense_handle;
7108 	unsigned long *sense_ptr;
7109 	u32 opcode = 0;
7110 
7111 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
7112 
7113 	if (ioc->sge_count > MAX_IOCTL_SGE) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
7115 		       ioc->sge_count, MAX_IOCTL_SGE);
7116 		return -EINVAL;
7117 	}
7118 
7119 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7120 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7121 	    !instance->support_nvme_passthru)) {
7122 		dev_err(&instance->pdev->dev,
7123 			"Received invalid ioctl command 0x%x\n",
7124 			ioc->frame.hdr.cmd);
7125 		return -ENOTSUPP;
7126 	}
7127 
7128 	cmd = megasas_get_cmd(instance);
7129 	if (!cmd) {
7130 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7131 		return -ENOMEM;
7132 	}
7133 
	/*
	 * The user's IOCTL packet contains at most 2 frames. Copy them
	 * into our cmd's frames. cmd->frame's context is overwritten by
	 * the copy from the user's frames, so restore it separately.
	 */
7140 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7141 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7142 	cmd->frame->hdr.pad_0 = 0;
7143 
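	/*
	 * The SGL built below uses the legacy 32-bit/64-bit SGE format
	 * (megasas_sge32/megasas_sge64), so clear any IEEE SGL flag the
	 * application may have set in the frame.
	 */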
7144 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7145 
7146 	if (instance->consistent_mask_64bit)
7147 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7148 				       MFI_FRAME_SENSE64));
7149 	else
7150 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
7151 					       MFI_FRAME_SENSE64));
7152 
7153 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7154 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7155 
7156 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7157 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
7158 			megasas_return_cmd(instance, cmd);
7159 			return -1;
7160 		}
7161 	}
7162 
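	/*
	 * The crash dump mode DCMD is handled by the driver itself: the
	 * requested mode is propagated to every controller rather than
	 * being issued only on this adapter.
	 */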
7163 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
7164 		error = megasas_set_crash_dump_params_ioctl(cmd);
7165 		megasas_return_cmd(instance, cmd);
7166 		return error;
7167 	}
7168 
	/*
	 * The management interface between applications and the fw uses
	 * MFI frames. E.g, RAID configuration changes, LD property changes
	 * etc. are accomplished through different kinds of MFI frames. The
	 * driver needs to care only about substituting user buffers with
	 * kernel buffers in SGLs. The location of the SGL is embedded in
	 * the struct iocpacket itself.
	 */
7177 	if (instance->consistent_mask_64bit)
7178 		kern_sge64 = (struct megasas_sge64 *)
7179 			((unsigned long)cmd->frame + ioc->sgl_off);
7180 	else
7181 		kern_sge32 = (struct megasas_sge32 *)
7182 			((unsigned long)cmd->frame + ioc->sgl_off);
7183 
7184 	/*
7185 	 * For each user buffer, create a mirror buffer and copy in
7186 	 */
7187 	for (i = 0; i < ioc->sge_count; i++) {
7188 		if (!ioc->sgl[i].iov_len)
7189 			continue;
7190 
7191 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
7192 						    ioc->sgl[i].iov_len,
7193 						    &buf_handle, GFP_KERNEL);
7194 		if (!kbuff_arr[i]) {
7195 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
7196 			       "kernel SGL buffer for IOCTL\n");
7197 			error = -ENOMEM;
7198 			goto out;
7199 		}
7200 
		/*
		 * Fill in the SGE with the DMA address of the mirror
		 * buffer, using the 64-bit or 32-bit SGE format to match
		 * the controller's consistent DMA mask.
		 */
7205 		if (instance->consistent_mask_64bit) {
7206 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
7207 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7208 		} else {
7209 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7210 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7211 		}
7212 
7213 		/*
7214 		 * We created a kernel buffer corresponding to the
7215 		 * user buffer. Now copy in from the user buffer
7216 		 */
7217 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
7218 				   (u32) (ioc->sgl[i].iov_len))) {
7219 			error = -EFAULT;
7220 			goto out;
7221 		}
7222 	}
7223 
7224 	if (ioc->sense_len) {
7225 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7226 					     &sense_handle, GFP_KERNEL);
7227 		if (!sense) {
7228 			error = -ENOMEM;
7229 			goto out;
7230 		}
7231 
7232 		sense_ptr =
7233 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7234 		if (instance->consistent_mask_64bit)
7235 			*sense_ptr = cpu_to_le64(sense_handle);
7236 		else
7237 			*sense_ptr = cpu_to_le32(sense_handle);
7238 	}
7239 
7240 	/*
7241 	 * Set the sync_cmd flag so that the ISR knows not to complete this
7242 	 * cmd to the SCSI mid-layer
7243 	 */
7244 	cmd->sync_cmd = 1;
7245 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
7246 		cmd->sync_cmd = 0;
7247 		dev_err(&instance->pdev->dev,
7248 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
7249 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
7250 			cmd->cmd_status_drv);
7251 		return -EBUSY;
7252 	}
7253 
7254 	cmd->sync_cmd = 0;
7255 
7256 	if (instance->unload == 1) {
		dev_info(&instance->pdev->dev, "Driver unload is in progress; "
			"not returning data to the application\n");
7259 		goto out;
7260 	}
7261 	/*
7262 	 * copy out the kernel buffers to user buffers
7263 	 */
7264 	for (i = 0; i < ioc->sge_count; i++) {
7265 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7266 				 ioc->sgl[i].iov_len)) {
7267 			error = -EFAULT;
7268 			goto out;
7269 		}
7270 	}
7271 
7272 	/*
7273 	 * copy out the sense
7274 	 */
7275 	if (ioc->sense_len) {
7276 		/*
7277 		 * sense_ptr points to the location that has the user
7278 		 * sense buffer address
7279 		 */
7280 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7281 				ioc->sense_off);
7282 
7283 		if (copy_to_user((void __user *)((unsigned long)
7284 				 get_unaligned((unsigned long *)sense_ptr)),
7285 				 sense, ioc->sense_len)) {
			dev_err(&instance->pdev->dev, "Failed to copy out "
					"sense data to user\n");
7288 			error = -EFAULT;
7289 			goto out;
7290 		}
7291 	}
7292 
7293 	/*
7294 	 * copy the status codes returned by the fw
7295 	 */
7296 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7297 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7298 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7299 		error = -EFAULT;
7300 	}
7301 
7302 out:
7303 	if (sense) {
7304 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7305 				    sense, sense_handle);
7306 	}
7307 
7308 	for (i = 0; i < ioc->sge_count; i++) {
7309 		if (kbuff_arr[i]) {
7310 			if (instance->consistent_mask_64bit)
7311 				dma_free_coherent(&instance->pdev->dev,
7312 					le32_to_cpu(kern_sge64[i].length),
7313 					kbuff_arr[i],
7314 					le64_to_cpu(kern_sge64[i].phys_addr));
7315 			else
7316 				dma_free_coherent(&instance->pdev->dev,
7317 					le32_to_cpu(kern_sge32[i].length),
7318 					kbuff_arr[i],
7319 					le32_to_cpu(kern_sge32[i].phys_addr));
7320 			kbuff_arr[i] = NULL;
7321 		}
7322 	}
7323 
7324 	megasas_return_cmd(instance, cmd);
7325 	return error;
7326 }
7327 
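/**
 * megasas_mgmt_ioctl_fw -	Handler for the MEGASAS_IOC_FIRMWARE ioctl
 * @file:	File structure of the management char node
 * @arg:	User pointer to a struct megasas_iocpacket
 *
 * Copies in the ioctl packet, looks up the adapter instance, rejects the
 * request if the controller is unloading, in a critical error state or a
 * VF with ioctls disallowed, serializes with ioctl_sem and then hands the
 * packet to megasas_mgmt_fw_ioctl().
 */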
7328 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7329 {
7330 	struct megasas_iocpacket __user *user_ioc =
7331 	    (struct megasas_iocpacket __user *)arg;
7332 	struct megasas_iocpacket *ioc;
7333 	struct megasas_instance *instance;
7334 	int error;
7335 
7336 	ioc = memdup_user(user_ioc, sizeof(*ioc));
7337 	if (IS_ERR(ioc))
7338 		return PTR_ERR(ioc);
7339 
7340 	instance = megasas_lookup_instance(ioc->host_no);
7341 	if (!instance) {
7342 		error = -ENODEV;
7343 		goto out_kfree_ioc;
7344 	}
7345 
7346 	/* Block ioctls in VF mode */
7347 	if (instance->requestorId && !allow_vf_ioctls) {
7348 		error = -ENODEV;
7349 		goto out_kfree_ioc;
7350 	}
7351 
7352 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7353 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
7354 		error = -ENODEV;
7355 		goto out_kfree_ioc;
7356 	}
7357 
7358 	if (instance->unload == 1) {
7359 		error = -ENODEV;
7360 		goto out_kfree_ioc;
7361 	}
7362 
7363 	if (down_interruptible(&instance->ioctl_sem)) {
7364 		error = -ERESTARTSYS;
7365 		goto out_kfree_ioc;
7366 	}
7367 
	if (megasas_wait_for_adapter_operational(instance)) {
7369 		error = -ENODEV;
7370 		goto out_up;
7371 	}
7372 
7373 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7374 out_up:
7375 	up(&instance->ioctl_sem);
7376 
7377 out_kfree_ioc:
7378 	kfree(ioc);
7379 	return error;
7380 }
7381 
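/**
 * megasas_mgmt_ioctl_aen -	Handler for the MEGASAS_IOC_GET_AEN ioctl
 * @file:	File structure of the management char node
 * @arg:	User pointer to a struct megasas_aen
 *
 * Registers an asynchronous event notification with the firmware on behalf
 * of the application. The application must have registered via fasync first.
 */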
7382 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7383 {
7384 	struct megasas_instance *instance;
7385 	struct megasas_aen aen;
7386 	int error;
7387 
7388 	if (file->private_data != file) {
7389 		printk(KERN_DEBUG "megasas: fasync_helper was not "
7390 		       "called first\n");
7391 		return -EINVAL;
7392 	}
7393 
7394 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7395 		return -EFAULT;
7396 
7397 	instance = megasas_lookup_instance(aen.host_no);
7398 
7399 	if (!instance)
7400 		return -ENODEV;
7401 
7402 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7403 		return -ENODEV;
7404 	}
7405 
7406 	if (instance->unload == 1) {
7407 		return -ENODEV;
7408 	}
7409 
	if (megasas_wait_for_adapter_operational(instance))
7411 		return -ENODEV;
7412 
7413 	mutex_lock(&instance->reset_mutex);
7414 	error = megasas_register_aen(instance, aen.seq_num,
7415 				     aen.class_locale_word);
7416 	mutex_unlock(&instance->reset_mutex);
7417 	return error;
7418 }
7419 
7420 /**
7421  * megasas_mgmt_ioctl -	char node ioctl entry point
7422  */
7423 static long
7424 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7425 {
7426 	switch (cmd) {
7427 	case MEGASAS_IOC_FIRMWARE:
7428 		return megasas_mgmt_ioctl_fw(file, arg);
7429 
7430 	case MEGASAS_IOC_GET_AEN:
7431 		return megasas_mgmt_ioctl_aen(file, arg);
7432 	}
7433 
7434 	return -ENOTTY;
7435 }
7436 
7437 #ifdef CONFIG_COMPAT
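/*
 * megasas_mgmt_compat_ioctl_fw -	32-bit compat path for MEGASAS_IOC_FIRMWARE
 *
 * Rebuilds a native struct megasas_iocpacket in compat user space from the
 * 32-bit layout (fixing up the sense buffer and SGL pointers) and then
 * reuses megasas_mgmt_ioctl_fw() to service the request.
 */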
7438 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7439 {
7440 	struct compat_megasas_iocpacket __user *cioc =
7441 	    (struct compat_megasas_iocpacket __user *)arg;
7442 	struct megasas_iocpacket __user *ioc =
7443 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7444 	int i;
7445 	int error = 0;
7446 	compat_uptr_t ptr;
7447 	u32 local_sense_off;
7448 	u32 local_sense_len;
7449 	u32 user_sense_off;
7450 
7451 	if (clear_user(ioc, sizeof(*ioc)))
7452 		return -EFAULT;
7453 
7454 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7455 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7456 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7457 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7458 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7459 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7460 		return -EFAULT;
7461 
	/*
	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
	 * sense_len is non-zero, so prepare the 64-bit value under
	 * the same condition.
	 */
7467 	if (get_user(local_sense_off, &ioc->sense_off) ||
7468 		get_user(local_sense_len, &ioc->sense_len) ||
7469 		get_user(user_sense_off, &cioc->sense_off))
7470 		return -EFAULT;
7471 
7472 	if (local_sense_len) {
7473 		void __user **sense_ioc_ptr =
7474 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7475 		compat_uptr_t *sense_cioc_ptr =
7476 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7477 		if (get_user(ptr, sense_cioc_ptr) ||
7478 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
7479 			return -EFAULT;
7480 	}
7481 
7482 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
7483 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7484 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7485 		    copy_in_user(&ioc->sgl[i].iov_len,
7486 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7487 			return -EFAULT;
7488 	}
7489 
7490 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7491 
7492 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
7493 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7494 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7495 		return -EFAULT;
7496 	}
7497 	return error;
7498 }
7499 
7500 static long
7501 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7502 			  unsigned long arg)
7503 {
7504 	switch (cmd) {
7505 	case MEGASAS_IOC_FIRMWARE32:
7506 		return megasas_mgmt_compat_ioctl_fw(file, arg);
7507 	case MEGASAS_IOC_GET_AEN:
7508 		return megasas_mgmt_ioctl_aen(file, arg);
7509 	}
7510 
7511 	return -ENOTTY;
7512 }
7513 #endif
7514 
7515 /*
7516  * File operations structure for management interface
7517  */
7518 static const struct file_operations megasas_mgmt_fops = {
7519 	.owner = THIS_MODULE,
7520 	.open = megasas_mgmt_open,
7521 	.fasync = megasas_mgmt_fasync,
7522 	.unlocked_ioctl = megasas_mgmt_ioctl,
7523 	.poll = megasas_mgmt_poll,
7524 #ifdef CONFIG_COMPAT
7525 	.compat_ioctl = megasas_mgmt_compat_ioctl,
7526 #endif
7527 	.llseek = noop_llseek,
7528 };
7529 
7530 /*
7531  * PCI hotplug support registration structure
7532  */
7533 static struct pci_driver megasas_pci_driver = {
7534 
7535 	.name = "megaraid_sas",
7536 	.id_table = megasas_pci_table,
7537 	.probe = megasas_probe_one,
7538 	.remove = megasas_detach_one,
7539 	.suspend = megasas_suspend,
7540 	.resume = megasas_resume,
7541 	.shutdown = megasas_shutdown,
7542 };
7543 
7544 /*
7545  * Sysfs driver attributes
7546  */
7547 static ssize_t version_show(struct device_driver *dd, char *buf)
7548 {
7549 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7550 			MEGASAS_VERSION);
7551 }
7552 static DRIVER_ATTR_RO(version);
7553 
7554 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7555 {
7556 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7557 		MEGASAS_RELDATE);
7558 }
7559 static DRIVER_ATTR_RO(release_date);
7560 
7561 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7562 {
7563 	return sprintf(buf, "%u\n", support_poll_for_event);
7564 }
7565 static DRIVER_ATTR_RO(support_poll_for_event);
7566 
7567 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7568 {
7569 	return sprintf(buf, "%u\n", support_device_change);
7570 }
7571 static DRIVER_ATTR_RO(support_device_change);
7572 
7573 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7574 {
7575 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
7576 }
7577 
7578 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7579 			     size_t count)
7580 {
7581 	int retval = count;
7582 
7583 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7584 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7585 		retval = -EINVAL;
7586 	}
7587 	return retval;
7588 }
7589 static DRIVER_ATTR_RW(dbg_lvl);
7590 
7591 static ssize_t
7592 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
7593 {
7594 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
7595 }
7596 
7597 static DRIVER_ATTR_RO(support_nvme_encapsulation);
7598 
7599 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7600 {
7601 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7602 	scsi_remove_device(sdev);
7603 	scsi_device_put(sdev);
7604 }
7605 
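/**
 * megasas_aen_polling -	Delayed work handler for AEN hotplug events
 * @work:	Work structure embedded in the megasas_aen_event
 *
 * Decodes the latest event, refreshes the PD/LD lists as required, adds or
 * removes SCSI devices to match the firmware's view of the configuration,
 * and finally re-registers the AEN with the next sequence number.
 */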
7606 static void
7607 megasas_aen_polling(struct work_struct *work)
7608 {
7609 	struct megasas_aen_event *ev =
7610 		container_of(work, struct megasas_aen_event, hotplug_work.work);
7611 	struct megasas_instance *instance = ev->instance;
7612 	union megasas_evt_class_locale class_locale;
7613 	struct  Scsi_Host *host;
7614 	struct  scsi_device *sdev1;
7615 	u16     pd_index = 0;
7616 	u16	ld_index = 0;
7617 	int     i, j, doscan = 0;
7618 	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7619 	int error;
7620 	u8  dcmd_ret = DCMD_SUCCESS;
7621 
7622 	if (!instance) {
7623 		printk(KERN_ERR "invalid instance!\n");
7624 		kfree(ev);
7625 		return;
7626 	}
7627 
7628 	/* Adjust event workqueue thread wait time for VF mode */
7629 	if (instance->requestorId)
7630 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7631 
7632 	/* Don't run the event workqueue thread if OCR is running */
7633 	mutex_lock(&instance->reset_mutex);
7634 
7635 	instance->ev = NULL;
7636 	host = instance->host;
7637 	if (instance->evt_detail) {
7638 		megasas_decode_evt(instance);
7639 
7640 		switch (le32_to_cpu(instance->evt_detail->code)) {
7641 
7642 		case MR_EVT_PD_INSERTED:
7643 		case MR_EVT_PD_REMOVED:
7644 			dcmd_ret = megasas_get_pd_list(instance);
7645 			if (dcmd_ret == DCMD_SUCCESS)
7646 				doscan = SCAN_PD_CHANNEL;
7647 			break;
7648 
7649 		case MR_EVT_LD_OFFLINE:
7650 		case MR_EVT_CFG_CLEARED:
7651 		case MR_EVT_LD_DELETED:
7652 		case MR_EVT_LD_CREATED:
7653 			if (!instance->requestorId ||
7654 				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7655 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7656 
7657 			if (dcmd_ret == DCMD_SUCCESS)
7658 				doscan = SCAN_VD_CHANNEL;
7659 
7660 			break;
7661 
7662 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7663 		case MR_EVT_FOREIGN_CFG_IMPORTED:
7664 		case MR_EVT_LD_STATE_CHANGE:
7665 			dcmd_ret = megasas_get_pd_list(instance);
7666 
7667 			if (dcmd_ret != DCMD_SUCCESS)
7668 				break;
7669 
7670 			if (!instance->requestorId ||
7671 				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7672 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7673 
7674 			if (dcmd_ret != DCMD_SUCCESS)
7675 				break;
7676 
7677 			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7678 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7679 				instance->host->host_no);
7680 			break;
7681 
		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			break;
7685 		default:
7686 			doscan = 0;
7687 			break;
7688 		}
7689 	} else {
7690 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7691 		mutex_unlock(&instance->reset_mutex);
7692 		kfree(ev);
7693 		return;
7694 	}
7695 
7696 	mutex_unlock(&instance->reset_mutex);
7697 
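	/* Sync the PD channels: add SCSI devices for PDs in SYSTEM state, remove the rest */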
7698 	if (doscan & SCAN_PD_CHANNEL) {
7699 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7700 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7701 				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7702 				sdev1 = scsi_device_lookup(host, i, j, 0);
7703 				if (instance->pd_list[pd_index].driveState ==
7704 							MR_PD_STATE_SYSTEM) {
7705 					if (!sdev1)
7706 						scsi_add_device(host, i, j, 0);
7707 					else
7708 						scsi_device_put(sdev1);
7709 				} else {
7710 					if (sdev1)
7711 						megasas_remove_scsi_device(sdev1);
7712 				}
7713 			}
7714 		}
7715 	}
7716 
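	/* Sync the VD channels: add devices for valid LD ids and remove stale ones */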
7717 	if (doscan & SCAN_VD_CHANNEL) {
7718 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7719 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7720 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7721 				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7722 				if (instance->ld_ids[ld_index] != 0xff) {
7723 					if (!sdev1)
7724 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7725 					else
7726 						scsi_device_put(sdev1);
7727 				} else {
7728 					if (sdev1)
7729 						megasas_remove_scsi_device(sdev1);
7730 				}
7731 			}
7732 		}
7733 	}
7734 
7735 	if (dcmd_ret == DCMD_SUCCESS)
7736 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7737 	else
7738 		seq_num = instance->last_seq_num;
7739 
7740 	/* Register AEN with FW for latest sequence number plus 1 */
7741 	class_locale.members.reserved = 0;
7742 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
7743 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
7744 
7745 	if (instance->aen_cmd != NULL) {
7746 		kfree(ev);
7747 		return;
7748 	}
7749 
7750 	mutex_lock(&instance->reset_mutex);
7751 	error = megasas_register_aen(instance, seq_num,
7752 					class_locale.word);
7753 	if (error)
7754 		dev_err(&instance->pdev->dev,
7755 			"register aen failed error %x\n", error);
7756 
7757 	mutex_unlock(&instance->reset_mutex);
7758 	kfree(ev);
7759 }
7760 
7761 /**
7762  * megasas_init - Driver load entry point
7763  */
7764 static int __init megasas_init(void)
7765 {
7766 	int rval;
7767 
	/*
	 * When booted in a kdump kernel, minimize the memory footprint
	 * by disabling a few features
	 */
7772 	if (reset_devices) {
7773 		msix_vectors = 1;
7774 		rdpq_enable = 0;
7775 		dual_qdepth_disable = 1;
7776 	}
7777 
7778 	/*
7779 	 * Announce driver version and other information
7780 	 */
7781 	pr_info("megasas: %s\n", MEGASAS_VERSION);
7782 
7783 	spin_lock_init(&poll_aen_lock);
7784 
7785 	support_poll_for_event = 2;
7786 	support_device_change = 1;
7787 	support_nvme_encapsulation = true;
7788 
7789 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7790 
	/*
	 * Register character device node (a major number of 0 asks the
	 * kernel to allocate a free major dynamically)
	 */
7794 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7795 
7796 	if (rval < 0) {
		printk(KERN_DEBUG "megasas: failed to register device node\n");
7798 		return rval;
7799 	}
7800 
7801 	megasas_mgmt_majorno = rval;
7802 
7803 	/*
7804 	 * Register ourselves as PCI hotplug module
7805 	 */
7806 	rval = pci_register_driver(&megasas_pci_driver);
7807 
7808 	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7810 		goto err_pcidrv;
7811 	}
7812 
7813 	rval = driver_create_file(&megasas_pci_driver.driver,
7814 				  &driver_attr_version);
7815 	if (rval)
7816 		goto err_dcf_attr_ver;
7817 
7818 	rval = driver_create_file(&megasas_pci_driver.driver,
7819 				  &driver_attr_release_date);
7820 	if (rval)
7821 		goto err_dcf_rel_date;
7822 
7823 	rval = driver_create_file(&megasas_pci_driver.driver,
7824 				&driver_attr_support_poll_for_event);
7825 	if (rval)
7826 		goto err_dcf_support_poll_for_event;
7827 
7828 	rval = driver_create_file(&megasas_pci_driver.driver,
7829 				  &driver_attr_dbg_lvl);
7830 	if (rval)
7831 		goto err_dcf_dbg_lvl;
7832 	rval = driver_create_file(&megasas_pci_driver.driver,
7833 				&driver_attr_support_device_change);
7834 	if (rval)
7835 		goto err_dcf_support_device_change;
7836 
7837 	rval = driver_create_file(&megasas_pci_driver.driver,
7838 				  &driver_attr_support_nvme_encapsulation);
7839 	if (rval)
7840 		goto err_dcf_support_nvme_encapsulation;
7841 
7842 	return rval;
7843 
7844 err_dcf_support_nvme_encapsulation:
7845 	driver_remove_file(&megasas_pci_driver.driver,
7846 			   &driver_attr_support_device_change);
7847 
7848 err_dcf_support_device_change:
7849 	driver_remove_file(&megasas_pci_driver.driver,
7850 			   &driver_attr_dbg_lvl);
7851 err_dcf_dbg_lvl:
7852 	driver_remove_file(&megasas_pci_driver.driver,
7853 			&driver_attr_support_poll_for_event);
7854 err_dcf_support_poll_for_event:
7855 	driver_remove_file(&megasas_pci_driver.driver,
7856 			   &driver_attr_release_date);
7857 err_dcf_rel_date:
7858 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7859 err_dcf_attr_ver:
7860 	pci_unregister_driver(&megasas_pci_driver);
7861 err_pcidrv:
7862 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7863 	return rval;
7864 }
7865 
7866 /**
7867  * megasas_exit - Driver unload entry point
7868  */
7869 static void __exit megasas_exit(void)
7870 {
7871 	driver_remove_file(&megasas_pci_driver.driver,
7872 			   &driver_attr_dbg_lvl);
7873 	driver_remove_file(&megasas_pci_driver.driver,
7874 			&driver_attr_support_poll_for_event);
7875 	driver_remove_file(&megasas_pci_driver.driver,
7876 			&driver_attr_support_device_change);
7877 	driver_remove_file(&megasas_pci_driver.driver,
7878 			   &driver_attr_release_date);
7879 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7880 	driver_remove_file(&megasas_pci_driver.driver,
7881 			   &driver_attr_support_nvme_encapsulation);
7882 
7883 	pci_unregister_driver(&megasas_pci_driver);
7884 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7885 }
7886 
7887 module_init(megasas_init);
7888 module_exit(megasas_exit);
7889