xref: /openbmc/linux/drivers/scsi/megaraid/megaraid_sas_base.c (revision b240b419db5d624ce7a5a397d6f62a1a686009ec)
1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2003-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  Authors: Avago Technologies
21  *           Sreenivas Bagalkote
22  *           Sumant Patro
23  *           Bo Yang
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53 
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_tcq.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61 
62 /*
63  * Number of sectors per IO command
64  * Will be set in megasas_init_mfi if user does not provide
65  */
66 static unsigned int max_sectors;
67 module_param_named(max_sectors, max_sectors, int, 0);
68 MODULE_PARM_DESC(max_sectors,
69 	"Maximum number of sectors per IO command");
70 
71 static int msix_disable;
72 module_param(msix_disable, int, S_IRUGO);
73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
74 
75 static unsigned int msix_vectors;
76 module_param(msix_vectors, int, S_IRUGO);
77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
78 
79 static int allow_vf_ioctls;
80 module_param(allow_vf_ioctls, int, S_IRUGO);
81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
82 
83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
84 module_param(throttlequeuedepth, int, S_IRUGO);
85 MODULE_PARM_DESC(throttlequeuedepth,
86 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
87 
88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89 module_param(resetwaittime, int, S_IRUGO);
90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
91 		 "before resetting adapter. Default: 180");
92 
93 int smp_affinity_enable = 1;
94 module_param(smp_affinity_enable, int, S_IRUGO);
95 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
96 
97 int rdpq_enable = 1;
98 module_param(rdpq_enable, int, S_IRUGO);
99 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
100 
101 unsigned int dual_qdepth_disable;
102 module_param(dual_qdepth_disable, int, S_IRUGO);
103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
104 
105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
106 module_param(scmd_timeout, int, S_IRUGO);
107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
108 
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(MEGASAS_VERSION);
111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
113 
114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115 static int megasas_get_pd_list(struct megasas_instance *instance);
116 static int megasas_ld_list_query(struct megasas_instance *instance,
117 				 u8 query_type);
118 static int megasas_issue_init_mfi(struct megasas_instance *instance);
119 static int megasas_register_aen(struct megasas_instance *instance,
120 				u32 seq_num, u32 class_locale_word);
121 static void megasas_get_pd_info(struct megasas_instance *instance,
122 				struct scsi_device *sdev);
123 static int megasas_get_target_prop(struct megasas_instance *instance,
124 				   struct scsi_device *sdev);
125 /*
126  * PCI ID table for all supported controllers
127  */
128 static struct pci_device_id megasas_pci_table[] = {
129 
130 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
131 	/* xscale IOP */
132 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
133 	/* ppc IOP */
134 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
135 	/* ppc IOP */
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
137 	/* gen2*/
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
139 	/* gen2*/
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
141 	/* skinny*/
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
143 	/* skinny*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
145 	/* xscale IOP, vega */
146 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
147 	/* xscale IOP */
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
149 	/* Fusion */
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
151 	/* Plasma */
152 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
153 	/* Invader */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
155 	/* Fury */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
157 	/* Intruder */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
159 	/* Intruder 24 port*/
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
162 	/* VENTURA */
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
168 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
169 	{}
170 };
171 
172 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
173 
174 static int megasas_mgmt_majorno;
175 struct megasas_mgmt_info megasas_mgmt_info;
176 static struct fasync_struct *megasas_async_queue;
177 static DEFINE_MUTEX(megasas_async_queue_mutex);
178 
179 static int megasas_poll_wait_aen;
180 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
181 static u32 support_poll_for_event;
182 u32 megasas_dbg_lvl;
183 static u32 support_device_change;
184 static bool support_nvme_encapsulation;
185 
186 /* define lock for aen poll */
187 spinlock_t poll_aen_lock;
188 
189 void
190 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
191 		     u8 alt_status);
192 static u32
193 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
194 static int
195 megasas_adp_reset_gen2(struct megasas_instance *instance,
196 		       struct megasas_register_set __iomem *reg_set);
197 static irqreturn_t megasas_isr(int irq, void *devp);
198 static u32
199 megasas_init_adapter_mfi(struct megasas_instance *instance);
200 u32
201 megasas_build_and_issue_cmd(struct megasas_instance *instance,
202 			    struct scsi_cmnd *scmd);
203 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
204 int
205 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
206 	int seconds);
207 void megasas_fusion_ocr_wq(struct work_struct *work);
208 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
209 					 int initial);
210 static int
211 megasas_set_dma_mask(struct megasas_instance *instance);
212 static int
213 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
214 static inline void
215 megasas_free_ctrl_mem(struct megasas_instance *instance);
216 static inline int
217 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
218 static inline void
219 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
220 static inline void
221 megasas_init_ctrl_params(struct megasas_instance *instance);
222 
223 /**
224  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
225  * @instance:			Adapter soft state
226  * @dcmd:			DCMD frame inside MFI command
227  * @dma_addr:			DMA address of buffer to be passed to FW
228  * @dma_len:			Length of DMA buffer to be passed to FW
229  * @return:			void
230  */
231 void megasas_set_dma_settings(struct megasas_instance *instance,
232 			      struct megasas_dcmd_frame *dcmd,
233 			      dma_addr_t dma_addr, u32 dma_len)
234 {
235 	if (instance->consistent_mask_64bit) {
236 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
237 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
238 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
239 
240 	} else {
241 		dcmd->sgl.sge32[0].phys_addr =
242 				cpu_to_le32(lower_32_bits(dma_addr));
243 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
244 		dcmd->flags = cpu_to_le16(dcmd->flags);
245 	}
246 }
247 
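/**
 * megasas_issue_dcmd -	Issue a prepared MFI frame to the firmware
 * @instance:		Adapter soft state
 * @cmd:		MFI command carrying the frame to be fired
 */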
248 void
249 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
250 {
251 	instance->instancet->fire_cmd(instance,
252 		cmd->frame_phys_addr, 0, instance->reg_set);
253 	return;
254 }
255 
256 /**
257  * megasas_get_cmd -	Get a command from the free pool
258  * @instance:		Adapter soft state
259  *
260  * Returns a free command from the pool
261  */
262 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
263 						  *instance)
264 {
265 	unsigned long flags;
266 	struct megasas_cmd *cmd = NULL;
267 
268 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
269 
270 	if (!list_empty(&instance->cmd_pool)) {
271 		cmd = list_entry((&instance->cmd_pool)->next,
272 				 struct megasas_cmd, list);
273 		list_del_init(&cmd->list);
274 	} else {
275 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
276 	}
277 
278 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
279 	return cmd;
280 }
281 
282 /**
283  * megasas_return_cmd -	Return a cmd to free command pool
284  * @instance:		Adapter soft state
285  * @cmd:		Command packet to be returned to free command pool
286  */
287 void
288 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
289 {
290 	unsigned long flags;
291 	u32 blk_tags;
292 	struct megasas_cmd_fusion *cmd_fusion;
293 	struct fusion_context *fusion = instance->ctrl_context;
294 
295 	/* This flag is used only for fusion adapter.
296 	 * Wait for Interrupt for Polled mode DCMD
297 	 */
298 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
299 		return;
300 
301 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
302 
303 	if (fusion) {
304 		blk_tags = instance->max_scsi_cmds + cmd->index;
305 		cmd_fusion = fusion->cmd_list[blk_tags];
306 		megasas_return_cmd_fusion(instance, cmd_fusion);
307 	}
308 	cmd->scmd = NULL;
309 	cmd->frame_count = 0;
310 	cmd->flags = 0;
311 	memset(cmd->frame, 0, instance->mfi_frame_size);
312 	cmd->frame->io.context = cpu_to_le32(cmd->index);
313 	if (!fusion && reset_devices)
314 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
315 	list_add(&cmd->list, (&instance->cmd_pool)->next);
316 
317 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
318 
319 }
320 
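/*
 * format_timestamp -	Render an event timestamp for logging; values with the
 *			top byte set are counted relative to controller boot.
 */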
321 static const char *
322 format_timestamp(uint32_t timestamp)
323 {
324 	static char buffer[32];
325 
326 	if ((timestamp & 0xff000000) == 0xff000000)
327 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
328 		0x00ffffff);
329 	else
330 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
331 	return buffer;
332 }
333 
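/*
 * format_class -	Map an MFI event class code to a printable string.
 */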
334 static const char *
335 format_class(int8_t class)
336 {
337 	static char buffer[6];
338 
339 	switch (class) {
340 	case MFI_EVT_CLASS_DEBUG:
341 		return "debug";
342 	case MFI_EVT_CLASS_PROGRESS:
343 		return "progress";
344 	case MFI_EVT_CLASS_INFO:
345 		return "info";
346 	case MFI_EVT_CLASS_WARNING:
347 		return "WARN";
348 	case MFI_EVT_CLASS_CRITICAL:
349 		return "CRIT";
350 	case MFI_EVT_CLASS_FATAL:
351 		return "FATAL";
352 	case MFI_EVT_CLASS_DEAD:
353 		return "DEAD";
354 	default:
355 		snprintf(buffer, sizeof(buffer), "%d", class);
356 		return buffer;
357 	}
358 }
359 
360 /**
361  * megasas_decode_evt -	Decode FW AEN event and log events of critical
362  *				class and above for information
363  * @instance:			Adapter soft state
364  */
365 static void
366 megasas_decode_evt(struct megasas_instance *instance)
367 {
368 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
369 	union megasas_evt_class_locale class_locale;
370 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
371 
372 	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
373 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
374 			le32_to_cpu(evt_detail->seq_num),
375 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
376 			(class_locale.members.locale),
377 			format_class(class_locale.members.class),
378 			evt_detail->description);
379 }
380 
381 /**
382 *	The following functions are defined for xscale
383 *	(deviceid : 1064R, PERC5) controllers
384 */
385 
386 /**
387  * megasas_enable_intr_xscale -	Enables interrupts
388  * @regs:			MFI register set
389  */
390 static inline void
391 megasas_enable_intr_xscale(struct megasas_instance *instance)
392 {
393 	struct megasas_register_set __iomem *regs;
394 
395 	regs = instance->reg_set;
396 	writel(0, &(regs)->outbound_intr_mask);
397 
398 	/* Dummy readl to force pci flush */
399 	readl(&regs->outbound_intr_mask);
400 }
401 
402 /**
403  * megasas_disable_intr_xscale -	Disables interrupt
404  * @regs:			MFI register set
405  */
406 static inline void
407 megasas_disable_intr_xscale(struct megasas_instance *instance)
408 {
409 	struct megasas_register_set __iomem *regs;
410 	u32 mask = 0x1f;
411 
412 	regs = instance->reg_set;
413 	writel(mask, &regs->outbound_intr_mask);
414 	/* Dummy readl to force pci flush */
415 	readl(&regs->outbound_intr_mask);
416 }
417 
418 /**
419  * megasas_read_fw_status_reg_xscale - returns the current FW status value
420  * @regs:			MFI register set
421  */
422 static u32
423 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
424 {
425 	return readl(&(regs)->outbound_msg_0);
426 }
427 /**
428  * megasas_clear_intr_xscale -	Check & clear interrupt
429  * @regs:				MFI register set
430  */
431 static int
432 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
433 {
434 	u32 status;
435 	u32 mfiStatus = 0;
436 
437 	/*
438 	 * Check if it is our interrupt
439 	 */
440 	status = readl(&regs->outbound_intr_status);
441 
442 	if (status & MFI_OB_INTR_STATUS_MASK)
443 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
444 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
445 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
446 
447 	/*
448 	 * Clear the interrupt by writing back the same value
449 	 */
450 	if (mfiStatus)
451 		writel(status, &regs->outbound_intr_status);
452 
453 	/* Dummy readl to force pci flush */
454 	readl(&regs->outbound_intr_status);
455 
456 	return mfiStatus;
457 }
458 
459 /**
460  * megasas_fire_cmd_xscale -	Sends command to the FW
461  * @frame_phys_addr :		Physical address of cmd
462  * @frame_count :		Number of frames for the command
463  * @regs :			MFI register set
464  */
465 static inline void
466 megasas_fire_cmd_xscale(struct megasas_instance *instance,
467 		dma_addr_t frame_phys_addr,
468 		u32 frame_count,
469 		struct megasas_register_set __iomem *regs)
470 {
471 	unsigned long flags;
472 
473 	spin_lock_irqsave(&instance->hba_lock, flags);
474 	writel((frame_phys_addr >> 3)|(frame_count),
475 	       &(regs)->inbound_queue_port);
476 	spin_unlock_irqrestore(&instance->hba_lock, flags);
477 }
478 
479 /**
480  * megasas_adp_reset_xscale -  For controller reset
481  * @regs:                              MFI register set
482  */
483 static int
484 megasas_adp_reset_xscale(struct megasas_instance *instance,
485 	struct megasas_register_set __iomem *regs)
486 {
487 	u32 i;
488 	u32 pcidata;
489 
490 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
491 
492 	for (i = 0; i < 3; i++)
493 		msleep(1000); /* sleep for 3 secs */
494 	pcidata  = 0;
495 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
496 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
497 	if (pcidata & 0x2) {
498 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
499 		pcidata &= ~0x2;
500 		pci_write_config_dword(instance->pdev,
501 				MFI_1068_PCSR_OFFSET, pcidata);
502 
503 		for (i = 0; i < 2; i++)
504 			msleep(1000); /* need to wait 2 secs again */
505 
506 		pcidata  = 0;
507 		pci_read_config_dword(instance->pdev,
508 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
509 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
510 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
511 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
512 			pcidata = 0;
513 			pci_write_config_dword(instance->pdev,
514 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
515 		}
516 	}
517 	return 0;
518 }
519 
520 /**
521  * megasas_check_reset_xscale -	For controller reset check
522  * @regs:				MFI register set
523  */
524 static int
525 megasas_check_reset_xscale(struct megasas_instance *instance,
526 		struct megasas_register_set __iomem *regs)
527 {
528 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
529 	    (le32_to_cpu(*instance->consumer) ==
530 		MEGASAS_ADPRESET_INPROG_SIGN))
531 		return 1;
532 	return 0;
533 }
534 
535 static struct megasas_instance_template megasas_instance_template_xscale = {
536 
537 	.fire_cmd = megasas_fire_cmd_xscale,
538 	.enable_intr = megasas_enable_intr_xscale,
539 	.disable_intr = megasas_disable_intr_xscale,
540 	.clear_intr = megasas_clear_intr_xscale,
541 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
542 	.adp_reset = megasas_adp_reset_xscale,
543 	.check_reset = megasas_check_reset_xscale,
544 	.service_isr = megasas_isr,
545 	.tasklet = megasas_complete_cmd_dpc,
546 	.init_adapter = megasas_init_adapter_mfi,
547 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
548 	.issue_dcmd = megasas_issue_dcmd,
549 };
550 
551 /**
552 *	This is the end of set of functions & definitions specific
553 *	to xscale (deviceid : 1064R, PERC5) controllers
554 */
555 
556 /**
557 *	The following functions are defined for ppc (deviceid : 0x60)
558 *	controllers
559 */
560 
561 /**
562  * megasas_enable_intr_ppc -	Enables interrupts
563  * @regs:			MFI register set
564  */
565 static inline void
566 megasas_enable_intr_ppc(struct megasas_instance *instance)
567 {
568 	struct megasas_register_set __iomem *regs;
569 
570 	regs = instance->reg_set;
571 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
572 
573 	writel(~0x80000000, &(regs)->outbound_intr_mask);
574 
575 	/* Dummy readl to force pci flush */
576 	readl(&regs->outbound_intr_mask);
577 }
578 
579 /**
580  * megasas_disable_intr_ppc -	Disable interrupt
581  * @regs:			MFI register set
582  */
583 static inline void
584 megasas_disable_intr_ppc(struct megasas_instance *instance)
585 {
586 	struct megasas_register_set __iomem *regs;
587 	u32 mask = 0xFFFFFFFF;
588 
589 	regs = instance->reg_set;
590 	writel(mask, &regs->outbound_intr_mask);
591 	/* Dummy readl to force pci flush */
592 	readl(&regs->outbound_intr_mask);
593 }
594 
595 /**
596  * megasas_read_fw_status_reg_ppc - returns the current FW status value
597  * @regs:			MFI register set
598  */
599 static u32
600 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
601 {
602 	return readl(&(regs)->outbound_scratch_pad);
603 }
604 
605 /**
606  * megasas_clear_intr_ppc -	Check & clear interrupt
607  * @regs:				MFI register set
608  */
609 static int
610 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
611 {
612 	u32 status, mfiStatus = 0;
613 
614 	/*
615 	 * Check if it is our interrupt
616 	 */
617 	status = readl(&regs->outbound_intr_status);
618 
619 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
620 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
621 
622 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
623 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
624 
625 	/*
626 	 * Clear the interrupt by writing back the same value
627 	 */
628 	writel(status, &regs->outbound_doorbell_clear);
629 
630 	/* Dummy readl to force pci flush */
631 	readl(&regs->outbound_doorbell_clear);
632 
633 	return mfiStatus;
634 }
635 
636 /**
637  * megasas_fire_cmd_ppc -	Sends command to the FW
638  * @frame_phys_addr :		Physical address of cmd
639  * @frame_count :		Number of frames for the command
640  * @regs :			MFI register set
641  */
642 static inline void
643 megasas_fire_cmd_ppc(struct megasas_instance *instance,
644 		dma_addr_t frame_phys_addr,
645 		u32 frame_count,
646 		struct megasas_register_set __iomem *regs)
647 {
648 	unsigned long flags;
649 
650 	spin_lock_irqsave(&instance->hba_lock, flags);
651 	writel((frame_phys_addr | (frame_count<<1))|1,
652 			&(regs)->inbound_queue_port);
653 	spin_unlock_irqrestore(&instance->hba_lock, flags);
654 }
655 
656 /**
657  * megasas_check_reset_ppc -	For controller reset check
658  * @regs:				MFI register set
659  */
660 static int
661 megasas_check_reset_ppc(struct megasas_instance *instance,
662 			struct megasas_register_set __iomem *regs)
663 {
664 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
665 		return 1;
666 
667 	return 0;
668 }
669 
670 static struct megasas_instance_template megasas_instance_template_ppc = {
671 
672 	.fire_cmd = megasas_fire_cmd_ppc,
673 	.enable_intr = megasas_enable_intr_ppc,
674 	.disable_intr = megasas_disable_intr_ppc,
675 	.clear_intr = megasas_clear_intr_ppc,
676 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
677 	.adp_reset = megasas_adp_reset_xscale,
678 	.check_reset = megasas_check_reset_ppc,
679 	.service_isr = megasas_isr,
680 	.tasklet = megasas_complete_cmd_dpc,
681 	.init_adapter = megasas_init_adapter_mfi,
682 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
683 	.issue_dcmd = megasas_issue_dcmd,
684 };
685 
686 /**
687  * megasas_enable_intr_skinny -	Enables interrupts
688  * @regs:			MFI register set
689  */
690 static inline void
691 megasas_enable_intr_skinny(struct megasas_instance *instance)
692 {
693 	struct megasas_register_set __iomem *regs;
694 
695 	regs = instance->reg_set;
696 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
697 
698 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
699 
700 	/* Dummy readl to force pci flush */
701 	readl(&regs->outbound_intr_mask);
702 }
703 
704 /**
705  * megasas_disable_intr_skinny -	Disables interrupt
706  * @regs:			MFI register set
707  */
708 static inline void
709 megasas_disable_intr_skinny(struct megasas_instance *instance)
710 {
711 	struct megasas_register_set __iomem *regs;
712 	u32 mask = 0xFFFFFFFF;
713 
714 	regs = instance->reg_set;
715 	writel(mask, &regs->outbound_intr_mask);
716 	/* Dummy readl to force pci flush */
717 	readl(&regs->outbound_intr_mask);
718 }
719 
720 /**
721  * megasas_read_fw_status_reg_skinny - returns the current FW status value
722  * @regs:			MFI register set
723  */
724 static u32
725 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
726 {
727 	return readl(&(regs)->outbound_scratch_pad);
728 }
729 
730 /**
731  * megasas_clear_intr_skinny -	Check & clear interrupt
732  * @regs:				MFI register set
733  */
734 static int
735 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
736 {
737 	u32 status;
738 	u32 mfiStatus = 0;
739 
740 	/*
741 	 * Check if it is our interrupt
742 	 */
743 	status = readl(&regs->outbound_intr_status);
744 
745 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
746 		return 0;
747 	}
748 
749 	/*
750 	/*
751 	 * Check whether the firmware has faulted
752 	 */
753 	    MFI_STATE_FAULT) {
754 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
755 	} else
756 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
757 
758 	/*
759 	 * Clear the interrupt by writing back the same value
760 	 */
761 	writel(status, &regs->outbound_intr_status);
762 
763 	/*
764 	 * dummy read to flush PCI
765 	 */
766 	readl(&regs->outbound_intr_status);
767 
768 	return mfiStatus;
769 }
770 
771 /**
772  * megasas_fire_cmd_skinny -	Sends command to the FW
773  * @frame_phys_addr :		Physical address of cmd
774  * @frame_count :		Number of frames for the command
775  * @regs :			MFI register set
776  */
777 static inline void
778 megasas_fire_cmd_skinny(struct megasas_instance *instance,
779 			dma_addr_t frame_phys_addr,
780 			u32 frame_count,
781 			struct megasas_register_set __iomem *regs)
782 {
783 	unsigned long flags;
784 
785 	spin_lock_irqsave(&instance->hba_lock, flags);
786 	writel(upper_32_bits(frame_phys_addr),
787 	       &(regs)->inbound_high_queue_port);
788 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
789 	       &(regs)->inbound_low_queue_port);
790 	mmiowb();
791 	spin_unlock_irqrestore(&instance->hba_lock, flags);
792 }
793 
794 /**
795  * megasas_check_reset_skinny -	For controller reset check
796  * @regs:				MFI register set
797  */
798 static int
799 megasas_check_reset_skinny(struct megasas_instance *instance,
800 				struct megasas_register_set __iomem *regs)
801 {
802 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
803 		return 1;
804 
805 	return 0;
806 }
807 
808 static struct megasas_instance_template megasas_instance_template_skinny = {
809 
810 	.fire_cmd = megasas_fire_cmd_skinny,
811 	.enable_intr = megasas_enable_intr_skinny,
812 	.disable_intr = megasas_disable_intr_skinny,
813 	.clear_intr = megasas_clear_intr_skinny,
814 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
815 	.adp_reset = megasas_adp_reset_gen2,
816 	.check_reset = megasas_check_reset_skinny,
817 	.service_isr = megasas_isr,
818 	.tasklet = megasas_complete_cmd_dpc,
819 	.init_adapter = megasas_init_adapter_mfi,
820 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
821 	.issue_dcmd = megasas_issue_dcmd,
822 };
823 
824 
825 /**
826 *	The following functions are defined for gen2 (deviceid : 0x78 0x79)
827 *	controllers
828 */
829 
830 /**
831  * megasas_enable_intr_gen2 -  Enables interrupts
832  * @regs:                      MFI register set
833  */
834 static inline void
835 megasas_enable_intr_gen2(struct megasas_instance *instance)
836 {
837 	struct megasas_register_set __iomem *regs;
838 
839 	regs = instance->reg_set;
840 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
841 
842 	/* write ~0x00000005 (4 | 1) to the intr mask */
843 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
844 
845 	/* Dummy readl to force pci flush */
846 	readl(&regs->outbound_intr_mask);
847 }
848 
849 /**
850  * megasas_disable_intr_gen2 - Disables interrupt
851  * @regs:                      MFI register set
852  */
853 static inline void
854 megasas_disable_intr_gen2(struct megasas_instance *instance)
855 {
856 	struct megasas_register_set __iomem *regs;
857 	u32 mask = 0xFFFFFFFF;
858 
859 	regs = instance->reg_set;
860 	writel(mask, &regs->outbound_intr_mask);
861 	/* Dummy readl to force pci flush */
862 	readl(&regs->outbound_intr_mask);
863 }
864 
865 /**
866  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
867  * @regs:                      MFI register set
868  */
869 static u32
870 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
871 {
872 	return readl(&(regs)->outbound_scratch_pad);
873 }
874 
875 /**
876  * megasas_clear_intr_gen2 -      Check & clear interrupt
877  * @regs:                              MFI register set
878  */
879 static int
880 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
881 {
882 	u32 status;
883 	u32 mfiStatus = 0;
884 
885 	/*
886 	 * Check if it is our interrupt
887 	 */
888 	status = readl(&regs->outbound_intr_status);
889 
890 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
891 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
892 	}
893 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
894 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
895 	}
896 
897 	/*
898 	 * Clear the interrupt by writing back the same value
899 	 */
900 	if (mfiStatus)
901 		writel(status, &regs->outbound_doorbell_clear);
902 
903 	/* Dummy readl to force pci flush */
904 	readl(&regs->outbound_intr_status);
905 
906 	return mfiStatus;
907 }
908 /**
909  * megasas_fire_cmd_gen2 -     Sends command to the FW
910  * @frame_phys_addr :          Physical address of cmd
911  * @frame_count :              Number of frames for the command
912  * @regs :                     MFI register set
913  */
914 static inline void
915 megasas_fire_cmd_gen2(struct megasas_instance *instance,
916 			dma_addr_t frame_phys_addr,
917 			u32 frame_count,
918 			struct megasas_register_set __iomem *regs)
919 {
920 	unsigned long flags;
921 
922 	spin_lock_irqsave(&instance->hba_lock, flags);
923 	writel((frame_phys_addr | (frame_count<<1))|1,
924 			&(regs)->inbound_queue_port);
925 	spin_unlock_irqrestore(&instance->hba_lock, flags);
926 }
927 
928 /**
929  * megasas_adp_reset_gen2 -	For controller reset
930  * @regs:				MFI register set
931  */
932 static int
933 megasas_adp_reset_gen2(struct megasas_instance *instance,
934 			struct megasas_register_set __iomem *reg_set)
935 {
936 	u32 retry = 0 ;
937 	u32 HostDiag;
938 	u32 __iomem *seq_offset = &reg_set->seq_offset;
939 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
940 
941 	if (instance->instancet == &megasas_instance_template_skinny) {
942 		seq_offset = &reg_set->fusion_seq_offset;
943 		hostdiag_offset = &reg_set->fusion_host_diag;
944 	}
945 
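	/* Write the host diag unlock key sequence to enable diag register writes */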
946 	writel(0, seq_offset);
947 	writel(4, seq_offset);
948 	writel(0xb, seq_offset);
949 	writel(2, seq_offset);
950 	writel(7, seq_offset);
951 	writel(0xd, seq_offset);
952 
953 	msleep(1000);
954 
955 	HostDiag = (u32)readl(hostdiag_offset);
956 
957 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
958 		msleep(100);
959 		HostDiag = (u32)readl(hostdiag_offset);
960 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
961 					retry, HostDiag);
962 
963 		if (retry++ >= 100)
964 			return 1;
965 
966 	}
967 
968 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
969 
970 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
971 
972 	ssleep(10);
973 
974 	HostDiag = (u32)readl(hostdiag_offset);
975 	while (HostDiag & DIAG_RESET_ADAPTER) {
976 		msleep(100);
977 		HostDiag = (u32)readl(hostdiag_offset);
978 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
979 				retry, HostDiag);
980 
981 		if (retry++ >= 1000)
982 			return 1;
983 
984 	}
985 	return 0;
986 }
987 
988 /**
989  * megasas_check_reset_gen2 -	For controller reset check
990  * @regs:				MFI register set
991  */
992 static int
993 megasas_check_reset_gen2(struct megasas_instance *instance,
994 		struct megasas_register_set __iomem *regs)
995 {
996 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
997 		return 1;
998 
999 	return 0;
1000 }
1001 
1002 static struct megasas_instance_template megasas_instance_template_gen2 = {
1003 
1004 	.fire_cmd = megasas_fire_cmd_gen2,
1005 	.enable_intr = megasas_enable_intr_gen2,
1006 	.disable_intr = megasas_disable_intr_gen2,
1007 	.clear_intr = megasas_clear_intr_gen2,
1008 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1009 	.adp_reset = megasas_adp_reset_gen2,
1010 	.check_reset = megasas_check_reset_gen2,
1011 	.service_isr = megasas_isr,
1012 	.tasklet = megasas_complete_cmd_dpc,
1013 	.init_adapter = megasas_init_adapter_mfi,
1014 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1015 	.issue_dcmd = megasas_issue_dcmd,
1016 };
1017 
1018 /**
1019 *	This is the end of set of functions & definitions
1020 *       specific to gen2 (deviceid : 0x78, 0x79) controllers
1021 */
1022 
1023 /*
1024  * Template added for TB (Fusion)
1025  */
1026 extern struct megasas_instance_template megasas_instance_template_fusion;
1027 
1028 /**
1029  * megasas_issue_polled -	Issues a polling command
1030  * @instance:			Adapter soft state
1031  * @cmd:			Command packet to be issued
1032  *
1033  * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1034  */
1035 int
1036 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1037 {
1038 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1039 
1040 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1041 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1042 
1043 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1044 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1045 			__func__, __LINE__);
1046 		return DCMD_NOT_FIRED;
1047 	}
1048 
1049 	instance->instancet->issue_dcmd(instance, cmd);
1050 
1051 	return wait_and_poll(instance, cmd, instance->requestorId ?
1052 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1053 }
1054 
1055 /**
1056  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1057  * @instance:			Adapter soft state
1058  * @cmd:			Command to be issued
1059  * @timeout:			Timeout in seconds
1060  *
1061  * This function waits on an event for the command to be returned from ISR.
1062  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1063  * Used to issue ioctl commands.
1064  */
1065 int
1066 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1067 			  struct megasas_cmd *cmd, int timeout)
1068 {
1069 	int ret = 0;
1070 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1071 
1072 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1073 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1074 			__func__, __LINE__);
1075 		return DCMD_NOT_FIRED;
1076 	}
1077 
1078 	instance->instancet->issue_dcmd(instance, cmd);
1079 
1080 	if (timeout) {
1081 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1082 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1083 		if (!ret) {
1084 			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1085 				__func__, __LINE__);
1086 			return DCMD_TIMEOUT;
1087 		}
1088 	} else
1089 		wait_event(instance->int_cmd_wait_q,
1090 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1091 
1092 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1093 		DCMD_SUCCESS : DCMD_FAILED;
1094 }
1095 
1096 /**
1097  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1098  * @instance:				Adapter soft state
1099  * @cmd_to_abort:			Previously issued cmd to be aborted
1100  * @timeout:				Timeout in seconds
1101  *
1102  * MFI firmware can abort a previously issued AEN command (automatic event
1103  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1104  * cmd and waits for return status.
1105  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1106  */
1107 static int
1108 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1109 				struct megasas_cmd *cmd_to_abort, int timeout)
1110 {
1111 	struct megasas_cmd *cmd;
1112 	struct megasas_abort_frame *abort_fr;
1113 	int ret = 0;
1114 
1115 	cmd = megasas_get_cmd(instance);
1116 
1117 	if (!cmd)
1118 		return -1;
1119 
1120 	abort_fr = &cmd->frame->abort;
1121 
1122 	/*
1123 	 * Prepare and issue the abort frame
1124 	 */
1125 	abort_fr->cmd = MFI_CMD_ABORT;
1126 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1127 	abort_fr->flags = cpu_to_le16(0);
1128 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1129 	abort_fr->abort_mfi_phys_addr_lo =
1130 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1131 	abort_fr->abort_mfi_phys_addr_hi =
1132 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1133 
1134 	cmd->sync_cmd = 1;
1135 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1136 
1137 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1138 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1139 			__func__, __LINE__);
1140 		return DCMD_NOT_FIRED;
1141 	}
1142 
1143 	instance->instancet->issue_dcmd(instance, cmd);
1144 
1145 	if (timeout) {
1146 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1147 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1148 		if (!ret) {
1149 			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1150 				__func__, __LINE__);
1151 			return DCMD_TIMEOUT;
1152 		}
1153 	} else
1154 		wait_event(instance->abort_cmd_wait_q,
1155 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1156 
1157 	cmd->sync_cmd = 0;
1158 
1159 	megasas_return_cmd(instance, cmd);
1160 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1161 		DCMD_SUCCESS : DCMD_FAILED;
1162 }
1163 
1164 /**
1165  * megasas_make_sgl32 -	Prepares 32-bit SGL
1166  * @instance:		Adapter soft state
1167  * @scp:		SCSI command from the mid-layer
1168  * @mfi_sgl:		SGL to be filled in
1169  *
1170  * If successful, this function returns the number of SG elements. Otherwise,
1171  * it returns -1.
1172  */
1173 static int
1174 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1175 		   union megasas_sgl *mfi_sgl)
1176 {
1177 	int i;
1178 	int sge_count;
1179 	struct scatterlist *os_sgl;
1180 
1181 	sge_count = scsi_dma_map(scp);
1182 	BUG_ON(sge_count < 0);
1183 
1184 	if (sge_count) {
1185 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1186 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1187 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1188 		}
1189 	}
1190 	return sge_count;
1191 }
1192 
1193 /**
1194  * megasas_make_sgl64 -	Prepares 64-bit SGL
1195  * @instance:		Adapter soft state
1196  * @scp:		SCSI command from the mid-layer
1197  * @mfi_sgl:		SGL to be filled in
1198  *
1199  * If successful, this function returns the number of SG elements. Otherwise,
1200  * it returns -1.
1201  */
1202 static int
1203 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1204 		   union megasas_sgl *mfi_sgl)
1205 {
1206 	int i;
1207 	int sge_count;
1208 	struct scatterlist *os_sgl;
1209 
1210 	sge_count = scsi_dma_map(scp);
1211 	BUG_ON(sge_count < 0);
1212 
1213 	if (sge_count) {
1214 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1215 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1216 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1217 		}
1218 	}
1219 	return sge_count;
1220 }
1221 
1222 /**
1223  * megasas_make_sgl_skinny - Prepares IEEE SGL
1224  * @instance:           Adapter soft state
1225  * @scp:                SCSI command from the mid-layer
1226  * @mfi_sgl:            SGL to be filled in
1227  *
1228  * If successful, this function returns the number of SG elements. Otherwise,
1229  * it returns -1.
1230  */
1231 static int
1232 megasas_make_sgl_skinny(struct megasas_instance *instance,
1233 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1234 {
1235 	int i;
1236 	int sge_count;
1237 	struct scatterlist *os_sgl;
1238 
1239 	sge_count = scsi_dma_map(scp);
1240 
1241 	if (sge_count) {
1242 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1243 			mfi_sgl->sge_skinny[i].length =
1244 				cpu_to_le32(sg_dma_len(os_sgl));
1245 			mfi_sgl->sge_skinny[i].phys_addr =
1246 				cpu_to_le64(sg_dma_address(os_sgl));
1247 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1248 		}
1249 	}
1250 	return sge_count;
1251 }
1252 
1253  /**
1254  * megasas_get_frame_count - Computes the number of frames
1255  * @frame_type		: type of frame- io or pthru frame
1256  * @sge_count		: number of sg elements
1257  *
1258  * Returns the number of frames required for the given number of SG elements (sge_count)
1259  */
1260 
1261 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1262 			u8 sge_count, u8 frame_type)
1263 {
1264 	int num_cnt;
1265 	int sge_bytes;
1266 	u32 sge_sz;
1267 	u32 frame_count = 0;
1268 
1269 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1270 	    sizeof(struct megasas_sge32);
1271 
1272 	if (instance->flag_ieee) {
1273 		sge_sz = sizeof(struct megasas_sge_skinny);
1274 	}
1275 
1276 	/*
1277 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1278 	 * 3 SGEs for 32-bit SGLs for ldio &
1279 	 * 1 SGEs for 64-bit SGLs and
1280 	 * 2 SGEs for 32-bit SGLs for pthru frame
1281 	 */
1282 	if (unlikely(frame_type == PTHRU_FRAME)) {
1283 		if (instance->flag_ieee == 1) {
1284 			num_cnt = sge_count - 1;
1285 		} else if (IS_DMA64)
1286 			num_cnt = sge_count - 1;
1287 		else
1288 			num_cnt = sge_count - 2;
1289 	} else {
1290 		if (instance->flag_ieee == 1) {
1291 			num_cnt = sge_count - 1;
1292 		} else if (IS_DMA64)
1293 			num_cnt = sge_count - 2;
1294 		else
1295 			num_cnt = sge_count - 3;
1296 	}
1297 
1298 	if (num_cnt > 0) {
1299 		sge_bytes = sge_sz * num_cnt;
1300 
1301 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1302 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1303 	}
1304 	/* Main frame */
1305 	frame_count += 1;
1306 
1307 	if (frame_count > 7)
1308 		frame_count = 8;
1309 	return frame_count;
1310 }
1311 
1312 /**
1313  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1314  * @instance:		Adapter soft state
1315  * @scp:		SCSI command
1316  * @cmd:		Command to be prepared in
1317  *
1318  * This function prepares CDB commands. These are typically pass-through
1319  * commands to the devices.
1320  */
1321 static int
1322 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1323 		   struct megasas_cmd *cmd)
1324 {
1325 	u32 is_logical;
1326 	u32 device_id;
1327 	u16 flags = 0;
1328 	struct megasas_pthru_frame *pthru;
1329 
1330 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1331 	device_id = MEGASAS_DEV_INDEX(scp);
1332 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1333 
1334 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1335 		flags = MFI_FRAME_DIR_WRITE;
1336 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1337 		flags = MFI_FRAME_DIR_READ;
1338 	else if (scp->sc_data_direction == PCI_DMA_NONE)
1339 		flags = MFI_FRAME_DIR_NONE;
1340 
1341 	if (instance->flag_ieee == 1) {
1342 		flags |= MFI_FRAME_IEEE;
1343 	}
1344 
1345 	/*
1346 	 * Prepare the DCDB frame
1347 	 */
1348 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1349 	pthru->cmd_status = 0x0;
1350 	pthru->scsi_status = 0x0;
1351 	pthru->target_id = device_id;
1352 	pthru->lun = scp->device->lun;
1353 	pthru->cdb_len = scp->cmd_len;
1354 	pthru->timeout = 0;
1355 	pthru->pad_0 = 0;
1356 	pthru->flags = cpu_to_le16(flags);
1357 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1358 
1359 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1360 
1361 	/*
1362 	 * If the command is for the tape device, set the
1363 	 * pthru timeout to the os layer timeout value.
1364 	 */
1365 	if (scp->device->type == TYPE_TAPE) {
1366 		if ((scp->request->timeout / HZ) > 0xFFFF)
1367 			pthru->timeout = cpu_to_le16(0xFFFF);
1368 		else
1369 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1370 	}
1371 
1372 	/*
1373 	 * Construct SGL
1374 	 */
1375 	if (instance->flag_ieee == 1) {
1376 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1377 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1378 						      &pthru->sgl);
1379 	} else if (IS_DMA64) {
1380 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1381 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1382 						      &pthru->sgl);
1383 	} else
1384 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1385 						      &pthru->sgl);
1386 
1387 	if (pthru->sge_count > instance->max_num_sge) {
1388 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1389 			pthru->sge_count);
1390 		return 0;
1391 	}
1392 
1393 	/*
1394 	 * Sense info specific
1395 	 */
1396 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1397 	pthru->sense_buf_phys_addr_hi =
1398 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1399 	pthru->sense_buf_phys_addr_lo =
1400 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1401 
1402 	/*
1403 	 * Compute the total number of frames this command consumes. FW uses
1404 	 * this number to pull sufficient number of frames from host memory.
1405 	 */
1406 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1407 							PTHRU_FRAME);
1408 
1409 	return cmd->frame_count;
1410 }
1411 
1412 /**
1413  * megasas_build_ldio -	Prepares IOs to logical devices
1414  * @instance:		Adapter soft state
1415  * @scp:		SCSI command
1416  * @cmd:		Command to be prepared
1417  *
1418  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1419  */
1420 static int
1421 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1422 		   struct megasas_cmd *cmd)
1423 {
1424 	u32 device_id;
1425 	u8 sc = scp->cmnd[0];
1426 	u16 flags = 0;
1427 	struct megasas_io_frame *ldio;
1428 
1429 	device_id = MEGASAS_DEV_INDEX(scp);
1430 	ldio = (struct megasas_io_frame *)cmd->frame;
1431 
1432 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1433 		flags = MFI_FRAME_DIR_WRITE;
1434 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1435 		flags = MFI_FRAME_DIR_READ;
1436 
1437 	if (instance->flag_ieee == 1) {
1438 		flags |= MFI_FRAME_IEEE;
1439 	}
1440 
1441 	/*
1442 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1443 	 */
1444 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1445 	ldio->cmd_status = 0x0;
1446 	ldio->scsi_status = 0x0;
1447 	ldio->target_id = device_id;
1448 	ldio->timeout = 0;
1449 	ldio->reserved_0 = 0;
1450 	ldio->pad_0 = 0;
1451 	ldio->flags = cpu_to_le16(flags);
1452 	ldio->start_lba_hi = 0;
1453 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1454 
1455 	/*
1456 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1457 	 */
1458 	if (scp->cmd_len == 6) {
1459 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1460 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1461 						 ((u32) scp->cmnd[2] << 8) |
1462 						 (u32) scp->cmnd[3]);
1463 
1464 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1465 	}
1466 
1467 	/*
1468 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1469 	 */
1470 	else if (scp->cmd_len == 10) {
1471 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1472 					      ((u32) scp->cmnd[7] << 8));
1473 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1474 						 ((u32) scp->cmnd[3] << 16) |
1475 						 ((u32) scp->cmnd[4] << 8) |
1476 						 (u32) scp->cmnd[5]);
1477 	}
1478 
1479 	/*
1480 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1481 	 */
1482 	else if (scp->cmd_len == 12) {
1483 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1484 					      ((u32) scp->cmnd[7] << 16) |
1485 					      ((u32) scp->cmnd[8] << 8) |
1486 					      (u32) scp->cmnd[9]);
1487 
1488 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1489 						 ((u32) scp->cmnd[3] << 16) |
1490 						 ((u32) scp->cmnd[4] << 8) |
1491 						 (u32) scp->cmnd[5]);
1492 	}
1493 
1494 	/*
1495 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1496 	 */
1497 	else if (scp->cmd_len == 16) {
1498 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1499 					      ((u32) scp->cmnd[11] << 16) |
1500 					      ((u32) scp->cmnd[12] << 8) |
1501 					      (u32) scp->cmnd[13]);
1502 
1503 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1504 						 ((u32) scp->cmnd[7] << 16) |
1505 						 ((u32) scp->cmnd[8] << 8) |
1506 						 (u32) scp->cmnd[9]);
1507 
1508 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1509 						 ((u32) scp->cmnd[3] << 16) |
1510 						 ((u32) scp->cmnd[4] << 8) |
1511 						 (u32) scp->cmnd[5]);
1512 
1513 	}
1514 
1515 	/*
1516 	 * Construct SGL
1517 	 */
1518 	if (instance->flag_ieee) {
1519 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1520 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1521 					      &ldio->sgl);
1522 	} else if (IS_DMA64) {
1523 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1524 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1525 	} else
1526 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1527 
1528 	if (ldio->sge_count > instance->max_num_sge) {
1529 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1530 			ldio->sge_count);
1531 		return 0;
1532 	}
1533 
1534 	/*
1535 	 * Sense info specific
1536 	 */
1537 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1538 	ldio->sense_buf_phys_addr_hi = 0;
1539 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1540 
1541 	/*
1542 	 * Compute the total number of frames this command consumes. FW uses
1543 	 * this number to pull sufficient number of frames from host memory.
1544 	 */
1545 	cmd->frame_count = megasas_get_frame_count(instance,
1546 			ldio->sge_count, IO_FRAME);
1547 
1548 	return cmd->frame_count;
1549 }
1550 
1551 /**
1552  * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1553  *				and whether it's RW or non RW
1554  * @scmd:			SCSI command
1555  *
1556  */
1557 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1558 {
1559 	int ret;
1560 
1561 	switch (cmd->cmnd[0]) {
1562 	case READ_10:
1563 	case WRITE_10:
1564 	case READ_12:
1565 	case WRITE_12:
1566 	case READ_6:
1567 	case WRITE_6:
1568 	case READ_16:
1569 	case WRITE_16:
1570 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1571 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1572 		break;
1573 	default:
1574 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1575 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1576 	}
1577 	return ret;
1578 }
1579 
1580  /**
1581  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1582  *					in FW
1583  * @instance:				Adapter soft state
1584  */
1585 static inline void
1586 megasas_dump_pending_frames(struct megasas_instance *instance)
1587 {
1588 	struct megasas_cmd *cmd;
1589 	int i,n;
1590 	union megasas_sgl *mfi_sgl;
1591 	struct megasas_io_frame *ldio;
1592 	struct megasas_pthru_frame *pthru;
1593 	u32 sgcount;
1594 	u16 max_cmd = instance->max_fw_cmds;
1595 
1596 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1597 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1598 	if (IS_DMA64)
1599 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1600 	else
1601 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1602 
1603 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1604 	for (i = 0; i < max_cmd; i++) {
1605 		cmd = instance->cmd_list[i];
1606 		if (!cmd->scmd)
1607 			continue;
1608 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1609 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1610 			ldio = (struct megasas_io_frame *)cmd->frame;
1611 			mfi_sgl = &ldio->sgl;
1612 			sgcount = ldio->sge_count;
1613 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1614 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1615 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1616 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1617 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1618 		} else {
1619 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1620 			mfi_sgl = &pthru->sgl;
1621 			sgcount = pthru->sge_count;
1622 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1623 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1624 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1625 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1626 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1627 		}
1628 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1629 			for (n = 0; n < sgcount; n++) {
1630 				if (IS_DMA64)
1631 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1632 						le32_to_cpu(mfi_sgl->sge64[n].length),
1633 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1634 				else
1635 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1636 						le32_to_cpu(mfi_sgl->sge32[n].length),
1637 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1638 			}
1639 		}
1640 	} /*for max_cmd*/
1641 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1642 	for (i = 0; i < max_cmd; i++) {
1643 
1644 		cmd = instance->cmd_list[i];
1645 
1646 		if (cmd->sync_cmd == 1)
1647 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1648 	}
1649 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1650 }
1651 
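/**
 * megasas_build_and_issue_cmd -	Build an MFI frame for a SCSI command
 *					and fire it to the firmware
 * @instance:				Adapter soft state
 * @scmd:				SCSI command from the mid-layer
 *
 * Returns 0 on success, or SCSI_MLQUEUE_HOST_BUSY when no free MFI command
 * is available or the frame could not be built.
 */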
1652 u32
1653 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1654 			    struct scsi_cmnd *scmd)
1655 {
1656 	struct megasas_cmd *cmd;
1657 	u32 frame_count;
1658 
1659 	cmd = megasas_get_cmd(instance);
1660 	if (!cmd)
1661 		return SCSI_MLQUEUE_HOST_BUSY;
1662 
1663 	/*
1664 	 * Logical drive command
1665 	 */
1666 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1667 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1668 	else
1669 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1670 
1671 	if (!frame_count)
1672 		goto out_return_cmd;
1673 
1674 	cmd->scmd = scmd;
1675 	scmd->SCp.ptr = (char *)cmd;
1676 
1677 	/*
1678 	 * Issue the command to the FW
1679 	 */
1680 	atomic_inc(&instance->fw_outstanding);
1681 
1682 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1683 				cmd->frame_count-1, instance->reg_set);
1684 
1685 	return 0;
1686 out_return_cmd:
1687 	megasas_return_cmd(instance, cmd);
1688 	return SCSI_MLQUEUE_HOST_BUSY;
1689 }
1690 
1691 
1692 /**
1693  * megasas_queue_command -	Queue entry point
1694  * @scmd:			SCSI command to be queued
1695  * @done:			Callback entry point
1696  */
1697 static int
1698 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1699 {
1700 	struct megasas_instance *instance;
1701 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1702 
1703 	instance = (struct megasas_instance *)
1704 	    scmd->device->host->hostdata;
1705 
1706 	if (instance->unload == 1) {
1707 		scmd->result = DID_NO_CONNECT << 16;
1708 		scmd->scsi_done(scmd);
1709 		return 0;
1710 	}
1711 
1712 	if (instance->issuepend_done == 0)
1713 		return SCSI_MLQUEUE_HOST_BUSY;
1714 
1715 
1716 	/* Check for an mpio path and adjust behavior */
1717 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1718 		if (megasas_check_mpio_paths(instance, scmd) ==
1719 		    (DID_REQUEUE << 16)) {
1720 			return SCSI_MLQUEUE_HOST_BUSY;
1721 		} else {
1722 			scmd->result = DID_NO_CONNECT << 16;
1723 			scmd->scsi_done(scmd);
1724 			return 0;
1725 		}
1726 	}
1727 
1728 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1729 		scmd->result = DID_NO_CONNECT << 16;
1730 		scmd->scsi_done(scmd);
1731 		return 0;
1732 	}
1733 
1734 	mr_device_priv_data = scmd->device->hostdata;
1735 	if (!mr_device_priv_data) {
1736 		scmd->result = DID_NO_CONNECT << 16;
1737 		scmd->scsi_done(scmd);
1738 		return 0;
1739 	}
1740 
1741 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1742 		return SCSI_MLQUEUE_HOST_BUSY;
1743 
1744 	if (mr_device_priv_data->tm_busy)
1745 		return SCSI_MLQUEUE_DEVICE_BUSY;
1746 
1747 
1748 	scmd->result = 0;
1749 
1750 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1751 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1752 		scmd->device->lun)) {
1753 		scmd->result = DID_BAD_TARGET << 16;
1754 		goto out_done;
1755 	}
1756 
1757 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1758 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1759 	    (!instance->fw_sync_cache_support)) {
1760 		scmd->result = DID_OK << 16;
1761 		goto out_done;
1762 	}
1763 
1764 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1765 
1766  out_done:
1767 	scmd->scsi_done(scmd);
1768 	return 0;
1769 }
1770 
1771 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1772 {
1773 	int i;
1774 
1775 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1776 
1777 		if ((megasas_mgmt_info.instance[i]) &&
1778 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1779 			return megasas_mgmt_info.instance[i];
1780 	}
1781 
1782 	return NULL;
1783 }
1784 
1785 /*
1786  * megasas_set_dynamic_target_properties -
1787  * Device properties set by the driver may not be static and need to be
1788  * updated after an OCR.
1789  *
1790  * set tm_capable.
1791  * set dma alignment (only for EEDP protection enabled VD).
1792  *
1793  * @sdev: OS provided scsi device
1794  *
1795  * Returns void
1796  */
1797 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1798 {
1799 	u16 pd_index = 0, ld;
1800 	u32 device_id;
1801 	struct megasas_instance *instance;
1802 	struct fusion_context *fusion;
1803 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1804 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1805 	struct MR_LD_RAID *raid;
1806 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1807 
1808 	instance = megasas_lookup_instance(sdev->host->host_no);
1809 	fusion = instance->ctrl_context;
1810 	mr_device_priv_data = sdev->hostdata;
1811 
1812 	if (!fusion || !mr_device_priv_data)
1813 		return;
1814 
1815 	if (MEGASAS_IS_LOGICAL(sdev)) {
1816 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1817 					+ sdev->id;
1818 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1819 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1820 		if (ld >= instance->fw_supported_vd_count)
1821 			return;
1822 		raid = MR_LdRaidGet(ld, local_map_ptr);
1823 
1824 		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1825 			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1826 
1827 		mr_device_priv_data->is_tm_capable =
1828 			raid->capability.tmCapable;
1829 	} else if (instance->use_seqnum_jbod_fp) {
1830 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1831 			sdev->id;
1832 		pd_sync = (void *)fusion->pd_seq_sync
1833 				[(instance->pd_seq_map_id - 1) & 1];
1834 		mr_device_priv_data->is_tm_capable =
1835 			pd_sync->seq[pd_index].capability.tmCapable;
1836 	}
1837 }
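/*
 * Illustrative sketch (not part of the driver): how the logical target id
 * above is derived, assuming MEGASAS_MAX_DEV_PER_CHANNEL is 128 as defined
 * in megaraid_sas.h.
 *
 *	channel = 1, id = 5
 *	device_id = (1 % 2) * 128 + 5 = 133
 *
 * ld_drv_map[map_id & 1] ping-pongs between two RAID map copies, so a map
 * refreshed by an OCR lands in the inactive copy before map_id is bumped.
 */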
1838 
1839 /*
1840  * megasas_set_nvme_device_properties -
1841  * set nomerges=2
1842  * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1843  * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1844  *
1845  * MR firmware provides the value in KB. The caller of this function
1846  * converts KB into bytes.
1847  *
1848  * e.g. MDTS=5 means 2^5 * NVMe page size. For a 4K page size,
1849  * MR firmware provides the value 128, since 32 * 4K = 128K.
1850  *
1851  * @sdev:				scsi device
1852  * @max_io_size:				maximum io transfer size
1853  *
1854  */
1855 static inline void
1856 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1857 {
1858 	struct megasas_instance *instance;
1859 	u32 mr_nvme_pg_size;
1860 
1861 	instance = (struct megasas_instance *)sdev->host->hostdata;
1862 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1863 				MR_DEFAULT_NVME_PAGE_SIZE);
1864 
1865 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1866 
1867 	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1868 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1869 }
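/*
 * Worked example (illustrative only, not part of the driver): with
 * max_io_size_kb = 128 reported by FW, the caller passes
 * max_io_size = 128 << 10 = 131072 bytes, so:
 *
 *	max_hw_sectors = 131072 / 512 = 256 sectors
 *	virt boundary  = mr_nvme_pg_size - 1 = 0xfff (assuming a 4K NVMe page)
 */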
1870 
1871 
1872 /*
1873  * megasas_set_static_target_properties -
1874  * Device properties set by the driver are static and do not need to be
1875  * updated after an OCR.
1876  *
1877  * set io timeout
1878  * set device queue depth
1879  * set nvme device properties. see - megasas_set_nvme_device_properties
1880  *
1881  * @sdev:				scsi device
1882  * @is_target_prop:			true if FW provided target properties
1883  */
1884 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1885 						 bool is_target_prop)
1886 {
1887 	u16	target_index = 0;
1888 	u8 interface_type;
1889 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1890 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1891 	u32 tgt_device_qd;
1892 	struct megasas_instance *instance;
1893 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1894 
1895 	instance = megasas_lookup_instance(sdev->host->host_no);
1896 	mr_device_priv_data = sdev->hostdata;
1897 	interface_type  = mr_device_priv_data->interface_type;
1898 
1899 	/*
1900 	 * The RAID firmware may require extended timeouts.
1901 	 */
1902 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1903 
1904 	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1905 
1906 	switch (interface_type) {
1907 	case SAS_PD:
1908 		device_qd = MEGASAS_SAS_QD;
1909 		break;
1910 	case SATA_PD:
1911 		device_qd = MEGASAS_SATA_QD;
1912 		break;
1913 	case NVME_PD:
1914 		device_qd = MEGASAS_NVME_QD;
1915 		break;
1916 	}
1917 
1918 	if (is_target_prop) {
1919 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1920 		if (tgt_device_qd &&
1921 		    (tgt_device_qd <= instance->host->can_queue))
1922 			device_qd = tgt_device_qd;
1923 
1924 		/* max_io_size_kb will be set to non zero for
1925 		 * nvme based vd and syspd.
1926 		 */
1927 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1928 	}
1929 
1930 	if (instance->nvme_page_size && max_io_size_kb)
1931 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1932 
1933 	scsi_change_queue_depth(sdev, device_qd);
1934 
1935 }
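/*
 * Illustrative note (not part of the driver): the FW-provided
 * tgt_prop->device_qdepth only overrides the per-interface default when it
 * is non-zero and does not exceed host->can_queue; e.g. a reported depth of
 * 31 with can_queue = 928 (hypothetical values) yields device_qd = 31, while
 * a reported 0 keeps the SAS/SATA/NVMe default chosen above.
 */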
1936 
1937 
1938 static int megasas_slave_configure(struct scsi_device *sdev)
1939 {
1940 	u16 pd_index = 0;
1941 	struct megasas_instance *instance;
1942 	int ret_target_prop = DCMD_FAILED;
1943 	bool is_target_prop = false;
1944 
1945 	instance = megasas_lookup_instance(sdev->host->host_no);
1946 	if (instance->pd_list_not_supported) {
1947 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1948 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1949 				sdev->id;
1950 			if (instance->pd_list[pd_index].driveState !=
1951 				MR_PD_STATE_SYSTEM)
1952 				return -ENXIO;
1953 		}
1954 	}
1955 
1956 	mutex_lock(&instance->reset_mutex);
1957 	/* Send DCMD to Firmware and cache the information */
1958 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1959 		megasas_get_pd_info(instance, sdev);
1960 
1961 	/* Some Ventura firmware may not have instance->nvme_page_size set.
1962 	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1963 	 */
1964 	if ((instance->tgt_prop) && (instance->nvme_page_size))
1965 		ret_target_prop = megasas_get_target_prop(instance, sdev);
1966 
1967 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1968 	megasas_set_static_target_properties(sdev, is_target_prop);
1969 
1970 	mutex_unlock(&instance->reset_mutex);
1971 
1972 	/* This sdev property may change post OCR */
1973 	megasas_set_dynamic_target_properties(sdev);
1974 
1975 	return 0;
1976 }
1977 
1978 static int megasas_slave_alloc(struct scsi_device *sdev)
1979 {
1980 	u16 pd_index = 0;
1981 	struct megasas_instance *instance;
1982 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1983 
1984 	instance = megasas_lookup_instance(sdev->host->host_no);
1985 	if (!MEGASAS_IS_LOGICAL(sdev)) {
1986 		/*
1987 		 * Open the OS scan to the SYSTEM PD
1988 		 */
1989 		pd_index =
1990 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1991 			sdev->id;
1992 		if ((instance->pd_list_not_supported ||
1993 			instance->pd_list[pd_index].driveState ==
1994 			MR_PD_STATE_SYSTEM)) {
1995 			goto scan_target;
1996 		}
1997 		return -ENXIO;
1998 	}
1999 
2000 scan_target:
2001 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2002 					GFP_KERNEL);
2003 	if (!mr_device_priv_data)
2004 		return -ENOMEM;
2005 	sdev->hostdata = mr_device_priv_data;
2006 
2007 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2008 		   instance->r1_ldio_hint_default);
2009 	return 0;
2010 }
2011 
2012 static void megasas_slave_destroy(struct scsi_device *sdev)
2013 {
2014 	kfree(sdev->hostdata);
2015 	sdev->hostdata = NULL;
2016 }
2017 
2018 /*
2019  * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
2020  *                                       the adapter is killed
2021  * @instance:				Adapter soft state
2022  *
2023  */
2024 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2025 {
2026 	int i;
2027 	struct megasas_cmd *cmd_mfi;
2028 	struct megasas_cmd_fusion *cmd_fusion;
2029 	struct fusion_context *fusion = instance->ctrl_context;
2030 
2031 	/* Find all outstanding ioctls */
2032 	if (fusion) {
2033 		for (i = 0; i < instance->max_fw_cmds; i++) {
2034 			cmd_fusion = fusion->cmd_list[i];
2035 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2036 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2037 				if (cmd_mfi->sync_cmd &&
2038 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2039 					cmd_mfi->frame->hdr.cmd_status =
2040 							MFI_STAT_WRONG_STATE;
2041 					megasas_complete_cmd(instance,
2042 							     cmd_mfi, DID_OK);
2043 				}
2044 			}
2045 		}
2046 	} else {
2047 		for (i = 0; i < instance->max_fw_cmds; i++) {
2048 			cmd_mfi = instance->cmd_list[i];
2049 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2050 				MFI_CMD_ABORT)
2051 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2052 		}
2053 	}
2054 }
2055 
2056 
2057 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2058 {
2059 	/* Set critical error to block I/O & ioctls in case caller didn't */
2060 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2061 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2062 	msleep(1000);
2063 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2064 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2065 		(instance->adapter_type != MFI_SERIES)) {
2066 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2067 		/* Flush */
2068 		readl(&instance->reg_set->doorbell);
2069 		if (instance->requestorId && instance->peerIsPresent)
2070 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2071 	} else {
2072 		writel(MFI_STOP_ADP,
2073 			&instance->reg_set->inbound_doorbell);
2074 	}
2075 	/* Complete outstanding ioctls when adapter is killed */
2076 	megasas_complete_outstanding_ioctls(instance);
2077 }
2078 
2079 /**
2080  * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2081  *					restored to max value
2082  * @instance:			Adapter soft state
2083  *
2084  */
2085 void
2086 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2087 {
2088 	unsigned long flags;
2089 
2090 	if (instance->flag & MEGASAS_FW_BUSY
2091 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2092 	    && atomic_read(&instance->fw_outstanding) <
2093 	    instance->throttlequeuedepth + 1) {
2094 
2095 		spin_lock_irqsave(instance->host->host_lock, flags);
2096 		instance->flag &= ~MEGASAS_FW_BUSY;
2097 
2098 		instance->host->can_queue = instance->cur_can_queue;
2099 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2100 	}
2101 }
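/*
 * Illustrative sketch (not part of the driver) of the throttle cycle this
 * function completes: megasas_reset_timer() below sets MEGASAS_FW_BUSY on an
 * IO timeout and drops host->can_queue to instance->throttlequeuedepth.
 * Once at least 5 seconds have passed since last_time and fw_outstanding has
 * drained below throttlequeuedepth + 1, this function restores can_queue to
 * cur_can_queue.
 */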
2102 
2103 /**
2104  * megasas_complete_cmd_dpc	 -	Completes commands posted in the reply queue
2105  * @instance_addr:			Address of adapter soft state
2106  *
2107  * Tasklet to complete cmds
2108  */
2109 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2110 {
2111 	u32 producer;
2112 	u32 consumer;
2113 	u32 context;
2114 	struct megasas_cmd *cmd;
2115 	struct megasas_instance *instance =
2116 				(struct megasas_instance *)instance_addr;
2117 	unsigned long flags;
2118 
2119 	/* If we have already declared adapter dead, do not complete cmds */
2120 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2121 		return;
2122 
2123 	spin_lock_irqsave(&instance->completion_lock, flags);
2124 
2125 	producer = le32_to_cpu(*instance->producer);
2126 	consumer = le32_to_cpu(*instance->consumer);
2127 
2128 	while (consumer != producer) {
2129 		context = le32_to_cpu(instance->reply_queue[consumer]);
2130 		if (context >= instance->max_fw_cmds) {
2131 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2132 				context);
2133 			BUG();
2134 		}
2135 
2136 		cmd = instance->cmd_list[context];
2137 
2138 		megasas_complete_cmd(instance, cmd, DID_OK);
2139 
2140 		consumer++;
2141 		if (consumer == (instance->max_fw_cmds + 1)) {
2142 			consumer = 0;
2143 		}
2144 	}
2145 
2146 	*instance->consumer = cpu_to_le32(producer);
2147 
2148 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2149 
2150 	/*
2151 	 * Check if we can restore can_queue
2152 	 */
2153 	megasas_check_and_restore_queue_depth(instance);
2154 }
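/*
 * Illustrative example (not part of the driver) of the MFI reply ring walked
 * above, assuming max_fw_cmds = 4 (so the ring has 5 slots, 0..4):
 *
 *	consumer = 3, producer = 1
 *	slots processed: 3 -> 4 -> 0 (three completions), then consumer == producer
 *	*instance->consumer is then written back as 1
 *
 * Each slot holds a context, i.e. an index into instance->cmd_list[].
 */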
2155 
2156 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2157 
2158 /**
2159  * megasas_start_timer - Initializes sriov heartbeat timer object
2160  * @instance:		Adapter soft state
2161  *
2162  */
2163 void megasas_start_timer(struct megasas_instance *instance)
2164 {
2165 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2166 
2167 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2168 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2169 	add_timer(timer);
2170 }
2171 
2172 static void
2173 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2174 
2175 static void
2176 process_fw_state_change_wq(struct work_struct *work);
2177 
2178 void megasas_do_ocr(struct megasas_instance *instance)
2179 {
2180 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2181 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2182 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2183 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2184 	}
2185 	instance->instancet->disable_intr(instance);
2186 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2187 	instance->issuepend_done = 0;
2188 
2189 	atomic_set(&instance->fw_outstanding, 0);
2190 	megasas_internal_reset_defer_cmds(instance);
2191 	process_fw_state_change_wq(&instance->work_init);
2192 }
2193 
2194 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2195 					    int initial)
2196 {
2197 	struct megasas_cmd *cmd;
2198 	struct megasas_dcmd_frame *dcmd;
2199 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2200 	dma_addr_t new_affiliation_111_h;
2201 	int ld, retval = 0;
2202 	u8 thisVf;
2203 
2204 	cmd = megasas_get_cmd(instance);
2205 
2206 	if (!cmd) {
2207 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2208 		       " Failed to get cmd for scsi%d\n",
2209 			instance->host->host_no);
2210 		return -ENOMEM;
2211 	}
2212 
2213 	dcmd = &cmd->frame->dcmd;
2214 
2215 	if (!instance->vf_affiliation_111) {
2216 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2217 		       "affiliation for scsi%d\n", instance->host->host_no);
2218 		megasas_return_cmd(instance, cmd);
2219 		return -ENOMEM;
2220 	}
2221 
2222 	if (initial)
2223 		memset(instance->vf_affiliation_111, 0,
2224 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2225 	else {
2226 		new_affiliation_111 =
2227 			pci_alloc_consistent(instance->pdev,
2228 					     sizeof(struct MR_LD_VF_AFFILIATION_111),
2229 					     &new_affiliation_111_h);
2230 		if (!new_affiliation_111) {
2231 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2232 			       "memory for new affiliation for scsi%d\n",
2233 			       instance->host->host_no);
2234 			megasas_return_cmd(instance, cmd);
2235 			return -ENOMEM;
2236 		}
2237 		memset(new_affiliation_111, 0,
2238 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2239 	}
2240 
2241 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2242 
2243 	dcmd->cmd = MFI_CMD_DCMD;
2244 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2245 	dcmd->sge_count = 1;
2246 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2247 	dcmd->timeout = 0;
2248 	dcmd->pad_0 = 0;
2249 	dcmd->data_xfer_len =
2250 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2251 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2252 
2253 	if (initial)
2254 		dcmd->sgl.sge32[0].phys_addr =
2255 			cpu_to_le32(instance->vf_affiliation_111_h);
2256 	else
2257 		dcmd->sgl.sge32[0].phys_addr =
2258 			cpu_to_le32(new_affiliation_111_h);
2259 
2260 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2261 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2262 
2263 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2264 	       "scsi%d\n", instance->host->host_no);
2265 
2266 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2267 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2268 		       " failed with status 0x%x for scsi%d\n",
2269 		       dcmd->cmd_status, instance->host->host_no);
2270 		retval = 1; /* Do a scan if we couldn't get affiliation */
2271 		goto out;
2272 	}
2273 
2274 	if (!initial) {
2275 		thisVf = new_affiliation_111->thisVf;
2276 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2277 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2278 			    new_affiliation_111->map[ld].policy[thisVf]) {
2279 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2280 				       "Got new LD/VF affiliation for scsi%d\n",
2281 				       instance->host->host_no);
2282 				memcpy(instance->vf_affiliation_111,
2283 				       new_affiliation_111,
2284 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2285 				retval = 1;
2286 				goto out;
2287 			}
2288 	}
2289 out:
2290 	if (new_affiliation_111) {
2291 		pci_free_consistent(instance->pdev,
2292 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2293 				    new_affiliation_111,
2294 				    new_affiliation_111_h);
2295 	}
2296 
2297 	megasas_return_cmd(instance, cmd);
2298 
2299 	return retval;
2300 }
2301 
2302 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2303 					    int initial)
2304 {
2305 	struct megasas_cmd *cmd;
2306 	struct megasas_dcmd_frame *dcmd;
2307 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2308 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2309 	dma_addr_t new_affiliation_h;
2310 	int i, j, retval = 0, found = 0, doscan = 0;
2311 	u8 thisVf;
2312 
2313 	cmd = megasas_get_cmd(instance);
2314 
2315 	if (!cmd) {
2316 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2317 		       "Failed to get cmd for scsi%d\n",
2318 		       instance->host->host_no);
2319 		return -ENOMEM;
2320 	}
2321 
2322 	dcmd = &cmd->frame->dcmd;
2323 
2324 	if (!instance->vf_affiliation) {
2325 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2326 		       "affiliation for scsi%d\n", instance->host->host_no);
2327 		megasas_return_cmd(instance, cmd);
2328 		return -ENOMEM;
2329 	}
2330 
2331 	if (initial)
2332 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2333 		       sizeof(struct MR_LD_VF_AFFILIATION));
2334 	else {
2335 		new_affiliation =
2336 			pci_alloc_consistent(instance->pdev,
2337 					     (MAX_LOGICAL_DRIVES + 1) *
2338 					     sizeof(struct MR_LD_VF_AFFILIATION),
2339 					     &new_affiliation_h);
2340 		if (!new_affiliation) {
2341 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2342 			       "memory for new affiliation for scsi%d\n",
2343 			       instance->host->host_no);
2344 			megasas_return_cmd(instance, cmd);
2345 			return -ENOMEM;
2346 		}
2347 		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2348 		       sizeof(struct MR_LD_VF_AFFILIATION));
2349 	}
2350 
2351 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2352 
2353 	dcmd->cmd = MFI_CMD_DCMD;
2354 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2355 	dcmd->sge_count = 1;
2356 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2357 	dcmd->timeout = 0;
2358 	dcmd->pad_0 = 0;
2359 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2360 		sizeof(struct MR_LD_VF_AFFILIATION));
2361 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2362 
2363 	if (initial)
2364 		dcmd->sgl.sge32[0].phys_addr =
2365 			cpu_to_le32(instance->vf_affiliation_h);
2366 	else
2367 		dcmd->sgl.sge32[0].phys_addr =
2368 			cpu_to_le32(new_affiliation_h);
2369 
2370 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2371 		sizeof(struct MR_LD_VF_AFFILIATION));
2372 
2373 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2374 	       "scsi%d\n", instance->host->host_no);
2375 
2376 
2377 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2378 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2379 		       " failed with status 0x%x for scsi%d\n",
2380 		       dcmd->cmd_status, instance->host->host_no);
2381 		retval = 1; /* Do a scan if we couldn't get affiliation */
2382 		goto out;
2383 	}
2384 
2385 	if (!initial) {
2386 		if (!new_affiliation->ldCount) {
2387 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2388 			       "affiliation for passive path for scsi%d\n",
2389 			       instance->host->host_no);
2390 			retval = 1;
2391 			goto out;
2392 		}
2393 		newmap = new_affiliation->map;
2394 		savedmap = instance->vf_affiliation->map;
2395 		thisVf = new_affiliation->thisVf;
2396 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2397 			found = 0;
2398 			for (j = 0; j < instance->vf_affiliation->ldCount;
2399 			     j++) {
2400 				if (newmap->ref.targetId ==
2401 				    savedmap->ref.targetId) {
2402 					found = 1;
2403 					if (newmap->policy[thisVf] !=
2404 					    savedmap->policy[thisVf]) {
2405 						doscan = 1;
2406 						goto out;
2407 					}
2408 				}
2409 				savedmap = (struct MR_LD_VF_MAP *)
2410 					((unsigned char *)savedmap +
2411 					 savedmap->size);
2412 			}
2413 			if (!found && newmap->policy[thisVf] !=
2414 			    MR_LD_ACCESS_HIDDEN) {
2415 				doscan = 1;
2416 				goto out;
2417 			}
2418 			newmap = (struct MR_LD_VF_MAP *)
2419 				((unsigned char *)newmap + newmap->size);
2420 		}
2421 
2422 		newmap = new_affiliation->map;
2423 		savedmap = instance->vf_affiliation->map;
2424 
2425 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2426 			found = 0;
2427 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2428 				if (savedmap->ref.targetId ==
2429 				    newmap->ref.targetId) {
2430 					found = 1;
2431 					if (savedmap->policy[thisVf] !=
2432 					    newmap->policy[thisVf]) {
2433 						doscan = 1;
2434 						goto out;
2435 					}
2436 				}
2437 				newmap = (struct MR_LD_VF_MAP *)
2438 					((unsigned char *)newmap +
2439 					 newmap->size);
2440 			}
2441 			if (!found && savedmap->policy[thisVf] !=
2442 			    MR_LD_ACCESS_HIDDEN) {
2443 				doscan = 1;
2444 				goto out;
2445 			}
2446 			savedmap = (struct MR_LD_VF_MAP *)
2447 				((unsigned char *)savedmap +
2448 				 savedmap->size);
2449 		}
2450 	}
2451 out:
2452 	if (doscan) {
2453 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2454 		       "affiliation for scsi%d\n", instance->host->host_no);
2455 		memcpy(instance->vf_affiliation, new_affiliation,
2456 		       new_affiliation->size);
2457 		retval = 1;
2458 	}
2459 
2460 	if (new_affiliation)
2461 		pci_free_consistent(instance->pdev,
2462 				    (MAX_LOGICAL_DRIVES + 1) *
2463 				    sizeof(struct MR_LD_VF_AFFILIATION),
2464 				    new_affiliation, new_affiliation_h);
2465 	megasas_return_cmd(instance, cmd);
2466 
2467 	return retval;
2468 }
2469 
2470 /* This function will get the current SR-IOV LD/VF affiliation */
2471 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2472 	int initial)
2473 {
2474 	int retval;
2475 
2476 	if (instance->PlasmaFW111)
2477 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2478 	else
2479 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2480 	return retval;
2481 }
2482 
2483 /* This function will tell FW to start the SR-IOV heartbeat */
2484 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2485 					 int initial)
2486 {
2487 	struct megasas_cmd *cmd;
2488 	struct megasas_dcmd_frame *dcmd;
2489 	int retval = 0;
2490 
2491 	cmd = megasas_get_cmd(instance);
2492 
2493 	if (!cmd) {
2494 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2495 		       "Failed to get cmd for scsi%d\n",
2496 		       instance->host->host_no);
2497 		return -ENOMEM;
2498 	}
2499 
2500 	dcmd = &cmd->frame->dcmd;
2501 
2502 	if (initial) {
2503 		instance->hb_host_mem =
2504 			pci_zalloc_consistent(instance->pdev,
2505 					      sizeof(struct MR_CTRL_HB_HOST_MEM),
2506 					      &instance->hb_host_mem_h);
2507 		if (!instance->hb_host_mem) {
2508 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2509 			       " memory for heartbeat host memory for scsi%d\n",
2510 			       instance->host->host_no);
2511 			retval = -ENOMEM;
2512 			goto out;
2513 		}
2514 	}
2515 
2516 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2517 
2518 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2519 	dcmd->cmd = MFI_CMD_DCMD;
2520 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2521 	dcmd->sge_count = 1;
2522 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2523 	dcmd->timeout = 0;
2524 	dcmd->pad_0 = 0;
2525 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2526 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2527 
2528 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2529 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2530 
2531 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2532 	       instance->host->host_no);
2533 
2534 	if ((instance->adapter_type != MFI_SERIES) &&
2535 	    !instance->mask_interrupts)
2536 		retval = megasas_issue_blocked_cmd(instance, cmd,
2537 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2538 	else
2539 		retval = megasas_issue_polled(instance, cmd);
2540 
2541 	if (retval) {
2542 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2543 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2544 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2545 			"timed out" : "failed", instance->host->host_no);
2546 		retval = 1;
2547 	}
2548 
2549 out:
2550 	megasas_return_cmd(instance, cmd);
2551 
2552 	return retval;
2553 }
2554 
2555 /* Handler for SR-IOV heartbeat */
2556 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2557 {
2558 	struct megasas_instance *instance =
2559 		from_timer(instance, t, sriov_heartbeat_timer);
2560 
2561 	if (instance->hb_host_mem->HB.fwCounter !=
2562 	    instance->hb_host_mem->HB.driverCounter) {
2563 		instance->hb_host_mem->HB.driverCounter =
2564 			instance->hb_host_mem->HB.fwCounter;
2565 		mod_timer(&instance->sriov_heartbeat_timer,
2566 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2567 	} else {
2568 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2569 		       "completed for scsi%d\n", instance->host->host_no);
2570 		schedule_work(&instance->work_init);
2571 	}
2572 }
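/*
 * Illustrative summary (not part of the driver) of the SR-IOV heartbeat
 * handshake: the FW keeps incrementing hb_host_mem->HB.fwCounter; every
 * MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF the VF timer above latches that value
 * into driverCounter and rearms. If fwCounter did not move since the last
 * check, the FW is assumed stuck and work_init is scheduled to start recovery.
 */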
2573 
2574 /**
2575  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2576  * @instance:				Adapter soft state
2577  *
2578  * This function waits for up to resetwaittime seconds (MEGASAS_RESET_WAIT_TIME
2579  * by default) for the FW to complete all outstanding commands. It returns an
2580  * error if IOs are still pending after that period and marks the controller dead.
2581  */
2582 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2583 {
2584 	int i, sl, outstanding;
2585 	u32 reset_index;
2586 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2587 	unsigned long flags;
2588 	struct list_head clist_local;
2589 	struct megasas_cmd *reset_cmd;
2590 	u32 fw_state;
2591 
2592 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2593 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2594 		__func__, __LINE__);
2595 		return FAILED;
2596 	}
2597 
2598 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2599 
2600 		INIT_LIST_HEAD(&clist_local);
2601 		spin_lock_irqsave(&instance->hba_lock, flags);
2602 		list_splice_init(&instance->internal_reset_pending_q,
2603 				&clist_local);
2604 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2605 
2606 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2607 		for (i = 0; i < wait_time; i++) {
2608 			msleep(1000);
2609 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2610 				break;
2611 		}
2612 
2613 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2614 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2615 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2616 			return FAILED;
2617 		}
2618 
2619 		reset_index = 0;
2620 		while (!list_empty(&clist_local)) {
2621 			reset_cmd = list_entry((&clist_local)->next,
2622 						struct megasas_cmd, list);
2623 			list_del_init(&reset_cmd->list);
2624 			if (reset_cmd->scmd) {
2625 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2626 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2627 					reset_index, reset_cmd,
2628 					reset_cmd->scmd->cmnd[0]);
2629 
2630 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2631 				megasas_return_cmd(instance, reset_cmd);
2632 			} else if (reset_cmd->sync_cmd) {
2633 				dev_notice(&instance->pdev->dev, "%p synch cmd"
2634 						" on the reset queue\n",
2635 						reset_cmd);
2636 
2637 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2638 				instance->instancet->fire_cmd(instance,
2639 						reset_cmd->frame_phys_addr,
2640 						0, instance->reg_set);
2641 			} else {
2642 				dev_notice(&instance->pdev->dev, "%p unexpected"
2643 					" cmd on the reset list\n",
2644 					reset_cmd);
2645 			}
2646 			reset_index++;
2647 		}
2648 
2649 		return SUCCESS;
2650 	}
2651 
2652 	for (i = 0; i < resetwaittime; i++) {
2653 		outstanding = atomic_read(&instance->fw_outstanding);
2654 
2655 		if (!outstanding)
2656 			break;
2657 
2658 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2659 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2660 			       "commands to complete\n", i, outstanding);
2661 			/*
2662 			 * Call the cmd completion routine. Cmds are
2663 			 * completed directly without depending on the ISR.
2664 			 */
2665 			megasas_complete_cmd_dpc((unsigned long)instance);
2666 		}
2667 
2668 		msleep(1000);
2669 	}
2670 
2671 	i = 0;
2672 	outstanding = atomic_read(&instance->fw_outstanding);
2673 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2674 
2675 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2676 		goto no_outstanding;
2677 
2678 	if (instance->disableOnlineCtrlReset)
2679 		goto kill_hba_and_failed;
2680 	do {
2681 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2682 			dev_info(&instance->pdev->dev,
2683 				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2684 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2685 			if (i == 3)
2686 				goto kill_hba_and_failed;
2687 			megasas_do_ocr(instance);
2688 
2689 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2690 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2691 				__func__, __LINE__);
2692 				return FAILED;
2693 			}
2694 			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2695 				__func__, __LINE__);
2696 
2697 			for (sl = 0; sl < 10; sl++)
2698 				msleep(500);
2699 
2700 			outstanding = atomic_read(&instance->fw_outstanding);
2701 
2702 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2703 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2704 				goto no_outstanding;
2705 		}
2706 		i++;
2707 	} while (i <= 3);
2708 
2709 no_outstanding:
2710 
2711 	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2712 		__func__, __LINE__);
2713 	return SUCCESS;
2714 
2715 kill_hba_and_failed:
2716 
2717 	/* Reset not supported, kill adapter */
2718 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2719 		" disableOnlineCtrlReset %d fw_outstanding %d\n",
2720 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2721 		atomic_read(&instance->fw_outstanding));
2722 	megasas_dump_pending_frames(instance);
2723 	megaraid_sas_kill_hba(instance);
2724 
2725 	return FAILED;
2726 }
2727 
2728 /**
2729  * megasas_generic_reset -	Generic reset routine
2730  * @scmd:			Mid-layer SCSI command
2731  *
2732  * This routine implements a generic reset handler for device, bus and host
2733  * reset requests. Device, bus and host specific reset handlers can use this
2734  * function after they do their specific tasks.
2735  */
2736 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2737 {
2738 	int ret_val;
2739 	struct megasas_instance *instance;
2740 
2741 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2742 
2743 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2744 		 scmd->cmnd[0], scmd->retries);
2745 
2746 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2747 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2748 		return FAILED;
2749 	}
2750 
2751 	ret_val = megasas_wait_for_outstanding(instance);
2752 	if (ret_val == SUCCESS)
2753 		dev_notice(&instance->pdev->dev, "reset successful\n");
2754 	else
2755 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2756 
2757 	return ret_val;
2758 }
2759 
2760 /**
2761  * megasas_reset_timer - quiesce the adapter if required
2762  * @scmd:		scsi cmnd
2763  *
2764  * Sets the FW busy flag and reduces the host->can_queue if the
2765  * cmd has not been completed within the timeout period.
2766  */
2767 static enum
2768 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2769 {
2770 	struct megasas_instance *instance;
2771 	unsigned long flags;
2772 
2773 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2774 				(scmd_timeout * 2) * HZ)) {
2775 		return BLK_EH_NOT_HANDLED;
2776 	}
2777 
2778 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2779 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2780 		/* Mark FW busy and throttle IO */
2781 		spin_lock_irqsave(instance->host->host_lock, flags);
2782 
2783 		instance->host->can_queue = instance->throttlequeuedepth;
2784 		instance->last_time = jiffies;
2785 		instance->flag |= MEGASAS_FW_BUSY;
2786 
2787 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2788 	}
2789 	return BLK_EH_RESET_TIMER;
2790 }
2791 
2792 /**
2793  * megasas_dump_frame -	Dump an MPT/MFI frame for debugging
2794  */
2795 static inline void
2796 megasas_dump_frame(void *mpi_request, int sz)
2797 {
2798 	int i;
2799 	__le32 *mfp = (__le32 *)mpi_request;
2800 
2801 	printk(KERN_INFO "IO request frame:\n\t");
2802 	for (i = 0; i < sz / sizeof(__le32); i++) {
2803 		if (i && ((i % 8) == 0))
2804 			printk("\n\t");
2805 		printk("%08x ", le32_to_cpu(mfp[i]));
2806 	}
2807 	printk("\n");
2808 }
2809 
2810 /**
2811  * megasas_reset_bus_host -	Bus & host reset handler entry point
2812  */
2813 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2814 {
2815 	int ret;
2816 	struct megasas_instance *instance;
2817 
2818 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2819 
2820 	scmd_printk(KERN_INFO, scmd,
2821 		"Controller reset is requested due to IO timeout\n"
2822 		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
2823 		" SCSI host busy: %d\t FW outstanding: %d\n",
2824 		scmd, scmd->device->host->shost_state,
2825 		atomic_read((atomic_t *)&scmd->device->host->host_busy),
2826 		atomic_read(&instance->fw_outstanding));
2827 
2828 	/*
2829 	 * First wait for all commands to complete
2830 	 */
2831 	if (instance->adapter_type == MFI_SERIES) {
2832 		ret = megasas_generic_reset(scmd);
2833 	} else {
2834 		struct megasas_cmd_fusion *cmd;
2835 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2836 		if (cmd)
2837 			megasas_dump_frame(cmd->io_request,
2838 				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2839 		ret = megasas_reset_fusion(scmd->device->host,
2840 				SCSIIO_TIMEOUT_OCR);
2841 	}
2842 
2843 	return ret;
2844 }
2845 
2846 /**
2847  * megasas_task_abort - Issues task abort request to firmware
2848  *			(supported only for fusion adapters)
2849  * @scmd:		SCSI command pointer
2850  */
2851 static int megasas_task_abort(struct scsi_cmnd *scmd)
2852 {
2853 	int ret;
2854 	struct megasas_instance *instance;
2855 
2856 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2857 
2858 	if (instance->adapter_type != MFI_SERIES)
2859 		ret = megasas_task_abort_fusion(scmd);
2860 	else {
2861 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2862 		ret = FAILED;
2863 	}
2864 
2865 	return ret;
2866 }
2867 
2868 /**
2869  * megasas_reset_target -   Issues target reset request to firmware
2870  *                        (supported only for fusion adapters)
2871  * @scmd:                 SCSI command pointer
2872  */
2873 static int megasas_reset_target(struct scsi_cmnd *scmd)
2874 {
2875 	int ret;
2876 	struct megasas_instance *instance;
2877 
2878 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2879 
2880 	if (instance->adapter_type != MFI_SERIES)
2881 		ret = megasas_reset_target_fusion(scmd);
2882 	else {
2883 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2884 		ret = FAILED;
2885 	}
2886 
2887 	return ret;
2888 }
2889 
2890 /**
2891  * megasas_bios_param - Returns disk geometry for a disk
2892  * @sdev:		device handle
2893  * @bdev:		block device
2894  * @capacity:		drive capacity
2895  * @geom:		geometry parameters
2896  */
2897 static int
2898 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2899 		 sector_t capacity, int geom[])
2900 {
2901 	int heads;
2902 	int sectors;
2903 	sector_t cylinders;
2904 	unsigned long tmp;
2905 
2906 	/* Default heads (64) & sectors (32) */
2907 	heads = 64;
2908 	sectors = 32;
2909 
2910 	tmp = heads * sectors;
2911 	cylinders = capacity;
2912 
2913 	sector_div(cylinders, tmp);
2914 
2915 	/*
2916 	 * Handle extended translation size for logical drives > 1 GB
2917 	 */
2918 
2919 	if (capacity >= 0x200000) {
2920 		heads = 255;
2921 		sectors = 63;
2922 		tmp = heads*sectors;
2923 		cylinders = capacity;
2924 		sector_div(cylinders, tmp);
2925 	}
2926 
2927 	geom[0] = heads;
2928 	geom[1] = sectors;
2929 	geom[2] = cylinders;
2930 
2931 	return 0;
2932 }
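/*
 * Worked example (illustrative only, not part of the driver), with 512-byte
 * sectors:
 *
 *	capacity = 1048576 (512 MiB, below the 0x200000 threshold):
 *		heads = 64, sectors = 32, cylinders = 1048576 / 2048 = 512
 *	capacity = 4294967296 (2 TiB, extended translation):
 *		heads = 255, sectors = 63, cylinders = 4294967296 / 16065 = 267349
 */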
2933 
2934 static void megasas_aen_polling(struct work_struct *work);
2935 
2936 /**
2937  * megasas_service_aen -	Processes an event notification
2938  * @instance:			Adapter soft state
2939  * @cmd:			AEN command completed by the ISR
2940  *
2941  * For AEN, driver sends a command down to FW that is held by the FW till an
2942  * event occurs. When an event of interest occurs, FW completes the command
2943  * that it was previously holding.
2944  *
2945  * This routine sends a SIGIO signal to processes that have registered with
2946  * the driver for AEN.
2947  */
2948 static void
2949 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2950 {
2951 	unsigned long flags;
2952 
2953 	/*
2954 	 * Don't signal the app if this is just an aborted, previously registered AEN
2955 	 */
2956 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
2957 		spin_lock_irqsave(&poll_aen_lock, flags);
2958 		megasas_poll_wait_aen = 1;
2959 		spin_unlock_irqrestore(&poll_aen_lock, flags);
2960 		wake_up(&megasas_poll_wait);
2961 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2962 	}
2963 	else
2964 		cmd->abort_aen = 0;
2965 
2966 	instance->aen_cmd = NULL;
2967 
2968 	megasas_return_cmd(instance, cmd);
2969 
2970 	if ((instance->unload == 0) &&
2971 		((instance->issuepend_done == 1))) {
2972 		struct megasas_aen_event *ev;
2973 
2974 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2975 		if (!ev) {
2976 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2977 		} else {
2978 			ev->instance = instance;
2979 			instance->ev = ev;
2980 			INIT_DELAYED_WORK(&ev->hotplug_work,
2981 					  megasas_aen_polling);
2982 			schedule_delayed_work(&ev->hotplug_work, 0);
2983 		}
2984 	}
2985 }
2986 
2987 static ssize_t
2988 megasas_fw_crash_buffer_store(struct device *cdev,
2989 	struct device_attribute *attr, const char *buf, size_t count)
2990 {
2991 	struct Scsi_Host *shost = class_to_shost(cdev);
2992 	struct megasas_instance *instance =
2993 		(struct megasas_instance *) shost->hostdata;
2994 	int val = 0;
2995 	unsigned long flags;
2996 
2997 	if (kstrtoint(buf, 0, &val) != 0)
2998 		return -EINVAL;
2999 
3000 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3001 	instance->fw_crash_buffer_offset = val;
3002 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3003 	return strlen(buf);
3004 }
3005 
3006 static ssize_t
3007 megasas_fw_crash_buffer_show(struct device *cdev,
3008 	struct device_attribute *attr, char *buf)
3009 {
3010 	struct Scsi_Host *shost = class_to_shost(cdev);
3011 	struct megasas_instance *instance =
3012 		(struct megasas_instance *) shost->hostdata;
3013 	u32 size;
3014 	unsigned long buff_addr;
3015 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3016 	unsigned long src_addr;
3017 	unsigned long flags;
3018 	u32 buff_offset;
3019 
3020 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3021 	buff_offset = instance->fw_crash_buffer_offset;
3022 	if (!instance->crash_dump_buf &&
3023 		!((instance->fw_crash_state == AVAILABLE) ||
3024 		(instance->fw_crash_state == COPYING))) {
3025 		dev_err(&instance->pdev->dev,
3026 			"Firmware crash dump is not available\n");
3027 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3028 		return -EINVAL;
3029 	}
3030 
3031 	buff_addr = (unsigned long) buf;
3032 
3033 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3034 		dev_err(&instance->pdev->dev,
3035 			"Firmware crash dump offset is out of range\n");
3036 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3037 		return 0;
3038 	}
3039 
3040 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3041 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3042 
3043 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3044 		(buff_offset % dmachunk);
3045 	memcpy(buf, (void *)src_addr, size);
3046 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3047 
3048 	return size;
3049 }
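/*
 * Illustrative example (not part of the driver), assuming CRASH_DMA_BUF_SIZE
 * is 1 MB: a userspace offset of 3670016 bytes (3.5 MB) selects crash_buf[3]
 * with an intra-chunk offset of 524288. At most PAGE_SIZE - 1 bytes are
 * returned per read, so the application advances fw_crash_buffer_offset
 * between reads to walk the whole dump.
 */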
3050 
3051 static ssize_t
3052 megasas_fw_crash_buffer_size_show(struct device *cdev,
3053 	struct device_attribute *attr, char *buf)
3054 {
3055 	struct Scsi_Host *shost = class_to_shost(cdev);
3056 	struct megasas_instance *instance =
3057 		(struct megasas_instance *) shost->hostdata;
3058 
3059 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3060 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3061 }
3062 
3063 static ssize_t
3064 megasas_fw_crash_state_store(struct device *cdev,
3065 	struct device_attribute *attr, const char *buf, size_t count)
3066 {
3067 	struct Scsi_Host *shost = class_to_shost(cdev);
3068 	struct megasas_instance *instance =
3069 		(struct megasas_instance *) shost->hostdata;
3070 	int val = 0;
3071 	unsigned long flags;
3072 
3073 	if (kstrtoint(buf, 0, &val) != 0)
3074 		return -EINVAL;
3075 
3076 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3077 		dev_err(&instance->pdev->dev, "application updates invalid "
3078 			"firmware crash state\n");
3079 		return -EINVAL;
3080 	}
3081 
3082 	instance->fw_crash_state = val;
3083 
3084 	if ((val == COPIED) || (val == COPY_ERROR)) {
3085 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3086 		megasas_free_host_crash_buffer(instance);
3087 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3088 		if (val == COPY_ERROR)
3089 			dev_info(&instance->pdev->dev, "application failed to "
3090 				"copy Firmware crash dump\n");
3091 		else
3092 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3093 				"copied successfully\n");
3094 	}
3095 	return strlen(buf);
3096 }
3097 
3098 static ssize_t
3099 megasas_fw_crash_state_show(struct device *cdev,
3100 	struct device_attribute *attr, char *buf)
3101 {
3102 	struct Scsi_Host *shost = class_to_shost(cdev);
3103 	struct megasas_instance *instance =
3104 		(struct megasas_instance *) shost->hostdata;
3105 
3106 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3107 }
3108 
3109 static ssize_t
3110 megasas_page_size_show(struct device *cdev,
3111 	struct device_attribute *attr, char *buf)
3112 {
3113 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3114 }
3115 
3116 static ssize_t
3117 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3118 	char *buf)
3119 {
3120 	struct Scsi_Host *shost = class_to_shost(cdev);
3121 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3122 
3123 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3124 }
3125 
3126 static ssize_t
3127 megasas_fw_cmds_outstanding_show(struct device *cdev,
3128 				 struct device_attribute *attr, char *buf)
3129 {
3130 	struct Scsi_Host *shost = class_to_shost(cdev);
3131 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3132 
3133 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3134 }
3135 
3136 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3137 	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3138 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3139 	megasas_fw_crash_buffer_size_show, NULL);
3140 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3141 	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3142 static DEVICE_ATTR(page_size, S_IRUGO,
3143 	megasas_page_size_show, NULL);
3144 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3145 	megasas_ldio_outstanding_show, NULL);
3146 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
3147 	megasas_fw_cmds_outstanding_show, NULL);
3148 
3149 struct device_attribute *megaraid_host_attrs[] = {
3150 	&dev_attr_fw_crash_buffer_size,
3151 	&dev_attr_fw_crash_buffer,
3152 	&dev_attr_fw_crash_state,
3153 	&dev_attr_page_size,
3154 	&dev_attr_ldio_outstanding,
3155 	&dev_attr_fw_cmds_outstanding,
3156 	NULL,
3157 };
3158 
3159 /*
3160  * Scsi host template for megaraid_sas driver
3161  */
3162 static struct scsi_host_template megasas_template = {
3163 
3164 	.module = THIS_MODULE,
3165 	.name = "Avago SAS based MegaRAID driver",
3166 	.proc_name = "megaraid_sas",
3167 	.slave_configure = megasas_slave_configure,
3168 	.slave_alloc = megasas_slave_alloc,
3169 	.slave_destroy = megasas_slave_destroy,
3170 	.queuecommand = megasas_queue_command,
3171 	.eh_target_reset_handler = megasas_reset_target,
3172 	.eh_abort_handler = megasas_task_abort,
3173 	.eh_host_reset_handler = megasas_reset_bus_host,
3174 	.eh_timed_out = megasas_reset_timer,
3175 	.shost_attrs = megaraid_host_attrs,
3176 	.bios_param = megasas_bios_param,
3177 	.use_clustering = ENABLE_CLUSTERING,
3178 	.change_queue_depth = scsi_change_queue_depth,
3179 	.no_write_same = 1,
3180 };
3181 
3182 /**
3183  * megasas_complete_int_cmd -	Completes an internal command
3184  * @instance:			Adapter soft state
3185  * @cmd:			Command to be completed
3186  *
3187  * The megasas_issue_blocked_cmd() function waits for a command to complete
3188  * after it issues a command. This function wakes up that waiting routine by
3189  * calling wake_up() on the wait queue.
3190  */
3191 static void
3192 megasas_complete_int_cmd(struct megasas_instance *instance,
3193 			 struct megasas_cmd *cmd)
3194 {
3195 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3196 	wake_up(&instance->int_cmd_wait_q);
3197 }
3198 
3199 /**
3200  * megasas_complete_abort -	Completes aborting a command
3201  * @instance:			Adapter soft state
3202  * @cmd:			Cmd that was issued to abort another cmd
3203  *
3204  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3205  * after it issues an abort on a previously issued command. This function
3206  * wakes up all functions waiting on the same wait queue.
3207  */
3208 static void
3209 megasas_complete_abort(struct megasas_instance *instance,
3210 		       struct megasas_cmd *cmd)
3211 {
3212 	if (cmd->sync_cmd) {
3213 		cmd->sync_cmd = 0;
3214 		cmd->cmd_status_drv = 0;
3215 		wake_up(&instance->abort_cmd_wait_q);
3216 	}
3217 }
3218 
3219 /**
3220  * megasas_complete_cmd -	Completes a command
3221  * @instance:			Adapter soft state
3222  * @cmd:			Command to be completed
3223  * @alt_status:			If non-zero, use this value as status to
3224  *				SCSI mid-layer instead of the value returned
3225  *				by the FW. This should be used if caller wants
3226  *				an alternate status (as in the case of aborted
3227  *				commands)
3228  */
3229 void
3230 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3231 		     u8 alt_status)
3232 {
3233 	int exception = 0;
3234 	struct megasas_header *hdr = &cmd->frame->hdr;
3235 	unsigned long flags;
3236 	struct fusion_context *fusion = instance->ctrl_context;
3237 	u32 opcode, status;
3238 
3239 	/* flag for the retry reset */
3240 	cmd->retry_for_fw_reset = 0;
3241 
3242 	if (cmd->scmd)
3243 		cmd->scmd->SCp.ptr = NULL;
3244 
3245 	switch (hdr->cmd) {
3246 	case MFI_CMD_INVALID:
3247 		/* Some older 1068 controller FW may keep a pending
3248 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3249 		   when booting the kdump kernel.  Ignore this command to
3250 		   prevent a kernel panic on shutdown of the kdump kernel. */
3251 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3252 		       "completed\n");
3253 		dev_warn(&instance->pdev->dev, "If you have a controller "
3254 		       "other than PERC5, please upgrade your firmware\n");
3255 		break;
3256 	case MFI_CMD_PD_SCSI_IO:
3257 	case MFI_CMD_LD_SCSI_IO:
3258 
3259 		/*
3260 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3261 		 * issued either through an IO path or an IOCTL path. If it
3262 		 * was via IOCTL, we will send it to internal completion.
3263 		 */
3264 		if (cmd->sync_cmd) {
3265 			cmd->sync_cmd = 0;
3266 			megasas_complete_int_cmd(instance, cmd);
3267 			break;
3268 		}
3269 
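		/* fall through - non-ioctl SCSI IO is completed like LD read/write below */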
3270 	case MFI_CMD_LD_READ:
3271 	case MFI_CMD_LD_WRITE:
3272 
3273 		if (alt_status) {
3274 			cmd->scmd->result = alt_status << 16;
3275 			exception = 1;
3276 		}
3277 
3278 		if (exception) {
3279 
3280 			atomic_dec(&instance->fw_outstanding);
3281 
3282 			scsi_dma_unmap(cmd->scmd);
3283 			cmd->scmd->scsi_done(cmd->scmd);
3284 			megasas_return_cmd(instance, cmd);
3285 
3286 			break;
3287 		}
3288 
3289 		switch (hdr->cmd_status) {
3290 
3291 		case MFI_STAT_OK:
3292 			cmd->scmd->result = DID_OK << 16;
3293 			break;
3294 
3295 		case MFI_STAT_SCSI_IO_FAILED:
3296 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3297 			cmd->scmd->result =
3298 			    (DID_ERROR << 16) | hdr->scsi_status;
3299 			break;
3300 
3301 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3302 
3303 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3304 
3305 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3306 				memset(cmd->scmd->sense_buffer, 0,
3307 				       SCSI_SENSE_BUFFERSIZE);
3308 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3309 				       hdr->sense_len);
3310 
3311 				cmd->scmd->result |= DRIVER_SENSE << 24;
3312 			}
3313 
3314 			break;
3315 
3316 		case MFI_STAT_LD_OFFLINE:
3317 		case MFI_STAT_DEVICE_NOT_FOUND:
3318 			cmd->scmd->result = DID_BAD_TARGET << 16;
3319 			break;
3320 
3321 		default:
3322 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3323 			       hdr->cmd_status);
3324 			cmd->scmd->result = DID_ERROR << 16;
3325 			break;
3326 		}
3327 
3328 		atomic_dec(&instance->fw_outstanding);
3329 
3330 		scsi_dma_unmap(cmd->scmd);
3331 		cmd->scmd->scsi_done(cmd->scmd);
3332 		megasas_return_cmd(instance, cmd);
3333 
3334 		break;
3335 
3336 	case MFI_CMD_SMP:
3337 	case MFI_CMD_STP:
3338 	case MFI_CMD_NVME:
3339 		megasas_complete_int_cmd(instance, cmd);
3340 		break;
3341 
3342 	case MFI_CMD_DCMD:
3343 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3344 		/* Check for LD map update */
3345 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3346 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3347 			fusion->fast_path_io = 0;
3348 			spin_lock_irqsave(instance->host->host_lock, flags);
3349 			status = cmd->frame->hdr.cmd_status;
3350 			instance->map_update_cmd = NULL;
3351 			if (status != MFI_STAT_OK) {
3352 				if (status != MFI_STAT_NOT_FOUND)
3353 					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3354 					       cmd->frame->hdr.cmd_status);
3355 				else {
3356 					megasas_return_cmd(instance, cmd);
3357 					spin_unlock_irqrestore(
3358 						instance->host->host_lock,
3359 						flags);
3360 					break;
3361 				}
3362 			}
3363 
3364 			megasas_return_cmd(instance, cmd);
3365 
3366 			/*
3367 			 * Set fast path IO to ZERO.
3368 			 * MR_ValidateMapInfo() will set the proper value.
3369 			 * Meanwhile all IOs will go out as LD IO.
3370 			 */
3371 			if (status == MFI_STAT_OK &&
3372 			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3373 				instance->map_id++;
3374 				fusion->fast_path_io = 1;
3375 			} else {
3376 				fusion->fast_path_io = 0;
3377 			}
3378 
3379 			megasas_sync_map_info(instance);
3380 			spin_unlock_irqrestore(instance->host->host_lock,
3381 					       flags);
3382 			break;
3383 		}
3384 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3385 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3386 			spin_lock_irqsave(&poll_aen_lock, flags);
3387 			megasas_poll_wait_aen = 0;
3388 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3389 		}
3390 
3391 		/* FW has an updated PD sequence */
3392 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3393 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3394 
3395 			spin_lock_irqsave(instance->host->host_lock, flags);
3396 			status = cmd->frame->hdr.cmd_status;
3397 			instance->jbod_seq_cmd = NULL;
3398 			megasas_return_cmd(instance, cmd);
3399 
3400 			if (status == MFI_STAT_OK) {
3401 				instance->pd_seq_map_id++;
3402 				/* Re-register a pd sync seq num cmd */
3403 				if (megasas_sync_pd_seq_num(instance, true))
3404 					instance->use_seqnum_jbod_fp = false;
3405 			} else
3406 				instance->use_seqnum_jbod_fp = false;
3407 
3408 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3409 			break;
3410 		}
3411 
3412 		/*
3413 		 * See if got an event notification
3414 		 */
3415 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3416 			megasas_service_aen(instance, cmd);
3417 		else
3418 			megasas_complete_int_cmd(instance, cmd);
3419 
3420 		break;
3421 
3422 	case MFI_CMD_ABORT:
3423 		/*
3424 		 * Cmd issued to abort another cmd returned
3425 		 */
3426 		megasas_complete_abort(instance, cmd);
3427 		break;
3428 
3429 	default:
3430 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3431 		       hdr->cmd);
3432 		megasas_complete_int_cmd(instance, cmd);
3433 		break;
3434 	}
3435 }
3436 
3437 /**
3438  * megasas_issue_pending_cmds_again -	issue all pending cmds
3439  *					in FW again because of the fw reset
3440  * @instance:				Adapter soft state
3441  */
3442 static inline void
3443 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3444 {
3445 	struct megasas_cmd *cmd;
3446 	struct list_head clist_local;
3447 	union megasas_evt_class_locale class_locale;
3448 	unsigned long flags;
3449 	u32 seq_num;
3450 
3451 	INIT_LIST_HEAD(&clist_local);
3452 	spin_lock_irqsave(&instance->hba_lock, flags);
3453 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3454 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3455 
3456 	while (!list_empty(&clist_local)) {
3457 		cmd = list_entry((&clist_local)->next,
3458 					struct megasas_cmd, list);
3459 		list_del_init(&cmd->list);
3460 
3461 		if (cmd->sync_cmd || cmd->scmd) {
3462 			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3463 				"detected to be pending during HBA reset\n",
3464 					cmd, cmd->scmd, cmd->sync_cmd);
3465 
3466 			cmd->retry_for_fw_reset++;
3467 
3468 			if (cmd->retry_for_fw_reset == 3) {
3469 				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3470 					"was tried multiple times during reset. "
3471 					"Shutting down the HBA\n",
3472 					cmd, cmd->scmd, cmd->sync_cmd);
3473 				instance->instancet->disable_intr(instance);
3474 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3475 				megaraid_sas_kill_hba(instance);
3476 				return;
3477 			}
3478 		}
3479 
3480 		if (cmd->sync_cmd == 1) {
3481 			if (cmd->scmd) {
3482 				dev_notice(&instance->pdev->dev, "unexpected "
3483 					"cmd attached to internal command!\n");
3484 			}
3485 			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3486 						"on the internal reset queue, "
3487 						"issue it again.\n", cmd);
3488 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3489 			instance->instancet->fire_cmd(instance,
3490 							cmd->frame_phys_addr,
3491 							0, instance->reg_set);
3492 		} else if (cmd->scmd) {
3493 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3494 			"detected on the internal queue, issue again.\n",
3495 			cmd, cmd->scmd->cmnd[0]);
3496 
3497 			atomic_inc(&instance->fw_outstanding);
3498 			instance->instancet->fire_cmd(instance,
3499 					cmd->frame_phys_addr,
3500 					cmd->frame_count-1, instance->reg_set);
3501 		} else {
3502 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3503 				"internal reset defer list while re-issue!!\n",
3504 				cmd);
3505 		}
3506 	}
3507 
3508 	if (instance->aen_cmd) {
3509 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3510 		megasas_return_cmd(instance, instance->aen_cmd);
3511 
3512 		instance->aen_cmd = NULL;
3513 	}
3514 
3515 	/*
3516 	 * Initiate AEN (Asynchronous Event Notification)
3517 	 */
3518 	seq_num = instance->last_seq_num;
3519 	class_locale.members.reserved = 0;
3520 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3521 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3522 
3523 	megasas_register_aen(instance, seq_num, class_locale.word);
3524 }
3525 
3526 /**
3527  * megasas_internal_reset_defer_cmds -	Move internal reset pending cmds to a deferred queue
3528  * @instance:				Adapter soft state
3529  *
3530  * We move the commands pending at internal reset time to a deferred
3531  * queue. This queue is flushed after successful completion of the
3532  * internal reset sequence. If the internal reset does not complete in
3533  * time, the kernel reset handler flushes these commands.
3534  **/
3535 static void
3536 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3537 {
3538 	struct megasas_cmd *cmd;
3539 	int i;
3540 	u16 max_cmd = instance->max_fw_cmds;
3541 	u32 defer_index;
3542 	unsigned long flags;
3543 
3544 	defer_index = 0;
3545 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3546 	for (i = 0; i < max_cmd; i++) {
3547 		cmd = instance->cmd_list[i];
3548 		if (cmd->sync_cmd == 1 || cmd->scmd) {
3549 			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3550 					"on the defer queue as internal\n",
3551 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3552 
3553 			if (!list_empty(&cmd->list)) {
3554 				dev_notice(&instance->pdev->dev, "ERROR while"
3555 					" moving this cmd:%p, %d %p, it was"
3556 					" discovered on some list?\n",
3557 					cmd, cmd->sync_cmd, cmd->scmd);
3558 
3559 				list_del_init(&cmd->list);
3560 			}
3561 			defer_index++;
3562 			list_add_tail(&cmd->list,
3563 				&instance->internal_reset_pending_q);
3564 		}
3565 	}
3566 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3567 }
3568 
3569 
3570 static void
3571 process_fw_state_change_wq(struct work_struct *work)
3572 {
3573 	struct megasas_instance *instance =
3574 		container_of(work, struct megasas_instance, work_init);
3575 	u32 wait;
3576 	unsigned long flags;
3577 
3578 	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3579 		dev_notice(&instance->pdev->dev, "error, recovery state %x\n",
3580 				atomic_read(&instance->adprecovery));
3581 		return;
3582 	}
3583 
3584 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3585 		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3586 					"state, restarting it...\n");
3587 
3588 		instance->instancet->disable_intr(instance);
3589 		atomic_set(&instance->fw_outstanding, 0);
3590 
3591 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3592 		instance->instancet->adp_reset(instance, instance->reg_set);
3593 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3594 
3595 		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3596 					"initiating next stage...\n");
3597 
3598 		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3599 					"state 2 starting...\n");
3600 
3601 		/* wait about 30 seconds before starting the second init */
3602 		for (wait = 0; wait < 30; wait++) {
3603 			msleep(1000);
3604 		}
3605 
3606 		if (megasas_transition_to_ready(instance, 1)) {
3607 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3608 
3609 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3610 			megaraid_sas_kill_hba(instance);
3611 			return;
3612 		}
3613 
3614 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3615 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3616 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3617 			) {
3618 			*instance->consumer = *instance->producer;
3619 		} else {
3620 			*instance->consumer = 0;
3621 			*instance->producer = 0;
3622 		}
3623 
3624 		megasas_issue_init_mfi(instance);
3625 
3626 		spin_lock_irqsave(&instance->hba_lock, flags);
3627 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3628 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3629 		instance->instancet->enable_intr(instance);
3630 
3631 		megasas_issue_pending_cmds_again(instance);
3632 		instance->issuepend_done = 1;
3633 	}
3634 }
3635 
3636 /**
3637  * megasas_deplete_reply_queue -	Processes all completed commands
3638  * @instance:				Adapter soft state
3639  * @alt_status:				Alternate status to be returned to
3640  *					SCSI mid-layer instead of the status
3641  *					returned by the FW
3642  * Note: this must be called with hba lock held
3643  */
3644 static int
3645 megasas_deplete_reply_queue(struct megasas_instance *instance,
3646 					u8 alt_status)
3647 {
3648 	u32 mfiStatus;
3649 	u32 fw_state;
3650 
3651 	if ((mfiStatus = instance->instancet->check_reset(instance,
3652 					instance->reg_set)) == 1) {
3653 		return IRQ_HANDLED;
3654 	}
3655 
3656 	if ((mfiStatus = instance->instancet->clear_intr(
3657 						instance->reg_set)
3658 						) == 0) {
3659 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3660 		if (!instance->msix_vectors)
3661 			return IRQ_NONE;
3662 	}
3663 
3664 	instance->mfiStatus = mfiStatus;
3665 
3666 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3667 		fw_state = instance->instancet->read_fw_status_reg(
3668 				instance->reg_set) & MFI_STATE_MASK;
3669 
3670 		if (fw_state != MFI_STATE_FAULT) {
3671 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3672 						fw_state);
3673 		}
3674 
3675 		if ((fw_state == MFI_STATE_FAULT) &&
3676 				(instance->disableOnlineCtrlReset == 0)) {
3677 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3678 
3679 			if ((instance->pdev->device ==
3680 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3681 				(instance->pdev->device ==
3682 					PCI_DEVICE_ID_DELL_PERC5) ||
3683 				(instance->pdev->device ==
3684 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3685 
3686 				*instance->consumer =
3687 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3688 			}
3689 
3690 
3691 			instance->instancet->disable_intr(instance);
3692 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3693 			instance->issuepend_done = 0;
3694 
3695 			atomic_set(&instance->fw_outstanding, 0);
3696 			megasas_internal_reset_defer_cmds(instance);
3697 
3698 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3699 					fw_state, atomic_read(&instance->adprecovery));
3700 
3701 			schedule_work(&instance->work_init);
3702 			return IRQ_HANDLED;
3703 
3704 		} else {
3705 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3706 				fw_state, instance->disableOnlineCtrlReset);
3707 		}
3708 	}
3709 
3710 	tasklet_schedule(&instance->isr_tasklet);
3711 	return IRQ_HANDLED;
3712 }
3713 /**
3714  * megasas_isr - isr entry point
3715  */
3716 static irqreturn_t megasas_isr(int irq, void *devp)
3717 {
3718 	struct megasas_irq_context *irq_context = devp;
3719 	struct megasas_instance *instance = irq_context->instance;
3720 	unsigned long flags;
3721 	irqreturn_t rc;
3722 
3723 	if (atomic_read(&instance->fw_reset_no_pci_access))
3724 		return IRQ_HANDLED;
3725 
3726 	spin_lock_irqsave(&instance->hba_lock, flags);
3727 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3728 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3729 
3730 	return rc;
3731 }
3732 
3733 /**
3734  * megasas_transition_to_ready -	Move the FW to READY state
3735  * @instance:				Adapter soft state
3736  *
3737  * During initialization, the FW can potentially be in any one of several
3738  * possible states. If the FW is in the operational or waiting-for-handshake
3739  * states, the driver must take steps to bring it to the ready state.
3740  * Otherwise, it has to wait for the FW to reach the ready state.
3741  */
3742 int
3743 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3744 {
3745 	int i;
3746 	u8 max_wait;
3747 	u32 fw_state;
3748 	u32 cur_state;
3749 	u32 abs_state, curr_abs_state;
3750 
3751 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3752 	fw_state = abs_state & MFI_STATE_MASK;
3753 
3754 	if (fw_state != MFI_STATE_READY)
3755 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3756 		       " state\n");
3757 
3758 	while (fw_state != MFI_STATE_READY) {
3759 
3760 		switch (fw_state) {
3761 
3762 		case MFI_STATE_FAULT:
3763 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3764 			if (ocr) {
3765 				max_wait = MEGASAS_RESET_WAIT_TIME;
3766 				cur_state = MFI_STATE_FAULT;
3767 				break;
3768 			} else
3769 				return -ENODEV;
3770 
3771 		case MFI_STATE_WAIT_HANDSHAKE:
3772 			/*
3773 			 * Set the CLR bit in inbound doorbell
3774 			 */
3775 			if ((instance->pdev->device ==
3776 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3777 				(instance->pdev->device ==
3778 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3779 				(instance->adapter_type != MFI_SERIES))
3780 				writel(
3781 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3782 				  &instance->reg_set->doorbell);
3783 			else
3784 				writel(
3785 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3786 					&instance->reg_set->inbound_doorbell);
3787 
3788 			max_wait = MEGASAS_RESET_WAIT_TIME;
3789 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
3790 			break;
3791 
3792 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3793 			if ((instance->pdev->device ==
3794 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3795 				(instance->pdev->device ==
3796 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3797 				(instance->adapter_type != MFI_SERIES))
3798 				writel(MFI_INIT_HOTPLUG,
3799 				       &instance->reg_set->doorbell);
3800 			else
3801 				writel(MFI_INIT_HOTPLUG,
3802 					&instance->reg_set->inbound_doorbell);
3803 
3804 			max_wait = MEGASAS_RESET_WAIT_TIME;
3805 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3806 			break;
3807 
3808 		case MFI_STATE_OPERATIONAL:
3809 			/*
3810 			 * Bring it to READY state; assuming max wait 10 secs
3811 			 */
3812 			instance->instancet->disable_intr(instance);
3813 			if ((instance->pdev->device ==
3814 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3815 				(instance->pdev->device ==
3816 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3817 				(instance->adapter_type != MFI_SERIES)) {
3818 				writel(MFI_RESET_FLAGS,
3819 					&instance->reg_set->doorbell);
3820 
3821 				if (instance->adapter_type != MFI_SERIES) {
3822 					for (i = 0; i < (10 * 1000); i += 20) {
3823 						if (readl(
3824 							    &instance->
3825 							    reg_set->
3826 							    doorbell) & 1)
3827 							msleep(20);
3828 						else
3829 							break;
3830 					}
3831 				}
3832 			} else
3833 				writel(MFI_RESET_FLAGS,
3834 					&instance->reg_set->inbound_doorbell);
3835 
3836 			max_wait = MEGASAS_RESET_WAIT_TIME;
3837 			cur_state = MFI_STATE_OPERATIONAL;
3838 			break;
3839 
3840 		case MFI_STATE_UNDEFINED:
3841 			/*
3842 			 * This state should not last for more than 2 seconds
3843 			 */
3844 			max_wait = MEGASAS_RESET_WAIT_TIME;
3845 			cur_state = MFI_STATE_UNDEFINED;
3846 			break;
3847 
3848 		case MFI_STATE_BB_INIT:
3849 			max_wait = MEGASAS_RESET_WAIT_TIME;
3850 			cur_state = MFI_STATE_BB_INIT;
3851 			break;
3852 
3853 		case MFI_STATE_FW_INIT:
3854 			max_wait = MEGASAS_RESET_WAIT_TIME;
3855 			cur_state = MFI_STATE_FW_INIT;
3856 			break;
3857 
3858 		case MFI_STATE_FW_INIT_2:
3859 			max_wait = MEGASAS_RESET_WAIT_TIME;
3860 			cur_state = MFI_STATE_FW_INIT_2;
3861 			break;
3862 
3863 		case MFI_STATE_DEVICE_SCAN:
3864 			max_wait = MEGASAS_RESET_WAIT_TIME;
3865 			cur_state = MFI_STATE_DEVICE_SCAN;
3866 			break;
3867 
3868 		case MFI_STATE_FLUSH_CACHE:
3869 			max_wait = MEGASAS_RESET_WAIT_TIME;
3870 			cur_state = MFI_STATE_FLUSH_CACHE;
3871 			break;
3872 
3873 		default:
3874 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3875 			       fw_state);
3876 			return -ENODEV;
3877 		}
3878 
3879 		/*
3880 		 * The cur_state should not last for more than max_wait secs
3881 		 */
3882 		for (i = 0; i < (max_wait * 1000); i++) {
3883 			curr_abs_state = instance->instancet->
3884 				read_fw_status_reg(instance->reg_set);
3885 
3886 			if (abs_state == curr_abs_state) {
3887 				msleep(1);
3888 			} else
3889 				break;
3890 		}
3891 
3892 		/*
3893 		 * Return error if fw_state hasn't changed after max_wait
3894 		 */
3895 		if (curr_abs_state == abs_state) {
3896 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3897 			       "in %d secs\n", fw_state, max_wait);
3898 			return -ENODEV;
3899 		}
3900 
3901 		abs_state = curr_abs_state;
3902 		fw_state = curr_abs_state & MFI_STATE_MASK;
3903 	}
3904 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3905 
3906 	return 0;
3907 }
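/*
 * Usage note (sketch based on the call sites later in this file): probe/init
 * calls megasas_transition_to_ready(instance, 0), while the OCR/recovery work
 * queue passes ocr = 1 so that a FW found in the FAULT state is waited on
 * rather than failed immediately with -ENODEV.
 */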
3908 
3909 /**
3910  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
3911  * @instance:				Adapter soft state
3912  */
3913 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3914 {
3915 	int i;
3916 	u16 max_cmd = instance->max_mfi_cmds;
3917 	struct megasas_cmd *cmd;
3918 
3919 	if (!instance->frame_dma_pool)
3920 		return;
3921 
3922 	/*
3923 	 * Return all frames to pool
3924 	 */
3925 	for (i = 0; i < max_cmd; i++) {
3926 
3927 		cmd = instance->cmd_list[i];
3928 
3929 		if (cmd->frame)
3930 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
3931 				      cmd->frame_phys_addr);
3932 
3933 		if (cmd->sense)
3934 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
3935 				      cmd->sense_phys_addr);
3936 	}
3937 
3938 	/*
3939 	 * Now destroy the pool itself
3940 	 */
3941 	dma_pool_destroy(instance->frame_dma_pool);
3942 	dma_pool_destroy(instance->sense_dma_pool);
3943 
3944 	instance->frame_dma_pool = NULL;
3945 	instance->sense_dma_pool = NULL;
3946 }
3947 
3948 /**
3949  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
3950  * @instance:			Adapter soft state
3951  *
3952  * Each command packet has an embedded DMA memory buffer that is used for
3953  * filling the MFI frame and the SG list that immediately follows the frame.
3954  * This function creates those DMA memory buffers for each command packet by
3955  * using the DMA pool facility.
3956  */
3957 static int megasas_create_frame_pool(struct megasas_instance *instance)
3958 {
3959 	int i;
3960 	u16 max_cmd;
3961 	u32 sge_sz;
3962 	u32 frame_count;
3963 	struct megasas_cmd *cmd;
3964 
3965 	max_cmd = instance->max_mfi_cmds;
3966 
3967 	/*
3968 	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3969 	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3970 	 */
3971 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3972 	    sizeof(struct megasas_sge32);
3973 
3974 	if (instance->flag_ieee)
3975 		sge_sz = sizeof(struct megasas_sge_skinny);
3976 
3977 	/*
3978 	 * For MFI controllers:
3979 	 * max_num_sge = 60
3980 	 * max_sge_sz  = 16 bytes (sizeof megasas_sge_skinny)
3981 	 * Total 960 bytes (15 MFI frames of 64 bytes each)
3982 	 *
3983 	 * Fusion adapters require only 3 extra frames:
3984 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3985 	 * max_sge_sz  = 12 bytes (sizeof megasas_sge64)
3986 	 * Total 192 bytes (3 MFI frames of 64 bytes each)
3987 	 */
3988 	frame_count = (instance->adapter_type == MFI_SERIES) ?
3989 			(15 + 1) : (3 + 1);
3990 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
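	/*
	 * Worked example (derived from the sizing comment above, assuming the
	 * 64-byte MEGAMFI_FRAME_SIZE noted there): MFI series uses
	 * frame_count = 15 + 1 header frame = 16, i.e. 1024 bytes per command;
	 * Fusion uses frame_count = 3 + 1 = 4, i.e. 256 bytes per command.
	 */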
3991 	/*
3992 	 * Use DMA pool facility provided by PCI layer
3993 	 */
3994 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
3995 					&instance->pdev->dev,
3996 					instance->mfi_frame_size, 256, 0);
3997 
3998 	if (!instance->frame_dma_pool) {
3999 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4000 		return -ENOMEM;
4001 	}
4002 
4003 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4004 						   &instance->pdev->dev, 128,
4005 						   4, 0);
4006 
4007 	if (!instance->sense_dma_pool) {
4008 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4009 
4010 		dma_pool_destroy(instance->frame_dma_pool);
4011 		instance->frame_dma_pool = NULL;
4012 
4013 		return -ENOMEM;
4014 	}
4015 
4016 	/*
4017 	 * Allocate and attach a frame to each of the commands in cmd_list.
4018 	 * By making cmd->index as the context instead of the &cmd, we can
4019 	 * By using cmd->index as the context instead of &cmd, we can
4020 	 * always use a 32-bit context regardless of the architecture.
4021 	for (i = 0; i < max_cmd; i++) {
4022 
4023 		cmd = instance->cmd_list[i];
4024 
4025 		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4026 					    GFP_KERNEL, &cmd->frame_phys_addr);
4027 
4028 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4029 					    GFP_KERNEL, &cmd->sense_phys_addr);
4030 
4031 		/*
4032 		 * megasas_teardown_frame_pool() takes care of freeing
4033 		 * whatever has been allocated
4034 		 */
4035 		if (!cmd->frame || !cmd->sense) {
4036 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4037 			megasas_teardown_frame_pool(instance);
4038 			return -ENOMEM;
4039 		}
4040 
4041 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4042 		cmd->frame->io.pad_0 = 0;
4043 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4044 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4045 	}
4046 
4047 	return 0;
4048 }
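/*
 * Resulting per-command DMA layout (illustrative sketch of the allocations
 * made above, not an additional structure definition):
 *
 *   cmd->frame -> | 64-byte MFI frame | SGL space (frame_count - 1 frames) |
 *   cmd->sense -> | 128-byte sense buffer from the separate sense pool     |
 */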
4049 
4050 /**
4051  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4052  * @instance:		Adapter soft state
4053  */
4054 void megasas_free_cmds(struct megasas_instance *instance)
4055 {
4056 	int i;
4057 
4058 	/* First free the MFI frame pool */
4059 	megasas_teardown_frame_pool(instance);
4060 
4061 	/* Free all the commands in the cmd_list */
4062 	for (i = 0; i < instance->max_mfi_cmds; i++)
4063 
4064 		kfree(instance->cmd_list[i]);
4065 
4066 	/* Free the cmd_list buffer itself */
4067 	kfree(instance->cmd_list);
4068 	instance->cmd_list = NULL;
4069 
4070 	INIT_LIST_HEAD(&instance->cmd_pool);
4071 }
4072 
4073 /**
4074  * megasas_alloc_cmds -	Allocates the command packets
4075  * @instance:		Adapter soft state
4076  *
4077  * Each command that is issued to the FW, whether IO commands from the OS or
4078  * internal commands like IOCTLs, are wrapped in local data structure called
4079  * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4080  * the FW.
4081  *
4082  * Each frame has a 32-bit field called context (tag). This context is used
4083  * to get back the megasas_cmd from the frame when a frame gets completed in
4084  * the ISR. Typically the address of the megasas_cmd itself would be used as
4085  * the context. But we wanted to keep the differences between 32 and 64 bit
4086  * systems to the minimum. We always use 32-bit integers for the context. In
4087  * this driver, the 32 bit values are the indices into an array cmd_list.
4088  * This array is used only to look up the megasas_cmd given the context. The
4089  * free commands themselves are maintained in a linked list called cmd_pool.
4090  */
4091 int megasas_alloc_cmds(struct megasas_instance *instance)
4092 {
4093 	int i;
4094 	int j;
4095 	u16 max_cmd;
4096 	struct megasas_cmd *cmd;
4097 
4098 	max_cmd = instance->max_mfi_cmds;
4099 
4100 	/*
4101 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4102 	 * Allocate the dynamic array first and then allocate individual
4103 	 * commands.
4104 	 */
4105 	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL);
4106 
4107 	if (!instance->cmd_list) {
4108 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4109 		return -ENOMEM;
4110 	}
4111 
4113 
4114 	for (i = 0; i < max_cmd; i++) {
4115 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4116 						GFP_KERNEL);
4117 
4118 		if (!instance->cmd_list[i]) {
4119 
4120 			for (j = 0; j < i; j++)
4121 				kfree(instance->cmd_list[j]);
4122 
4123 			kfree(instance->cmd_list);
4124 			instance->cmd_list = NULL;
4125 
4126 			return -ENOMEM;
4127 		}
4128 	}
4129 
4130 	for (i = 0; i < max_cmd; i++) {
4131 		cmd = instance->cmd_list[i];
4132 		memset(cmd, 0, sizeof(struct megasas_cmd));
4133 		cmd->index = i;
4134 		cmd->scmd = NULL;
4135 		cmd->instance = instance;
4136 
4137 		list_add_tail(&cmd->list, &instance->cmd_pool);
4138 	}
4139 
4140 	/*
4141 	 * Create a frame pool and assign one frame to each cmd
4142 	 */
4143 	if (megasas_create_frame_pool(instance)) {
4144 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4145 		megasas_free_cmds(instance);
4146 	}
4147 
4148 	return 0;
4149 }
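/*
 * Note on the context scheme described above: since the 32-bit context stored
 * in each frame is simply the command's index, the completion path can recover
 * the wrapper with a plain array lookup, roughly
 *
 *	cmd = instance->cmd_list[context];
 *
 * (illustrative sketch only; the real lookup lives in the completion/ISR code).
 */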
4150 
4151 /*
4152  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4153  * @instance:				Adapter soft state
4154  *
4155  * Returns INITIATE_OCR only for Fusion adapters, when driver load/unload is not
4156  * in progress and the FW is not already under OCR; otherwise IGNORE_TIMEOUT (Fusion) or KILL_ADAPTER (MFI).
4157  */
4158 inline int
4159 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4160 
4161 	if (instance->adapter_type == MFI_SERIES)
4162 		return KILL_ADAPTER;
4163 	else if (instance->unload ||
4164 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4165 		return IGNORE_TIMEOUT;
4166 	else
4167 		return INITIATE_OCR;
4168 }
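/*
 * Illustrative caller pattern (a sketch mirroring the DCMD_TIMEOUT handling
 * used throughout this file, not an additional API):
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}
 */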
4169 
4170 static void
4171 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4172 {
4173 	int ret;
4174 	struct megasas_cmd *cmd;
4175 	struct megasas_dcmd_frame *dcmd;
4176 
4177 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4178 	u16 device_id = 0;
4179 
4180 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4181 	cmd = megasas_get_cmd(instance);
4182 
4183 	if (!cmd) {
4184 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4185 		return;
4186 	}
4187 
4188 	dcmd = &cmd->frame->dcmd;
4189 
4190 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4191 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4192 
4193 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4194 	dcmd->cmd = MFI_CMD_DCMD;
4195 	dcmd->cmd_status = 0xFF;
4196 	dcmd->sge_count = 1;
4197 	dcmd->flags = MFI_FRAME_DIR_READ;
4198 	dcmd->timeout = 0;
4199 	dcmd->pad_0 = 0;
4200 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4201 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4202 
4203 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4204 				 sizeof(struct MR_PD_INFO));
4205 
4206 	if ((instance->adapter_type != MFI_SERIES) &&
4207 	    !instance->mask_interrupts)
4208 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4209 	else
4210 		ret = megasas_issue_polled(instance, cmd);
4211 
4212 	switch (ret) {
4213 	case DCMD_SUCCESS:
4214 		mr_device_priv_data = sdev->hostdata;
4215 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4216 		mr_device_priv_data->interface_type =
4217 				instance->pd_info->state.ddf.pdType.intf;
4218 		break;
4219 
4220 	case DCMD_TIMEOUT:
4221 
4222 		switch (dcmd_timeout_ocr_possible(instance)) {
4223 		case INITIATE_OCR:
4224 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4225 			megasas_reset_fusion(instance->host,
4226 				MFI_IO_TIMEOUT_OCR);
4227 			break;
4228 		case KILL_ADAPTER:
4229 			megaraid_sas_kill_hba(instance);
4230 			break;
4231 		case IGNORE_TIMEOUT:
4232 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4233 				__func__, __LINE__);
4234 			break;
4235 		}
4236 
4237 		break;
4238 	}
4239 
4240 	if (ret != DCMD_TIMEOUT)
4241 		megasas_return_cmd(instance, cmd);
4242 
4243 	return;
4244 }
4245 /*
4246  * megasas_get_pd_list -	Returns FW's pd_list structure
4247  * @instance:				Adapter soft state
4248  *
4249  * Issues an internal command (DCMD) to get the FW's controller PD
4250  * list structure.  This information is mainly used to find out the
4251  * physical drives that the FW exposes to the host as system PDs,
4252  * so that they can be tracked in instance->pd_list.
4253  */
4254 static int
4255 megasas_get_pd_list(struct megasas_instance *instance)
4256 {
4257 	int ret = 0, pd_index = 0;
4258 	struct megasas_cmd *cmd;
4259 	struct megasas_dcmd_frame *dcmd;
4260 	struct MR_PD_LIST *ci;
4261 	struct MR_PD_ADDRESS *pd_addr;
4262 	dma_addr_t ci_h = 0;
4263 
4264 	if (instance->pd_list_not_supported) {
4265 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4266 		"not supported by firmware\n");
4267 		return ret;
4268 	}
4269 
4270 	ci = instance->pd_list_buf;
4271 	ci_h = instance->pd_list_buf_h;
4272 
4273 	cmd = megasas_get_cmd(instance);
4274 
4275 	if (!cmd) {
4276 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4277 		return -ENOMEM;
4278 	}
4279 
4280 	dcmd = &cmd->frame->dcmd;
4281 
4282 	memset(ci, 0, sizeof(*ci));
4283 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4284 
4285 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4286 	dcmd->mbox.b[1] = 0;
4287 	dcmd->cmd = MFI_CMD_DCMD;
4288 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4289 	dcmd->sge_count = 1;
4290 	dcmd->flags = MFI_FRAME_DIR_READ;
4291 	dcmd->timeout = 0;
4292 	dcmd->pad_0 = 0;
4293 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4294 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4295 
4296 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4297 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4298 
4299 	if ((instance->adapter_type != MFI_SERIES) &&
4300 	    !instance->mask_interrupts)
4301 		ret = megasas_issue_blocked_cmd(instance, cmd,
4302 			MFI_IO_TIMEOUT_SECS);
4303 	else
4304 		ret = megasas_issue_polled(instance, cmd);
4305 
4306 	switch (ret) {
4307 	case DCMD_FAILED:
4308 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4309 			"failed/not supported by firmware\n");
4310 
4311 		if (instance->adapter_type != MFI_SERIES)
4312 			megaraid_sas_kill_hba(instance);
4313 		else
4314 			instance->pd_list_not_supported = 1;
4315 		break;
4316 	case DCMD_TIMEOUT:
4317 
4318 		switch (dcmd_timeout_ocr_possible(instance)) {
4319 		case INITIATE_OCR:
4320 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4321 			/*
4322 			 * DCMD failed from AEN path.
4323 			 * AEN path already hold reset_mutex to avoid PCI access
4324 			 * while OCR is in progress.
4325 			 */
4326 			mutex_unlock(&instance->reset_mutex);
4327 			megasas_reset_fusion(instance->host,
4328 						MFI_IO_TIMEOUT_OCR);
4329 			mutex_lock(&instance->reset_mutex);
4330 			break;
4331 		case KILL_ADAPTER:
4332 			megaraid_sas_kill_hba(instance);
4333 			break;
4334 		case IGNORE_TIMEOUT:
4335 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4336 				__func__, __LINE__);
4337 			break;
4338 		}
4339 
4340 		break;
4341 
4342 	case DCMD_SUCCESS:
4343 		pd_addr = ci->addr;
4344 
4345 		if ((le32_to_cpu(ci->count) >
4346 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4347 			break;
4348 
4349 		memset(instance->local_pd_list, 0,
4350 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4351 
4352 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4353 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4354 					le16_to_cpu(pd_addr->deviceId);
4355 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4356 					pd_addr->scsiDevType;
4357 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4358 					MR_PD_STATE_SYSTEM;
4359 			pd_addr++;
4360 		}
4361 
4362 		memcpy(instance->pd_list, instance->local_pd_list,
4363 			sizeof(instance->pd_list));
4364 		break;
4365 
4366 	}
4367 
4368 	if (ret != DCMD_TIMEOUT)
4369 		megasas_return_cmd(instance, cmd);
4370 
4371 	return ret;
4372 }
4373 
4374 /*
4375  * megasas_get_ld_list -	Returns FW's ld_list structure
4376  * @instance:				Adapter soft state
4377  *
4378  * Issues an internal command (DCMD) to get the FW's controller LD
4379  * list structure.  This information is mainly used to find out the
4380  * logical drives currently configured on the controller, so their
4381  * target IDs can be recorded in instance->ld_ids.
4382  */
4383 static int
4384 megasas_get_ld_list(struct megasas_instance *instance)
4385 {
4386 	int ret = 0, ld_index = 0, ids = 0;
4387 	struct megasas_cmd *cmd;
4388 	struct megasas_dcmd_frame *dcmd;
4389 	struct MR_LD_LIST *ci;
4390 	dma_addr_t ci_h = 0;
4391 	u32 ld_count;
4392 
4393 	ci = instance->ld_list_buf;
4394 	ci_h = instance->ld_list_buf_h;
4395 
4396 	cmd = megasas_get_cmd(instance);
4397 
4398 	if (!cmd) {
4399 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4400 		return -ENOMEM;
4401 	}
4402 
4403 	dcmd = &cmd->frame->dcmd;
4404 
4405 	memset(ci, 0, sizeof(*ci));
4406 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4407 
4408 	if (instance->supportmax256vd)
4409 		dcmd->mbox.b[0] = 1;
4410 	dcmd->cmd = MFI_CMD_DCMD;
4411 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4412 	dcmd->sge_count = 1;
4413 	dcmd->flags = MFI_FRAME_DIR_READ;
4414 	dcmd->timeout = 0;
4415 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4416 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4417 	dcmd->pad_0  = 0;
4418 
4419 	megasas_set_dma_settings(instance, dcmd, ci_h,
4420 				 sizeof(struct MR_LD_LIST));
4421 
4422 	if ((instance->adapter_type != MFI_SERIES) &&
4423 	    !instance->mask_interrupts)
4424 		ret = megasas_issue_blocked_cmd(instance, cmd,
4425 			MFI_IO_TIMEOUT_SECS);
4426 	else
4427 		ret = megasas_issue_polled(instance, cmd);
4428 
4429 	ld_count = le32_to_cpu(ci->ldCount);
4430 
4431 	switch (ret) {
4432 	case DCMD_FAILED:
4433 		megaraid_sas_kill_hba(instance);
4434 		break;
4435 	case DCMD_TIMEOUT:
4436 
4437 		switch (dcmd_timeout_ocr_possible(instance)) {
4438 		case INITIATE_OCR:
4439 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4440 			/*
4441 			 * DCMD failed from AEN path.
4442 			 * AEN path already hold reset_mutex to avoid PCI access
4443 			 * while OCR is in progress.
4444 			 */
4445 			mutex_unlock(&instance->reset_mutex);
4446 			megasas_reset_fusion(instance->host,
4447 						MFI_IO_TIMEOUT_OCR);
4448 			mutex_lock(&instance->reset_mutex);
4449 			break;
4450 		case KILL_ADAPTER:
4451 			megaraid_sas_kill_hba(instance);
4452 			break;
4453 		case IGNORE_TIMEOUT:
4454 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4455 				__func__, __LINE__);
4456 			break;
4457 		}
4458 
4459 		break;
4460 
4461 	case DCMD_SUCCESS:
4462 		if (ld_count > instance->fw_supported_vd_count)
4463 			break;
4464 
4465 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4466 
4467 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4468 			if (ci->ldList[ld_index].state != 0) {
4469 				ids = ci->ldList[ld_index].ref.targetId;
4470 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4471 			}
4472 		}
4473 
4474 		break;
4475 	}
4476 
4477 	if (ret != DCMD_TIMEOUT)
4478 		megasas_return_cmd(instance, cmd);
4479 
4480 	return ret;
4481 }
4482 
4483 /**
4484  * megasas_ld_list_query -	Returns FW's LD target ID list
4485  * @instance:				Adapter soft state
4486  * @query_type:			LD list query type
4487  *
4488  * Issues an internal command (DCMD) to query the FW's LD target ID list.
4489  * This information is mainly used to find out the logical drives configured
4490  * on the controller; falls back to megasas_get_ld_list() if unsupported.
4491  */
4492 static int
4493 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4494 {
4495 	int ret = 0, ld_index = 0, ids = 0;
4496 	struct megasas_cmd *cmd;
4497 	struct megasas_dcmd_frame *dcmd;
4498 	struct MR_LD_TARGETID_LIST *ci;
4499 	dma_addr_t ci_h = 0;
4500 	u32 tgtid_count;
4501 
4502 	ci = instance->ld_targetid_list_buf;
4503 	ci_h = instance->ld_targetid_list_buf_h;
4504 
4505 	cmd = megasas_get_cmd(instance);
4506 
4507 	if (!cmd) {
4508 		dev_warn(&instance->pdev->dev,
4509 		         "megasas_ld_list_query: Failed to get cmd\n");
4510 		return -ENOMEM;
4511 	}
4512 
4513 	dcmd = &cmd->frame->dcmd;
4514 
4515 	memset(ci, 0, sizeof(*ci));
4516 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4517 
4518 	dcmd->mbox.b[0] = query_type;
4519 	if (instance->supportmax256vd)
4520 		dcmd->mbox.b[2] = 1;
4521 
4522 	dcmd->cmd = MFI_CMD_DCMD;
4523 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4524 	dcmd->sge_count = 1;
4525 	dcmd->flags = MFI_FRAME_DIR_READ;
4526 	dcmd->timeout = 0;
4527 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4528 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4529 	dcmd->pad_0  = 0;
4530 
4531 	megasas_set_dma_settings(instance, dcmd, ci_h,
4532 				 sizeof(struct MR_LD_TARGETID_LIST));
4533 
4534 	if ((instance->adapter_type != MFI_SERIES) &&
4535 	    !instance->mask_interrupts)
4536 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4537 	else
4538 		ret = megasas_issue_polled(instance, cmd);
4539 
4540 	switch (ret) {
4541 	case DCMD_FAILED:
4542 		dev_info(&instance->pdev->dev,
4543 			"DCMD not supported by firmware - %s %d\n",
4544 				__func__, __LINE__);
4545 		ret = megasas_get_ld_list(instance);
4546 		break;
4547 	case DCMD_TIMEOUT:
4548 		switch (dcmd_timeout_ocr_possible(instance)) {
4549 		case INITIATE_OCR:
4550 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4551 			/*
4552 			 * DCMD failed from AEN path.
4553 			 * AEN path already hold reset_mutex to avoid PCI access
4554 			 * while OCR is in progress.
4555 			 */
4556 			mutex_unlock(&instance->reset_mutex);
4557 			megasas_reset_fusion(instance->host,
4558 						MFI_IO_TIMEOUT_OCR);
4559 			mutex_lock(&instance->reset_mutex);
4560 			break;
4561 		case KILL_ADAPTER:
4562 			megaraid_sas_kill_hba(instance);
4563 			break;
4564 		case IGNORE_TIMEOUT:
4565 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4566 				__func__, __LINE__);
4567 			break;
4568 		}
4569 
4570 		break;
4571 	case DCMD_SUCCESS:
4572 		tgtid_count = le32_to_cpu(ci->count);
4573 
4574 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4575 			break;
4576 
4577 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4578 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4579 			ids = ci->targetId[ld_index];
4580 			instance->ld_ids[ids] = ci->targetId[ld_index];
4581 		}
4582 
4583 		break;
4584 	}
4585 
4586 	if (ret != DCMD_TIMEOUT)
4587 		megasas_return_cmd(instance, cmd);
4588 
4589 	return ret;
4590 }
4591 
4592 /*
4593  * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4594  * instance			 : Controller's instance
4595  */
4596 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4597 {
4598 	struct fusion_context *fusion;
4599 	u32 ventura_map_sz = 0;
4600 
4601 	fusion = instance->ctrl_context;
4602 	/* Nothing to update for MFI based controllers */
4603 	if (!fusion)
4604 		return;
4605 
4606 	instance->supportmax256vd =
4607 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4608 	/* Below is additional check to address future FW enhancement */
4609 	if (instance->ctrl_info_buf->max_lds > 64)
4610 		instance->supportmax256vd = 1;
4611 
4612 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4613 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4614 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4615 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4616 	if (instance->supportmax256vd) {
4617 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4618 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4619 	} else {
4620 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4621 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4622 	}
4623 
4624 	dev_info(&instance->pdev->dev,
4625 		"firmware type\t: %s\n",
4626 		instance->supportmax256vd ? "Extended VD(240 VD) firmware" :
4627 		"Legacy(64 VD) firmware");
4628 
4629 	if (instance->max_raid_mapsize) {
4630 		ventura_map_sz = instance->max_raid_mapsize *
4631 						MR_MIN_MAP_SIZE; /* 64k */
4632 		fusion->current_map_sz = ventura_map_sz;
4633 		fusion->max_map_sz = ventura_map_sz;
4634 	} else {
4635 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4636 					(sizeof(struct MR_LD_SPAN_MAP) *
4637 					(instance->fw_supported_vd_count - 1));
4638 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4639 
4640 		fusion->max_map_sz =
4641 			max(fusion->old_map_sz, fusion->new_map_sz);
4642 
4643 		if (instance->supportmax256vd)
4644 			fusion->current_map_sz = fusion->new_map_sz;
4645 		else
4646 			fusion->current_map_sz = fusion->old_map_sz;
4647 	}
4648 	/* irrespective of FW raid maps, driver raid map is constant */
4649 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4650 }
4651 
4652 /**
4653  * megasas_get_ctrl_info -	Returns FW's controller structure
4654  * @instance:				Adapter soft state
4655  *
4656  * Issues an internal command (DCMD) to get the FW's controller structure.
4657  * This information is mainly used to find out the maximum IO transfer per
4658  * command supported by the FW.
4659  */
4660 int
4661 megasas_get_ctrl_info(struct megasas_instance *instance)
4662 {
4663 	int ret = 0;
4664 	struct megasas_cmd *cmd;
4665 	struct megasas_dcmd_frame *dcmd;
4666 	struct megasas_ctrl_info *ci;
4667 	dma_addr_t ci_h = 0;
4668 
4669 	ci = instance->ctrl_info_buf;
4670 	ci_h = instance->ctrl_info_buf_h;
4671 
4672 	cmd = megasas_get_cmd(instance);
4673 
4674 	if (!cmd) {
4675 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4676 		return -ENOMEM;
4677 	}
4678 
4679 	dcmd = &cmd->frame->dcmd;
4680 
4681 	memset(ci, 0, sizeof(*ci));
4682 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4683 
4684 	dcmd->cmd = MFI_CMD_DCMD;
4685 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4686 	dcmd->sge_count = 1;
4687 	dcmd->flags = MFI_FRAME_DIR_READ;
4688 	dcmd->timeout = 0;
4689 	dcmd->pad_0 = 0;
4690 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4691 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4692 	dcmd->mbox.b[0] = 1;
4693 
4694 	megasas_set_dma_settings(instance, dcmd, ci_h,
4695 				 sizeof(struct megasas_ctrl_info));
4696 
4697 	if ((instance->adapter_type != MFI_SERIES) &&
4698 	    !instance->mask_interrupts) {
4699 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4700 	} else {
4701 		ret = megasas_issue_polled(instance, cmd);
4702 		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4703 	}
4704 
4705 	switch (ret) {
4706 	case DCMD_SUCCESS:
4707 		/* Save required controller information in
4708 		 * CPU endianness format.
4709 		 */
4710 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4711 		le32_to_cpus((u32 *)&ci->adapterOperations2);
4712 		le32_to_cpus((u32 *)&ci->adapterOperations3);
4713 		le16_to_cpus((u16 *)&ci->adapter_operations4);
4714 
4715 		/* Update the latest Ext VD info.
4716 		 * From the init path, store current firmware details.
4717 		 * From the OCR path, detect any firmware property changes,
4718 		 * e.g. in case of a firmware upgrade without a system reboot.
4719 		 */
4720 		megasas_update_ext_vd_details(instance);
4721 		instance->use_seqnum_jbod_fp =
4722 			ci->adapterOperations3.useSeqNumJbodFP;
4723 		instance->support_morethan256jbod =
4724 			ci->adapter_operations4.support_pd_map_target_id;
4725 		instance->support_nvme_passthru =
4726 			ci->adapter_operations4.support_nvme_passthru;
4727 
4728 		/* Check whether controller is iMR or MR */
4729 		instance->is_imr = (ci->memory_size ? 0 : 1);
4730 		dev_info(&instance->pdev->dev,
4731 			"controller type\t: %s(%dMB)\n",
4732 			instance->is_imr ? "iMR" : "MR",
4733 			le16_to_cpu(ci->memory_size));
4734 
4735 		instance->disableOnlineCtrlReset =
4736 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
4737 		instance->secure_jbod_support =
4738 			ci->adapterOperations3.supportSecurityonJBOD;
4739 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4740 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4741 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4742 			instance->secure_jbod_support ? "Yes" : "No");
4743 		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
4744 			 instance->support_nvme_passthru ? "Yes" : "No");
4745 		break;
4746 
4747 	case DCMD_TIMEOUT:
4748 		switch (dcmd_timeout_ocr_possible(instance)) {
4749 		case INITIATE_OCR:
4750 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4751 			megasas_reset_fusion(instance->host,
4752 				MFI_IO_TIMEOUT_OCR);
4753 			break;
4754 		case KILL_ADAPTER:
4755 			megaraid_sas_kill_hba(instance);
4756 			break;
4757 		case IGNORE_TIMEOUT:
4758 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4759 				__func__, __LINE__);
4760 			break;
4761 		}
		break;	/* do not fall through to DCMD_FAILED after OCR handling */
4762 	case DCMD_FAILED:
4763 		megaraid_sas_kill_hba(instance);
4764 		break;
4765 
4766 	}
4767 
4768 	megasas_return_cmd(instance, cmd);
4769 
4770 
4771 	return ret;
4772 }
4773 
4774 /*
4775  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
4776  *					to firmware
4777  *
4778  * @instance:				Adapter soft state
4779  * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
4780  *					MR_CRASH_BUF_TURN_OFF = 0
4781  *					MR_CRASH_BUF_TURN_ON = 1
4782  * @return 0 on success, non-zero on failure.
4783  * Issues an internal command (DCMD) to set parameters for crash dump feature.
4784  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4785  * that driver supports crash dump feature. This DCMD will be sent only if
4786  * crash dump feature is supported by the FW.
4787  *
4788  */
4789 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4790 	u8 crash_buf_state)
4791 {
4792 	int ret = 0;
4793 	struct megasas_cmd *cmd;
4794 	struct megasas_dcmd_frame *dcmd;
4795 
4796 	cmd = megasas_get_cmd(instance);
4797 
4798 	if (!cmd) {
4799 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4800 		return -ENOMEM;
4801 	}
4802 
4803 
4804 	dcmd = &cmd->frame->dcmd;
4805 
4806 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4807 	dcmd->mbox.b[0] = crash_buf_state;
4808 	dcmd->cmd = MFI_CMD_DCMD;
4809 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4810 	dcmd->sge_count = 1;
4811 	dcmd->flags = MFI_FRAME_DIR_NONE;
4812 	dcmd->timeout = 0;
4813 	dcmd->pad_0 = 0;
4814 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4815 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4816 
4817 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
4818 				 CRASH_DMA_BUF_SIZE);
4819 
4820 	if ((instance->adapter_type != MFI_SERIES) &&
4821 	    !instance->mask_interrupts)
4822 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4823 	else
4824 		ret = megasas_issue_polled(instance, cmd);
4825 
4826 	if (ret == DCMD_TIMEOUT) {
4827 		switch (dcmd_timeout_ocr_possible(instance)) {
4828 		case INITIATE_OCR:
4829 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4830 			megasas_reset_fusion(instance->host,
4831 					MFI_IO_TIMEOUT_OCR);
4832 			break;
4833 		case KILL_ADAPTER:
4834 			megaraid_sas_kill_hba(instance);
4835 			break;
4836 		case IGNORE_TIMEOUT:
4837 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4838 				__func__, __LINE__);
4839 			break;
4840 		}
4841 	} else
4842 		megasas_return_cmd(instance, cmd);
4843 
4844 	return ret;
4845 }
4846 
4847 /**
4848  * megasas_issue_init_mfi -	Initializes the FW
4849  * @instance:		Adapter soft state
4850  *
4851  * Issues the INIT MFI cmd
4852  */
4853 static int
4854 megasas_issue_init_mfi(struct megasas_instance *instance)
4855 {
4856 	__le32 context;
4857 	struct megasas_cmd *cmd;
4858 	struct megasas_init_frame *init_frame;
4859 	struct megasas_init_queue_info *initq_info;
4860 	dma_addr_t init_frame_h;
4861 	dma_addr_t initq_info_h;
4862 
4863 	/*
4864 	 * Prepare an init frame. Note that the init frame points to the queue
4865 	 * info structure. Each frame has its SGL allocated after the first 64
4866 	 * bytes. For this frame - since we don't need any SGL - we use the
4867 	 * SGL's space as the queue info structure.
4868 	 *
4869 	 * We will not get a NULL command below. We just created the pool.
4870 	 */
4871 	cmd = megasas_get_cmd(instance);
4872 
4873 	init_frame = (struct megasas_init_frame *)cmd->frame;
4874 	initq_info = (struct megasas_init_queue_info *)
4875 		((unsigned long)init_frame + 64);
4876 
4877 	init_frame_h = cmd->frame_phys_addr;
4878 	initq_info_h = init_frame_h + 64;
4879 
4880 	context = init_frame->context;
4881 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4882 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4883 	init_frame->context = context;
4884 
4885 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4886 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4887 
4888 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4889 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4890 
4891 	init_frame->cmd = MFI_CMD_INIT;
4892 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4893 	init_frame->queue_info_new_phys_addr_lo =
4894 		cpu_to_le32(lower_32_bits(initq_info_h));
4895 	init_frame->queue_info_new_phys_addr_hi =
4896 		cpu_to_le32(upper_32_bits(initq_info_h));
4897 
4898 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4899 
4900 	/*
4901 	 * disable the intr before firing the init frame to FW
4902 	 */
4903 	instance->instancet->disable_intr(instance);
4904 
4905 	/*
4906 	 * Issue the init frame in polled mode
4907 	 */
4908 
4909 	if (megasas_issue_polled(instance, cmd)) {
4910 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4911 		megasas_return_cmd(instance, cmd);
4912 		goto fail_fw_init;
4913 	}
4914 
4915 	megasas_return_cmd(instance, cmd);
4916 
4917 	return 0;
4918 
4919 fail_fw_init:
4920 	return -EINVAL;
4921 }
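/*
 * Frame layout used by megasas_issue_init_mfi() above (both pieces share the
 * command's single frame allocation):
 *
 *   cmd->frame_phys_addr + 0  : struct megasas_init_frame (first 64 bytes)
 *   cmd->frame_phys_addr + 64 : struct megasas_init_queue_info (SGL area)
 */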
4922 
4923 static u32
4924 megasas_init_adapter_mfi(struct megasas_instance *instance)
4925 {
4926 	struct megasas_register_set __iomem *reg_set;
4927 	u32 context_sz;
4928 	u32 reply_q_sz;
4929 
4930 	reg_set = instance->reg_set;
4931 
4932 	/*
4933 	 * Get various operational parameters from status register
4934 	 */
4935 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4936 	/*
4937 	 * Reduce the max supported cmds by 1. This is to ensure that the
4938 	 * reply_q_sz (1 more than the max cmd that driver may send)
4939 	 * does not exceed max cmds that the FW can support
4940 	 */
4941 	instance->max_fw_cmds = instance->max_fw_cmds-1;
4942 	instance->max_mfi_cmds = instance->max_fw_cmds;
4943 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4944 					0x10;
4945 	/*
4946 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4947 	 * are reserved for IOCTL + driver's internal DCMDs.
4948 	 */
4949 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4950 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4951 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4952 			MEGASAS_SKINNY_INT_CMDS);
4953 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4954 	} else {
4955 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4956 			MEGASAS_INT_CMDS);
4957 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4958 	}
4959 
4960 	instance->cur_can_queue = instance->max_scsi_cmds;
4961 	/*
4962 	 * Create a pool of commands
4963 	 */
4964 	if (megasas_alloc_cmds(instance))
4965 		goto fail_alloc_cmds;
4966 
4967 	/*
4968 	 * Allocate memory for reply queue. Length of reply queue should
4969 	 * be _one_ more than the maximum commands handled by the firmware.
4970 	 *
4971 	 * Note: When FW completes commands, it places the corresponding context
4972 	 * values in this circular reply queue. This circular queue is a fairly
4973 	 * typical producer-consumer queue. FW is the producer (of completed
4974 	 * commands) and the driver is the consumer.
4975 	 */
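	/*
	 * Consumer-side sketch (simplified assumption of how the completion
	 * path drains this queue): the ISR/tasklet reads the next context from
	 * reply_queue[*instance->consumer], completes cmd_list[context], and
	 * then advances the consumer index.
	 */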
4976 	context_sz = sizeof(u32);
4977 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4978 
4979 	instance->reply_queue = pci_alloc_consistent(instance->pdev,
4980 						     reply_q_sz,
4981 						     &instance->reply_queue_h);
4982 
4983 	if (!instance->reply_queue) {
4984 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4985 		goto fail_reply_queue;
4986 	}
4987 
4988 	if (megasas_issue_init_mfi(instance))
4989 		goto fail_fw_init;
4990 
4991 	if (megasas_get_ctrl_info(instance)) {
4992 		dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4993 			"Fail from %s %d\n", instance->unique_id,
4994 			__func__, __LINE__);
4995 		goto fail_fw_init;
4996 	}
4997 
4998 	instance->fw_support_ieee = 0;
4999 	instance->fw_support_ieee =
5000 		(instance->instancet->read_fw_status_reg(reg_set) &
5001 		0x04000000);
5002 
5003 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5004 			instance->fw_support_ieee);
5005 
5006 	if (instance->fw_support_ieee)
5007 		instance->flag_ieee = 1;
5008 
5009 	return 0;
5010 
5011 fail_fw_init:
5012 
5013 	pci_free_consistent(instance->pdev, reply_q_sz,
5014 			    instance->reply_queue, instance->reply_queue_h);
5015 fail_reply_queue:
5016 	megasas_free_cmds(instance);
5017 
5018 fail_alloc_cmds:
5019 	return 1;
5020 }
5021 
5022 /*
5023  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5024  * @instance:				Adapter soft state
5025  *
5026  * Do not enable interrupts, only set up the ISRs.
5027  *
5028  * Return 0 on success.
5029  */
5030 static int
5031 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5032 {
5033 	struct pci_dev *pdev;
5034 
5035 	pdev = instance->pdev;
5036 	instance->irq_context[0].instance = instance;
5037 	instance->irq_context[0].MSIxIndex = 0;
5038 	if (request_irq(pci_irq_vector(pdev, 0),
5039 			instance->instancet->service_isr, IRQF_SHARED,
5040 			"megasas", &instance->irq_context[0])) {
5041 		dev_err(&instance->pdev->dev,
5042 				"Failed to register IRQ from %s %d\n",
5043 				__func__, __LINE__);
5044 		return -1;
5045 	}
5046 	return 0;
5047 }
5048 
5049 /**
5050  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5051  * @instance:				Adapter soft state
5052  * @is_probe:				Driver probe check
5053  *
5054  * Do not enable interrupts, only set up the ISRs.
5055  *
5056  * Return 0 on success.
5057  */
5058 static int
5059 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5060 {
5061 	int i, j;
5062 	struct pci_dev *pdev;
5063 
5064 	pdev = instance->pdev;
5065 
5066 	/* Try MSI-x */
5067 	for (i = 0; i < instance->msix_vectors; i++) {
5068 		instance->irq_context[i].instance = instance;
5069 		instance->irq_context[i].MSIxIndex = i;
5070 		if (request_irq(pci_irq_vector(pdev, i),
5071 			instance->instancet->service_isr, 0, "megasas",
5072 			&instance->irq_context[i])) {
5073 			dev_err(&instance->pdev->dev,
5074 				"Failed to register IRQ for vector %d.\n", i);
5075 			for (j = 0; j < i; j++)
5076 				free_irq(pci_irq_vector(pdev, j),
5077 					 &instance->irq_context[j]);
5078 			/* Retry irq register for IO_APIC*/
5079 			instance->msix_vectors = 0;
5080 			if (is_probe) {
5081 				pci_free_irq_vectors(instance->pdev);
5082 				return megasas_setup_irqs_ioapic(instance);
5083 			} else {
5084 				return -1;
5085 			}
5086 		}
5087 	}
5088 	return 0;
5089 }
5090 
5091 /*
5092  * megasas_destroy_irqs-		unregister interrupts.
5093  * @instance:				Adapter soft state
5094  * return:				void
5095  */
5096 static void
5097 megasas_destroy_irqs(struct megasas_instance *instance) {
5098 
5099 	int i;
5100 
5101 	if (instance->msix_vectors)
5102 		for (i = 0; i < instance->msix_vectors; i++) {
5103 			free_irq(pci_irq_vector(instance->pdev, i),
5104 				 &instance->irq_context[i]);
5105 		}
5106 	else
5107 		free_irq(pci_irq_vector(instance->pdev, 0),
5108 			 &instance->irq_context[0]);
5109 }
5110 
5111 /**
5112  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5113  * @instance:				Adapter soft state
5114  *
5115  * Sets instance->use_seqnum_jbod_fp based on whether the JBOD map could
5116  * be allocated and synced with the firmware.
5117  */
5118 void
5119 megasas_setup_jbod_map(struct megasas_instance *instance)
5120 {
5121 	int i;
5122 	struct fusion_context *fusion = instance->ctrl_context;
5123 	u32 pd_seq_map_sz;
5124 
5125 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5126 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
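	/*
	 * As the arithmetic above implies, MR_PD_CFG_SEQ_NUM_SYNC carries a
	 * single-element seq[] array, so the full map size is the header plus
	 * (MAX_PHYSICAL_DEVICES - 1) additional MR_PD_CFG_SEQ entries.
	 */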
5127 
5128 	if (reset_devices || !fusion ||
5129 		!instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
5130 		dev_info(&instance->pdev->dev,
5131 			"Jbod map is not supported %s %d\n",
5132 			__func__, __LINE__);
5133 		instance->use_seqnum_jbod_fp = false;
5134 		return;
5135 	}
5136 
5137 	if (fusion->pd_seq_sync[0])
5138 		goto skip_alloc;
5139 
5140 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5141 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5142 			(&instance->pdev->dev, pd_seq_map_sz,
5143 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5144 		if (!fusion->pd_seq_sync[i]) {
5145 			dev_err(&instance->pdev->dev,
5146 				"Failed to allocate memory from %s %d\n",
5147 				__func__, __LINE__);
5148 			if (i == 1) {
5149 				dma_free_coherent(&instance->pdev->dev,
5150 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5151 					fusion->pd_seq_phys[0]);
5152 				fusion->pd_seq_sync[0] = NULL;
5153 			}
5154 			instance->use_seqnum_jbod_fp = false;
5155 			return;
5156 		}
5157 	}
5158 
5159 skip_alloc:
5160 	if (!megasas_sync_pd_seq_num(instance, false) &&
5161 		!megasas_sync_pd_seq_num(instance, true))
5162 		instance->use_seqnum_jbod_fp = true;
5163 	else
5164 		instance->use_seqnum_jbod_fp = false;
5165 }
5166 
5167 static void megasas_setup_reply_map(struct megasas_instance *instance)
5168 {
5169 	const struct cpumask *mask;
5170 	unsigned int queue, cpu;
5171 
5172 	for (queue = 0; queue < instance->msix_vectors; queue++) {
5173 		mask = pci_irq_get_affinity(instance->pdev, queue);
5174 		if (!mask)
5175 			goto fallback;
5176 
5177 		for_each_cpu(cpu, mask)
5178 			instance->reply_map[cpu] = queue;
5179 	}
5180 	return;
5181 
5182 fallback:
5183 	for_each_possible_cpu(cpu)
5184 		instance->reply_map[cpu] = cpu % instance->msix_vectors;
5185 }
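/*
 * Usage note (hedged sketch): the I/O submission path is expected to pick its
 * reply queue from this table based on the submitting CPU, roughly
 *
 *	msix_index = instance->reply_map[raw_smp_processor_id()];
 *
 * the actual lookup lives in the fusion command-build code, not here.
 */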
5186 
5187 /**
5188  * megasas_init_fw -	Initializes the FW
5189  * @instance:		Adapter soft state
5190  *
5191  * This is the main function for initializing firmware
5192  */
5193 
5194 static int megasas_init_fw(struct megasas_instance *instance)
5195 {
5196 	u32 max_sectors_1;
5197 	u32 max_sectors_2, tmp_sectors, msix_enable;
5198 	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5199 	resource_size_t base_addr;
5200 	struct megasas_register_set __iomem *reg_set;
5201 	struct megasas_ctrl_info *ctrl_info = NULL;
5202 	unsigned long bar_list;
5203 	int i, j, loop, fw_msix_count = 0;
5204 	struct IOV_111 *iovPtr;
5205 	struct fusion_context *fusion;
5206 
5207 	fusion = instance->ctrl_context;
5208 
5209 	/* Find first memory bar */
5210 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5211 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5212 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5213 					 "megasas: LSI")) {
5214 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5215 		return -EBUSY;
5216 	}
5217 
5218 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5219 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5220 
5221 	if (!instance->reg_set) {
5222 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5223 		goto fail_ioremap;
5224 	}
5225 
5226 	reg_set = instance->reg_set;
5227 
5228 	if (instance->adapter_type != MFI_SERIES)
5229 		instance->instancet = &megasas_instance_template_fusion;
5230 	else {
5231 		switch (instance->pdev->device) {
5232 		case PCI_DEVICE_ID_LSI_SAS1078R:
5233 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5234 			instance->instancet = &megasas_instance_template_ppc;
5235 			break;
5236 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5237 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5238 			instance->instancet = &megasas_instance_template_gen2;
5239 			break;
5240 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5241 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5242 			instance->instancet = &megasas_instance_template_skinny;
5243 			break;
5244 		case PCI_DEVICE_ID_LSI_SAS1064R:
5245 		case PCI_DEVICE_ID_DELL_PERC5:
5246 		default:
5247 			instance->instancet = &megasas_instance_template_xscale;
5248 			instance->pd_list_not_supported = 1;
5249 			break;
5250 		}
5251 	}
5252 
5253 	if (megasas_transition_to_ready(instance, 0)) {
5254 		atomic_set(&instance->fw_reset_no_pci_access, 1);
5255 		instance->instancet->adp_reset
5256 			(instance, instance->reg_set);
5257 		atomic_set(&instance->fw_reset_no_pci_access, 0);
5258 		dev_info(&instance->pdev->dev,
5259 			"FW restarted successfully from %s!\n",
5260 			__func__);
5261 
5262 		/* waiting for about 30 seconds before retry */
5263 		ssleep(30);
5264 
5265 		if (megasas_transition_to_ready(instance, 0))
5266 			goto fail_ready_state;
5267 	}
5268 
5269 	megasas_init_ctrl_params(instance);
5270 
5271 	if (megasas_set_dma_mask(instance))
5272 		goto fail_ready_state;
5273 
5274 	if (megasas_alloc_ctrl_mem(instance))
5275 		goto fail_alloc_dma_buf;
5276 
5277 	if (megasas_alloc_ctrl_dma_buffers(instance))
5278 		goto fail_alloc_dma_buf;
5279 
5280 	fusion = instance->ctrl_context;
5281 
5282 	if (instance->adapter_type == VENTURA_SERIES) {
5283 		scratch_pad_3 =
5284 			readl(&instance->reg_set->outbound_scratch_pad_3);
5285 		instance->max_raid_mapsize = ((scratch_pad_3 >>
5286 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5287 			MR_MAX_RAID_MAP_SIZE_MASK);
5288 	}
5289 
5290 	/* Check if MSI-X is supported while in ready state */
5291 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5292 		       0x4000000) >> 0x1a;
5293 	if (msix_enable && !msix_disable) {
5294 		int irq_flags = PCI_IRQ_MSIX;
5295 
5296 		scratch_pad_2 = readl
5297 			(&instance->reg_set->outbound_scratch_pad_2);
5298 		/* Check max MSI-X vectors */
5299 		if (fusion) {
5300 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5301 				/* Thunderbolt Series*/
5302 				instance->msix_vectors = (scratch_pad_2
5303 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5304 				fw_msix_count = instance->msix_vectors;
5305 			} else { /* Invader series supports more than 8 MSI-X vectors */
5306 				instance->msix_vectors = ((scratch_pad_2
5307 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5308 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5309 				if (instance->msix_vectors > 16)
5310 					instance->msix_combined = true;
5311 
5312 				if (rdpq_enable)
5313 					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5314 								1 : 0;
5315 				fw_msix_count = instance->msix_vectors;
5316 				/* Save 1-15 reply post index address to local memory
5317 				 * Index 0 is already saved from reg offset
5318 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5319 				 */
5320 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5321 					instance->reply_post_host_index_addr[loop] =
5322 						(u32 __iomem *)
5323 						((u8 __iomem *)instance->reg_set +
5324 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5325 						+ (loop * 0x10));
5326 				}
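				/*
				 * Resulting layout (derived from the arithmetic
				 * above, shown for illustration): index 1 maps
				 * to MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				 * 0x10, index 2 to + 0x20, and so on, i.e. one
				 * reply post host index register every 0x10
				 * bytes.
				 */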
5327 			}
5328 			if (msix_vectors)
5329 				instance->msix_vectors = min(msix_vectors,
5330 					instance->msix_vectors);
5331 		} else /* MFI adapters */
5332 			instance->msix_vectors = 1;
5333 		/* Don't bother allocating more MSI-X vectors than cpus */
5334 		instance->msix_vectors = min(instance->msix_vectors,
5335 					     (unsigned int)num_online_cpus());
5336 		if (smp_affinity_enable)
5337 			irq_flags |= PCI_IRQ_AFFINITY;
5338 		i = pci_alloc_irq_vectors(instance->pdev, 1,
5339 					  instance->msix_vectors, irq_flags);
5340 		if (i > 0)
5341 			instance->msix_vectors = i;
5342 		else
5343 			instance->msix_vectors = 0;
5344 	}
5345 	/*
5346 	 * MSI-X host index 0 is common for all adapters.
5347 	 * It is used for all MPT based adapters.
5348 	 */
5349 	if (instance->msix_combined) {
5350 		instance->reply_post_host_index_addr[0] =
5351 				(u32 *)((u8 *)instance->reg_set +
5352 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5353 	} else {
5354 		instance->reply_post_host_index_addr[0] =
5355 			(u32 *)((u8 *)instance->reg_set +
5356 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5357 	}
5358 
5359 	if (!instance->msix_vectors) {
5360 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5361 		if (i < 0)
5362 			goto fail_setup_irqs;
5363 	}
5364 
5365 	megasas_setup_reply_map(instance);
5366 
5367 	dev_info(&instance->pdev->dev,
5368 		"firmware supports msix\t: (%d)\n", fw_msix_count);
5369 	dev_info(&instance->pdev->dev,
5370 		"current msix/online cpus\t: (%d/%d)\n",
5371 		instance->msix_vectors, (unsigned int)num_online_cpus());
5372 	dev_info(&instance->pdev->dev,
5373 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5374 
5375 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5376 		(unsigned long)instance);
5377 
5378 	/*
5379 	 * Below are the default values for legacy firmware
5380 	 * (non-fusion based controllers).
5381 	 */
5382 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5383 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5384 	/* Get operational params, sge flags, send init cmd to controller */
5385 	if (instance->instancet->init_adapter(instance))
5386 		goto fail_init_adapter;
5387 
5388 	if (instance->adapter_type == VENTURA_SERIES) {
5389 		scratch_pad_4 =
5390 			readl(&instance->reg_set->outbound_scratch_pad_4);
5391 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5392 			MR_DEFAULT_NVME_PAGE_SHIFT)
5393 			instance->nvme_page_size =
5394 				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5395 
5396 		dev_info(&instance->pdev->dev,
5397 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5398 	}
5399 
5400 	if (instance->msix_vectors ?
5401 		megasas_setup_irqs_msix(instance, 1) :
5402 		megasas_setup_irqs_ioapic(instance))
5403 		goto fail_init_adapter;
5404 
5405 	instance->instancet->enable_intr(instance);
5406 
5407 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
5408 
5409 	megasas_setup_jbod_map(instance);
5410 
5411 	/* For passthrough,
5412 	 * the following call fetches the PD list.
5413 	 */
5414 	memset(instance->pd_list, 0,
5415 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5416 	if (megasas_get_pd_list(instance) < 0) {
5417 		dev_err(&instance->pdev->dev, "failed to get PD list\n");
5418 		goto fail_get_ld_pd_list;
5419 	}
5420 
5421 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5422 
5423 	/* stream detection initialization */
5424 	if (instance->adapter_type == VENTURA_SERIES) {
5425 		fusion->stream_detect_by_ld =
5426 			kzalloc(sizeof(struct LD_STREAM_DETECT *)
5427 			* MAX_LOGICAL_DRIVES_EXT,
5428 			GFP_KERNEL);
5429 		if (!fusion->stream_detect_by_ld) {
5430 			dev_err(&instance->pdev->dev,
5431 				"unable to allocate stream detection for pool of LDs\n");
5432 			goto fail_get_ld_pd_list;
5433 		}
5434 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5435 			fusion->stream_detect_by_ld[i] =
5436 				kzalloc(sizeof(struct LD_STREAM_DETECT),
5437 				GFP_KERNEL);
5438 			if (!fusion->stream_detect_by_ld[i]) {
5439 				dev_err(&instance->pdev->dev,
5440 					"unable to allocate stream detect by LD\n");
5441 				for (j = 0; j < i; ++j)
5442 					kfree(fusion->stream_detect_by_ld[j]);
5443 				kfree(fusion->stream_detect_by_ld);
5444 				fusion->stream_detect_by_ld = NULL;
5445 				goto fail_get_ld_pd_list;
5446 			}
5447 			fusion->stream_detect_by_ld[i]->mru_bit_map
5448 				= MR_STREAM_BITMAP;
5449 		}
5450 	}
5451 
5452 	if (megasas_ld_list_query(instance,
5453 				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5454 		goto fail_get_ld_pd_list;
5455 
5456 	/*
5457 	 * Compute the max allowed sectors per IO: The controller info has two
5458 	 * limits on max sectors. Driver should use the minimum of these two.
5459 	 *
5460 	 * 1 << stripe_sz_ops.min = max sectors per strip
5461 	 *
5462 	 * Note that older firmware (< FW ver 30) didn't report the information
5463 	 * needed to calculate max_sectors_1, so that number always ended up as zero.
5464 	 */
5465 	tmp_sectors = 0;
5466 	ctrl_info = instance->ctrl_info_buf;
5467 
5468 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5469 		le16_to_cpu(ctrl_info->max_strips_per_io);
5470 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5471 
5472 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
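	/*
	 * Worked example (illustrative numbers only): stripe_sz_ops.min = 7
	 * means a 64 KB strip, i.e. 128 sectors; with max_strips_per_io = 42
	 * that gives max_sectors_1 = 5376, and the smaller of that and
	 * max_request_size (max_sectors_2) becomes tmp_sectors.
	 */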
5473 
5474 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5475 	instance->passive = ctrl_info->cluster.passive;
5476 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5477 	instance->UnevenSpanSupport =
5478 		ctrl_info->adapterOperations2.supportUnevenSpans;
5479 	if (instance->UnevenSpanSupport) {
5480 		struct fusion_context *fusion = instance->ctrl_context;
5481 		if (MR_ValidateMapInfo(instance, instance->map_id))
5482 			fusion->fast_path_io = 1;
5483 		else
5484 			fusion->fast_path_io = 0;
5485 
5486 	}
5487 	if (ctrl_info->host_interface.SRIOV) {
5488 		instance->requestorId = ctrl_info->iov.requestorId;
5489 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5490 			if (!ctrl_info->adapterOperations2.activePassive)
5491 			    instance->PlasmaFW111 = 1;
5492 
5493 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5494 			    instance->PlasmaFW111 ? "1.11" : "new");
5495 
5496 			if (instance->PlasmaFW111) {
5497 			    iovPtr = (struct IOV_111 *)
5498 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
5499 			    instance->requestorId = iovPtr->requestorId;
5500 			}
5501 		}
5502 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5503 			instance->requestorId);
5504 	}
5505 
5506 	instance->crash_dump_fw_support =
5507 		ctrl_info->adapterOperations3.supportCrashDump;
5508 	instance->crash_dump_drv_support =
5509 		(instance->crash_dump_fw_support &&
5510 		instance->crash_dump_buf);
5511 	if (instance->crash_dump_drv_support)
5512 		megasas_set_crash_dump_params(instance,
5513 			MR_CRASH_BUF_TURN_OFF);
5514 
5515 	else {
5516 		if (instance->crash_dump_buf)
5517 			pci_free_consistent(instance->pdev,
5518 				CRASH_DMA_BUF_SIZE,
5519 				instance->crash_dump_buf,
5520 				instance->crash_dump_h);
5521 		instance->crash_dump_buf = NULL;
5522 	}
5523 
5524 
5525 	dev_info(&instance->pdev->dev,
5526 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5527 		le16_to_cpu(ctrl_info->pci.vendor_id),
5528 		le16_to_cpu(ctrl_info->pci.device_id),
5529 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5530 		le16_to_cpu(ctrl_info->pci.sub_device_id));
5531 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
5532 		instance->UnevenSpanSupport ? "yes" : "no");
5533 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
5534 		instance->crash_dump_drv_support ? "yes" : "no");
5535 	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
5536 		instance->use_seqnum_jbod_fp ? "yes" : "no");
5537 
5538 
5539 	instance->max_sectors_per_req = instance->max_num_sge *
5540 						SGE_BUFFER_SIZE / 512;
5541 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5542 		instance->max_sectors_per_req = tmp_sectors;
5543 
5544 	/* Check for valid throttlequeuedepth module parameter */
5545 	if (throttlequeuedepth &&
5546 			throttlequeuedepth <= instance->max_scsi_cmds)
5547 		instance->throttlequeuedepth = throttlequeuedepth;
5548 	else
5549 		instance->throttlequeuedepth =
5550 				MEGASAS_THROTTLE_QUEUE_DEPTH;
5551 
5552 	if ((resetwaittime < 1) ||
5553 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5554 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
5555 
5556 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5557 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5558 
5559 	/* Launch SR-IOV heartbeat timer */
5560 	if (instance->requestorId) {
5561 		if (!megasas_sriov_start_heartbeat(instance, 1))
5562 			megasas_start_timer(instance);
5563 		else
5564 			instance->skip_heartbeat_timer_del = 1;
5565 	}
5566 
5567 	return 0;
5568 
5569 fail_get_ld_pd_list:
5570 	instance->instancet->disable_intr(instance);
5571 fail_init_adapter:
5572 	megasas_destroy_irqs(instance);
5573 fail_setup_irqs:
5574 	if (instance->msix_vectors)
5575 		pci_free_irq_vectors(instance->pdev);
5576 	instance->msix_vectors = 0;
5577 fail_alloc_dma_buf:
5578 	megasas_free_ctrl_dma_buffers(instance);
5579 	megasas_free_ctrl_mem(instance);
5580 fail_ready_state:
5581 	iounmap(instance->reg_set);
5582 
5583 fail_ioremap:
5584 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5585 
5586 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5587 		__func__, __LINE__);
5588 	return -EINVAL;
5589 }
5590 
5591 /**
5592  * megasas_release_mfi -	Reverses the FW initialization
5593  * @instance:			Adapter soft state
5594  */
5595 static void megasas_release_mfi(struct megasas_instance *instance)
5596 {
5597 	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5598 
5599 	if (instance->reply_queue)
5600 		pci_free_consistent(instance->pdev, reply_q_sz,
5601 			    instance->reply_queue, instance->reply_queue_h);
5602 
5603 	megasas_free_cmds(instance);
5604 
5605 	iounmap(instance->reg_set);
5606 
5607 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5608 }
5609 
5610 /**
5611  * megasas_get_seq_num -	Gets latest event sequence numbers
5612  * @instance:			Adapter soft state
5613  * @eli:			FW event log sequence numbers information
5614  *
5615  * FW maintains a log of all events in a non-volatile area. Upper layers
5616  * typically query the latest event sequence number, the sequence number
5617  * at boot, etc. They then "read" all events up to the latest sequence
5618  * number by issuing a direct FW command (DCMD). For future events (beyond
5619  * the latest sequence number), they subscribe to AEN (asynchronous event
5620  * notification) and wait for the events to happen.
5621  */
5622 static int
5623 megasas_get_seq_num(struct megasas_instance *instance,
5624 		    struct megasas_evt_log_info *eli)
5625 {
5626 	struct megasas_cmd *cmd;
5627 	struct megasas_dcmd_frame *dcmd;
5628 	struct megasas_evt_log_info *el_info;
5629 	dma_addr_t el_info_h = 0;
5630 	int ret;
5631 
5632 	cmd = megasas_get_cmd(instance);
5633 
5634 	if (!cmd) {
5635 		return -ENOMEM;
5636 	}
5637 
5638 	dcmd = &cmd->frame->dcmd;
5639 	el_info = pci_alloc_consistent(instance->pdev,
5640 				       sizeof(struct megasas_evt_log_info),
5641 				       &el_info_h);
5642 
5643 	if (!el_info) {
5644 		megasas_return_cmd(instance, cmd);
5645 		return -ENOMEM;
5646 	}
5647 
5648 	memset(el_info, 0, sizeof(*el_info));
5649 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5650 
5651 	dcmd->cmd = MFI_CMD_DCMD;
5652 	dcmd->cmd_status = 0x0;
5653 	dcmd->sge_count = 1;
5654 	dcmd->flags = MFI_FRAME_DIR_READ;
5655 	dcmd->timeout = 0;
5656 	dcmd->pad_0 = 0;
5657 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5658 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5659 
5660 	megasas_set_dma_settings(instance, dcmd, el_info_h,
5661 				 sizeof(struct megasas_evt_log_info));
5662 
5663 	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5664 	if (ret != DCMD_SUCCESS) {
5665 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5666 			__func__, __LINE__);
5667 		goto dcmd_failed;
5668 	}
5669 
5670 	/*
5671 	 * Copy the data back into callers buffer
5672 	 */
5673 	eli->newest_seq_num = el_info->newest_seq_num;
5674 	eli->oldest_seq_num = el_info->oldest_seq_num;
5675 	eli->clear_seq_num = el_info->clear_seq_num;
5676 	eli->shutdown_seq_num = el_info->shutdown_seq_num;
5677 	eli->boot_seq_num = el_info->boot_seq_num;
5678 
5679 dcmd_failed:
5680 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5681 			    el_info, el_info_h);
5682 
5683 	megasas_return_cmd(instance, cmd);
5684 
5685 	return ret;
5686 }
5687 
5688 /**
5689  * megasas_register_aen -	Registers for asynchronous event notification
5690  * @instance:			Adapter soft state
5691  * @seq_num:			The starting sequence number
5692  * @class_locale_word:		Class and locale of the events to subscribe to
5693  *
5694  * This function subscribes for AEN for events beyond the @seq_num. It requests
5695  * to be notified if and only if the event is of type @class_locale
5696  */
5697 static int
5698 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5699 		     u32 class_locale_word)
5700 {
5701 	int ret_val;
5702 	struct megasas_cmd *cmd;
5703 	struct megasas_dcmd_frame *dcmd;
5704 	union megasas_evt_class_locale curr_aen;
5705 	union megasas_evt_class_locale prev_aen;
5706 
5707 	/*
5708 	 * If there is an AEN pending already (aen_cmd), check whether the
5709 	 * class_locale of that pending AEN is inclusive of the new AEN
5710 	 * request we currently have. If it is, then we don't have to do
5711 	 * anything. In other words, whichever events the current AEN
5712 	 * request is subscribing to have already been subscribed
5713 	 * to.
5714 	 *
5715 	 * If the old command is _not_ inclusive, then we have to abort
5716 	 * that command, form a class_locale that is a superset of both
5717 	 * the old and the current one, and re-issue it to the FW.
5718 	 */
5719 
5720 	curr_aen.word = class_locale_word;
5721 
5722 	if (instance->aen_cmd) {
5723 
5724 		prev_aen.word =
5725 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5726 
5727 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
5728 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
5729 			dev_info(&instance->pdev->dev,
5730 				 "%s %d out of range class %d sent by application\n",
5731 				 __func__, __LINE__, curr_aen.members.class);
5732 			return 0;
5733 		}
5734 
5735 		/*
5736 		 * A class whose enum value is smaller is inclusive of all
5737 		 * higher values. If a PROGRESS (= -1) was previously
5738 		 * registered, then a new registration requests for higher
5739 		 * classes need not be sent to FW. They are automatically
5740 		 * included.
5741 		 *
5742 		 * Locale numbers don't have such hierarchy. They are bitmap
5743 		 * values
5744 		 */
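		/*
		 * For instance (illustrative values): if a PROGRESS-class
		 * registration with locale 0x0001 is already pending and an
		 * application now asks for a higher class with locale 0x0002,
		 * the pending AEN is not inclusive of the new request; the
		 * merge below keeps the lower (more inclusive) class and ORs
		 * the locales into 0x0003 before re-registering.
		 */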
5745 		if ((prev_aen.members.class <= curr_aen.members.class) &&
5746 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
5747 		      curr_aen.members.locale)) {
5748 			/*
5749 			 * Previously issued event registration includes
5750 			 * current request. Nothing to do.
5751 			 */
5752 			return 0;
5753 		} else {
5754 			curr_aen.members.locale |= prev_aen.members.locale;
5755 
5756 			if (prev_aen.members.class < curr_aen.members.class)
5757 				curr_aen.members.class = prev_aen.members.class;
5758 
5759 			instance->aen_cmd->abort_aen = 1;
5760 			ret_val = megasas_issue_blocked_abort_cmd(instance,
5761 								  instance->
5762 								  aen_cmd, 30);
5763 
5764 			if (ret_val) {
5765 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5766 				       "previous AEN command\n");
5767 				return ret_val;
5768 			}
5769 		}
5770 	}
5771 
5772 	cmd = megasas_get_cmd(instance);
5773 
5774 	if (!cmd)
5775 		return -ENOMEM;
5776 
5777 	dcmd = &cmd->frame->dcmd;
5778 
5779 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5780 
5781 	/*
5782 	 * Prepare DCMD for aen registration
5783 	 */
5784 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5785 
5786 	dcmd->cmd = MFI_CMD_DCMD;
5787 	dcmd->cmd_status = 0x0;
5788 	dcmd->sge_count = 1;
5789 	dcmd->flags = MFI_FRAME_DIR_READ;
5790 	dcmd->timeout = 0;
5791 	dcmd->pad_0 = 0;
5792 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5793 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5794 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5795 	instance->last_seq_num = seq_num;
5796 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5797 
5798 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
5799 				 sizeof(struct megasas_evt_detail));
5800 
5801 	if (instance->aen_cmd != NULL) {
5802 		megasas_return_cmd(instance, cmd);
5803 		return 0;
5804 	}
5805 
5806 	/*
5807 	 * Store reference to the cmd used to register for AEN. When an
5808 	 * application wants us to register for AEN, we have to abort this
5809 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
5810 	 */
5811 	instance->aen_cmd = cmd;
5812 
5813 	/*
5814 	 * Issue the aen registration frame
5815 	 */
5816 	instance->instancet->issue_dcmd(instance, cmd);
5817 
5818 	return 0;
5819 }
5820 
5821 /* megasas_get_target_prop - Send DCMD with below details to firmware.
5822  *
5823  * This DCMD will fetch a few properties of the LD/system PD defined
5824  * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
5825  *
5826  * The DCMD is sent by the driver whenever a new target is added to the OS.
5827  *
5828  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
5829  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
5830  *                       0 = system PD, 1 = LD.
5831  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
5832  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
5833  *
5834  * @instance:		Adapter soft state
5835  * @sdev:		OS provided scsi device
5836  *
5837  * Returns 0 on success non-zero on failure.
5838  */
5839 static int
5840 megasas_get_target_prop(struct megasas_instance *instance,
5841 			struct scsi_device *sdev)
5842 {
5843 	int ret;
5844 	struct megasas_cmd *cmd;
5845 	struct megasas_dcmd_frame *dcmd;
5846 	u16 targetId = (sdev->channel % 2) + sdev->id;
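	/*
	 * targetId example (illustrative, assuming the usual two PD channels
	 * followed by two LD channels): a system PD at channel 0, id 4 yields
	 * targetId 4 with mbox.b[0] = 0, while an LD at channel 2, id 4 also
	 * yields targetId 4 but with mbox.b[0] = 1, so the LD/PD flag in
	 * mbox.b[0] disambiguates the two.
	 */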
5847 
5848 	cmd = megasas_get_cmd(instance);
5849 
5850 	if (!cmd) {
5851 		dev_err(&instance->pdev->dev,
5852 			"Failed to get cmd %s\n", __func__);
5853 		return -ENOMEM;
5854 	}
5855 
5856 	dcmd = &cmd->frame->dcmd;
5857 
5858 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5859 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5860 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5861 
5862 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
5863 	dcmd->cmd = MFI_CMD_DCMD;
5864 	dcmd->cmd_status = 0xFF;
5865 	dcmd->sge_count = 1;
5866 	dcmd->flags = MFI_FRAME_DIR_READ;
5867 	dcmd->timeout = 0;
5868 	dcmd->pad_0 = 0;
5869 	dcmd->data_xfer_len =
5870 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5871 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5872 
5873 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
5874 				 sizeof(struct MR_TARGET_PROPERTIES));
5875 
5876 	if ((instance->adapter_type != MFI_SERIES) &&
5877 	    !instance->mask_interrupts)
5878 		ret = megasas_issue_blocked_cmd(instance,
5879 						cmd, MFI_IO_TIMEOUT_SECS);
5880 	else
5881 		ret = megasas_issue_polled(instance, cmd);
5882 
5883 	switch (ret) {
5884 	case DCMD_TIMEOUT:
5885 		switch (dcmd_timeout_ocr_possible(instance)) {
5886 		case INITIATE_OCR:
5887 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5888 			megasas_reset_fusion(instance->host,
5889 					     MFI_IO_TIMEOUT_OCR);
5890 			break;
5891 		case KILL_ADAPTER:
5892 			megaraid_sas_kill_hba(instance);
5893 			break;
5894 		case IGNORE_TIMEOUT:
5895 			dev_info(&instance->pdev->dev,
5896 				 "Ignore DCMD timeout: %s %d\n",
5897 				 __func__, __LINE__);
5898 			break;
5899 		}
5900 		break;
5901 
5902 	default:
5903 		megasas_return_cmd(instance, cmd);
5904 	}
5905 	if (ret != DCMD_SUCCESS)
5906 		dev_err(&instance->pdev->dev,
5907 			"return from %s %d return value %d\n",
5908 			__func__, __LINE__, ret);
5909 
5910 	return ret;
5911 }
5912 
5913 /**
5914  * megasas_start_aen -	Subscribes to AEN during driver load time
5915  * @instance:		Adapter soft state
5916  */
5917 static int megasas_start_aen(struct megasas_instance *instance)
5918 {
5919 	struct megasas_evt_log_info eli;
5920 	union megasas_evt_class_locale class_locale;
5921 
5922 	/*
5923 	 * Get the latest sequence number from FW
5924 	 */
5925 	memset(&eli, 0, sizeof(eli));
5926 
5927 	if (megasas_get_seq_num(instance, &eli))
5928 		return -1;
5929 
5930 	/*
5931 	 * Register AEN with FW for latest sequence number plus 1
5932 	 */
5933 	class_locale.members.reserved = 0;
5934 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
5935 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
5936 
5937 	return megasas_register_aen(instance,
5938 			le32_to_cpu(eli.newest_seq_num) + 1,
5939 			class_locale.word);
5940 }
5941 
5942 /**
5943  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
5944  * @instance:		Adapter soft state
5945  */
5946 static int megasas_io_attach(struct megasas_instance *instance)
5947 {
5948 	struct Scsi_Host *host = instance->host;
5949 
5950 	/*
5951 	 * Export parameters required by SCSI mid-layer
5952 	 */
5953 	host->unique_id = instance->unique_id;
5954 	host->can_queue = instance->max_scsi_cmds;
5955 	host->this_id = instance->init_id;
5956 	host->sg_tablesize = instance->max_num_sge;
5957 
5958 	if (instance->fw_support_ieee)
5959 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5960 
5961 	/*
5962 	 * Check if the module parameter value for max_sectors can be used
5963 	 */
5964 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
5965 		instance->max_sectors_per_req = max_sectors;
5966 	else {
5967 		if (max_sectors) {
5968 			if (((instance->pdev->device ==
5969 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5970 				(instance->pdev->device ==
5971 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5972 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
5973 				instance->max_sectors_per_req = max_sectors;
5974 			} else {
5975 				dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5976 					"and <= %d (or < 1MB for GEN2 controller)\n",
5977 					instance->max_sectors_per_req);
5978 			}
5979 		}
5980 	}
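	/*
	 * Example (illustrative, assuming MEGASAS_MAX_SECTORS is 2048): on a
	 * SAS1078GEN2/SAS0079GEN2 controller a user-supplied max_sectors of
	 * 2048 (1 MiB of 512-byte sectors) is still honoured here even if it
	 * exceeds the firmware-derived max_sectors_per_req.
	 */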
5981 
5982 	host->max_sectors = instance->max_sectors_per_req;
5983 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5984 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5985 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5986 	host->max_lun = MEGASAS_MAX_LUN;
5987 	host->max_cmd_len = 16;
5988 
5989 	/*
5990 	 * Notify the mid-layer about the new controller
5991 	 */
5992 	if (scsi_add_host(host, &instance->pdev->dev)) {
5993 		dev_err(&instance->pdev->dev,
5994 			"Failed to add host from %s %d\n",
5995 			__func__, __LINE__);
5996 		return -ENODEV;
5997 	}
5998 
5999 	return 0;
6000 }
6001 
6002 /**
6003  * megasas_set_dma_mask -	Set DMA mask for supported controllers
6004  *
6005  * @instance:		Adapter soft state
6006  * Description:
6007  *
6008  * For Ventura, driver/FW will operate in 64bit DMA addresses.
6009  *
6010  * For Invader -
6011  *	By default, driver/FW will operate in 32bit DMA addresses
6012  *	for consistent DMA mapping but if 32 bit consistent
6013  *	DMA mask fails, driver will try with 64 bit consistent
6014  *	mask provided FW is true 64bit DMA capable
6015  *
6016  * For older controllers (Thunderbolt and MFI based adapters) -
6017  *	driver/FW will operate in 32 bit consistent DMA addresses.
6018  */
6019 static int
6020 megasas_set_dma_mask(struct megasas_instance *instance)
6021 {
6022 	u64 consistent_mask;
6023 	struct pci_dev *pdev;
6024 	u32 scratch_pad_2;
6025 
6026 	pdev = instance->pdev;
6027 	consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
6028 				DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
6029 
6030 	if (IS_DMA64) {
6031 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6032 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6033 			goto fail_set_dma_mask;
6034 
6035 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
6036 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6037 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6038 			/*
6039 			 * If 32 bit DMA mask fails, then try for 64 bit mask
6040 			 * for FW capable of handling 64 bit DMA.
6041 			 */
6042 			scratch_pad_2 = readl
6043 				(&instance->reg_set->outbound_scratch_pad_2);
6044 
6045 			if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6046 				goto fail_set_dma_mask;
6047 			else if (dma_set_mask_and_coherent(&pdev->dev,
6048 							   DMA_BIT_MASK(64)))
6049 				goto fail_set_dma_mask;
6050 		}
6051 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6052 		goto fail_set_dma_mask;
6053 
6054 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6055 		instance->consistent_mask_64bit = false;
6056 	else
6057 		instance->consistent_mask_64bit = true;
6058 
6059 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6060 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
6061 		 (instance->consistent_mask_64bit ? "64" : "32"));
6062 
6063 	return 0;
6064 
6065 fail_set_dma_mask:
6066 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6067 	return -1;
6068 
6069 }
6070 
6071 /*
6072  * megasas_set_adapter_type -	Set adapter type.
6073  *				Supported controllers can be divided into
6074  *				4 categories-  enum MR_ADAPTER_TYPE {
6075  *							MFI_SERIES = 1,
6076  *							THUNDERBOLT_SERIES = 2,
6077  *							INVADER_SERIES = 3,
6078  *							VENTURA_SERIES = 4,
6079  *						};
6080  * @instance:			Adapter soft state
6081  * return:			void
6082  */
6083 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6084 {
6085 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6086 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6087 		instance->adapter_type = MFI_SERIES;
6088 	} else {
6089 		switch (instance->pdev->device) {
6090 		case PCI_DEVICE_ID_LSI_VENTURA:
6091 		case PCI_DEVICE_ID_LSI_CRUSADER:
6092 		case PCI_DEVICE_ID_LSI_HARPOON:
6093 		case PCI_DEVICE_ID_LSI_TOMCAT:
6094 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6095 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6096 			instance->adapter_type = VENTURA_SERIES;
6097 			break;
6098 		case PCI_DEVICE_ID_LSI_FUSION:
6099 		case PCI_DEVICE_ID_LSI_PLASMA:
6100 			instance->adapter_type = THUNDERBOLT_SERIES;
6101 			break;
6102 		case PCI_DEVICE_ID_LSI_INVADER:
6103 		case PCI_DEVICE_ID_LSI_INTRUDER:
6104 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6105 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6106 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6107 		case PCI_DEVICE_ID_LSI_FURY:
6108 			instance->adapter_type = INVADER_SERIES;
6109 			break;
6110 		default: /* For all other supported controllers */
6111 			instance->adapter_type = MFI_SERIES;
6112 			break;
6113 		}
6114 	}
6115 }
6116 
6117 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6118 {
6119 	instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6120 						  &instance->producer_h);
6121 	instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6122 						  &instance->consumer_h);
6123 
6124 	if (!instance->producer || !instance->consumer) {
6125 		dev_err(&instance->pdev->dev,
6126 			"Failed to allocate memory for producer, consumer\n");
6127 		return -1;
6128 	}
6129 
6130 	*instance->producer = 0;
6131 	*instance->consumer = 0;
6132 	return 0;
6133 }
6134 
6135 /**
6136  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6137  *				structures which are not common across MFI
6138  *				adapters and fusion adapters.
6139  *				For MFI based adapters, allocate producer and
6140  *				consumer buffers. For fusion adapters, allocate
6141  *				memory for fusion context.
6142  * @instance:			Adapter soft state
6143  * return:			0 for SUCCESS
6144  */
6145 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6146 {
6147 	instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
6148 				      GFP_KERNEL);
6149 	if (!instance->reply_map)
6150 		return -ENOMEM;
6151 
6152 	switch (instance->adapter_type) {
6153 	case MFI_SERIES:
6154 		if (megasas_alloc_mfi_ctrl_mem(instance))
6155 			goto fail;
6156 		break;
6157 	case VENTURA_SERIES:
6158 	case THUNDERBOLT_SERIES:
6159 	case INVADER_SERIES:
6160 		if (megasas_alloc_fusion_context(instance))
6161 			goto fail;
6162 		break;
6163 	}
6164 
6165 	return 0;
6166  fail:
6167 	kfree(instance->reply_map);
6168 	instance->reply_map = NULL;
6169 	return -ENOMEM;
6170 }
6171 
6172 /*
6173  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6174  *				producer, consumer buffers for MFI adapters
6175  *
6176  * @instance -			Adapter soft instance
6177  *
6178  */
6179 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6180 {
6181 	kfree(instance->reply_map);
6182 	if (instance->adapter_type == MFI_SERIES) {
6183 		if (instance->producer)
6184 			pci_free_consistent(instance->pdev, sizeof(u32),
6185 					    instance->producer,
6186 					    instance->producer_h);
6187 		if (instance->consumer)
6188 			pci_free_consistent(instance->pdev, sizeof(u32),
6189 					    instance->consumer,
6190 					    instance->consumer_h);
6191 	} else {
6192 		megasas_free_fusion_context(instance);
6193 	}
6194 }
6195 
6196 /**
6197  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6198  *					driver load time
6199  *
6200  * @instance-				Adapter soft instance
6201  * @return-				0 for SUCCESS
6202  */
6203 static inline
6204 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6205 {
6206 	struct pci_dev *pdev = instance->pdev;
6207 	struct fusion_context *fusion = instance->ctrl_context;
6208 
6209 	instance->evt_detail =
6210 		pci_alloc_consistent(pdev,
6211 				     sizeof(struct megasas_evt_detail),
6212 				     &instance->evt_detail_h);
6213 
6214 	if (!instance->evt_detail) {
6215 		dev_err(&instance->pdev->dev,
6216 			"Failed to allocate event detail buffer\n");
6217 		return -ENOMEM;
6218 	}
6219 
6220 	if (fusion) {
6221 		fusion->ioc_init_request =
6222 			dma_alloc_coherent(&pdev->dev,
6223 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
6224 					   &fusion->ioc_init_request_phys,
6225 					   GFP_KERNEL);
6226 
6227 		if (!fusion->ioc_init_request) {
6228 			dev_err(&pdev->dev,
6229 				"Failed to allocate ioc init request buffer\n");
6230 			return -ENOMEM;
6231 		}
6232 	}
6233 
6234 	instance->pd_list_buf =
6235 		pci_alloc_consistent(pdev,
6236 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6237 				     &instance->pd_list_buf_h);
6238 
6239 	if (!instance->pd_list_buf) {
6240 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6241 		return -ENOMEM;
6242 	}
6243 
6244 	instance->ctrl_info_buf =
6245 		pci_alloc_consistent(pdev,
6246 				     sizeof(struct megasas_ctrl_info),
6247 				     &instance->ctrl_info_buf_h);
6248 
6249 	if (!instance->ctrl_info_buf) {
6250 		dev_err(&pdev->dev,
6251 			"Failed to allocate controller info buffer\n");
6252 		return -ENOMEM;
6253 	}
6254 
6255 	instance->ld_list_buf =
6256 		pci_alloc_consistent(pdev,
6257 				     sizeof(struct MR_LD_LIST),
6258 				     &instance->ld_list_buf_h);
6259 
6260 	if (!instance->ld_list_buf) {
6261 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6262 		return -ENOMEM;
6263 	}
6264 
6265 	instance->ld_targetid_list_buf =
6266 		pci_alloc_consistent(pdev,
6267 				     sizeof(struct MR_LD_TARGETID_LIST),
6268 				     &instance->ld_targetid_list_buf_h);
6269 
6270 	if (!instance->ld_targetid_list_buf) {
6271 		dev_err(&pdev->dev,
6272 			"Failed to allocate LD targetid list buffer\n");
6273 		return -ENOMEM;
6274 	}
6275 
6276 	if (!reset_devices) {
6277 		instance->system_info_buf =
6278 			pci_alloc_consistent(pdev,
6279 					     sizeof(struct MR_DRV_SYSTEM_INFO),
6280 					     &instance->system_info_h);
6281 		instance->pd_info =
6282 			pci_alloc_consistent(pdev,
6283 					     sizeof(struct MR_PD_INFO),
6284 					     &instance->pd_info_h);
6285 		instance->tgt_prop =
6286 			pci_alloc_consistent(pdev,
6287 					     sizeof(struct MR_TARGET_PROPERTIES),
6288 					     &instance->tgt_prop_h);
6289 		instance->crash_dump_buf =
6290 			pci_alloc_consistent(pdev,
6291 					     CRASH_DMA_BUF_SIZE,
6292 					     &instance->crash_dump_h);
6293 
6294 		if (!instance->system_info_buf)
6295 			dev_err(&instance->pdev->dev,
6296 				"Failed to allocate system info buffer\n");
6297 
6298 		if (!instance->pd_info)
6299 			dev_err(&instance->pdev->dev,
6300 				"Failed to allocate pd_info buffer\n");
6301 
6302 		if (!instance->tgt_prop)
6303 			dev_err(&instance->pdev->dev,
6304 				"Failed to allocate tgt_prop buffer\n");
6305 
6306 		if (!instance->crash_dump_buf)
6307 			dev_err(&instance->pdev->dev,
6308 				"Failed to allocate crash dump buffer\n");
6309 	}
6310 
6311 	return 0;
6312 }
6313 
6314 /*
6315  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
6316  *					during driver load time
6317  *
6318  * @instance-				Adapter soft instance
6319  *
6320  */
6321 static inline
6322 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6323 {
6324 	struct pci_dev *pdev = instance->pdev;
6325 	struct fusion_context *fusion = instance->ctrl_context;
6326 
6327 	if (instance->evt_detail)
6328 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6329 				    instance->evt_detail,
6330 				    instance->evt_detail_h);
6331 
6332 	if (fusion && fusion->ioc_init_request)
6333 		dma_free_coherent(&pdev->dev,
6334 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
6335 				  fusion->ioc_init_request,
6336 				  fusion->ioc_init_request_phys);
6337 
6338 	if (instance->pd_list_buf)
6339 		pci_free_consistent(pdev,
6340 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6341 				    instance->pd_list_buf,
6342 				    instance->pd_list_buf_h);
6343 
6344 	if (instance->ld_list_buf)
6345 		pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
6346 				    instance->ld_list_buf,
6347 				    instance->ld_list_buf_h);
6348 
6349 	if (instance->ld_targetid_list_buf)
6350 		pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
6351 				    instance->ld_targetid_list_buf,
6352 				    instance->ld_targetid_list_buf_h);
6353 
6354 	if (instance->ctrl_info_buf)
6355 		pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
6356 				    instance->ctrl_info_buf,
6357 				    instance->ctrl_info_buf_h);
6358 
6359 	if (instance->system_info_buf)
6360 		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6361 				    instance->system_info_buf,
6362 				    instance->system_info_h);
6363 
6364 	if (instance->pd_info)
6365 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6366 				    instance->pd_info, instance->pd_info_h);
6367 
6368 	if (instance->tgt_prop)
6369 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6370 				    instance->tgt_prop, instance->tgt_prop_h);
6371 
6372 	if (instance->crash_dump_buf)
6373 		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6374 				    instance->crash_dump_buf,
6375 				    instance->crash_dump_h);
6376 }
6377 
6378 /*
6379  * megasas_init_ctrl_params -		Initialize controller's instance
6380  *					parameters before FW init
6381  * @instance -				Adapter soft instance
6382  * @return -				void
6383  */
6384 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6385 {
6386 	instance->fw_crash_state = UNAVAILABLE;
6387 
6388 	megasas_poll_wait_aen = 0;
6389 	instance->issuepend_done = 1;
6390 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6391 
6392 	/*
6393 	 * Initialize locks and queues
6394 	 */
6395 	INIT_LIST_HEAD(&instance->cmd_pool);
6396 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6397 
6398 	atomic_set(&instance->fw_outstanding, 0);
6399 
6400 	init_waitqueue_head(&instance->int_cmd_wait_q);
6401 	init_waitqueue_head(&instance->abort_cmd_wait_q);
6402 
6403 	spin_lock_init(&instance->crashdump_lock);
6404 	spin_lock_init(&instance->mfi_pool_lock);
6405 	spin_lock_init(&instance->hba_lock);
6406 	spin_lock_init(&instance->stream_lock);
6407 	spin_lock_init(&instance->completion_lock);
6408 
6409 	mutex_init(&instance->reset_mutex);
6410 
6411 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6412 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6413 		instance->flag_ieee = 1;
6414 
6415 	megasas_dbg_lvl = 0;
6416 	instance->flag = 0;
6417 	instance->unload = 1;
6418 	instance->last_time = 0;
6419 	instance->disableOnlineCtrlReset = 1;
6420 	instance->UnevenSpanSupport = 0;
6421 
6422 	if (instance->adapter_type != MFI_SERIES) {
6423 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6424 		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6425 	} else {
6426 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6427 	}
6428 }
6429 
6430 /**
6431  * megasas_probe_one -	PCI hotplug entry point
6432  * @pdev:		PCI device structure
6433  * @id:			PCI ids of supported hotplugged adapter
6434  */
6435 static int megasas_probe_one(struct pci_dev *pdev,
6436 			     const struct pci_device_id *id)
6437 {
6438 	int rval, pos;
6439 	struct Scsi_Host *host;
6440 	struct megasas_instance *instance;
6441 	u16 control = 0;
6442 
6443 	/* Reset MSI-X in the kdump kernel */
6444 	if (reset_devices) {
6445 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6446 		if (pos) {
6447 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6448 					     &control);
6449 			if (control & PCI_MSIX_FLAGS_ENABLE) {
6450 				dev_info(&pdev->dev, "resetting MSI-X\n");
6451 				pci_write_config_word(pdev,
6452 						      pos + PCI_MSIX_FLAGS,
6453 						      control &
6454 						      ~PCI_MSIX_FLAGS_ENABLE);
6455 			}
6456 		}
6457 	}
6458 
6459 	/*
6460 	 * PCI prepping: enable device, set bus mastering and DMA mask
6461 	 */
6462 	rval = pci_enable_device_mem(pdev);
6463 
6464 	if (rval) {
6465 		return rval;
6466 	}
6467 
6468 	pci_set_master(pdev);
6469 
6470 	host = scsi_host_alloc(&megasas_template,
6471 			       sizeof(struct megasas_instance));
6472 
6473 	if (!host) {
6474 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6475 		goto fail_alloc_instance;
6476 	}
6477 
6478 	instance = (struct megasas_instance *)host->hostdata;
6479 	memset(instance, 0, sizeof(*instance));
6480 	atomic_set(&instance->fw_reset_no_pci_access, 0);
6481 
6482 	/*
6483 	 * Initialize PCI related and misc parameters
6484 	 */
6485 	instance->pdev = pdev;
6486 	instance->host = host;
6487 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6488 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6489 
6490 	megasas_set_adapter_type(instance);
6491 
6492 	/*
6493 	 * Initialize MFI Firmware
6494 	 */
6495 	if (megasas_init_fw(instance))
6496 		goto fail_init_mfi;
6497 
6498 	if (instance->requestorId) {
6499 		if (instance->PlasmaFW111) {
6500 			instance->vf_affiliation_111 =
6501 				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6502 						     &instance->vf_affiliation_111_h);
6503 			if (!instance->vf_affiliation_111)
6504 				dev_warn(&pdev->dev, "Can't allocate "
6505 				       "memory for VF affiliation buffer\n");
6506 		} else {
6507 			instance->vf_affiliation =
6508 				pci_alloc_consistent(pdev,
6509 						     (MAX_LOGICAL_DRIVES + 1) *
6510 						     sizeof(struct MR_LD_VF_AFFILIATION),
6511 						     &instance->vf_affiliation_h);
6512 			if (!instance->vf_affiliation)
6513 				dev_warn(&pdev->dev, "Can't allocate "
6514 				       "memory for VF affiliation buffer\n");
6515 		}
6516 	}
6517 
6518 	/*
6519 	 * Store instance in PCI softstate
6520 	 */
6521 	pci_set_drvdata(pdev, instance);
6522 
6523 	/*
6524 	 * Add this controller to megasas_mgmt_info structure so that it
6525 	 * can be exported to management applications
6526 	 */
6527 	megasas_mgmt_info.count++;
6528 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6529 	megasas_mgmt_info.max_index++;
6530 
6531 	/*
6532 	 * Register with SCSI mid-layer
6533 	 */
6534 	if (megasas_io_attach(instance))
6535 		goto fail_io_attach;
6536 
6537 	instance->unload = 0;
6538 	/*
6539 	 * Trigger SCSI to scan our drives
6540 	 */
6541 	scsi_scan_host(host);
6542 
6543 	/*
6544 	 * Initiate AEN (Asynchronous Event Notification)
6545 	 */
6546 	if (megasas_start_aen(instance)) {
6547 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6548 		goto fail_start_aen;
6549 	}
6550 
6551 	/* Get current SR-IOV LD/VF affiliation */
6552 	if (instance->requestorId)
6553 		megasas_get_ld_vf_affiliation(instance, 1);
6554 
6555 	return 0;
6556 
6557 fail_start_aen:
6558 fail_io_attach:
6559 	megasas_mgmt_info.count--;
6560 	megasas_mgmt_info.max_index--;
6561 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6562 
6563 	instance->instancet->disable_intr(instance);
6564 	megasas_destroy_irqs(instance);
6565 
6566 	if (instance->adapter_type != MFI_SERIES)
6567 		megasas_release_fusion(instance);
6568 	else
6569 		megasas_release_mfi(instance);
6570 	if (instance->msix_vectors)
6571 		pci_free_irq_vectors(instance->pdev);
6572 fail_init_mfi:
6573 	scsi_host_put(host);
6574 fail_alloc_instance:
6575 	pci_disable_device(pdev);
6576 
6577 	return -ENODEV;
6578 }
6579 
6580 /**
6581  * megasas_flush_cache -	Requests FW to flush all its caches
6582  * @instance:			Adapter soft state
6583  */
6584 static void megasas_flush_cache(struct megasas_instance *instance)
6585 {
6586 	struct megasas_cmd *cmd;
6587 	struct megasas_dcmd_frame *dcmd;
6588 
6589 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6590 		return;
6591 
6592 	cmd = megasas_get_cmd(instance);
6593 
6594 	if (!cmd)
6595 		return;
6596 
6597 	dcmd = &cmd->frame->dcmd;
6598 
6599 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6600 
6601 	dcmd->cmd = MFI_CMD_DCMD;
6602 	dcmd->cmd_status = 0x0;
6603 	dcmd->sge_count = 0;
6604 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6605 	dcmd->timeout = 0;
6606 	dcmd->pad_0 = 0;
6607 	dcmd->data_xfer_len = 0;
6608 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6609 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6610 
6611 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6612 			!= DCMD_SUCCESS) {
6613 		dev_err(&instance->pdev->dev,
6614 			"return from %s %d\n", __func__, __LINE__);
6615 		return;
6616 	}
6617 
6618 	megasas_return_cmd(instance, cmd);
6619 }
6620 
6621 /**
6622  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
6623  * @instance:				Adapter soft state
6624  * @opcode:				Shutdown/Hibernate
6625  */
6626 static void megasas_shutdown_controller(struct megasas_instance *instance,
6627 					u32 opcode)
6628 {
6629 	struct megasas_cmd *cmd;
6630 	struct megasas_dcmd_frame *dcmd;
6631 
6632 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6633 		return;
6634 
6635 	cmd = megasas_get_cmd(instance);
6636 
6637 	if (!cmd)
6638 		return;
6639 
6640 	if (instance->aen_cmd)
6641 		megasas_issue_blocked_abort_cmd(instance,
6642 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6643 	if (instance->map_update_cmd)
6644 		megasas_issue_blocked_abort_cmd(instance,
6645 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6646 	if (instance->jbod_seq_cmd)
6647 		megasas_issue_blocked_abort_cmd(instance,
6648 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6649 
6650 	dcmd = &cmd->frame->dcmd;
6651 
6652 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6653 
6654 	dcmd->cmd = MFI_CMD_DCMD;
6655 	dcmd->cmd_status = 0x0;
6656 	dcmd->sge_count = 0;
6657 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6658 	dcmd->timeout = 0;
6659 	dcmd->pad_0 = 0;
6660 	dcmd->data_xfer_len = 0;
6661 	dcmd->opcode = cpu_to_le32(opcode);
6662 
6663 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6664 			!= DCMD_SUCCESS) {
6665 		dev_err(&instance->pdev->dev,
6666 			"return from %s %d\n", __func__, __LINE__);
6667 		return;
6668 	}
6669 
6670 	megasas_return_cmd(instance, cmd);
6671 }
6672 
6673 #ifdef CONFIG_PM
6674 /**
6675  * megasas_suspend -	driver suspend entry point
6676  * @pdev:		PCI device structure
6677  * @state:		PCI power state to suspend routine
6678  */
6679 static int
6680 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6681 {
6682 	struct Scsi_Host *host;
6683 	struct megasas_instance *instance;
6684 
6685 	instance = pci_get_drvdata(pdev);
6686 	host = instance->host;
6687 	instance->unload = 1;
6688 
6689 	/* Shutdown SR-IOV heartbeat timer */
6690 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6691 		del_timer_sync(&instance->sriov_heartbeat_timer);
6692 
6693 	megasas_flush_cache(instance);
6694 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6695 
6696 	/* cancel the delayed work if it is still queued */
6697 	if (instance->ev != NULL) {
6698 		struct megasas_aen_event *ev = instance->ev;
6699 		cancel_delayed_work_sync(&ev->hotplug_work);
6700 		instance->ev = NULL;
6701 	}
6702 
6703 	tasklet_kill(&instance->isr_tasklet);
6704 
6705 	pci_set_drvdata(instance->pdev, instance);
6706 	instance->instancet->disable_intr(instance);
6707 
6708 	megasas_destroy_irqs(instance);
6709 
6710 	if (instance->msix_vectors)
6711 		pci_free_irq_vectors(instance->pdev);
6712 
6713 	pci_save_state(pdev);
6714 	pci_disable_device(pdev);
6715 
6716 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
6717 
6718 	return 0;
6719 }
6720 
6721 /**
6722  * megasas_resume-      driver resume entry point
6723  * @pdev:               PCI device structure
6724  */
6725 static int
6726 megasas_resume(struct pci_dev *pdev)
6727 {
6728 	int rval;
6729 	struct Scsi_Host *host;
6730 	struct megasas_instance *instance;
6731 	int irq_flags = PCI_IRQ_LEGACY;
6732 
6733 	instance = pci_get_drvdata(pdev);
6734 	host = instance->host;
6735 	pci_set_power_state(pdev, PCI_D0);
6736 	pci_enable_wake(pdev, PCI_D0, 0);
6737 	pci_restore_state(pdev);
6738 
6739 	/*
6740 	 * PCI prepping: enable device, set bus mastering and DMA mask
6741 	 */
6742 	rval = pci_enable_device_mem(pdev);
6743 
6744 	if (rval) {
6745 		dev_err(&pdev->dev, "Enable device failed\n");
6746 		return rval;
6747 	}
6748 
6749 	pci_set_master(pdev);
6750 
6751 	/*
6752 	 * We expect the FW state to be READY
6753 	 */
6754 	if (megasas_transition_to_ready(instance, 0))
6755 		goto fail_ready_state;
6756 
6757 	if (megasas_set_dma_mask(instance))
6758 		goto fail_set_dma_mask;
6759 
6760 	/*
6761 	 * Initialize MFI Firmware
6762 	 */
6763 
6764 	atomic_set(&instance->fw_outstanding, 0);
6765 	atomic_set(&instance->ldio_outstanding, 0);
6766 
6767 	/* Now re-enable MSI-X */
6768 	if (instance->msix_vectors) {
6769 		irq_flags = PCI_IRQ_MSIX;
6770 		if (smp_affinity_enable)
6771 			irq_flags |= PCI_IRQ_AFFINITY;
6772 	}
6773 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
6774 				     instance->msix_vectors ?
6775 				     instance->msix_vectors : 1, irq_flags);
6776 	if (rval < 0)
6777 		goto fail_reenable_msix;
6778 
6779 	megasas_setup_reply_map(instance);
6780 
6781 	if (instance->adapter_type != MFI_SERIES) {
6782 		megasas_reset_reply_desc(instance);
6783 		if (megasas_ioc_init_fusion(instance)) {
6784 			megasas_free_cmds(instance);
6785 			megasas_free_cmds_fusion(instance);
6786 			goto fail_init_mfi;
6787 		}
6788 		if (!megasas_get_map_info(instance))
6789 			megasas_sync_map_info(instance);
6790 	} else {
6791 		*instance->producer = 0;
6792 		*instance->consumer = 0;
6793 		if (megasas_issue_init_mfi(instance))
6794 			goto fail_init_mfi;
6795 	}
6796 
6797 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6798 		     (unsigned long)instance);
6799 
6800 	if (instance->msix_vectors ?
6801 			megasas_setup_irqs_msix(instance, 0) :
6802 			megasas_setup_irqs_ioapic(instance))
6803 		goto fail_init_mfi;
6804 
6805 	/* Re-launch SR-IOV heartbeat timer */
6806 	if (instance->requestorId) {
6807 		if (!megasas_sriov_start_heartbeat(instance, 0))
6808 			megasas_start_timer(instance);
6809 		else {
6810 			instance->skip_heartbeat_timer_del = 1;
6811 			goto fail_init_mfi;
6812 		}
6813 	}
6814 
6815 	instance->instancet->enable_intr(instance);
6816 	megasas_setup_jbod_map(instance);
6817 	instance->unload = 0;
6818 
6819 	/*
6820 	 * Initiate AEN (Asynchronous Event Notification)
6821 	 */
6822 	if (megasas_start_aen(instance))
6823 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
6824 
6825 	return 0;
6826 
6827 fail_init_mfi:
6828 	megasas_free_ctrl_dma_buffers(instance);
6829 	megasas_free_ctrl_mem(instance);
6830 	scsi_host_put(host);
6831 
6832 fail_reenable_msix:
6833 fail_set_dma_mask:
6834 fail_ready_state:
6835 
6836 	pci_disable_device(pdev);
6837 
6838 	return -ENODEV;
6839 }
6840 #else
6841 #define megasas_suspend	NULL
6842 #define megasas_resume	NULL
6843 #endif
6844 
6845 static inline int
6846 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6847 {
6848 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6849 	int i;
6850 
6851 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6852 		return 1;
6853 
6854 	for (i = 0; i < wait_time; i++) {
6855 		if (atomic_read(&instance->adprecovery)	== MEGASAS_HBA_OPERATIONAL)
6856 			break;
6857 
6858 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6859 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6860 
6861 		msleep(1000);
6862 	}
6863 
6864 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6865 		dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6866 			__func__);
6867 		return 1;
6868 	}
6869 
6870 	return 0;
6871 }
6872 
6873 /**
6874  * megasas_detach_one -	PCI hot-unplug entry point
6875  * @pdev:		PCI device structure
6876  */
6877 static void megasas_detach_one(struct pci_dev *pdev)
6878 {
6879 	int i;
6880 	struct Scsi_Host *host;
6881 	struct megasas_instance *instance;
6882 	struct fusion_context *fusion;
6883 	u32 pd_seq_map_sz;
6884 
6885 	instance = pci_get_drvdata(pdev);
6886 	host = instance->host;
6887 	fusion = instance->ctrl_context;
6888 
6889 	/* Shutdown SR-IOV heartbeat timer */
6890 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6891 		del_timer_sync(&instance->sriov_heartbeat_timer);
6892 
6893 	if (instance->fw_crash_state != UNAVAILABLE)
6894 		megasas_free_host_crash_buffer(instance);
6895 	scsi_remove_host(instance->host);
6896 	instance->unload = 1;
6897 
6898 	if (megasas_wait_for_adapter_operational(instance))
6899 		goto skip_firing_dcmds;
6900 
6901 	megasas_flush_cache(instance);
6902 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6903 
6904 skip_firing_dcmds:
6905 	/* cancel the delayed work if it is still queued */
6906 	if (instance->ev != NULL) {
6907 		struct megasas_aen_event *ev = instance->ev;
6908 		cancel_delayed_work_sync(&ev->hotplug_work);
6909 		instance->ev = NULL;
6910 	}
6911 
6912 	/* cancel all wait events */
6913 	wake_up_all(&instance->int_cmd_wait_q);
6914 
6915 	tasklet_kill(&instance->isr_tasklet);
6916 
6917 	/*
6918 	 * Take the instance off the instance array. Note that we will not
6919 	 * decrement the max_index. We let this array be sparse array
6920 	 */
6921 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6922 		if (megasas_mgmt_info.instance[i] == instance) {
6923 			megasas_mgmt_info.count--;
6924 			megasas_mgmt_info.instance[i] = NULL;
6925 
6926 			break;
6927 		}
6928 	}
6929 
6930 	instance->instancet->disable_intr(instance);
6931 
6932 	megasas_destroy_irqs(instance);
6933 
6934 	if (instance->msix_vectors)
6935 		pci_free_irq_vectors(instance->pdev);
6936 
6937 	if (instance->adapter_type == VENTURA_SERIES) {
6938 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6939 			kfree(fusion->stream_detect_by_ld[i]);
6940 		kfree(fusion->stream_detect_by_ld);
6941 		fusion->stream_detect_by_ld = NULL;
6942 	}
6943 
6944 
6945 	if (instance->adapter_type != MFI_SERIES) {
6946 		megasas_release_fusion(instance);
6947 		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6948 			(sizeof(struct MR_PD_CFG_SEQ) *
6949 				(MAX_PHYSICAL_DEVICES - 1));
6950 		for (i = 0; i < 2 ; i++) {
6951 			if (fusion->ld_map[i])
6952 				dma_free_coherent(&instance->pdev->dev,
6953 						  fusion->max_map_sz,
6954 						  fusion->ld_map[i],
6955 						  fusion->ld_map_phys[i]);
6956 			if (fusion->ld_drv_map[i]) {
6957 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
6958 					vfree(fusion->ld_drv_map[i]);
6959 				else
6960 					free_pages((ulong)fusion->ld_drv_map[i],
6961 						   fusion->drv_map_pages);
6962 			}
6963 
6964 			if (fusion->pd_seq_sync[i])
6965 				dma_free_coherent(&instance->pdev->dev,
6966 					pd_seq_map_sz,
6967 					fusion->pd_seq_sync[i],
6968 					fusion->pd_seq_phys[i]);
6969 		}
6970 	} else {
6971 		megasas_release_mfi(instance);
6972 	}
6973 
6974 	if (instance->vf_affiliation)
6975 		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6976 				    sizeof(struct MR_LD_VF_AFFILIATION),
6977 				    instance->vf_affiliation,
6978 				    instance->vf_affiliation_h);
6979 
6980 	if (instance->vf_affiliation_111)
6981 		pci_free_consistent(pdev,
6982 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
6983 				    instance->vf_affiliation_111,
6984 				    instance->vf_affiliation_111_h);
6985 
6986 	if (instance->hb_host_mem)
6987 		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6988 				    instance->hb_host_mem,
6989 				    instance->hb_host_mem_h);
6990 
6991 	megasas_free_ctrl_dma_buffers(instance);
6992 
6993 	megasas_free_ctrl_mem(instance);
6994 
6995 	scsi_host_put(host);
6996 
6997 	pci_disable_device(pdev);
6998 }
6999 
7000 /**
7001  * megasas_shutdown -	Shutdown entry point
7002  * @pdev:		PCI device structure
7003  */
7004 static void megasas_shutdown(struct pci_dev *pdev)
7005 {
7006 	struct megasas_instance *instance = pci_get_drvdata(pdev);
7007 
7008 	instance->unload = 1;
7009 
7010 	if (megasas_wait_for_adapter_operational(instance))
7011 		goto skip_firing_dcmds;
7012 
7013 	megasas_flush_cache(instance);
7014 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7015 
7016 skip_firing_dcmds:
7017 	instance->instancet->disable_intr(instance);
7018 	megasas_destroy_irqs(instance);
7019 
7020 	if (instance->msix_vectors)
7021 		pci_free_irq_vectors(instance->pdev);
7022 }
7023 
7024 /**
7025  * megasas_mgmt_open -	char node "open" entry point
7026  */
7027 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7028 {
7029 	/*
7030 	 * Allow only those users with admin rights
7031 	 */
7032 	if (!capable(CAP_SYS_ADMIN))
7033 		return -EACCES;
7034 
7035 	return 0;
7036 }
7037 
7038 /**
7039  * megasas_mgmt_fasync -	Async notifier registration from applications
7040  *
7041  * This function adds the calling process to a driver global queue. When an
7042  * event occurs, SIGIO will be sent to all processes in this queue.
7043  */
7044 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7045 {
7046 	int rc;
7047 
7048 	mutex_lock(&megasas_async_queue_mutex);
7049 
7050 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7051 
7052 	mutex_unlock(&megasas_async_queue_mutex);
7053 
7054 	if (rc >= 0) {
7055 		/* For a sanity check when we get an ioctl */
7056 		filep->private_data = filep;
7057 		return 0;
7058 	}
7059 
7060 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7061 
7062 	return rc;
7063 }
7064 
7065 /**
7066  * megasas_mgmt_poll -	char node "poll" entry point
7067  */
7068 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7069 {
7070 	__poll_t mask;
7071 	unsigned long flags;
7072 
7073 	poll_wait(file, &megasas_poll_wait, wait);
7074 	spin_lock_irqsave(&poll_aen_lock, flags);
7075 	if (megasas_poll_wait_aen)
7076 		mask = (EPOLLIN | EPOLLRDNORM);
7077 	else
7078 		mask = 0;
7079 	megasas_poll_wait_aen = 0;
7080 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7081 	return mask;
7082 }
7083 
7084 /**
7085  * megasas_set_crash_dump_params_ioctl -	Send CRASH_DUMP_MODE DCMD to all controllers
7086  * @cmd:	MFI command frame
7087  */
7089 
7090 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7091 {
7092 	struct megasas_instance *local_instance;
7093 	int i, error = 0;
7094 	int crash_support;
7095 
7096 	crash_support = cmd->frame->dcmd.mbox.w[0];
7097 
7098 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7099 		local_instance = megasas_mgmt_info.instance[i];
7100 		if (local_instance && local_instance->crash_dump_drv_support) {
7101 			if ((atomic_read(&local_instance->adprecovery) ==
7102 				MEGASAS_HBA_OPERATIONAL) &&
7103 				!megasas_set_crash_dump_params(local_instance,
7104 					crash_support)) {
7105 				local_instance->crash_dump_app_support =
7106 					crash_support;
7107 				dev_info(&local_instance->pdev->dev,
7108 					"Application firmware crash dump mode set successfully\n");
7110 				error = 0;
7111 			} else {
7112 				dev_info(&local_instance->pdev->dev,
7113 					"Application firmware crash dump mode set failed\n");
7115 				error = -1;
7116 			}
7117 		}
7118 	}
7119 	return error;
7120 }
7121 
7122 /**
7123  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
7124  * @instance:			Adapter soft state
7125  * @user_ioc:			User's ioctl packet
 * @ioc:				Kernel copy of the user's ioctl packet
7126  */
7127 static int
7128 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7129 		      struct megasas_iocpacket __user * user_ioc,
7130 		      struct megasas_iocpacket *ioc)
7131 {
7132 	struct megasas_sge64 *kern_sge64 = NULL;
7133 	struct megasas_sge32 *kern_sge32 = NULL;
7134 	struct megasas_cmd *cmd;
7135 	void *kbuff_arr[MAX_IOCTL_SGE];
7136 	dma_addr_t buf_handle = 0;
7137 	int error = 0, i;
7138 	void *sense = NULL;
7139 	dma_addr_t sense_handle;
7140 	unsigned long *sense_ptr;
7141 	u32 opcode = 0;
7142 
7143 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
7144 
7145 	if (ioc->sge_count > MAX_IOCTL_SGE) {
7146 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
7147 		       ioc->sge_count, MAX_IOCTL_SGE);
7148 		return -EINVAL;
7149 	}
7150 
7151 	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7152 	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7153 	    !instance->support_nvme_passthru)) {
7154 		dev_err(&instance->pdev->dev,
7155 			"Received invalid ioctl command 0x%x\n",
7156 			ioc->frame.hdr.cmd);
7157 		return -ENOTSUPP;
7158 	}
7159 
7160 	cmd = megasas_get_cmd(instance);
7161 	if (!cmd) {
7162 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7163 		return -ENOMEM;
7164 	}
7165 
7166 	/*
7167 	 * The user's IOCTL packet has at most 2 frames. Copy those two
7168 	 * frames into our cmd's frames. cmd->frame's context gets
7169 	 * overwritten by the copy from the user's frames, so it is set
7170 	 * again separately below.
7171 	 */
7172 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7173 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7174 	cmd->frame->hdr.pad_0 = 0;
7175 
7176 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7177 
7178 	if (instance->consistent_mask_64bit)
7179 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7180 				       MFI_FRAME_SENSE64));
7181 	else
7182 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
7183 					       MFI_FRAME_SENSE64));
7184 
7185 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7186 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7187 
7188 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7189 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
7190 			megasas_return_cmd(instance, cmd);
7191 			return -1;
7192 		}
7193 	}
7194 
7195 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
7196 		error = megasas_set_crash_dump_params_ioctl(cmd);
7197 		megasas_return_cmd(instance, cmd);
7198 		return error;
7199 	}
7200 
7201 	/*
7202 	 * The management interface between applications and the fw uses
7203 	 * MFI frames. E.g., RAID configuration changes, LD property changes,
7204 	 * etc. are accomplished through different kinds of MFI frames. The
7205 	 * driver only needs to care about substituting user buffers with
7206 	 * kernel buffers in the SGLs. The location of the SGL is embedded in
7207 	 * the struct iocpacket itself.
7208 	 */
7209 	if (instance->consistent_mask_64bit)
7210 		kern_sge64 = (struct megasas_sge64 *)
7211 			((unsigned long)cmd->frame + ioc->sgl_off);
7212 	else
7213 		kern_sge32 = (struct megasas_sge32 *)
7214 			((unsigned long)cmd->frame + ioc->sgl_off);
7215 
7216 	/*
7217 	 * For each user buffer, create a mirror buffer and copy in
7218 	 */
7219 	for (i = 0; i < ioc->sge_count; i++) {
7220 		if (!ioc->sgl[i].iov_len)
7221 			continue;
7222 
7223 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
7224 						    ioc->sgl[i].iov_len,
7225 						    &buf_handle, GFP_KERNEL);
7226 		if (!kbuff_arr[i]) {
7227 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
7228 			       "kernel SGL buffer for IOCTL\n");
7229 			error = -ENOMEM;
7230 			goto out;
7231 		}
7232 
7233 		/*
7234 		 * Fill the SGE with the DMA address of the bounce buffer, using
7235 		 * the 64-bit or 32-bit SGE format per the consistent DMA mask.
7236 		 */
7237 		if (instance->consistent_mask_64bit) {
7238 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
7239 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7240 		} else {
7241 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7242 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7243 		}
7244 
7245 		/*
7246 		 * We created a kernel buffer corresponding to the
7247 		 * user buffer. Now copy in from the user buffer
7248 		 */
7249 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
7250 				   (u32) (ioc->sgl[i].iov_len))) {
7251 			error = -EFAULT;
7252 			goto out;
7253 		}
7254 	}
7255 
7256 	if (ioc->sense_len) {
7257 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7258 					     &sense_handle, GFP_KERNEL);
7259 		if (!sense) {
7260 			error = -ENOMEM;
7261 			goto out;
7262 		}
7263 
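		/*
		 * Patch the DMA address of the kernel sense buffer into the
		 * frame at sense_off so the firmware returns sense data there.
		 */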
7264 		sense_ptr =
7265 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7266 		if (instance->consistent_mask_64bit)
7267 			*sense_ptr = cpu_to_le64(sense_handle);
7268 		else
7269 			*sense_ptr = cpu_to_le32(sense_handle);
7270 	}
7271 
7272 	/*
7273 	 * Set the sync_cmd flag so that the ISR knows not to complete this
7274 	 * cmd to the SCSI mid-layer
7275 	 */
7276 	cmd->sync_cmd = 1;
7277 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
7278 		cmd->sync_cmd = 0;
7279 		dev_err(&instance->pdev->dev,
7280 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
7281 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
7282 			cmd->cmd_status_drv);
7283 		return -EBUSY;
7284 	}
7285 
7286 	cmd->sync_cmd = 0;
7287 
7288 	if (instance->unload == 1) {
7289 		dev_info(&instance->pdev->dev,
7290 			"Driver unload is in progress, not copying data back to application\n");
7291 		goto out;
7292 	}
7293 	/*
7294 	 * copy out the kernel buffers to user buffers
7295 	 */
7296 	for (i = 0; i < ioc->sge_count; i++) {
7297 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7298 				 ioc->sgl[i].iov_len)) {
7299 			error = -EFAULT;
7300 			goto out;
7301 		}
7302 	}
7303 
7304 	/*
7305 	 * copy out the sense
7306 	 */
7307 	if (ioc->sense_len) {
7308 		/*
7309 		 * sense_ptr points to the location that has the user
7310 		 * sense buffer address
7311 		 */
7312 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7313 				ioc->sense_off);
7314 
7315 		if (copy_to_user((void __user *)((unsigned long)
7316 				 get_unaligned((unsigned long *)sense_ptr)),
7317 				 sense, ioc->sense_len)) {
7318 			dev_err(&instance->pdev->dev,
7319 				"Failed to copy out sense data to user\n");
7320 			error = -EFAULT;
7321 			goto out;
7322 		}
7323 	}
7324 
7325 	/*
7326 	 * copy the status codes returned by the fw
7327 	 */
7328 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7329 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7330 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7331 		error = -EFAULT;
7332 	}
7333 
7334 out:
7335 	if (sense) {
7336 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7337 				    sense, sense_handle);
7338 	}
7339 
7340 	for (i = 0; i < ioc->sge_count; i++) {
7341 		if (kbuff_arr[i]) {
7342 			if (instance->consistent_mask_64bit)
7343 				dma_free_coherent(&instance->pdev->dev,
7344 					le32_to_cpu(kern_sge64[i].length),
7345 					kbuff_arr[i],
7346 					le64_to_cpu(kern_sge64[i].phys_addr));
7347 			else
7348 				dma_free_coherent(&instance->pdev->dev,
7349 					le32_to_cpu(kern_sge32[i].length),
7350 					kbuff_arr[i],
7351 					le32_to_cpu(kern_sge32[i].phys_addr));
7352 			kbuff_arr[i] = NULL;
7353 		}
7354 	}
7355 
7356 	megasas_return_cmd(instance, cmd);
7357 	return error;
7358 }
7359 
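/**
 * megasas_mgmt_ioctl_fw -	MEGASAS_IOC_FIRMWARE handler for the char node
 * @file:	File pointer of the management node
 * @arg:	User pointer to a struct megasas_iocpacket
 *
 * Copies the ioctl packet in from user space, looks up the adapter by
 * host_no and forwards the packet to megasas_mgmt_fw_ioctl() under the
 * per-instance ioctl semaphore.
 */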
7360 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7361 {
7362 	struct megasas_iocpacket __user *user_ioc =
7363 	    (struct megasas_iocpacket __user *)arg;
7364 	struct megasas_iocpacket *ioc;
7365 	struct megasas_instance *instance;
7366 	int error;
7367 
7368 	ioc = memdup_user(user_ioc, sizeof(*ioc));
7369 	if (IS_ERR(ioc))
7370 		return PTR_ERR(ioc);
7371 
7372 	instance = megasas_lookup_instance(ioc->host_no);
7373 	if (!instance) {
7374 		error = -ENODEV;
7375 		goto out_kfree_ioc;
7376 	}
7377 
7378 	/* Block ioctls in VF mode */
7379 	if (instance->requestorId && !allow_vf_ioctls) {
7380 		error = -ENODEV;
7381 		goto out_kfree_ioc;
7382 	}
7383 
7384 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7385 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
7386 		error = -ENODEV;
7387 		goto out_kfree_ioc;
7388 	}
7389 
7390 	if (instance->unload == 1) {
7391 		error = -ENODEV;
7392 		goto out_kfree_ioc;
7393 	}
7394 
7395 	if (down_interruptible(&instance->ioctl_sem)) {
7396 		error = -ERESTARTSYS;
7397 		goto out_kfree_ioc;
7398 	}
7399 
7400 	if (megasas_wait_for_adapter_operational(instance)) {
7401 		error = -ENODEV;
7402 		goto out_up;
7403 	}
7404 
7405 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7406 out_up:
7407 	up(&instance->ioctl_sem);
7408 
7409 out_kfree_ioc:
7410 	kfree(ioc);
7411 	return error;
7412 }
7413 
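/**
 * megasas_mgmt_ioctl_aen -	MEGASAS_IOC_GET_AEN handler for the char node
 * @file:	File pointer of the management node (fasync must be set up first)
 * @arg:	User pointer to a struct megasas_aen
 *
 * Registers an AEN with the firmware for the requested sequence number and
 * class/locale word on behalf of the calling application.
 */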
7414 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7415 {
7416 	struct megasas_instance *instance;
7417 	struct megasas_aen aen;
7418 	int error;
7419 
7420 	if (file->private_data != file) {
7421 		printk(KERN_DEBUG "megasas: fasync_helper was not "
7422 		       "called first\n");
7423 		return -EINVAL;
7424 	}
7425 
7426 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7427 		return -EFAULT;
7428 
7429 	instance = megasas_lookup_instance(aen.host_no);
7430 
7431 	if (!instance)
7432 		return -ENODEV;
7433 
7434 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7435 		return -ENODEV;
7436 	}
7437 
7438 	if (instance->unload == 1) {
7439 		return -ENODEV;
7440 	}
7441 
7442 	if (megasas_wait_for_adapter_operational(instance))
7443 		return -ENODEV;
7444 
7445 	mutex_lock(&instance->reset_mutex);
7446 	error = megasas_register_aen(instance, aen.seq_num,
7447 				     aen.class_locale_word);
7448 	mutex_unlock(&instance->reset_mutex);
7449 	return error;
7450 }
7451 
7452 /**
7453  * megasas_mgmt_ioctl -	char node ioctl entry point
7454  */
7455 static long
7456 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7457 {
7458 	switch (cmd) {
7459 	case MEGASAS_IOC_FIRMWARE:
7460 		return megasas_mgmt_ioctl_fw(file, arg);
7461 
7462 	case MEGASAS_IOC_GET_AEN:
7463 		return megasas_mgmt_ioctl_aen(file, arg);
7464 	}
7465 
7466 	return -ENOTTY;
7467 }
7468 
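/*
 * Illustrative sketch (not part of the driver) of how a management
 * application is expected to use this interface.  The device node path is
 * an assumption; the actual name depends on how udev names the char device
 * registered as "megaraid_sas_ioctl".
 *
 *	struct megasas_iocpacket ioc = { 0 };
 *	int fd = open("/dev/megaraid_sas_ioctl", O_RDWR);   // hypothetical path
 *
 *	ioc.host_no = 0;	// SCSI host number of the target adapter
 *	// fill ioc.frame.raw with an MFI frame, and ioc.sgl_off, ioc.sge_count,
 *	// ioc.sgl[] with the user buffers referenced by the frame's SGL
 *	if (fd < 0 || ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc) < 0)
 *		perror("MEGASAS_IOC_FIRMWARE");
 *
 * AEN delivery works via the same node: the application registers with
 * fasync()/poll() and then issues MEGASAS_IOC_GET_AEN to (re)arm event
 * reporting (see megasas_mgmt_ioctl_aen() above).
 */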
7469 #ifdef CONFIG_COMPAT
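/*
 * megasas_mgmt_compat_ioctl_fw -	MEGASAS_IOC_FIRMWARE32 handler
 *
 * Converts a struct compat_megasas_iocpacket from a 32-bit caller into a
 * native struct megasas_iocpacket in compat-allocated user space and
 * forwards it to megasas_mgmt_ioctl_fw().
 */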
7470 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7471 {
7472 	struct compat_megasas_iocpacket __user *cioc =
7473 	    (struct compat_megasas_iocpacket __user *)arg;
7474 	struct megasas_iocpacket __user *ioc =
7475 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7476 	int i;
7477 	int error = 0;
7478 	compat_uptr_t ptr;
7479 	u32 local_sense_off;
7480 	u32 local_sense_len;
7481 	u32 user_sense_off;
7482 
7483 	if (clear_user(ioc, sizeof(*ioc)))
7484 		return -EFAULT;
7485 
7486 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7487 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7488 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7489 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7490 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7491 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7492 		return -EFAULT;
7493 
7494 	/*
7495 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7496 	 * sense_len is non-zero, so prepare the 64-bit value under
7497 	 * the same condition.
7498 	 */
7499 	if (get_user(local_sense_off, &ioc->sense_off) ||
7500 		get_user(local_sense_len, &ioc->sense_len) ||
7501 		get_user(user_sense_off, &cioc->sense_off))
7502 		return -EFAULT;
7503 
7504 	if (local_sense_len) {
7505 		void __user **sense_ioc_ptr =
7506 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7507 		compat_uptr_t *sense_cioc_ptr =
7508 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7509 		if (get_user(ptr, sense_cioc_ptr) ||
7510 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
7511 			return -EFAULT;
7512 	}
7513 
7514 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
7515 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7516 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7517 		    copy_in_user(&ioc->sgl[i].iov_len,
7518 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7519 			return -EFAULT;
7520 	}
7521 
7522 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7523 
7524 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
7525 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7526 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7527 		return -EFAULT;
7528 	}
7529 	return error;
7530 }
7531 
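/*
 * megasas_mgmt_compat_ioctl -	char node compat ioctl entry point for
 *				32-bit applications
 */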
7532 static long
7533 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7534 			  unsigned long arg)
7535 {
7536 	switch (cmd) {
7537 	case MEGASAS_IOC_FIRMWARE32:
7538 		return megasas_mgmt_compat_ioctl_fw(file, arg);
7539 	case MEGASAS_IOC_GET_AEN:
7540 		return megasas_mgmt_ioctl_aen(file, arg);
7541 	}
7542 
7543 	return -ENOTTY;
7544 }
7545 #endif
7546 
7547 /*
7548  * File operations structure for management interface
7549  */
7550 static const struct file_operations megasas_mgmt_fops = {
7551 	.owner = THIS_MODULE,
7552 	.open = megasas_mgmt_open,
7553 	.fasync = megasas_mgmt_fasync,
7554 	.unlocked_ioctl = megasas_mgmt_ioctl,
7555 	.poll = megasas_mgmt_poll,
7556 #ifdef CONFIG_COMPAT
7557 	.compat_ioctl = megasas_mgmt_compat_ioctl,
7558 #endif
7559 	.llseek = noop_llseek,
7560 };
7561 
7562 /*
7563  * PCI hotplug support registration structure
7564  */
7565 static struct pci_driver megasas_pci_driver = {
7566 
7567 	.name = "megaraid_sas",
7568 	.id_table = megasas_pci_table,
7569 	.probe = megasas_probe_one,
7570 	.remove = megasas_detach_one,
7571 	.suspend = megasas_suspend,
7572 	.resume = megasas_resume,
7573 	.shutdown = megasas_shutdown,
7574 };
7575 
7576 /*
7577  * Sysfs driver attributes
7578  */
7579 static ssize_t version_show(struct device_driver *dd, char *buf)
7580 {
7581 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7582 			MEGASAS_VERSION);
7583 }
7584 static DRIVER_ATTR_RO(version);
7585 
7586 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7587 {
7588 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7589 		MEGASAS_RELDATE);
7590 }
7591 static DRIVER_ATTR_RO(release_date);
7592 
7593 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7594 {
7595 	return sprintf(buf, "%u\n", support_poll_for_event);
7596 }
7597 static DRIVER_ATTR_RO(support_poll_for_event);
7598 
7599 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7600 {
7601 	return sprintf(buf, "%u\n", support_device_change);
7602 }
7603 static DRIVER_ATTR_RO(support_device_change);
7604 
7605 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7606 {
7607 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
7608 }
7609 
7610 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7611 			     size_t count)
7612 {
7613 	int retval = count;
7614 
7615 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7616 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7617 		retval = -EINVAL;
7618 	}
7619 	return retval;
7620 }
7621 static DRIVER_ATTR_RW(dbg_lvl);
7622 
7623 static ssize_t
7624 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
7625 {
7626 	return sprintf(buf, "%u\n", support_nvme_encapsulation);
7627 }
7628 
7629 static DRIVER_ATTR_RO(support_nvme_encapsulation);
7630 
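/*
 * megasas_remove_scsi_device -	Remove an sdev and drop the reference taken
 *				by the scsi_device_lookup() in the caller
 */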
7631 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7632 {
7633 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7634 	scsi_remove_device(sdev);
7635 	scsi_device_put(sdev);
7636 }
7637 
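/**
 * megasas_aen_polling -	Deferred AEN (hotplug) event handling
 * @work:	Embedded delayed work, scheduled from the AEN completion path
 *
 * Decodes the latest firmware event, refreshes the PD/LD lists as needed,
 * adds or removes SCSI devices on the PD and VD channels accordingly, and
 * re-registers an AEN with the firmware for the next sequence number.
 */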
7638 static void
7639 megasas_aen_polling(struct work_struct *work)
7640 {
7641 	struct megasas_aen_event *ev =
7642 		container_of(work, struct megasas_aen_event, hotplug_work.work);
7643 	struct megasas_instance *instance = ev->instance;
7644 	union megasas_evt_class_locale class_locale;
7645 	struct  Scsi_Host *host;
7646 	struct  scsi_device *sdev1;
7647 	u16     pd_index = 0;
7648 	u16	ld_index = 0;
7649 	int     i, j, doscan = 0;
7650 	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7651 	int error;
7652 	u8  dcmd_ret = DCMD_SUCCESS;
7653 
7654 	if (!instance) {
7655 		printk(KERN_ERR "invalid instance!\n");
7656 		kfree(ev);
7657 		return;
7658 	}
7659 
7660 	/* Adjust event workqueue thread wait time for VF mode */
7661 	if (instance->requestorId)
7662 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7663 
7664 	/* Don't run the event workqueue thread if OCR is running */
7665 	mutex_lock(&instance->reset_mutex);
7666 
7667 	instance->ev = NULL;
7668 	host = instance->host;
7669 	if (instance->evt_detail) {
7670 		megasas_decode_evt(instance);
7671 
7672 		switch (le32_to_cpu(instance->evt_detail->code)) {
7673 
7674 		case MR_EVT_PD_INSERTED:
7675 		case MR_EVT_PD_REMOVED:
7676 			dcmd_ret = megasas_get_pd_list(instance);
7677 			if (dcmd_ret == DCMD_SUCCESS)
7678 				doscan = SCAN_PD_CHANNEL;
7679 			break;
7680 
7681 		case MR_EVT_LD_OFFLINE:
7682 		case MR_EVT_CFG_CLEARED:
7683 		case MR_EVT_LD_DELETED:
7684 		case MR_EVT_LD_CREATED:
7685 			if (!instance->requestorId ||
7686 			    megasas_get_ld_vf_affiliation(instance, 0))
7687 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7688 
7689 			if (dcmd_ret == DCMD_SUCCESS)
7690 				doscan = SCAN_VD_CHANNEL;
7691 
7692 			break;
7693 
7694 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7695 		case MR_EVT_FOREIGN_CFG_IMPORTED:
7696 		case MR_EVT_LD_STATE_CHANGE:
7697 			dcmd_ret = megasas_get_pd_list(instance);
7698 
7699 			if (dcmd_ret != DCMD_SUCCESS)
7700 				break;
7701 
7702 			if (!instance->requestorId ||
7703 			    megasas_get_ld_vf_affiliation(instance, 0))
7704 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7705 
7706 			if (dcmd_ret != DCMD_SUCCESS)
7707 				break;
7708 
7709 			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7710 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7711 				instance->host->host_no);
7712 			break;
7713 
7714 		case MR_EVT_CTRL_PROP_CHANGED:
7715 			dcmd_ret = megasas_get_ctrl_info(instance);
7716 			break;
7717 		default:
7718 			doscan = 0;
7719 			break;
7720 		}
7721 	} else {
7722 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7723 		mutex_unlock(&instance->reset_mutex);
7724 		kfree(ev);
7725 		return;
7726 	}
7727 
7728 	mutex_unlock(&instance->reset_mutex);
7729 
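	/*
	 * Bring the SCSI view of the PD channels in sync with the firmware's
	 * PD list: add system PDs that are missing, remove PDs that are gone.
	 */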
7730 	if (doscan & SCAN_PD_CHANNEL) {
7731 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7732 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7733 				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7734 				sdev1 = scsi_device_lookup(host, i, j, 0);
7735 				if (instance->pd_list[pd_index].driveState ==
7736 							MR_PD_STATE_SYSTEM) {
7737 					if (!sdev1)
7738 						scsi_add_device(host, i, j, 0);
7739 					else
7740 						scsi_device_put(sdev1);
7741 				} else {
7742 					if (sdev1)
7743 						megasas_remove_scsi_device(sdev1);
7744 				}
7745 			}
7746 		}
7747 	}
7748 
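	/* Likewise for logical drives on the VD channels (ld_ids 0xff means no LD) */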
7749 	if (doscan & SCAN_VD_CHANNEL) {
7750 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7751 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7752 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7753 				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7754 				if (instance->ld_ids[ld_index] != 0xff) {
7755 					if (!sdev1)
7756 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7757 					else
7758 						scsi_device_put(sdev1);
7759 				} else {
7760 					if (sdev1)
7761 						megasas_remove_scsi_device(sdev1);
7762 				}
7763 			}
7764 		}
7765 	}
7766 
7767 	if (dcmd_ret == DCMD_SUCCESS)
7768 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7769 	else
7770 		seq_num = instance->last_seq_num;
7771 
7772 	/* Register AEN with FW for latest sequence number plus 1 */
7773 	class_locale.members.reserved = 0;
7774 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
7775 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
7776 
7777 	if (instance->aen_cmd != NULL) {
7778 		kfree(ev);
7779 		return;
7780 	}
7781 
7782 	mutex_lock(&instance->reset_mutex);
7783 	error = megasas_register_aen(instance, seq_num,
7784 					class_locale.word);
7785 	if (error)
7786 		dev_err(&instance->pdev->dev,
7787 			"register aen failed error %x\n", error);
7788 
7789 	mutex_unlock(&instance->reset_mutex);
7790 	kfree(ev);
7791 }
7792 
7793 /**
7794  * megasas_init - Driver load entry point
7795  */
7796 static int __init megasas_init(void)
7797 {
7798 	int rval;
7799 
7800 	/*
7801 	 * When booted in a kdump kernel, minimize the memory footprint by
7802 	 * disabling a few features.
7803 	 */
7804 	if (reset_devices) {
7805 		msix_vectors = 1;
7806 		rdpq_enable = 0;
7807 		dual_qdepth_disable = 1;
7808 	}
7809 
7810 	/*
7811 	 * Announce driver version and other information
7812 	 */
7813 	pr_info("megasas: %s\n", MEGASAS_VERSION);
7814 
7815 	spin_lock_init(&poll_aen_lock);
7816 
7817 	support_poll_for_event = 2;
7818 	support_device_change = 1;
7819 	support_nvme_encapsulation = true;
7820 
7821 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7822 
7823 	/*
7824 	 * Register character device node
7825 	 */
7826 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7827 
7828 	if (rval < 0) {
7829 		printk(KERN_DEBUG "megasas: failed to register management char device\n");
7830 		return rval;
7831 	}
7832 
7833 	megasas_mgmt_majorno = rval;
7834 
7835 	/*
7836 	 * Register ourselves as PCI hotplug module
7837 	 */
7838 	rval = pci_register_driver(&megasas_pci_driver);
7839 
7840 	if (rval) {
7841 		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7842 		goto err_pcidrv;
7843 	}
7844 
7845 	rval = driver_create_file(&megasas_pci_driver.driver,
7846 				  &driver_attr_version);
7847 	if (rval)
7848 		goto err_dcf_attr_ver;
7849 
7850 	rval = driver_create_file(&megasas_pci_driver.driver,
7851 				  &driver_attr_release_date);
7852 	if (rval)
7853 		goto err_dcf_rel_date;
7854 
7855 	rval = driver_create_file(&megasas_pci_driver.driver,
7856 				&driver_attr_support_poll_for_event);
7857 	if (rval)
7858 		goto err_dcf_support_poll_for_event;
7859 
7860 	rval = driver_create_file(&megasas_pci_driver.driver,
7861 				  &driver_attr_dbg_lvl);
7862 	if (rval)
7863 		goto err_dcf_dbg_lvl;
7864 	rval = driver_create_file(&megasas_pci_driver.driver,
7865 				&driver_attr_support_device_change);
7866 	if (rval)
7867 		goto err_dcf_support_device_change;
7868 
7869 	rval = driver_create_file(&megasas_pci_driver.driver,
7870 				  &driver_attr_support_nvme_encapsulation);
7871 	if (rval)
7872 		goto err_dcf_support_nvme_encapsulation;
7873 
7874 	return rval;
7875 
7876 err_dcf_support_nvme_encapsulation:
7877 	driver_remove_file(&megasas_pci_driver.driver,
7878 			   &driver_attr_support_device_change);
7879 
7880 err_dcf_support_device_change:
7881 	driver_remove_file(&megasas_pci_driver.driver,
7882 			   &driver_attr_dbg_lvl);
7883 err_dcf_dbg_lvl:
7884 	driver_remove_file(&megasas_pci_driver.driver,
7885 			&driver_attr_support_poll_for_event);
7886 err_dcf_support_poll_for_event:
7887 	driver_remove_file(&megasas_pci_driver.driver,
7888 			   &driver_attr_release_date);
7889 err_dcf_rel_date:
7890 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7891 err_dcf_attr_ver:
7892 	pci_unregister_driver(&megasas_pci_driver);
7893 err_pcidrv:
7894 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7895 	return rval;
7896 }
7897 
7898 /**
7899  * megasas_exit - Driver unload entry point
7900  */
7901 static void __exit megasas_exit(void)
7902 {
7903 	driver_remove_file(&megasas_pci_driver.driver,
7904 			   &driver_attr_dbg_lvl);
7905 	driver_remove_file(&megasas_pci_driver.driver,
7906 			&driver_attr_support_poll_for_event);
7907 	driver_remove_file(&megasas_pci_driver.driver,
7908 			&driver_attr_support_device_change);
7909 	driver_remove_file(&megasas_pci_driver.driver,
7910 			   &driver_attr_release_date);
7911 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7912 	driver_remove_file(&megasas_pci_driver.driver,
7913 			   &driver_attr_support_nvme_encapsulation);
7914 
7915 	pci_unregister_driver(&megasas_pci_driver);
7916 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7917 }
7918 
7919 module_init(megasas_init);
7920 module_exit(megasas_exit);
7921