1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2003-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  Authors: Avago Technologies
21  *           Sreenivas Bagalkote
22  *           Sumant Patro
23  *           Bo Yang
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52 #include <linux/vmalloc.h>
53 
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_tcq.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61 
62 /*
63  * Number of sectors per IO command
64  * Will be set in megasas_init_mfi if user does not provide
65  */
66 static unsigned int max_sectors;
67 module_param_named(max_sectors, max_sectors, int, 0);
68 MODULE_PARM_DESC(max_sectors,
69 	"Maximum number of sectors per IO command");
70 
71 static int msix_disable;
72 module_param(msix_disable, int, S_IRUGO);
73 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
74 
75 static unsigned int msix_vectors;
76 module_param(msix_vectors, int, S_IRUGO);
77 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
78 
79 static int allow_vf_ioctls;
80 module_param(allow_vf_ioctls, int, S_IRUGO);
81 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
82 
83 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
84 module_param(throttlequeuedepth, int, S_IRUGO);
85 MODULE_PARM_DESC(throttlequeuedepth,
86 	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
87 
88 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
89 module_param(resetwaittime, int, S_IRUGO);
90 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
91 		 "before resetting adapter. Default: 180");
92 
93 int smp_affinity_enable = 1;
94 module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
96 
97 int rdpq_enable = 1;
98 module_param(rdpq_enable, int, S_IRUGO);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
100 
101 unsigned int dual_qdepth_disable;
102 module_param(dual_qdepth_disable, int, S_IRUGO);
103 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
104 
105 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
106 module_param(scmd_timeout, int, S_IRUGO);
107 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
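
/*
 * Example (hypothetical values): the parameters above can be supplied at
 * load time, e.g. "modprobe megaraid_sas msix_disable=1 scmd_timeout=60",
 * or on the kernel command line as "megaraid_sas.resetwaittime=120" when
 * the driver is built in.
 */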
108 
109 MODULE_LICENSE("GPL");
110 MODULE_VERSION(MEGASAS_VERSION);
111 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
112 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
113 
114 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
115 static int megasas_get_pd_list(struct megasas_instance *instance);
116 static int megasas_ld_list_query(struct megasas_instance *instance,
117 				 u8 query_type);
118 static int megasas_issue_init_mfi(struct megasas_instance *instance);
119 static int megasas_register_aen(struct megasas_instance *instance,
120 				u32 seq_num, u32 class_locale_word);
121 static void megasas_get_pd_info(struct megasas_instance *instance,
122 				struct scsi_device *sdev);
123 static int megasas_get_target_prop(struct megasas_instance *instance,
124 				   struct scsi_device *sdev);
125 /*
126  * PCI ID table for all supported controllers
127  */
128 static struct pci_device_id megasas_pci_table[] = {
129 
130 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
131 	/* xscale IOP */
132 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
133 	/* ppc IOP */
134 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
135 	/* ppc IOP */
136 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
137 	/* gen2*/
138 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
139 	/* gen2*/
140 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
141 	/* skinny*/
142 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
143 	/* skinny*/
144 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
145 	/* xscale IOP, vega */
146 	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
147 	/* xscale IOP */
148 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
149 	/* Fusion */
150 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
151 	/* Plasma */
152 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
153 	/* Invader */
154 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
155 	/* Fury */
156 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
157 	/* Intruder */
158 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
159 	/* Intruder 24 port*/
160 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
161 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
162 	/* VENTURA */
163 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
164 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
165 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
166 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
167 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
168 	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
169 	{}
170 };
171 
172 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
173 
174 static int megasas_mgmt_majorno;
175 struct megasas_mgmt_info megasas_mgmt_info;
176 static struct fasync_struct *megasas_async_queue;
177 static DEFINE_MUTEX(megasas_async_queue_mutex);
178 
179 static int megasas_poll_wait_aen;
180 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
181 static u32 support_poll_for_event;
182 u32 megasas_dbg_lvl;
183 static u32 support_device_change;
184 
185 /* define lock for aen poll */
186 spinlock_t poll_aen_lock;
187 
188 void
189 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
190 		     u8 alt_status);
191 static u32
192 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
193 static int
194 megasas_adp_reset_gen2(struct megasas_instance *instance,
195 		       struct megasas_register_set __iomem *reg_set);
196 static irqreturn_t megasas_isr(int irq, void *devp);
197 static u32
198 megasas_init_adapter_mfi(struct megasas_instance *instance);
199 u32
200 megasas_build_and_issue_cmd(struct megasas_instance *instance,
201 			    struct scsi_cmnd *scmd);
202 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
203 int
204 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
205 	int seconds);
206 void megasas_fusion_ocr_wq(struct work_struct *work);
207 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
208 					 int initial);
209 static int
210 megasas_set_dma_mask(struct megasas_instance *instance);
211 static int
212 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
213 static inline void
214 megasas_free_ctrl_mem(struct megasas_instance *instance);
215 static inline int
216 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
217 static inline void
218 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
219 static inline void
220 megasas_init_ctrl_params(struct megasas_instance *instance);
221 
222 /**
223  * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
224  * @instance:			Adapter soft state
225  * @dcmd:			DCMD frame inside MFI command
226  * @dma_addr:			DMA address of buffer to be passed to FW
227  * @dma_len:			Length of DMA buffer to be passed to FW
228  * @return:			void
229  */
230 void megasas_set_dma_settings(struct megasas_instance *instance,
231 			      struct megasas_dcmd_frame *dcmd,
232 			      dma_addr_t dma_addr, u32 dma_len)
233 {
234 	if (instance->consistent_mask_64bit) {
235 		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
236 		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
237 		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
238 
239 	} else {
240 		dcmd->sgl.sge32[0].phys_addr =
241 				cpu_to_le32(lower_32_bits(dma_addr));
242 		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
243 		dcmd->flags = cpu_to_le16(dcmd->flags);
244 	}
245 }
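
/*
 * A minimal usage sketch for megasas_set_dma_settings() above (the buffer
 * handle ctrl_info_h and its length are hypothetical): callers fill the
 * DCMD header first, then let the helper populate the single SGE and the
 * SGL64 flag.
 *
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->sge_count = 1;
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
 *	megasas_set_dma_settings(instance, dcmd, ctrl_info_h,
 *				 sizeof(struct megasas_ctrl_info));
 */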
246 
247 void
248 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
249 {
250 	instance->instancet->fire_cmd(instance,
251 		cmd->frame_phys_addr, 0, instance->reg_set);
252 	return;
253 }
254 
255 /**
256  * megasas_get_cmd -	Get a command from the free pool
257  * @instance:		Adapter soft state
258  *
259  * Returns a free command from the pool
260  */
261 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
262 						  *instance)
263 {
264 	unsigned long flags;
265 	struct megasas_cmd *cmd = NULL;
266 
267 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
268 
269 	if (!list_empty(&instance->cmd_pool)) {
270 		cmd = list_entry((&instance->cmd_pool)->next,
271 				 struct megasas_cmd, list);
272 		list_del_init(&cmd->list);
273 	} else {
274 		dev_err(&instance->pdev->dev, "Command pool empty!\n");
275 	}
276 
277 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
278 	return cmd;
279 }
280 
281 /**
282  * megasas_return_cmd -	Return a cmd to free command pool
283  * @instance:		Adapter soft state
284  * @cmd:		Command packet to be returned to free command pool
285  */
286 void
287 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
288 {
289 	unsigned long flags;
290 	u32 blk_tags;
291 	struct megasas_cmd_fusion *cmd_fusion;
292 	struct fusion_context *fusion = instance->ctrl_context;
293 
294 	/* This flag is used only for fusion adapter.
295 	 * Wait for Interrupt for Polled mode DCMD
296 	 */
297 	if (cmd->flags & DRV_DCMD_POLLED_MODE)
298 		return;
299 
300 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
301 
302 	if (fusion) {
303 		blk_tags = instance->max_scsi_cmds + cmd->index;
304 		cmd_fusion = fusion->cmd_list[blk_tags];
305 		megasas_return_cmd_fusion(instance, cmd_fusion);
306 	}
307 	cmd->scmd = NULL;
308 	cmd->frame_count = 0;
309 	cmd->flags = 0;
310 	memset(cmd->frame, 0, instance->mfi_frame_size);
311 	cmd->frame->io.context = cpu_to_le32(cmd->index);
312 	if (!fusion && reset_devices)
313 		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
314 	list_add(&cmd->list, (&instance->cmd_pool)->next);
315 
316 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
317 
318 }
319 
320 static const char *
321 format_timestamp(uint32_t timestamp)
322 {
323 	static char buffer[32];
324 
325 	if ((timestamp & 0xff000000) == 0xff000000)
326 		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
327 		0x00ffffff);
328 	else
329 		snprintf(buffer, sizeof(buffer), "%us", timestamp);
330 	return buffer;
331 }
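
/*
 * Worked examples for format_timestamp() above: a timestamp of 0xff00003c
 * (top byte 0xff) is relative to boot and prints as "boot + 60s", while a
 * plain value such as 1000 prints as "1000s".
 */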
332 
333 static const char *
334 format_class(int8_t class)
335 {
336 	static char buffer[6];
337 
338 	switch (class) {
339 	case MFI_EVT_CLASS_DEBUG:
340 		return "debug";
341 	case MFI_EVT_CLASS_PROGRESS:
342 		return "progress";
343 	case MFI_EVT_CLASS_INFO:
344 		return "info";
345 	case MFI_EVT_CLASS_WARNING:
346 		return "WARN";
347 	case MFI_EVT_CLASS_CRITICAL:
348 		return "CRIT";
349 	case MFI_EVT_CLASS_FATAL:
350 		return "FATAL";
351 	case MFI_EVT_CLASS_DEAD:
352 		return "DEAD";
353 	default:
354 		snprintf(buffer, sizeof(buffer), "%d", class);
355 		return buffer;
356 	}
357 }
358 
/**
 * megasas_decode_evt -	Decode FW AEN event and print critical event
 *			for information.
 * @instance:		Adapter soft state
 */
364 static void
365 megasas_decode_evt(struct megasas_instance *instance)
366 {
367 	struct megasas_evt_detail *evt_detail = instance->evt_detail;
368 	union megasas_evt_class_locale class_locale;
369 	class_locale.word = le32_to_cpu(evt_detail->cl.word);
370 
371 	if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
372 		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
373 			le32_to_cpu(evt_detail->seq_num),
374 			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
375 			(class_locale.members.locale),
376 			format_class(class_locale.members.class),
377 			evt_detail->description);
378 }
379 
/*
 *	The following functions are defined for xscale
 *	(deviceid : 1064R, PERC5) controllers
 */
384 
/**
 * megasas_enable_intr_xscale -	Enables interrupts
 * @instance:			Adapter soft state
 */
389 static inline void
390 megasas_enable_intr_xscale(struct megasas_instance *instance)
391 {
392 	struct megasas_register_set __iomem *regs;
393 
394 	regs = instance->reg_set;
395 	writel(0, &(regs)->outbound_intr_mask);
396 
397 	/* Dummy readl to force pci flush */
398 	readl(&regs->outbound_intr_mask);
399 }
400 
/**
 * megasas_disable_intr_xscale -	Disables interrupts
 * @instance:			Adapter soft state
 */
405 static inline void
406 megasas_disable_intr_xscale(struct megasas_instance *instance)
407 {
408 	struct megasas_register_set __iomem *regs;
409 	u32 mask = 0x1f;
410 
411 	regs = instance->reg_set;
412 	writel(mask, &regs->outbound_intr_mask);
413 	/* Dummy readl to force pci flush */
414 	readl(&regs->outbound_intr_mask);
415 }
416 
417 /**
418  * megasas_read_fw_status_reg_xscale - returns the current FW status value
419  * @regs:			MFI register set
420  */
421 static u32
422 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
423 {
424 	return readl(&(regs)->outbound_msg_0);
425 }

/**
 * megasas_clear_intr_xscale -	Check & clear interrupt
 * @regs:				MFI register set
 */
430 static int
431 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
432 {
433 	u32 status;
434 	u32 mfiStatus = 0;
435 
436 	/*
437 	 * Check if it is our interrupt
438 	 */
439 	status = readl(&regs->outbound_intr_status);
440 
441 	if (status & MFI_OB_INTR_STATUS_MASK)
442 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
443 	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
444 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
445 
446 	/*
447 	 * Clear the interrupt by writing back the same value
448 	 */
449 	if (mfiStatus)
450 		writel(status, &regs->outbound_intr_status);
451 
452 	/* Dummy readl to force pci flush */
453 	readl(&regs->outbound_intr_status);
454 
455 	return mfiStatus;
456 }
457 
/**
 * megasas_fire_cmd_xscale -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
464 static inline void
465 megasas_fire_cmd_xscale(struct megasas_instance *instance,
466 		dma_addr_t frame_phys_addr,
467 		u32 frame_count,
468 		struct megasas_register_set __iomem *regs)
469 {
470 	unsigned long flags;
471 
472 	spin_lock_irqsave(&instance->hba_lock, flags);
473 	writel((frame_phys_addr >> 3)|(frame_count),
474 	       &(regs)->inbound_queue_port);
475 	spin_unlock_irqrestore(&instance->hba_lock, flags);
476 }
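
/*
 * Worked example for megasas_fire_cmd_xscale() above (hypothetical
 * address): a 64-byte aligned frame at physical address 0x1000 with a
 * frame_count of 2 results in writel(0x202, &regs->inbound_queue_port),
 * i.e. (0x1000 >> 3) | 2.
 */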
477 
/**
 * megasas_adp_reset_xscale -	For controller reset
 * @instance:			Adapter soft state
 * @regs:			MFI register set
 */
482 static int
483 megasas_adp_reset_xscale(struct megasas_instance *instance,
484 	struct megasas_register_set __iomem *regs)
485 {
486 	u32 i;
487 	u32 pcidata;
488 
489 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
490 
491 	for (i = 0; i < 3; i++)
492 		msleep(1000); /* sleep for 3 secs */
493 	pcidata  = 0;
494 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
495 	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
496 	if (pcidata & 0x2) {
497 		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
498 		pcidata &= ~0x2;
499 		pci_write_config_dword(instance->pdev,
500 				MFI_1068_PCSR_OFFSET, pcidata);
501 
502 		for (i = 0; i < 2; i++)
503 			msleep(1000); /* need to wait 2 secs again */
504 
505 		pcidata  = 0;
506 		pci_read_config_dword(instance->pdev,
507 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
508 		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
509 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
510 			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
511 			pcidata = 0;
512 			pci_write_config_dword(instance->pdev,
513 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
514 		}
515 	}
516 	return 0;
517 }
518 
/**
 * megasas_check_reset_xscale -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
 */
523 static int
524 megasas_check_reset_xscale(struct megasas_instance *instance,
525 		struct megasas_register_set __iomem *regs)
526 {
527 	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
528 	    (le32_to_cpu(*instance->consumer) ==
529 		MEGASAS_ADPRESET_INPROG_SIGN))
530 		return 1;
531 	return 0;
532 }
533 
534 static struct megasas_instance_template megasas_instance_template_xscale = {
535 
536 	.fire_cmd = megasas_fire_cmd_xscale,
537 	.enable_intr = megasas_enable_intr_xscale,
538 	.disable_intr = megasas_disable_intr_xscale,
539 	.clear_intr = megasas_clear_intr_xscale,
540 	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
541 	.adp_reset = megasas_adp_reset_xscale,
542 	.check_reset = megasas_check_reset_xscale,
543 	.service_isr = megasas_isr,
544 	.tasklet = megasas_complete_cmd_dpc,
545 	.init_adapter = megasas_init_adapter_mfi,
546 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
547 	.issue_dcmd = megasas_issue_dcmd,
548 };
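
/*
 * The per-controller templates above are dispatched through
 * instance->instancet, so the common code stays controller agnostic.
 * A minimal sketch (assuming instance->instancet was already selected
 * during probe):
 *
 *	instance->instancet->enable_intr(instance);
 *	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
 *				      cmd->frame_count - 1, instance->reg_set);
 */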
549 
/*
 *	This is the end of set of functions & definitions specific
 *	to xscale (deviceid : 1064R, PERC5) controllers
 */
554 
/*
 *	The following functions are defined for ppc (deviceid : 0x60)
 *	controllers
 */
559 
/**
 * megasas_enable_intr_ppc -	Enables interrupts
 * @instance:			Adapter soft state
 */
564 static inline void
565 megasas_enable_intr_ppc(struct megasas_instance *instance)
566 {
567 	struct megasas_register_set __iomem *regs;
568 
569 	regs = instance->reg_set;
570 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
571 
572 	writel(~0x80000000, &(regs)->outbound_intr_mask);
573 
574 	/* Dummy readl to force pci flush */
575 	readl(&regs->outbound_intr_mask);
576 }
577 
/**
 * megasas_disable_intr_ppc -	Disables interrupts
 * @instance:			Adapter soft state
 */
582 static inline void
583 megasas_disable_intr_ppc(struct megasas_instance *instance)
584 {
585 	struct megasas_register_set __iomem *regs;
586 	u32 mask = 0xFFFFFFFF;
587 
588 	regs = instance->reg_set;
589 	writel(mask, &regs->outbound_intr_mask);
590 	/* Dummy readl to force pci flush */
591 	readl(&regs->outbound_intr_mask);
592 }
593 
594 /**
595  * megasas_read_fw_status_reg_ppc - returns the current FW status value
596  * @regs:			MFI register set
597  */
598 static u32
599 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
600 {
601 	return readl(&(regs)->outbound_scratch_pad);
602 }
603 
/**
 * megasas_clear_intr_ppc -	Check & clear interrupt
 * @regs:				MFI register set
 */
608 static int
609 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
610 {
611 	u32 status, mfiStatus = 0;
612 
613 	/*
614 	 * Check if it is our interrupt
615 	 */
616 	status = readl(&regs->outbound_intr_status);
617 
618 	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
619 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
620 
621 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
622 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
623 
624 	/*
625 	 * Clear the interrupt by writing back the same value
626 	 */
627 	writel(status, &regs->outbound_doorbell_clear);
628 
629 	/* Dummy readl to force pci flush */
630 	readl(&regs->outbound_doorbell_clear);
631 
632 	return mfiStatus;
633 }
634 
/**
 * megasas_fire_cmd_ppc -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
641 static inline void
642 megasas_fire_cmd_ppc(struct megasas_instance *instance,
643 		dma_addr_t frame_phys_addr,
644 		u32 frame_count,
645 		struct megasas_register_set __iomem *regs)
646 {
647 	unsigned long flags;
648 
649 	spin_lock_irqsave(&instance->hba_lock, flags);
650 	writel((frame_phys_addr | (frame_count<<1))|1,
651 			&(regs)->inbound_queue_port);
652 	spin_unlock_irqrestore(&instance->hba_lock, flags);
653 }
654 
/**
 * megasas_check_reset_ppc -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
 */
659 static int
660 megasas_check_reset_ppc(struct megasas_instance *instance,
661 			struct megasas_register_set __iomem *regs)
662 {
663 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
664 		return 1;
665 
666 	return 0;
667 }
668 
669 static struct megasas_instance_template megasas_instance_template_ppc = {
670 
671 	.fire_cmd = megasas_fire_cmd_ppc,
672 	.enable_intr = megasas_enable_intr_ppc,
673 	.disable_intr = megasas_disable_intr_ppc,
674 	.clear_intr = megasas_clear_intr_ppc,
675 	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
676 	.adp_reset = megasas_adp_reset_xscale,
677 	.check_reset = megasas_check_reset_ppc,
678 	.service_isr = megasas_isr,
679 	.tasklet = megasas_complete_cmd_dpc,
680 	.init_adapter = megasas_init_adapter_mfi,
681 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
682 	.issue_dcmd = megasas_issue_dcmd,
683 };
684 
/**
 * megasas_enable_intr_skinny -	Enables interrupts
 * @instance:			Adapter soft state
 */
689 static inline void
690 megasas_enable_intr_skinny(struct megasas_instance *instance)
691 {
692 	struct megasas_register_set __iomem *regs;
693 
694 	regs = instance->reg_set;
695 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
696 
697 	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
698 
699 	/* Dummy readl to force pci flush */
700 	readl(&regs->outbound_intr_mask);
701 }
702 
/**
 * megasas_disable_intr_skinny -	Disables interrupts
 * @instance:			Adapter soft state
 */
707 static inline void
708 megasas_disable_intr_skinny(struct megasas_instance *instance)
709 {
710 	struct megasas_register_set __iomem *regs;
711 	u32 mask = 0xFFFFFFFF;
712 
713 	regs = instance->reg_set;
714 	writel(mask, &regs->outbound_intr_mask);
715 	/* Dummy readl to force pci flush */
716 	readl(&regs->outbound_intr_mask);
717 }
718 
719 /**
720  * megasas_read_fw_status_reg_skinny - returns the current FW status value
721  * @regs:			MFI register set
722  */
723 static u32
724 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
725 {
726 	return readl(&(regs)->outbound_scratch_pad);
727 }
728 
/**
 * megasas_clear_intr_skinny -	Check & clear interrupt
 * @regs:				MFI register set
 */
733 static int
734 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
735 {
736 	u32 status;
737 	u32 mfiStatus = 0;
738 
739 	/*
740 	 * Check if it is our interrupt
741 	 */
742 	status = readl(&regs->outbound_intr_status);
743 
744 	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
745 		return 0;
746 	}
747 
	/*
	 * Check if the firmware is in a fault state
	 */
751 	if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
752 	    MFI_STATE_FAULT) {
753 		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
754 	} else
755 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
756 
757 	/*
758 	 * Clear the interrupt by writing back the same value
759 	 */
760 	writel(status, &regs->outbound_intr_status);
761 
762 	/*
763 	 * dummy read to flush PCI
764 	 */
765 	readl(&regs->outbound_intr_status);
766 
767 	return mfiStatus;
768 }
769 
/**
 * megasas_fire_cmd_skinny -	Sends command to the FW
 * @instance:			Adapter soft state
 * @frame_phys_addr :		Physical address of cmd
 * @frame_count :		Number of frames for the command
 * @regs :			MFI register set
 */
776 static inline void
777 megasas_fire_cmd_skinny(struct megasas_instance *instance,
778 			dma_addr_t frame_phys_addr,
779 			u32 frame_count,
780 			struct megasas_register_set __iomem *regs)
781 {
782 	unsigned long flags;
783 
784 	spin_lock_irqsave(&instance->hba_lock, flags);
785 	writel(upper_32_bits(frame_phys_addr),
786 	       &(regs)->inbound_high_queue_port);
787 	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
788 	       &(regs)->inbound_low_queue_port);
789 	mmiowb();
790 	spin_unlock_irqrestore(&instance->hba_lock, flags);
791 }
792 
/**
 * megasas_check_reset_skinny -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
 */
797 static int
798 megasas_check_reset_skinny(struct megasas_instance *instance,
799 				struct megasas_register_set __iomem *regs)
800 {
801 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
802 		return 1;
803 
804 	return 0;
805 }
806 
807 static struct megasas_instance_template megasas_instance_template_skinny = {
808 
809 	.fire_cmd = megasas_fire_cmd_skinny,
810 	.enable_intr = megasas_enable_intr_skinny,
811 	.disable_intr = megasas_disable_intr_skinny,
812 	.clear_intr = megasas_clear_intr_skinny,
813 	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
814 	.adp_reset = megasas_adp_reset_gen2,
815 	.check_reset = megasas_check_reset_skinny,
816 	.service_isr = megasas_isr,
817 	.tasklet = megasas_complete_cmd_dpc,
818 	.init_adapter = megasas_init_adapter_mfi,
819 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
820 	.issue_dcmd = megasas_issue_dcmd,
821 };
822 
823 
/*
 *	The following functions are defined for gen2 (deviceid : 0x78, 0x79)
 *	controllers
 */
828 
/**
 * megasas_enable_intr_gen2 -  Enables interrupts
 * @instance:                  Adapter soft state
 */
833 static inline void
834 megasas_enable_intr_gen2(struct megasas_instance *instance)
835 {
836 	struct megasas_register_set __iomem *regs;
837 
838 	regs = instance->reg_set;
839 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
840 
	/* write ~0x00000005 (4 | 1) to the intr mask */
842 	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
843 
844 	/* Dummy readl to force pci flush */
845 	readl(&regs->outbound_intr_mask);
846 }
847 
/**
 * megasas_disable_intr_gen2 - Disables interrupts
 * @instance:                  Adapter soft state
 */
852 static inline void
853 megasas_disable_intr_gen2(struct megasas_instance *instance)
854 {
855 	struct megasas_register_set __iomem *regs;
856 	u32 mask = 0xFFFFFFFF;
857 
858 	regs = instance->reg_set;
859 	writel(mask, &regs->outbound_intr_mask);
860 	/* Dummy readl to force pci flush */
861 	readl(&regs->outbound_intr_mask);
862 }
863 
864 /**
865  * megasas_read_fw_status_reg_gen2 - returns the current FW status value
866  * @regs:                      MFI register set
867  */
868 static u32
869 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
870 {
871 	return readl(&(regs)->outbound_scratch_pad);
872 }
873 
/**
 * megasas_clear_intr_gen2 -           Check & clear interrupt
 * @regs:                              MFI register set
 */
878 static int
879 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
880 {
881 	u32 status;
882 	u32 mfiStatus = 0;
883 
884 	/*
885 	 * Check if it is our interrupt
886 	 */
887 	status = readl(&regs->outbound_intr_status);
888 
889 	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
890 		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
891 	}
892 	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
893 		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
894 	}
895 
896 	/*
897 	 * Clear the interrupt by writing back the same value
898 	 */
899 	if (mfiStatus)
900 		writel(status, &regs->outbound_doorbell_clear);
901 
902 	/* Dummy readl to force pci flush */
903 	readl(&regs->outbound_intr_status);
904 
905 	return mfiStatus;
906 }

/**
 * megasas_fire_cmd_gen2 -     Sends command to the FW
 * @instance:                  Adapter soft state
 * @frame_phys_addr :          Physical address of cmd
 * @frame_count :              Number of frames for the command
 * @regs :                     MFI register set
 */
913 static inline void
914 megasas_fire_cmd_gen2(struct megasas_instance *instance,
915 			dma_addr_t frame_phys_addr,
916 			u32 frame_count,
917 			struct megasas_register_set __iomem *regs)
918 {
919 	unsigned long flags;
920 
921 	spin_lock_irqsave(&instance->hba_lock, flags);
922 	writel((frame_phys_addr | (frame_count<<1))|1,
923 			&(regs)->inbound_queue_port);
924 	spin_unlock_irqrestore(&instance->hba_lock, flags);
925 }
926 
/**
 * megasas_adp_reset_gen2 -	For controller reset
 * @instance:			Adapter soft state
 * @reg_set:			MFI register set
 */
931 static int
932 megasas_adp_reset_gen2(struct megasas_instance *instance,
933 			struct megasas_register_set __iomem *reg_set)
934 {
935 	u32 retry = 0 ;
936 	u32 HostDiag;
937 	u32 __iomem *seq_offset = &reg_set->seq_offset;
938 	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
939 
940 	if (instance->instancet == &megasas_instance_template_skinny) {
941 		seq_offset = &reg_set->fusion_seq_offset;
942 		hostdiag_offset = &reg_set->fusion_host_diag;
943 	}
944 
945 	writel(0, seq_offset);
946 	writel(4, seq_offset);
947 	writel(0xb, seq_offset);
948 	writel(2, seq_offset);
949 	writel(7, seq_offset);
950 	writel(0xd, seq_offset);
951 
952 	msleep(1000);
953 
954 	HostDiag = (u32)readl(hostdiag_offset);
955 
956 	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
957 		msleep(100);
958 		HostDiag = (u32)readl(hostdiag_offset);
959 		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
960 					retry, HostDiag);
961 
962 		if (retry++ >= 100)
963 			return 1;
964 
965 	}
966 
967 	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
968 
969 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
970 
971 	ssleep(10);
972 
973 	HostDiag = (u32)readl(hostdiag_offset);
974 	while (HostDiag & DIAG_RESET_ADAPTER) {
975 		msleep(100);
976 		HostDiag = (u32)readl(hostdiag_offset);
977 		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
978 				retry, HostDiag);
979 
980 		if (retry++ >= 1000)
981 			return 1;
982 
983 	}
984 	return 0;
985 }
986 
/**
 * megasas_check_reset_gen2 -	For controller reset check
 * @instance:				Adapter soft state
 * @regs:				MFI register set
 */
991 static int
992 megasas_check_reset_gen2(struct megasas_instance *instance,
993 		struct megasas_register_set __iomem *regs)
994 {
995 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
996 		return 1;
997 
998 	return 0;
999 }
1000 
1001 static struct megasas_instance_template megasas_instance_template_gen2 = {
1002 
1003 	.fire_cmd = megasas_fire_cmd_gen2,
1004 	.enable_intr = megasas_enable_intr_gen2,
1005 	.disable_intr = megasas_disable_intr_gen2,
1006 	.clear_intr = megasas_clear_intr_gen2,
1007 	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1008 	.adp_reset = megasas_adp_reset_gen2,
1009 	.check_reset = megasas_check_reset_gen2,
1010 	.service_isr = megasas_isr,
1011 	.tasklet = megasas_complete_cmd_dpc,
1012 	.init_adapter = megasas_init_adapter_mfi,
1013 	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1014 	.issue_dcmd = megasas_issue_dcmd,
1015 };
1016 
/*
 *	This is the end of set of functions & definitions
 *	specific to gen2 (deviceid : 0x78, 0x79) controllers
 */
1021 
1022 /*
1023  * Template added for TB (Fusion)
1024  */
1025 extern struct megasas_instance_template megasas_instance_template_fusion;
1026 
1027 /**
1028  * megasas_issue_polled -	Issues a polling command
1029  * @instance:			Adapter soft state
1030  * @cmd:			Command packet to be issued
1031  *
 * For polling, MFI requires the cmd_status to be set to
 * MFI_STAT_INVALID_STATUS before posting.
1033  */
1034 int
1035 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1036 {
1037 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1038 
1039 	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1040 	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1041 
1042 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1043 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1044 			__func__, __LINE__);
1045 		return DCMD_NOT_FIRED;
1046 	}
1047 
1048 	instance->instancet->issue_dcmd(instance, cmd);
1049 
1050 	return wait_and_poll(instance, cmd, instance->requestorId ?
1051 			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1052 }
1053 
1054 /**
1055  * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1056  * @instance:			Adapter soft state
1057  * @cmd:			Command to be issued
1058  * @timeout:			Timeout in seconds
1059  *
1060  * This function waits on an event for the command to be returned from ISR.
1061  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1062  * Used to issue ioctl commands.
1063  */
1064 int
1065 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1066 			  struct megasas_cmd *cmd, int timeout)
1067 {
1068 	int ret = 0;
1069 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1070 
1071 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1072 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1073 			__func__, __LINE__);
1074 		return DCMD_NOT_FIRED;
1075 	}
1076 
1077 	instance->instancet->issue_dcmd(instance, cmd);
1078 
1079 	if (timeout) {
1080 		ret = wait_event_timeout(instance->int_cmd_wait_q,
1081 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1082 		if (!ret) {
1083 			dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1084 				__func__, __LINE__);
1085 			return DCMD_TIMEOUT;
1086 		}
1087 	} else
1088 		wait_event(instance->int_cmd_wait_q,
1089 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1090 
1091 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1092 		DCMD_SUCCESS : DCMD_FAILED;
1093 }
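
/*
 * A minimal sketch of how the blocking helper above is typically used for
 * internal DCMDs (error handling omitted; the DMA buffer buf_h and its
 * length buf_len are hypothetical):
 *
 *	cmd = megasas_get_cmd(instance);
 *	dcmd = &cmd->frame->dcmd;
 *	dcmd->cmd = MFI_CMD_DCMD;
 *	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
 *	megasas_set_dma_settings(instance, dcmd, buf_h, buf_len);
 *	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
 *	megasas_return_cmd(instance, cmd);
 */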
1094 
1095 /**
1096  * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1097  * @instance:				Adapter soft state
1098  * @cmd_to_abort:			Previously issued cmd to be aborted
1099  * @timeout:				Timeout in seconds
1100  *
 * MFI firmware can abort a previously issued AEN command (automatic event
1102  * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1103  * cmd and waits for return status.
1104  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1105  */
1106 static int
1107 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1108 				struct megasas_cmd *cmd_to_abort, int timeout)
1109 {
1110 	struct megasas_cmd *cmd;
1111 	struct megasas_abort_frame *abort_fr;
1112 	int ret = 0;
1113 
1114 	cmd = megasas_get_cmd(instance);
1115 
1116 	if (!cmd)
1117 		return -1;
1118 
1119 	abort_fr = &cmd->frame->abort;
1120 
1121 	/*
1122 	 * Prepare and issue the abort frame
1123 	 */
1124 	abort_fr->cmd = MFI_CMD_ABORT;
1125 	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1126 	abort_fr->flags = cpu_to_le16(0);
1127 	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1128 	abort_fr->abort_mfi_phys_addr_lo =
1129 		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1130 	abort_fr->abort_mfi_phys_addr_hi =
1131 		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1132 
1133 	cmd->sync_cmd = 1;
1134 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1135 
1136 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1137 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1138 			__func__, __LINE__);
1139 		return DCMD_NOT_FIRED;
1140 	}
1141 
1142 	instance->instancet->issue_dcmd(instance, cmd);
1143 
1144 	if (timeout) {
1145 		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1146 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1147 		if (!ret) {
1148 			dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1149 				__func__, __LINE__);
1150 			return DCMD_TIMEOUT;
1151 		}
1152 	} else
1153 		wait_event(instance->abort_cmd_wait_q,
1154 				cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1155 
1156 	cmd->sync_cmd = 0;
1157 
1158 	megasas_return_cmd(instance, cmd);
1159 	return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1160 		DCMD_SUCCESS : DCMD_FAILED;
1161 }
1162 
1163 /**
1164  * megasas_make_sgl32 -	Prepares 32-bit SGL
1165  * @instance:		Adapter soft state
1166  * @scp:		SCSI command from the mid-layer
1167  * @mfi_sgl:		SGL to be filled in
1168  *
1169  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1171  */
1172 static int
1173 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1174 		   union megasas_sgl *mfi_sgl)
1175 {
1176 	int i;
1177 	int sge_count;
1178 	struct scatterlist *os_sgl;
1179 
1180 	sge_count = scsi_dma_map(scp);
1181 	BUG_ON(sge_count < 0);
1182 
1183 	if (sge_count) {
1184 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1185 			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1186 			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1187 		}
1188 	}
1189 	return sge_count;
1190 }
1191 
1192 /**
1193  * megasas_make_sgl64 -	Prepares 64-bit SGL
1194  * @instance:		Adapter soft state
1195  * @scp:		SCSI command from the mid-layer
1196  * @mfi_sgl:		SGL to be filled in
1197  *
1198  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1200  */
1201 static int
1202 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1203 		   union megasas_sgl *mfi_sgl)
1204 {
1205 	int i;
1206 	int sge_count;
1207 	struct scatterlist *os_sgl;
1208 
1209 	sge_count = scsi_dma_map(scp);
1210 	BUG_ON(sge_count < 0);
1211 
1212 	if (sge_count) {
1213 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1214 			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1215 			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1216 		}
1217 	}
1218 	return sge_count;
1219 }
1220 
1221 /**
1222  * megasas_make_sgl_skinny - Prepares IEEE SGL
1223  * @instance:           Adapter soft state
1224  * @scp:                SCSI command from the mid-layer
1225  * @mfi_sgl:            SGL to be filled in
1226  *
1227  * If successful, this function returns the number of SG elements. Otherwise,
 * it returns -1.
1229  */
1230 static int
1231 megasas_make_sgl_skinny(struct megasas_instance *instance,
1232 		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1233 {
1234 	int i;
1235 	int sge_count;
1236 	struct scatterlist *os_sgl;
1237 
1238 	sge_count = scsi_dma_map(scp);
1239 
1240 	if (sge_count) {
1241 		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1242 			mfi_sgl->sge_skinny[i].length =
1243 				cpu_to_le32(sg_dma_len(os_sgl));
1244 			mfi_sgl->sge_skinny[i].phys_addr =
1245 				cpu_to_le64(sg_dma_address(os_sgl));
1246 			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1247 		}
1248 	}
1249 	return sge_count;
1250 }
1251 
/**
 * megasas_get_frame_count - Computes the number of frames
 * @instance:		Adapter soft state
 * @sge_count		: number of sg elements
 * @frame_type		: type of frame - io or pthru frame
 *
 * Returns the number of frames required for number of sge's (sge_count)
 */
1259 
1260 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1261 			u8 sge_count, u8 frame_type)
1262 {
1263 	int num_cnt;
1264 	int sge_bytes;
1265 	u32 sge_sz;
1266 	u32 frame_count = 0;
1267 
1268 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1269 	    sizeof(struct megasas_sge32);
1270 
1271 	if (instance->flag_ieee) {
1272 		sge_sz = sizeof(struct megasas_sge_skinny);
1273 	}
1274 
1275 	/*
1276 	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1277 	 * 3 SGEs for 32-bit SGLs for ldio &
1278 	 * 1 SGEs for 64-bit SGLs and
1279 	 * 2 SGEs for 32-bit SGLs for pthru frame
1280 	 */
1281 	if (unlikely(frame_type == PTHRU_FRAME)) {
1282 		if (instance->flag_ieee == 1) {
1283 			num_cnt = sge_count - 1;
1284 		} else if (IS_DMA64)
1285 			num_cnt = sge_count - 1;
1286 		else
1287 			num_cnt = sge_count - 2;
1288 	} else {
1289 		if (instance->flag_ieee == 1) {
1290 			num_cnt = sge_count - 1;
1291 		} else if (IS_DMA64)
1292 			num_cnt = sge_count - 2;
1293 		else
1294 			num_cnt = sge_count - 3;
1295 	}
1296 
1297 	if (num_cnt > 0) {
1298 		sge_bytes = sge_sz * num_cnt;
1299 
1300 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1301 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1302 	}
1303 	/* Main frame */
1304 	frame_count += 1;
1305 
1306 	if (frame_count > 7)
1307 		frame_count = 8;
1308 	return frame_count;
1309 }
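
/*
 * Worked example for megasas_get_frame_count() above (hypothetical
 * sge_count, assuming 16-byte IEEE/skinny SGEs and a 64-byte MFI frame):
 * for an IO_FRAME with sge_count = 10, num_cnt = 9 and sge_bytes = 144,
 * so 144 / 64 = 2 with a remainder gives 3 extra frames, for a total
 * frame_count of 4 including the main frame.
 */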
1310 
1311 /**
1312  * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1313  * @instance:		Adapter soft state
1314  * @scp:		SCSI command
1315  * @cmd:		Command to be prepared in
1316  *
 * This function prepares CDB commands. These are typically pass-through
1318  * commands to the devices.
1319  */
1320 static int
1321 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1322 		   struct megasas_cmd *cmd)
1323 {
1324 	u32 is_logical;
1325 	u32 device_id;
1326 	u16 flags = 0;
1327 	struct megasas_pthru_frame *pthru;
1328 
1329 	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1330 	device_id = MEGASAS_DEV_INDEX(scp);
1331 	pthru = (struct megasas_pthru_frame *)cmd->frame;
1332 
1333 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1334 		flags = MFI_FRAME_DIR_WRITE;
1335 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1336 		flags = MFI_FRAME_DIR_READ;
1337 	else if (scp->sc_data_direction == PCI_DMA_NONE)
1338 		flags = MFI_FRAME_DIR_NONE;
1339 
1340 	if (instance->flag_ieee == 1) {
1341 		flags |= MFI_FRAME_IEEE;
1342 	}
1343 
1344 	/*
1345 	 * Prepare the DCDB frame
1346 	 */
1347 	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1348 	pthru->cmd_status = 0x0;
1349 	pthru->scsi_status = 0x0;
1350 	pthru->target_id = device_id;
1351 	pthru->lun = scp->device->lun;
1352 	pthru->cdb_len = scp->cmd_len;
1353 	pthru->timeout = 0;
1354 	pthru->pad_0 = 0;
1355 	pthru->flags = cpu_to_le16(flags);
1356 	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1357 
1358 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1359 
1360 	/*
1361 	 * If the command is for the tape device, set the
1362 	 * pthru timeout to the os layer timeout value.
1363 	 */
1364 	if (scp->device->type == TYPE_TAPE) {
1365 		if ((scp->request->timeout / HZ) > 0xFFFF)
1366 			pthru->timeout = cpu_to_le16(0xFFFF);
1367 		else
1368 			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1369 	}
1370 
1371 	/*
1372 	 * Construct SGL
1373 	 */
1374 	if (instance->flag_ieee == 1) {
1375 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1376 		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1377 						      &pthru->sgl);
1378 	} else if (IS_DMA64) {
1379 		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1380 		pthru->sge_count = megasas_make_sgl64(instance, scp,
1381 						      &pthru->sgl);
1382 	} else
1383 		pthru->sge_count = megasas_make_sgl32(instance, scp,
1384 						      &pthru->sgl);
1385 
1386 	if (pthru->sge_count > instance->max_num_sge) {
1387 		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1388 			pthru->sge_count);
1389 		return 0;
1390 	}
1391 
1392 	/*
1393 	 * Sense info specific
1394 	 */
1395 	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1396 	pthru->sense_buf_phys_addr_hi =
1397 		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1398 	pthru->sense_buf_phys_addr_lo =
1399 		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1400 
1401 	/*
1402 	 * Compute the total number of frames this command consumes. FW uses
1403 	 * this number to pull sufficient number of frames from host memory.
1404 	 */
1405 	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1406 							PTHRU_FRAME);
1407 
1408 	return cmd->frame_count;
1409 }
1410 
1411 /**
1412  * megasas_build_ldio -	Prepares IOs to logical devices
1413  * @instance:		Adapter soft state
1414  * @scp:		SCSI command
1415  * @cmd:		Command to be prepared
1416  *
1417  * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1418  */
1419 static int
1420 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1421 		   struct megasas_cmd *cmd)
1422 {
1423 	u32 device_id;
1424 	u8 sc = scp->cmnd[0];
1425 	u16 flags = 0;
1426 	struct megasas_io_frame *ldio;
1427 
1428 	device_id = MEGASAS_DEV_INDEX(scp);
1429 	ldio = (struct megasas_io_frame *)cmd->frame;
1430 
1431 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1432 		flags = MFI_FRAME_DIR_WRITE;
1433 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1434 		flags = MFI_FRAME_DIR_READ;
1435 
1436 	if (instance->flag_ieee == 1) {
1437 		flags |= MFI_FRAME_IEEE;
1438 	}
1439 
1440 	/*
1441 	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1442 	 */
1443 	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1444 	ldio->cmd_status = 0x0;
1445 	ldio->scsi_status = 0x0;
1446 	ldio->target_id = device_id;
1447 	ldio->timeout = 0;
1448 	ldio->reserved_0 = 0;
1449 	ldio->pad_0 = 0;
1450 	ldio->flags = cpu_to_le16(flags);
1451 	ldio->start_lba_hi = 0;
1452 	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1453 
1454 	/*
1455 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1456 	 */
1457 	if (scp->cmd_len == 6) {
1458 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1459 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1460 						 ((u32) scp->cmnd[2] << 8) |
1461 						 (u32) scp->cmnd[3]);
1462 
1463 		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1464 	}
1465 
1466 	/*
1467 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1468 	 */
1469 	else if (scp->cmd_len == 10) {
1470 		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1471 					      ((u32) scp->cmnd[7] << 8));
1472 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1473 						 ((u32) scp->cmnd[3] << 16) |
1474 						 ((u32) scp->cmnd[4] << 8) |
1475 						 (u32) scp->cmnd[5]);
1476 	}
1477 
1478 	/*
1479 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1480 	 */
1481 	else if (scp->cmd_len == 12) {
1482 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1483 					      ((u32) scp->cmnd[7] << 16) |
1484 					      ((u32) scp->cmnd[8] << 8) |
1485 					      (u32) scp->cmnd[9]);
1486 
1487 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1488 						 ((u32) scp->cmnd[3] << 16) |
1489 						 ((u32) scp->cmnd[4] << 8) |
1490 						 (u32) scp->cmnd[5]);
1491 	}
1492 
1493 	/*
1494 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1495 	 */
1496 	else if (scp->cmd_len == 16) {
1497 		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1498 					      ((u32) scp->cmnd[11] << 16) |
1499 					      ((u32) scp->cmnd[12] << 8) |
1500 					      (u32) scp->cmnd[13]);
1501 
1502 		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1503 						 ((u32) scp->cmnd[7] << 16) |
1504 						 ((u32) scp->cmnd[8] << 8) |
1505 						 (u32) scp->cmnd[9]);
1506 
1507 		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1508 						 ((u32) scp->cmnd[3] << 16) |
1509 						 ((u32) scp->cmnd[4] << 8) |
1510 						 (u32) scp->cmnd[5]);
1511 
1512 	}
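
	/*
	 * Worked example (hypothetical CDB): a READ(10) with
	 * cmnd[] = { 0x28, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00 }
	 * decodes above to start_lba_lo = 0x1000 and lba_count = 8.
	 */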
1513 
1514 	/*
1515 	 * Construct SGL
1516 	 */
1517 	if (instance->flag_ieee) {
1518 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1519 		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1520 					      &ldio->sgl);
1521 	} else if (IS_DMA64) {
1522 		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1523 		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1524 	} else
1525 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1526 
1527 	if (ldio->sge_count > instance->max_num_sge) {
1528 		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1529 			ldio->sge_count);
1530 		return 0;
1531 	}
1532 
1533 	/*
1534 	 * Sense info specific
1535 	 */
1536 	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1537 	ldio->sense_buf_phys_addr_hi = 0;
1538 	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1539 
1540 	/*
1541 	 * Compute the total number of frames this command consumes. FW uses
1542 	 * this number to pull sufficient number of frames from host memory.
1543 	 */
1544 	cmd->frame_count = megasas_get_frame_count(instance,
1545 			ldio->sge_count, IO_FRAME);
1546 
1547 	return cmd->frame_count;
1548 }
1549 
/**
 * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
 *				and whether it's RW or non RW
 * @cmd:			SCSI command
 *
 */
1556 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1557 {
1558 	int ret;
1559 
1560 	switch (cmd->cmnd[0]) {
1561 	case READ_10:
1562 	case WRITE_10:
1563 	case READ_12:
1564 	case WRITE_12:
1565 	case READ_6:
1566 	case WRITE_6:
1567 	case READ_16:
1568 	case WRITE_16:
1569 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1570 			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1571 		break;
1572 	default:
1573 		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1574 			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1575 	}
1576 	return ret;
1577 }
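
/*
 * For example, a READ(10) issued to a logical drive is classified above as
 * READ_WRITE_LDIO, while an INQUIRY to a system PD is classified as
 * NON_READ_WRITE_SYSPDIO.
 */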
1578 
/**
 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
 *					in FW
 * @instance:				Adapter soft state
 */
1584 static inline void
1585 megasas_dump_pending_frames(struct megasas_instance *instance)
1586 {
1587 	struct megasas_cmd *cmd;
1588 	int i,n;
1589 	union megasas_sgl *mfi_sgl;
1590 	struct megasas_io_frame *ldio;
1591 	struct megasas_pthru_frame *pthru;
1592 	u32 sgcount;
1593 	u16 max_cmd = instance->max_fw_cmds;
1594 
1595 	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1596 	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1597 	if (IS_DMA64)
1598 		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1599 	else
1600 		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1601 
1602 	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1603 	for (i = 0; i < max_cmd; i++) {
1604 		cmd = instance->cmd_list[i];
1605 		if (!cmd->scmd)
1606 			continue;
1607 		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1608 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1609 			ldio = (struct megasas_io_frame *)cmd->frame;
1610 			mfi_sgl = &ldio->sgl;
1611 			sgcount = ldio->sge_count;
1612 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1613 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1614 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1615 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1616 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1617 		} else {
1618 			pthru = (struct megasas_pthru_frame *) cmd->frame;
1619 			mfi_sgl = &pthru->sgl;
1620 			sgcount = pthru->sge_count;
1621 			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1622 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1623 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1624 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1625 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1626 		}
1627 		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1628 			for (n = 0; n < sgcount; n++) {
1629 				if (IS_DMA64)
1630 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1631 						le32_to_cpu(mfi_sgl->sge64[n].length),
1632 						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1633 				else
1634 					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1635 						le32_to_cpu(mfi_sgl->sge32[n].length),
1636 						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1637 			}
1638 		}
1639 	} /*for max_cmd*/
1640 	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1641 	for (i = 0; i < max_cmd; i++) {
1642 
1643 		cmd = instance->cmd_list[i];
1644 
1645 		if (cmd->sync_cmd == 1)
1646 			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1647 	}
1648 	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1649 }
1650 
1651 u32
1652 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1653 			    struct scsi_cmnd *scmd)
1654 {
1655 	struct megasas_cmd *cmd;
1656 	u32 frame_count;
1657 
1658 	cmd = megasas_get_cmd(instance);
1659 	if (!cmd)
1660 		return SCSI_MLQUEUE_HOST_BUSY;
1661 
1662 	/*
1663 	 * Logical drive command
1664 	 */
1665 	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1666 		frame_count = megasas_build_ldio(instance, scmd, cmd);
1667 	else
1668 		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1669 
1670 	if (!frame_count)
1671 		goto out_return_cmd;
1672 
1673 	cmd->scmd = scmd;
1674 	scmd->SCp.ptr = (char *)cmd;
1675 
1676 	/*
1677 	 * Issue the command to the FW
1678 	 */
1679 	atomic_inc(&instance->fw_outstanding);
1680 
1681 	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1682 				cmd->frame_count-1, instance->reg_set);
1683 
1684 	return 0;
1685 out_return_cmd:
1686 	megasas_return_cmd(instance, cmd);
1687 	return SCSI_MLQUEUE_HOST_BUSY;
1688 }
1689 
1690 
/**
 * megasas_queue_command -	Queue entry point
 * @shost:			SCSI host from the mid-layer
 * @scmd:			SCSI command to be queued
 */
1696 static int
1697 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1698 {
1699 	struct megasas_instance *instance;
1700 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1701 
1702 	instance = (struct megasas_instance *)
1703 	    scmd->device->host->hostdata;
1704 
1705 	if (instance->unload == 1) {
1706 		scmd->result = DID_NO_CONNECT << 16;
1707 		scmd->scsi_done(scmd);
1708 		return 0;
1709 	}
1710 
1711 	if (instance->issuepend_done == 0)
1712 		return SCSI_MLQUEUE_HOST_BUSY;
1713 
1714 
1715 	/* Check for an mpio path and adjust behavior */
1716 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1717 		if (megasas_check_mpio_paths(instance, scmd) ==
1718 		    (DID_REQUEUE << 16)) {
1719 			return SCSI_MLQUEUE_HOST_BUSY;
1720 		} else {
1721 			scmd->result = DID_NO_CONNECT << 16;
1722 			scmd->scsi_done(scmd);
1723 			return 0;
1724 		}
1725 	}
1726 
1727 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1728 		scmd->result = DID_NO_CONNECT << 16;
1729 		scmd->scsi_done(scmd);
1730 		return 0;
1731 	}
1732 
1733 	mr_device_priv_data = scmd->device->hostdata;
1734 	if (!mr_device_priv_data) {
1735 		scmd->result = DID_NO_CONNECT << 16;
1736 		scmd->scsi_done(scmd);
1737 		return 0;
1738 	}
1739 
1740 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1741 		return SCSI_MLQUEUE_HOST_BUSY;
1742 
1743 	if (mr_device_priv_data->tm_busy)
1744 		return SCSI_MLQUEUE_DEVICE_BUSY;
1745 
1746 
1747 	scmd->result = 0;
1748 
1749 	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1750 	    (scmd->device->id >= instance->fw_supported_vd_count ||
1751 		scmd->device->lun)) {
1752 		scmd->result = DID_BAD_TARGET << 16;
1753 		goto out_done;
1754 	}
1755 
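	/*
	 * Complete SYNCHRONIZE_CACHE to logical drives in the driver when
	 * the firmware does not support cache flush.
	 */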
1756 	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1757 	    MEGASAS_IS_LOGICAL(scmd->device) &&
1758 	    (!instance->fw_sync_cache_support)) {
1759 		scmd->result = DID_OK << 16;
1760 		goto out_done;
1761 	}
1762 
1763 	return instance->instancet->build_and_issue_cmd(instance, scmd);
1764 
1765  out_done:
1766 	scmd->scsi_done(scmd);
1767 	return 0;
1768 }
1769 
1770 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1771 {
1772 	int i;
1773 
1774 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1775 
1776 		if ((megasas_mgmt_info.instance[i]) &&
1777 		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1778 			return megasas_mgmt_info.instance[i];
1779 	}
1780 
1781 	return NULL;
1782 }
1783 
/*
 * megasas_set_dynamic_target_properties -
 * Device properties set by the driver may not be static and need to be
 * updated after an OCR.
 *
 * Sets tm_capable.
 * Sets DMA alignment (only for EEDP-protection-enabled VDs).
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
1796 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1797 {
1798 	u16 pd_index = 0, ld;
1799 	u32 device_id;
1800 	struct megasas_instance *instance;
1801 	struct fusion_context *fusion;
1802 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1803 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1804 	struct MR_LD_RAID *raid;
1805 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1806 
1807 	instance = megasas_lookup_instance(sdev->host->host_no);
1808 	fusion = instance->ctrl_context;
1809 	mr_device_priv_data = sdev->hostdata;
1810 
1811 	if (!fusion || !mr_device_priv_data)
1812 		return;
1813 
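	/*
	 * For logical drives, refresh the TM capability and DMA alignment
	 * from the current RAID map; for system PDs (when the JBOD sequence
	 * map is in use), refresh the TM capability from the PD sequence
	 * sync table.
	 */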
1814 	if (MEGASAS_IS_LOGICAL(sdev)) {
1815 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1816 					+ sdev->id;
1817 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1818 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1819 		if (ld >= instance->fw_supported_vd_count)
1820 			return;
1821 		raid = MR_LdRaidGet(ld, local_map_ptr);
1822 
		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
			blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1825 
1826 		mr_device_priv_data->is_tm_capable =
1827 			raid->capability.tmCapable;
1828 	} else if (instance->use_seqnum_jbod_fp) {
1829 		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1830 			sdev->id;
1831 		pd_sync = (void *)fusion->pd_seq_sync
1832 				[(instance->pd_seq_map_id - 1) & 1];
1833 		mr_device_priv_data->is_tm_capable =
1834 			pd_sync->seq[pd_index].capability.tmCapable;
1835 	}
1836 }
1837 
/*
 * megasas_set_nvme_device_properties -
 * set nomerges=2
 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
 * set maximum io transfer = MDTS of NVMe device provided by MR firmware.
 *
 * MR firmware provides the value in KB. The caller of this function
 * converts KB into bytes.
 *
 * e.g. MDTS=5 means 2^5 * NVMe page size. For a 4K page size,
 * MR firmware provides the value 128, since 32 * 4K = 128K.
 *
 * @sdev:				scsi device
 * @max_io_size:			maximum io transfer size
 *
 */
1854 static inline void
1855 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1856 {
1857 	struct megasas_instance *instance;
1858 	u32 mr_nvme_pg_size;
1859 
1860 	instance = (struct megasas_instance *)sdev->host->hostdata;
1861 	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1862 				MR_DEFAULT_NVME_PAGE_SIZE);
1863 
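	/*
	 * Cap the transfer size to the firmware-reported limit and align
	 * scatter-gather boundaries to the NVMe page size (at least
	 * MR_DEFAULT_NVME_PAGE_SIZE).
	 */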
1864 	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1865 
1866 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1867 	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1868 }
1869 
1870 
/*
 * megasas_set_static_target_properties -
 * Device properties set by the driver are static and do not need to be
 * updated after an OCR.
 *
 * set io timeout
 * set device queue depth
 * set NVMe device properties. see - megasas_set_nvme_device_properties
 *
 * @sdev:				scsi device
 * @is_target_prop:			true, if FW provided target properties.
 */
1883 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1884 						 bool is_target_prop)
1885 {
1886 	u16	target_index = 0;
1887 	u8 interface_type;
1888 	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1889 	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1890 	u32 tgt_device_qd;
1891 	struct megasas_instance *instance;
1892 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1893 
1894 	instance = megasas_lookup_instance(sdev->host->host_no);
1895 	mr_device_priv_data = sdev->hostdata;
1896 	interface_type  = mr_device_priv_data->interface_type;
1897 
1898 	/*
1899 	 * The RAID firmware may require extended timeouts.
1900 	 */
1901 	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1902 
1903 	target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1904 
1905 	switch (interface_type) {
1906 	case SAS_PD:
1907 		device_qd = MEGASAS_SAS_QD;
1908 		break;
1909 	case SATA_PD:
1910 		device_qd = MEGASAS_SATA_QD;
1911 		break;
1912 	case NVME_PD:
1913 		device_qd = MEGASAS_NVME_QD;
1914 		break;
1915 	}
1916 
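	/*
	 * Prefer the firmware-reported per-target queue depth over the
	 * interface-type default, provided it does not exceed the host
	 * can_queue limit.
	 */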
1917 	if (is_target_prop) {
1918 		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1919 		if (tgt_device_qd &&
1920 		    (tgt_device_qd <= instance->host->can_queue))
1921 			device_qd = tgt_device_qd;
1922 
		/* max_io_size_kb will be set to non-zero only for
		 * NVMe based VDs and system PDs.
1925 		 */
1926 		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1927 	}
1928 
1929 	if (instance->nvme_page_size && max_io_size_kb)
1930 		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1931 
1932 	scsi_change_queue_depth(sdev, device_qd);
1933 
1934 }
1935 
1936 
1937 static int megasas_slave_configure(struct scsi_device *sdev)
1938 {
1939 	u16 pd_index = 0;
1940 	struct megasas_instance *instance;
1941 	int ret_target_prop = DCMD_FAILED;
1942 	bool is_target_prop = false;
1943 
1944 	instance = megasas_lookup_instance(sdev->host->host_no);
1945 	if (instance->pd_list_not_supported) {
1946 		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1947 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1948 				sdev->id;
1949 			if (instance->pd_list[pd_index].driveState !=
1950 				MR_PD_STATE_SYSTEM)
1951 				return -ENXIO;
1952 		}
1953 	}
1954 
1955 	mutex_lock(&instance->hba_mutex);
1956 	/* Send DCMD to Firmware and cache the information */
1957 	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1958 		megasas_get_pd_info(instance, sdev);
1959 
	/* Some Ventura firmware may not have instance->nvme_page_size set.
	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1962 	 */
1963 	if ((instance->tgt_prop) && (instance->nvme_page_size))
1964 		ret_target_prop = megasas_get_target_prop(instance, sdev);
1965 
1966 	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1967 	megasas_set_static_target_properties(sdev, is_target_prop);
1968 
1969 	mutex_unlock(&instance->hba_mutex);
1970 
1971 	/* This sdev property may change post OCR */
1972 	megasas_set_dynamic_target_properties(sdev);
1973 
1974 	return 0;
1975 }
1976 
1977 static int megasas_slave_alloc(struct scsi_device *sdev)
1978 {
1979 	u16 pd_index = 0;
1980 	struct megasas_instance *instance ;
1981 	struct MR_PRIV_DEVICE *mr_device_priv_data;
1982 
1983 	instance = megasas_lookup_instance(sdev->host->host_no);
1984 	if (!MEGASAS_IS_LOGICAL(sdev)) {
1985 		/*
1986 		 * Open the OS scan to the SYSTEM PD
1987 		 */
1988 		pd_index =
1989 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1990 			sdev->id;
1991 		if ((instance->pd_list_not_supported ||
1992 			instance->pd_list[pd_index].driveState ==
1993 			MR_PD_STATE_SYSTEM)) {
1994 			goto scan_target;
1995 		}
1996 		return -ENXIO;
1997 	}
1998 
1999 scan_target:
2000 	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2001 					GFP_KERNEL);
2002 	if (!mr_device_priv_data)
2003 		return -ENOMEM;
2004 	sdev->hostdata = mr_device_priv_data;
2005 
2006 	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2007 		   instance->r1_ldio_hint_default);
2008 	return 0;
2009 }
2010 
2011 static void megasas_slave_destroy(struct scsi_device *sdev)
2012 {
2013 	kfree(sdev->hostdata);
2014 	sdev->hostdata = NULL;
2015 }
2016 
/*
 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
 *                                       kill adapter
 * @instance:				Adapter soft state
 *
 */
2023 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2024 {
2025 	int i;
2026 	struct megasas_cmd *cmd_mfi;
2027 	struct megasas_cmd_fusion *cmd_fusion;
2028 	struct fusion_context *fusion = instance->ctrl_context;
2029 
2030 	/* Find all outstanding ioctls */
2031 	if (fusion) {
2032 		for (i = 0; i < instance->max_fw_cmds; i++) {
2033 			cmd_fusion = fusion->cmd_list[i];
2034 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2035 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2036 				if (cmd_mfi->sync_cmd &&
2037 				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2038 					cmd_mfi->frame->hdr.cmd_status =
2039 							MFI_STAT_WRONG_STATE;
2040 					megasas_complete_cmd(instance,
2041 							     cmd_mfi, DID_OK);
2042 				}
2043 			}
2044 		}
2045 	} else {
2046 		for (i = 0; i < instance->max_fw_cmds; i++) {
2047 			cmd_mfi = instance->cmd_list[i];
2048 			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2049 				MFI_CMD_ABORT)
2050 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2051 		}
2052 	}
2053 }
2054 
2055 
2056 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2057 {
2058 	/* Set critical error to block I/O & ioctls in case caller didn't */
2059 	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2060 	/* Wait 1 second to ensure IO or ioctls in build have posted */
2061 	msleep(1000);
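	/*
	 * Skinny and Fusion-series adapters take MFI_STOP_ADP through
	 * reg_set->doorbell; other MFI adapters use inbound_doorbell.
	 */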
2062 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2063 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2064 		(instance->adapter_type != MFI_SERIES)) {
2065 		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2066 		/* Flush */
2067 		readl(&instance->reg_set->doorbell);
2068 		if (instance->requestorId && instance->peerIsPresent)
2069 			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2070 	} else {
2071 		writel(MFI_STOP_ADP,
2072 			&instance->reg_set->inbound_doorbell);
2073 	}
2074 	/* Complete outstanding ioctls when adapter is killed */
2075 	megasas_complete_outstanding_ioctls(instance);
2076 }
2077 
/**
 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
 *					restored to max value
 * @instance:			Adapter soft state
 *
 */
2084 void
2085 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2086 {
2087 	unsigned long flags;
2088 
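	/*
	 * Restore the full can_queue only if the FW busy flag is set, the
	 * throttle has been in effect for at least five seconds and
	 * outstanding I/O has dropped to the throttled queue depth or below.
	 */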
2089 	if (instance->flag & MEGASAS_FW_BUSY
2090 	    && time_after(jiffies, instance->last_time + 5 * HZ)
2091 	    && atomic_read(&instance->fw_outstanding) <
2092 	    instance->throttlequeuedepth + 1) {
2093 
2094 		spin_lock_irqsave(instance->host->host_lock, flags);
2095 		instance->flag &= ~MEGASAS_FW_BUSY;
2096 
2097 		instance->host->can_queue = instance->cur_can_queue;
2098 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2099 	}
2100 }
2101 
2102 /**
 * megasas_complete_cmd_dpc	 -	Completes outstanding MFI commands
2104  * @instance_addr:			Address of adapter soft state
2105  *
2106  * Tasklet to complete cmds
2107  */
2108 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2109 {
2110 	u32 producer;
2111 	u32 consumer;
2112 	u32 context;
2113 	struct megasas_cmd *cmd;
2114 	struct megasas_instance *instance =
2115 				(struct megasas_instance *)instance_addr;
2116 	unsigned long flags;
2117 
	/* If we have already declared the adapter dead, do not complete cmds */
2119 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2120 		return;
2121 
2122 	spin_lock_irqsave(&instance->completion_lock, flags);
2123 
2124 	producer = le32_to_cpu(*instance->producer);
2125 	consumer = le32_to_cpu(*instance->consumer);
2126 
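	/*
	 * Walk the reply ring from consumer to producer; each entry holds
	 * the context (command index) of a completed frame. The ring has
	 * max_fw_cmds + 1 slots, so wrap the consumer index accordingly.
	 */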
2127 	while (consumer != producer) {
2128 		context = le32_to_cpu(instance->reply_queue[consumer]);
2129 		if (context >= instance->max_fw_cmds) {
2130 			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2131 				context);
2132 			BUG();
2133 		}
2134 
2135 		cmd = instance->cmd_list[context];
2136 
2137 		megasas_complete_cmd(instance, cmd, DID_OK);
2138 
2139 		consumer++;
2140 		if (consumer == (instance->max_fw_cmds + 1)) {
2141 			consumer = 0;
2142 		}
2143 	}
2144 
2145 	*instance->consumer = cpu_to_le32(producer);
2146 
2147 	spin_unlock_irqrestore(&instance->completion_lock, flags);
2148 
2149 	/*
2150 	 * Check if we can restore can_queue
2151 	 */
2152 	megasas_check_and_restore_queue_depth(instance);
2153 }
2154 
2155 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2156 
2157 /**
2158  * megasas_start_timer - Initializes sriov heartbeat timer object
2159  * @instance:		Adapter soft state
2160  *
2161  */
2162 void megasas_start_timer(struct megasas_instance *instance)
2163 {
2164 	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2165 
2166 	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2167 	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2168 	add_timer(timer);
2169 }
2170 
2171 static void
2172 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2173 
2174 static void
2175 process_fw_state_change_wq(struct work_struct *work);
2176 
2177 void megasas_do_ocr(struct megasas_instance *instance)
2178 {
2179 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2180 	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2181 	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2182 		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2183 	}
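	/*
	 * Quiesce the adapter: disable interrupts, enter the internal reset
	 * state machine, move pending commands to the deferred queue and
	 * run the FW state change handler.
	 */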
2184 	instance->instancet->disable_intr(instance);
2185 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2186 	instance->issuepend_done = 0;
2187 
2188 	atomic_set(&instance->fw_outstanding, 0);
2189 	megasas_internal_reset_defer_cmds(instance);
2190 	process_fw_state_change_wq(&instance->work_init);
2191 }
2192 
2193 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2194 					    int initial)
2195 {
2196 	struct megasas_cmd *cmd;
2197 	struct megasas_dcmd_frame *dcmd;
2198 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2199 	dma_addr_t new_affiliation_111_h;
2200 	int ld, retval = 0;
2201 	u8 thisVf;
2202 
2203 	cmd = megasas_get_cmd(instance);
2204 
2205 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2207 		       "Failed to get cmd for scsi%d\n",
2208 			instance->host->host_no);
2209 		return -ENOMEM;
2210 	}
2211 
2212 	dcmd = &cmd->frame->dcmd;
2213 
2214 	if (!instance->vf_affiliation_111) {
2215 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2216 		       "affiliation for scsi%d\n", instance->host->host_no);
2217 		megasas_return_cmd(instance, cmd);
2218 		return -ENOMEM;
2219 	}
2220 
	if (initial)
		memset(instance->vf_affiliation_111, 0,
		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2224 	else {
2225 		new_affiliation_111 =
2226 			pci_alloc_consistent(instance->pdev,
2227 					     sizeof(struct MR_LD_VF_AFFILIATION_111),
2228 					     &new_affiliation_111_h);
2229 		if (!new_affiliation_111) {
2230 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2231 			       "memory for new affiliation for scsi%d\n",
2232 			       instance->host->host_no);
2233 			megasas_return_cmd(instance, cmd);
2234 			return -ENOMEM;
2235 		}
2236 		memset(new_affiliation_111, 0,
2237 		       sizeof(struct MR_LD_VF_AFFILIATION_111));
2238 	}
2239 
2240 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2241 
2242 	dcmd->cmd = MFI_CMD_DCMD;
2243 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2244 	dcmd->sge_count = 1;
2245 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2246 	dcmd->timeout = 0;
2247 	dcmd->pad_0 = 0;
2248 	dcmd->data_xfer_len =
2249 		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2250 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2251 
2252 	if (initial)
2253 		dcmd->sgl.sge32[0].phys_addr =
2254 			cpu_to_le32(instance->vf_affiliation_111_h);
2255 	else
2256 		dcmd->sgl.sge32[0].phys_addr =
2257 			cpu_to_le32(new_affiliation_111_h);
2258 
2259 	dcmd->sgl.sge32[0].length = cpu_to_le32(
2260 		sizeof(struct MR_LD_VF_AFFILIATION_111));
2261 
2262 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2263 	       "scsi%d\n", instance->host->host_no);
2264 
2265 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2266 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2267 		       " failed with status 0x%x for scsi%d\n",
2268 		       dcmd->cmd_status, instance->host->host_no);
2269 		retval = 1; /* Do a scan if we couldn't get affiliation */
2270 		goto out;
2271 	}
2272 
2273 	if (!initial) {
2274 		thisVf = new_affiliation_111->thisVf;
2275 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2276 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2277 			    new_affiliation_111->map[ld].policy[thisVf]) {
2278 				dev_warn(&instance->pdev->dev, "SR-IOV: "
2279 				       "Got new LD/VF affiliation for scsi%d\n",
2280 				       instance->host->host_no);
2281 				memcpy(instance->vf_affiliation_111,
2282 				       new_affiliation_111,
2283 				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2284 				retval = 1;
2285 				goto out;
2286 			}
2287 	}
2288 out:
2289 	if (new_affiliation_111) {
2290 		pci_free_consistent(instance->pdev,
2291 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2292 				    new_affiliation_111,
2293 				    new_affiliation_111_h);
2294 	}
2295 
2296 	megasas_return_cmd(instance, cmd);
2297 
2298 	return retval;
2299 }
2300 
2301 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2302 					    int initial)
2303 {
2304 	struct megasas_cmd *cmd;
2305 	struct megasas_dcmd_frame *dcmd;
2306 	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2307 	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2308 	dma_addr_t new_affiliation_h;
2309 	int i, j, retval = 0, found = 0, doscan = 0;
2310 	u8 thisVf;
2311 
2312 	cmd = megasas_get_cmd(instance);
2313 
2314 	if (!cmd) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2316 		       "Failed to get cmd for scsi%d\n",
2317 		       instance->host->host_no);
2318 		return -ENOMEM;
2319 	}
2320 
2321 	dcmd = &cmd->frame->dcmd;
2322 
2323 	if (!instance->vf_affiliation) {
2324 		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2325 		       "affiliation for scsi%d\n", instance->host->host_no);
2326 		megasas_return_cmd(instance, cmd);
2327 		return -ENOMEM;
2328 	}
2329 
2330 	if (initial)
2331 		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2332 		       sizeof(struct MR_LD_VF_AFFILIATION));
2333 	else {
2334 		new_affiliation =
2335 			pci_alloc_consistent(instance->pdev,
2336 					     (MAX_LOGICAL_DRIVES + 1) *
2337 					     sizeof(struct MR_LD_VF_AFFILIATION),
2338 					     &new_affiliation_h);
2339 		if (!new_affiliation) {
2340 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2341 			       "memory for new affiliation for scsi%d\n",
2342 			       instance->host->host_no);
2343 			megasas_return_cmd(instance, cmd);
2344 			return -ENOMEM;
2345 		}
2346 		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2347 		       sizeof(struct MR_LD_VF_AFFILIATION));
2348 	}
2349 
2350 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2351 
2352 	dcmd->cmd = MFI_CMD_DCMD;
2353 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2354 	dcmd->sge_count = 1;
2355 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2356 	dcmd->timeout = 0;
2357 	dcmd->pad_0 = 0;
2358 	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2359 		sizeof(struct MR_LD_VF_AFFILIATION));
2360 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2361 
2362 	if (initial)
2363 		dcmd->sgl.sge32[0].phys_addr =
2364 			cpu_to_le32(instance->vf_affiliation_h);
2365 	else
2366 		dcmd->sgl.sge32[0].phys_addr =
2367 			cpu_to_le32(new_affiliation_h);
2368 
2369 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2370 		sizeof(struct MR_LD_VF_AFFILIATION));
2371 
2372 	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2373 	       "scsi%d\n", instance->host->host_no);
2374 
2375 
2376 	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2377 		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2378 		       " failed with status 0x%x for scsi%d\n",
2379 		       dcmd->cmd_status, instance->host->host_no);
2380 		retval = 1; /* Do a scan if we couldn't get affiliation */
2381 		goto out;
2382 	}
2383 
2384 	if (!initial) {
2385 		if (!new_affiliation->ldCount) {
2386 			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2387 			       "affiliation for passive path for scsi%d\n",
2388 			       instance->host->host_no);
2389 			retval = 1;
2390 			goto out;
2391 		}
2392 		newmap = new_affiliation->map;
2393 		savedmap = instance->vf_affiliation->map;
2394 		thisVf = new_affiliation->thisVf;
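		/*
		 * Compare the new affiliation map against the saved one in
		 * both directions; any policy change or newly visible LD
		 * triggers a rescan (doscan).
		 */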
2395 		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2396 			found = 0;
2397 			for (j = 0; j < instance->vf_affiliation->ldCount;
2398 			     j++) {
2399 				if (newmap->ref.targetId ==
2400 				    savedmap->ref.targetId) {
2401 					found = 1;
2402 					if (newmap->policy[thisVf] !=
2403 					    savedmap->policy[thisVf]) {
2404 						doscan = 1;
2405 						goto out;
2406 					}
2407 				}
2408 				savedmap = (struct MR_LD_VF_MAP *)
2409 					((unsigned char *)savedmap +
2410 					 savedmap->size);
2411 			}
2412 			if (!found && newmap->policy[thisVf] !=
2413 			    MR_LD_ACCESS_HIDDEN) {
2414 				doscan = 1;
2415 				goto out;
2416 			}
2417 			newmap = (struct MR_LD_VF_MAP *)
2418 				((unsigned char *)newmap + newmap->size);
2419 		}
2420 
2421 		newmap = new_affiliation->map;
2422 		savedmap = instance->vf_affiliation->map;
2423 
2424 		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2425 			found = 0;
2426 			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2427 				if (savedmap->ref.targetId ==
2428 				    newmap->ref.targetId) {
2429 					found = 1;
2430 					if (savedmap->policy[thisVf] !=
2431 					    newmap->policy[thisVf]) {
2432 						doscan = 1;
2433 						goto out;
2434 					}
2435 				}
2436 				newmap = (struct MR_LD_VF_MAP *)
2437 					((unsigned char *)newmap +
2438 					 newmap->size);
2439 			}
2440 			if (!found && savedmap->policy[thisVf] !=
2441 			    MR_LD_ACCESS_HIDDEN) {
2442 				doscan = 1;
2443 				goto out;
2444 			}
2445 			savedmap = (struct MR_LD_VF_MAP *)
2446 				((unsigned char *)savedmap +
2447 				 savedmap->size);
2448 		}
2449 	}
2450 out:
2451 	if (doscan) {
2452 		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2453 		       "affiliation for scsi%d\n", instance->host->host_no);
2454 		memcpy(instance->vf_affiliation, new_affiliation,
2455 		       new_affiliation->size);
2456 		retval = 1;
2457 	}
2458 
2459 	if (new_affiliation)
2460 		pci_free_consistent(instance->pdev,
2461 				    (MAX_LOGICAL_DRIVES + 1) *
2462 				    sizeof(struct MR_LD_VF_AFFILIATION),
2463 				    new_affiliation, new_affiliation_h);
2464 	megasas_return_cmd(instance, cmd);
2465 
2466 	return retval;
2467 }
2468 
2469 /* This function will get the current SR-IOV LD/VF affiliation */
2470 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2471 	int initial)
2472 {
2473 	int retval;
2474 
2475 	if (instance->PlasmaFW111)
2476 		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2477 	else
2478 		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2479 	return retval;
2480 }
2481 
2482 /* This function will tell FW to start the SR-IOV heartbeat */
2483 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2484 					 int initial)
2485 {
2486 	struct megasas_cmd *cmd;
2487 	struct megasas_dcmd_frame *dcmd;
2488 	int retval = 0;
2489 
2490 	cmd = megasas_get_cmd(instance);
2491 
2492 	if (!cmd) {
2493 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2494 		       "Failed to get cmd for scsi%d\n",
2495 		       instance->host->host_no);
2496 		return -ENOMEM;
2497 	}
2498 
2499 	dcmd = &cmd->frame->dcmd;
2500 
2501 	if (initial) {
2502 		instance->hb_host_mem =
2503 			pci_zalloc_consistent(instance->pdev,
2504 					      sizeof(struct MR_CTRL_HB_HOST_MEM),
2505 					      &instance->hb_host_mem_h);
2506 		if (!instance->hb_host_mem) {
2507 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2508 			       " memory for heartbeat host memory for scsi%d\n",
2509 			       instance->host->host_no);
2510 			retval = -ENOMEM;
2511 			goto out;
2512 		}
2513 	}
2514 
2515 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2516 
2517 	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2518 	dcmd->cmd = MFI_CMD_DCMD;
2519 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2520 	dcmd->sge_count = 1;
2521 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2522 	dcmd->timeout = 0;
2523 	dcmd->pad_0 = 0;
2524 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2525 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2526 
2527 	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2528 				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2529 
2530 	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2531 	       instance->host->host_no);
2532 
2533 	if ((instance->adapter_type != MFI_SERIES) &&
2534 	    !instance->mask_interrupts)
2535 		retval = megasas_issue_blocked_cmd(instance, cmd,
2536 			MEGASAS_ROUTINE_WAIT_TIME_VF);
2537 	else
2538 		retval = megasas_issue_polled(instance, cmd);
2539 
2540 	if (retval) {
2541 		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2542 			"_MEM_ALLOC DCMD %s for scsi%d\n",
2543 			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2544 			"timed out" : "failed", instance->host->host_no);
2545 		retval = 1;
2546 	}
2547 
2548 out:
2549 	megasas_return_cmd(instance, cmd);
2550 
2551 	return retval;
2552 }
2553 
2554 /* Handler for SR-IOV heartbeat */
2555 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2556 {
2557 	struct megasas_instance *instance =
2558 		from_timer(instance, t, sriov_heartbeat_timer);
2559 
2560 	if (instance->hb_host_mem->HB.fwCounter !=
2561 	    instance->hb_host_mem->HB.driverCounter) {
2562 		instance->hb_host_mem->HB.driverCounter =
2563 			instance->hb_host_mem->HB.fwCounter;
2564 		mod_timer(&instance->sriov_heartbeat_timer,
2565 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2566 	} else {
2567 		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2568 		       "completed for scsi%d\n", instance->host->host_no);
2569 		schedule_work(&instance->work_init);
2570 	}
2571 }
2572 
2573 /**
2574  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2575  * @instance:				Adapter soft state
2576  *
2577  * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2578  * complete all its outstanding commands. Returns error if one or more IOs
2579  * are pending after this time period. It also marks the controller dead.
2580  */
2581 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2582 {
2583 	int i, sl, outstanding;
2584 	u32 reset_index;
2585 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2586 	unsigned long flags;
2587 	struct list_head clist_local;
2588 	struct megasas_cmd *reset_cmd;
2589 	u32 fw_state;
2590 
2591 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2592 		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2593 		__func__, __LINE__);
2594 		return FAILED;
2595 	}
2596 
2597 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2598 
2599 		INIT_LIST_HEAD(&clist_local);
2600 		spin_lock_irqsave(&instance->hba_lock, flags);
2601 		list_splice_init(&instance->internal_reset_pending_q,
2602 				&clist_local);
2603 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2604 
2605 		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2606 		for (i = 0; i < wait_time; i++) {
2607 			msleep(1000);
2608 			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2609 				break;
2610 		}
2611 
2612 		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2613 			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2614 			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2615 			return FAILED;
2616 		}
2617 
2618 		reset_index = 0;
2619 		while (!list_empty(&clist_local)) {
2620 			reset_cmd = list_entry((&clist_local)->next,
2621 						struct megasas_cmd, list);
2622 			list_del_init(&reset_cmd->list);
2623 			if (reset_cmd->scmd) {
2624 				reset_cmd->scmd->result = DID_REQUEUE << 16;
2625 				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2626 					reset_index, reset_cmd,
2627 					reset_cmd->scmd->cmnd[0]);
2628 
2629 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2630 				megasas_return_cmd(instance, reset_cmd);
2631 			} else if (reset_cmd->sync_cmd) {
				dev_notice(&instance->pdev->dev, "%p sync cmd on internal "
2633 						"reset queue\n",
2634 						reset_cmd);
2635 
2636 				reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2637 				instance->instancet->fire_cmd(instance,
2638 						reset_cmd->frame_phys_addr,
2639 						0, instance->reg_set);
2640 			} else {
				dev_notice(&instance->pdev->dev, "%p unexpected "
					"cmd on internal reset list\n",
2643 					reset_cmd);
2644 			}
2645 			reset_index++;
2646 		}
2647 
2648 		return SUCCESS;
2649 	}
2650 
2651 	for (i = 0; i < resetwaittime; i++) {
2652 		outstanding = atomic_read(&instance->fw_outstanding);
2653 
2654 		if (!outstanding)
2655 			break;
2656 
2657 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
			       "commands to complete\n", i, outstanding);
2660 			/*
			 * Call the cmd completion routine. Cmds are
			 * completed directly without depending on the ISR.
2663 			 */
2664 			megasas_complete_cmd_dpc((unsigned long)instance);
2665 		}
2666 
2667 		msleep(1000);
2668 	}
2669 
2670 	i = 0;
2671 	outstanding = atomic_read(&instance->fw_outstanding);
2672 	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2673 
2674 	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2675 		goto no_outstanding;
2676 
2677 	if (instance->disableOnlineCtrlReset)
2678 		goto kill_hba_and_failed;
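	/*
	 * Retry OCR up to three times while the firmware is faulted or
	 * commands remain outstanding; if recovery still fails, kill the
	 * adapter.
	 */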
2679 	do {
2680 		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2681 			dev_info(&instance->pdev->dev,
				"%s:%d waiting_for_outstanding: before issuing OCR. FW state = 0x%x, outstanding 0x%x\n",
2683 				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2684 			if (i == 3)
2685 				goto kill_hba_and_failed;
2686 			megasas_do_ocr(instance);
2687 
2688 			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2689 				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2690 				__func__, __LINE__);
2691 				return FAILED;
2692 			}
			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issuing OCR.\n",
2694 				__func__, __LINE__);
2695 
2696 			for (sl = 0; sl < 10; sl++)
2697 				msleep(500);
2698 
2699 			outstanding = atomic_read(&instance->fw_outstanding);
2700 
2701 			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2702 			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2703 				goto no_outstanding;
2704 		}
2705 		i++;
2706 	} while (i <= 3);
2707 
2708 no_outstanding:
2709 
	dev_info(&instance->pdev->dev, "%s:%d no pending commands remain after reset handling.\n",
2711 		__func__, __LINE__);
2712 	return SUCCESS;
2713 
2714 kill_hba_and_failed:
2715 
2716 	/* Reset not supported, kill adapter */
2717 	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
		" disableOnlineCtrlReset %d fw_outstanding %d\n",
2719 		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2720 		atomic_read(&instance->fw_outstanding));
2721 	megasas_dump_pending_frames(instance);
2722 	megaraid_sas_kill_hba(instance);
2723 
2724 	return FAILED;
2725 }
2726 
2727 /**
2728  * megasas_generic_reset -	Generic reset routine
2729  * @scmd:			Mid-layer SCSI command
2730  *
2731  * This routine implements a generic reset handler for device, bus and host
2732  * reset requests. Device, bus and host specific reset handlers can use this
2733  * function after they do their specific tasks.
2734  */
2735 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2736 {
2737 	int ret_val;
2738 	struct megasas_instance *instance;
2739 
2740 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2741 
2742 	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2743 		 scmd->cmnd[0], scmd->retries);
2744 
2745 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2746 		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2747 		return FAILED;
2748 	}
2749 
2750 	ret_val = megasas_wait_for_outstanding(instance);
2751 	if (ret_val == SUCCESS)
2752 		dev_notice(&instance->pdev->dev, "reset successful\n");
2753 	else
2754 		dev_err(&instance->pdev->dev, "failed to do reset\n");
2755 
2756 	return ret_val;
2757 }
2758 
2759 /**
2760  * megasas_reset_timer - quiesce the adapter if required
2761  * @scmd:		scsi cmnd
2762  *
2763  * Sets the FW busy flag and reduces the host->can_queue if the
2764  * cmd has not been completed within the timeout period.
2765  */
2766 static enum
2767 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2768 {
2769 	struct megasas_instance *instance;
2770 	unsigned long flags;
2771 
2772 	if (time_after(jiffies, scmd->jiffies_at_alloc +
2773 				(scmd_timeout * 2) * HZ)) {
2774 		return BLK_EH_NOT_HANDLED;
2775 	}
2776 
2777 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2778 	if (!(instance->flag & MEGASAS_FW_BUSY)) {
		/* Mark FW busy and throttle IO */
2780 		spin_lock_irqsave(instance->host->host_lock, flags);
2781 
2782 		instance->host->can_queue = instance->throttlequeuedepth;
2783 		instance->last_time = jiffies;
2784 		instance->flag |= MEGASAS_FW_BUSY;
2785 
2786 		spin_unlock_irqrestore(instance->host->host_lock, flags);
2787 	}
2788 	return BLK_EH_RESET_TIMER;
2789 }
2790 
2791 /**
 * megasas_dump_frame -	Dump an MPT/MFI request frame
 * @mpi_request:		Pointer to the frame to dump
 * @sz:			Size of the frame in bytes
 */
2794 static inline void
2795 megasas_dump_frame(void *mpi_request, int sz)
2796 {
2797 	int i;
2798 	__le32 *mfp = (__le32 *)mpi_request;
2799 
2800 	printk(KERN_INFO "IO request frame:\n\t");
2801 	for (i = 0; i < sz / sizeof(__le32); i++) {
2802 		if (i && ((i % 8) == 0))
2803 			printk("\n\t");
2804 		printk("%08x ", le32_to_cpu(mfp[i]));
2805 	}
2806 	printk("\n");
2807 }
2808 
2809 /**
 * megasas_reset_bus_host -	Bus & host reset handler entry point
 * @scmd:			SCSI command pointer
 */
2812 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2813 {
2814 	int ret;
2815 	struct megasas_instance *instance;
2816 
2817 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2818 
2819 	scmd_printk(KERN_INFO, scmd,
2820 		"Controller reset is requested due to IO timeout\n"
2821 		"SCSI command pointer: (%p)\t SCSI host state: %d\t"
2822 		" SCSI host busy: %d\t FW outstanding: %d\n",
2823 		scmd, scmd->device->host->shost_state,
2824 		atomic_read((atomic_t *)&scmd->device->host->host_busy),
2825 		atomic_read(&instance->fw_outstanding));
2826 
2827 	/*
2828 	 * First wait for all commands to complete
2829 	 */
2830 	if (instance->adapter_type == MFI_SERIES) {
2831 		ret = megasas_generic_reset(scmd);
2832 	} else {
2833 		struct megasas_cmd_fusion *cmd;
2834 		cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2835 		if (cmd)
2836 			megasas_dump_frame(cmd->io_request,
2837 				MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2838 		ret = megasas_reset_fusion(scmd->device->host,
2839 				SCSIIO_TIMEOUT_OCR);
2840 	}
2841 
2842 	return ret;
2843 }
2844 
2845 /**
2846  * megasas_task_abort - Issues task abort request to firmware
2847  *			(supported only for fusion adapters)
2848  * @scmd:		SCSI command pointer
2849  */
2850 static int megasas_task_abort(struct scsi_cmnd *scmd)
2851 {
2852 	int ret;
2853 	struct megasas_instance *instance;
2854 
2855 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2856 
2857 	if (instance->adapter_type != MFI_SERIES)
2858 		ret = megasas_task_abort_fusion(scmd);
2859 	else {
2860 		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2861 		ret = FAILED;
2862 	}
2863 
2864 	return ret;
2865 }
2866 
2867 /**
2868  * megasas_reset_target:  Issues target reset request to firmware
2869  *                        (supported only for fusion adapters)
2870  * @scmd:                 SCSI command pointer
2871  */
2872 static int megasas_reset_target(struct scsi_cmnd *scmd)
2873 {
2874 	int ret;
2875 	struct megasas_instance *instance;
2876 
2877 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2878 
2879 	if (instance->adapter_type != MFI_SERIES)
2880 		ret = megasas_reset_target_fusion(scmd);
2881 	else {
2882 		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2883 		ret = FAILED;
2884 	}
2885 
2886 	return ret;
2887 }
2888 
2889 /**
2890  * megasas_bios_param - Returns disk geometry for a disk
2891  * @sdev:		device handle
2892  * @bdev:		block device
2893  * @capacity:		drive capacity
2894  * @geom:		geometry parameters
2895  */
2896 static int
2897 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2898 		 sector_t capacity, int geom[])
2899 {
2900 	int heads;
2901 	int sectors;
2902 	sector_t cylinders;
2903 	unsigned long tmp;
2904 
2905 	/* Default heads (64) & sectors (32) */
2906 	heads = 64;
2907 	sectors = 32;
2908 
2909 	tmp = heads * sectors;
2910 	cylinders = capacity;
2911 
2912 	sector_div(cylinders, tmp);
2913 
2914 	/*
	 * Handle extended translation size for logical drives > 1 GB
2916 	 */
2917 
2918 	if (capacity >= 0x200000) {
2919 		heads = 255;
2920 		sectors = 63;
2921 		tmp = heads*sectors;
2922 		cylinders = capacity;
2923 		sector_div(cylinders, tmp);
2924 	}
2925 
2926 	geom[0] = heads;
2927 	geom[1] = sectors;
2928 	geom[2] = cylinders;
2929 
2930 	return 0;
2931 }
2932 
2933 static void megasas_aen_polling(struct work_struct *work);
2934 
2935 /**
2936  * megasas_service_aen -	Processes an event notification
2937  * @instance:			Adapter soft state
2938  * @cmd:			AEN command completed by the ISR
2939  *
2940  * For AEN, driver sends a command down to FW that is held by the FW till an
2941  * event occurs. When an event of interest occurs, FW completes the command
2942  * that it was previously holding.
2943  *
 * This routine sends a SIGIO signal to processes that have registered with the
2945  * driver for AEN.
2946  */
2947 static void
2948 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2949 {
2950 	unsigned long flags;
2951 
2952 	/*
	 * Don't signal the app if this is just the completion of a
	 * previously registered AEN that was aborted
2954 	 */
2955 	if ((!cmd->abort_aen) && (instance->unload == 0)) {
2956 		spin_lock_irqsave(&poll_aen_lock, flags);
2957 		megasas_poll_wait_aen = 1;
2958 		spin_unlock_irqrestore(&poll_aen_lock, flags);
2959 		wake_up(&megasas_poll_wait);
2960 		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2961 	}
2962 	else
2963 		cmd->abort_aen = 0;
2964 
2965 	instance->aen_cmd = NULL;
2966 
2967 	megasas_return_cmd(instance, cmd);
2968 
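	/*
	 * Schedule the AEN polling worker so hotplug events are handled in
	 * process context.
	 */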
2969 	if ((instance->unload == 0) &&
2970 		((instance->issuepend_done == 1))) {
2971 		struct megasas_aen_event *ev;
2972 
2973 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2974 		if (!ev) {
2975 			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2976 		} else {
2977 			ev->instance = instance;
2978 			instance->ev = ev;
2979 			INIT_DELAYED_WORK(&ev->hotplug_work,
2980 					  megasas_aen_polling);
2981 			schedule_delayed_work(&ev->hotplug_work, 0);
2982 		}
2983 	}
2984 }
2985 
2986 static ssize_t
2987 megasas_fw_crash_buffer_store(struct device *cdev,
2988 	struct device_attribute *attr, const char *buf, size_t count)
2989 {
2990 	struct Scsi_Host *shost = class_to_shost(cdev);
2991 	struct megasas_instance *instance =
2992 		(struct megasas_instance *) shost->hostdata;
2993 	int val = 0;
2994 	unsigned long flags;
2995 
2996 	if (kstrtoint(buf, 0, &val) != 0)
2997 		return -EINVAL;
2998 
2999 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3000 	instance->fw_crash_buffer_offset = val;
3001 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3002 	return strlen(buf);
3003 }
3004 
3005 static ssize_t
3006 megasas_fw_crash_buffer_show(struct device *cdev,
3007 	struct device_attribute *attr, char *buf)
3008 {
3009 	struct Scsi_Host *shost = class_to_shost(cdev);
3010 	struct megasas_instance *instance =
3011 		(struct megasas_instance *) shost->hostdata;
3012 	u32 size;
3013 	unsigned long buff_addr;
3014 	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3015 	unsigned long src_addr;
3016 	unsigned long flags;
3017 	u32 buff_offset;
3018 
3019 	spin_lock_irqsave(&instance->crashdump_lock, flags);
3020 	buff_offset = instance->fw_crash_buffer_offset;
3021 	if (!instance->crash_dump_buf &&
3022 		!((instance->fw_crash_state == AVAILABLE) ||
3023 		(instance->fw_crash_state == COPYING))) {
3024 		dev_err(&instance->pdev->dev,
3025 			"Firmware crash dump is not available\n");
3026 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3027 		return -EINVAL;
3028 	}
3029 
3030 	buff_addr = (unsigned long) buf;
3031 
3032 	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3033 		dev_err(&instance->pdev->dev,
3034 			"Firmware crash dump offset is out of range\n");
3035 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3036 		return 0;
3037 	}
3038 
3039 	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3040 	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3041 
3042 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3043 		(buff_offset % dmachunk);
3044 	memcpy(buf, (void *)src_addr, size);
3045 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3046 
3047 	return size;
3048 }
3049 
3050 static ssize_t
3051 megasas_fw_crash_buffer_size_show(struct device *cdev,
3052 	struct device_attribute *attr, char *buf)
3053 {
3054 	struct Scsi_Host *shost = class_to_shost(cdev);
3055 	struct megasas_instance *instance =
3056 		(struct megasas_instance *) shost->hostdata;
3057 
3058 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3059 		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3060 }
3061 
3062 static ssize_t
3063 megasas_fw_crash_state_store(struct device *cdev,
3064 	struct device_attribute *attr, const char *buf, size_t count)
3065 {
3066 	struct Scsi_Host *shost = class_to_shost(cdev);
3067 	struct megasas_instance *instance =
3068 		(struct megasas_instance *) shost->hostdata;
3069 	int val = 0;
3070 	unsigned long flags;
3071 
3072 	if (kstrtoint(buf, 0, &val) != 0)
3073 		return -EINVAL;
3074 
3075 	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3076 		dev_err(&instance->pdev->dev, "application updates invalid "
3077 			"firmware crash state\n");
3078 		return -EINVAL;
3079 	}
3080 
3081 	instance->fw_crash_state = val;
3082 
3083 	if ((val == COPIED) || (val == COPY_ERROR)) {
3084 		spin_lock_irqsave(&instance->crashdump_lock, flags);
3085 		megasas_free_host_crash_buffer(instance);
3086 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3087 		if (val == COPY_ERROR)
3088 			dev_info(&instance->pdev->dev, "application failed to "
3089 				"copy Firmware crash dump\n");
3090 		else
3091 			dev_info(&instance->pdev->dev, "Firmware crash dump "
3092 				"copied successfully\n");
3093 	}
3094 	return strlen(buf);
3095 }
3096 
3097 static ssize_t
3098 megasas_fw_crash_state_show(struct device *cdev,
3099 	struct device_attribute *attr, char *buf)
3100 {
3101 	struct Scsi_Host *shost = class_to_shost(cdev);
3102 	struct megasas_instance *instance =
3103 		(struct megasas_instance *) shost->hostdata;
3104 
3105 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3106 }
3107 
3108 static ssize_t
3109 megasas_page_size_show(struct device *cdev,
3110 	struct device_attribute *attr, char *buf)
3111 {
3112 	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3113 }
3114 
3115 static ssize_t
3116 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3117 	char *buf)
3118 {
3119 	struct Scsi_Host *shost = class_to_shost(cdev);
3120 	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3121 
3122 	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3123 }
3124 
3125 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3126 	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3127 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3128 	megasas_fw_crash_buffer_size_show, NULL);
3129 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3130 	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3131 static DEVICE_ATTR(page_size, S_IRUGO,
3132 	megasas_page_size_show, NULL);
3133 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3134 	megasas_ldio_outstanding_show, NULL);
3135 
3136 struct device_attribute *megaraid_host_attrs[] = {
3137 	&dev_attr_fw_crash_buffer_size,
3138 	&dev_attr_fw_crash_buffer,
3139 	&dev_attr_fw_crash_state,
3140 	&dev_attr_page_size,
3141 	&dev_attr_ldio_outstanding,
3142 	NULL,
3143 };
3144 
3145 /*
3146  * Scsi host template for megaraid_sas driver
3147  */
3148 static struct scsi_host_template megasas_template = {
3149 
3150 	.module = THIS_MODULE,
3151 	.name = "Avago SAS based MegaRAID driver",
3152 	.proc_name = "megaraid_sas",
3153 	.slave_configure = megasas_slave_configure,
3154 	.slave_alloc = megasas_slave_alloc,
3155 	.slave_destroy = megasas_slave_destroy,
3156 	.queuecommand = megasas_queue_command,
3157 	.eh_target_reset_handler = megasas_reset_target,
3158 	.eh_abort_handler = megasas_task_abort,
3159 	.eh_host_reset_handler = megasas_reset_bus_host,
3160 	.eh_timed_out = megasas_reset_timer,
3161 	.shost_attrs = megaraid_host_attrs,
3162 	.bios_param = megasas_bios_param,
3163 	.use_clustering = ENABLE_CLUSTERING,
3164 	.change_queue_depth = scsi_change_queue_depth,
3165 	.no_write_same = 1,
3166 };
3167 
3168 /**
3169  * megasas_complete_int_cmd -	Completes an internal command
3170  * @instance:			Adapter soft state
3171  * @cmd:			Command to be completed
3172  *
3173  * The megasas_issue_blocked_cmd() function waits for a command to complete
3174  * after it issues a command. This function wakes up that waiting routine by
3175  * calling wake_up() on the wait queue.
3176  */
3177 static void
3178 megasas_complete_int_cmd(struct megasas_instance *instance,
3179 			 struct megasas_cmd *cmd)
3180 {
3181 	cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3182 	wake_up(&instance->int_cmd_wait_q);
3183 }
3184 
3185 /**
3186  * megasas_complete_abort -	Completes aborting a command
3187  * @instance:			Adapter soft state
3188  * @cmd:			Cmd that was issued to abort another cmd
3189  *
3190  * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3191  * after it issues an abort on a previously issued command. This function
3192  * wakes up all functions waiting on the same wait queue.
3193  */
3194 static void
3195 megasas_complete_abort(struct megasas_instance *instance,
3196 		       struct megasas_cmd *cmd)
3197 {
3198 	if (cmd->sync_cmd) {
3199 		cmd->sync_cmd = 0;
3200 		cmd->cmd_status_drv = 0;
3201 		wake_up(&instance->abort_cmd_wait_q);
3202 	}
3203 }
3204 
3205 /**
3206  * megasas_complete_cmd -	Completes a command
3207  * @instance:			Adapter soft state
3208  * @cmd:			Command to be completed
3209  * @alt_status:			If non-zero, use this value as status to
3210  *				SCSI mid-layer instead of the value returned
3211  *				by the FW. This should be used if caller wants
3212  *				an alternate status (as in the case of aborted
3213  *				commands)
3214  */
3215 void
3216 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3217 		     u8 alt_status)
3218 {
3219 	int exception = 0;
3220 	struct megasas_header *hdr = &cmd->frame->hdr;
3221 	unsigned long flags;
3222 	struct fusion_context *fusion = instance->ctrl_context;
3223 	u32 opcode, status;
3224 
3225 	/* flag for the retry reset */
3226 	cmd->retry_for_fw_reset = 0;
3227 
3228 	if (cmd->scmd)
3229 		cmd->scmd->SCp.ptr = NULL;
3230 
3231 	switch (hdr->cmd) {
3232 	case MFI_CMD_INVALID:
		/*
		 * Some older 1068 controller FW may keep a pending
		 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
		 * when booting the kdump kernel. Ignore this command to
		 * prevent a kernel panic on shutdown of the kdump kernel.
		 */
3237 		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3238 		       "completed\n");
3239 		dev_warn(&instance->pdev->dev, "If you have a controller "
3240 		       "other than PERC5, please upgrade your firmware\n");
3241 		break;
3242 	case MFI_CMD_PD_SCSI_IO:
3243 	case MFI_CMD_LD_SCSI_IO:
3244 
3245 		/*
3246 		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3247 		 * issued either through an IO path or an IOCTL path. If it
3248 		 * was via IOCTL, we will send it to internal completion.
3249 		 */
3250 		if (cmd->sync_cmd) {
3251 			cmd->sync_cmd = 0;
3252 			megasas_complete_int_cmd(instance, cmd);
3253 			break;
3254 		}
3255 
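		/* Fall through - non-IOCTL completions are handled like LD read/write */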
3256 	case MFI_CMD_LD_READ:
3257 	case MFI_CMD_LD_WRITE:
3258 
3259 		if (alt_status) {
3260 			cmd->scmd->result = alt_status << 16;
3261 			exception = 1;
3262 		}
3263 
3264 		if (exception) {
3265 
3266 			atomic_dec(&instance->fw_outstanding);
3267 
3268 			scsi_dma_unmap(cmd->scmd);
3269 			cmd->scmd->scsi_done(cmd->scmd);
3270 			megasas_return_cmd(instance, cmd);
3271 
3272 			break;
3273 		}
3274 
3275 		switch (hdr->cmd_status) {
3276 
3277 		case MFI_STAT_OK:
3278 			cmd->scmd->result = DID_OK << 16;
3279 			break;
3280 
3281 		case MFI_STAT_SCSI_IO_FAILED:
3282 		case MFI_STAT_LD_INIT_IN_PROGRESS:
3283 			cmd->scmd->result =
3284 			    (DID_ERROR << 16) | hdr->scsi_status;
3285 			break;
3286 
3287 		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3288 
3289 			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3290 
3291 			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3292 				memset(cmd->scmd->sense_buffer, 0,
3293 				       SCSI_SENSE_BUFFERSIZE);
3294 				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3295 				       hdr->sense_len);
3296 
3297 				cmd->scmd->result |= DRIVER_SENSE << 24;
3298 			}
3299 
3300 			break;
3301 
3302 		case MFI_STAT_LD_OFFLINE:
3303 		case MFI_STAT_DEVICE_NOT_FOUND:
3304 			cmd->scmd->result = DID_BAD_TARGET << 16;
3305 			break;
3306 
3307 		default:
3308 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3309 			       hdr->cmd_status);
3310 			cmd->scmd->result = DID_ERROR << 16;
3311 			break;
3312 		}
3313 
3314 		atomic_dec(&instance->fw_outstanding);
3315 
3316 		scsi_dma_unmap(cmd->scmd);
3317 		cmd->scmd->scsi_done(cmd->scmd);
3318 		megasas_return_cmd(instance, cmd);
3319 
3320 		break;
3321 
3322 	case MFI_CMD_SMP:
3323 	case MFI_CMD_STP:
3324 		megasas_complete_int_cmd(instance, cmd);
3325 		break;
3326 
3327 	case MFI_CMD_DCMD:
3328 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3329 		/* Check for LD map update */
3330 		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3331 			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3332 			fusion->fast_path_io = 0;
3333 			spin_lock_irqsave(instance->host->host_lock, flags);
3334 			instance->map_update_cmd = NULL;
3335 			if (cmd->frame->hdr.cmd_status != 0) {
3336 				if (cmd->frame->hdr.cmd_status !=
3337 				    MFI_STAT_NOT_FOUND)
					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3339 					       cmd->frame->hdr.cmd_status);
3340 				else {
3341 					megasas_return_cmd(instance, cmd);
3342 					spin_unlock_irqrestore(
3343 						instance->host->host_lock,
3344 						flags);
3345 					break;
3346 				}
3347 			} else
3348 				instance->map_id++;
3349 			megasas_return_cmd(instance, cmd);
3350 
3351 			/*
			 * fast_path_io was cleared above; MR_ValidateMapInfo()
			 * decides whether it can be re-enabled. Until then all
			 * IOs go out as LD IO.
3355 			 */
3356 			if (MR_ValidateMapInfo(instance))
3357 				fusion->fast_path_io = 1;
3358 			else
3359 				fusion->fast_path_io = 0;
3360 			megasas_sync_map_info(instance);
3361 			spin_unlock_irqrestore(instance->host->host_lock,
3362 					       flags);
3363 			break;
3364 		}
3365 		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3366 		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3367 			spin_lock_irqsave(&poll_aen_lock, flags);
3368 			megasas_poll_wait_aen = 0;
3369 			spin_unlock_irqrestore(&poll_aen_lock, flags);
3370 		}
3371 
3372 		/* FW has an updated PD sequence */
3373 		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3374 			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3375 
3376 			spin_lock_irqsave(instance->host->host_lock, flags);
3377 			status = cmd->frame->hdr.cmd_status;
3378 			instance->jbod_seq_cmd = NULL;
3379 			megasas_return_cmd(instance, cmd);
3380 
3381 			if (status == MFI_STAT_OK) {
3382 				instance->pd_seq_map_id++;
3383 				/* Re-register a pd sync seq num cmd */
3384 				if (megasas_sync_pd_seq_num(instance, true))
3385 					instance->use_seqnum_jbod_fp = false;
3386 			} else
3387 				instance->use_seqnum_jbod_fp = false;
3388 
3389 			spin_unlock_irqrestore(instance->host->host_lock, flags);
3390 			break;
3391 		}
3392 
3393 		/*
		 * See if we got an event notification
3395 		 */
3396 		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3397 			megasas_service_aen(instance, cmd);
3398 		else
3399 			megasas_complete_int_cmd(instance, cmd);
3400 
3401 		break;
3402 
3403 	case MFI_CMD_ABORT:
3404 		/*
3405 		 * Cmd issued to abort another cmd returned
3406 		 */
3407 		megasas_complete_abort(instance, cmd);
3408 		break;
3409 
3410 	default:
3411 		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3412 		       hdr->cmd);
3413 		megasas_complete_int_cmd(instance, cmd);
3414 		break;
3415 	}
3416 }
3417 
3418 /**
3419  * megasas_issue_pending_cmds_again -	issue all pending cmds
3420  *					in FW again because of the fw reset
3421  * @instance:				Adapter soft state
3422  */
3423 static inline void
3424 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3425 {
3426 	struct megasas_cmd *cmd;
3427 	struct list_head clist_local;
3428 	union megasas_evt_class_locale class_locale;
3429 	unsigned long flags;
3430 	u32 seq_num;
3431 
3432 	INIT_LIST_HEAD(&clist_local);
3433 	spin_lock_irqsave(&instance->hba_lock, flags);
3434 	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3435 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3436 
3437 	while (!list_empty(&clist_local)) {
3438 		cmd = list_entry((&clist_local)->next,
3439 					struct megasas_cmd, list);
3440 		list_del_init(&cmd->list);
3441 
3442 		if (cmd->sync_cmd || cmd->scmd) {
			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
				"detected to be pending during HBA reset\n",
3445 					cmd, cmd->scmd, cmd->sync_cmd);
3446 
3447 			cmd->retry_for_fw_reset++;
3448 
3449 			if (cmd->retry_for_fw_reset == 3) {
				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
					"was tried multiple times during reset. "
					"Shutting down the HBA\n",
3453 					cmd, cmd->scmd, cmd->sync_cmd);
3454 				instance->instancet->disable_intr(instance);
3455 				atomic_set(&instance->fw_reset_no_pci_access, 1);
3456 				megaraid_sas_kill_hba(instance);
3457 				return;
3458 			}
3459 		}
3460 
3461 		if (cmd->sync_cmd == 1) {
3462 			if (cmd->scmd) {
				dev_notice(&instance->pdev->dev, "unexpected "
3464 					"cmd attached to internal command!\n");
3465 			}
3466 			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3467 						"on the internal reset queue,"
3468 						"issue it again.\n", cmd);
3469 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3470 			instance->instancet->fire_cmd(instance,
3471 							cmd->frame_phys_addr,
3472 							0, instance->reg_set);
3473 		} else if (cmd->scmd) {
3474 			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3475 			"detected on the internal queue, issue again.\n",
3476 			cmd, cmd->scmd->cmnd[0]);
3477 
3478 			atomic_inc(&instance->fw_outstanding);
3479 			instance->instancet->fire_cmd(instance,
3480 					cmd->frame_phys_addr,
3481 					cmd->frame_count-1, instance->reg_set);
3482 		} else {
3483 			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3484 				"internal reset defer list while re-issue!!\n",
3485 				cmd);
3486 		}
3487 	}
3488 
3489 	if (instance->aen_cmd) {
3490 		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3491 		megasas_return_cmd(instance, instance->aen_cmd);
3492 
3493 		instance->aen_cmd = NULL;
3494 	}
3495 
3496 	/*
3497 	 * Initiate AEN (Asynchronous Event Notification)
3498 	 */
3499 	seq_num = instance->last_seq_num;
3500 	class_locale.members.reserved = 0;
3501 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3502 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3503 
3504 	megasas_register_aen(instance, seq_num, class_locale.word);
3505 }
3506 
3507 /**
 * megasas_internal_reset_defer_cmds -	Move pending cmds to the deferred queue
 * @instance:				Adapter soft state
 *
 * Commands outstanding at internal reset time are moved to a pending
 * (deferred) queue. This queue is flushed after successful completion
 * of the internal reset sequence. If the internal reset does not
 * complete in time, the kernel reset handler flushes these commands.
 */
3516 static void
3517 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3518 {
3519 	struct megasas_cmd *cmd;
3520 	int i;
3521 	u16 max_cmd = instance->max_fw_cmds;
3522 	u32 defer_index;
3523 	unsigned long flags;
3524 
3525 	defer_index = 0;
3526 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3527 	for (i = 0; i < max_cmd; i++) {
3528 		cmd = instance->cmd_list[i];
3529 		if (cmd->sync_cmd == 1 || cmd->scmd) {
3530 			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3531 					"on the defer queue as internal\n",
3532 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3533 
3534 			if (!list_empty(&cmd->list)) {
3535 				dev_notice(&instance->pdev->dev, "ERROR while"
3536 					" moving this cmd:%p, %d %p, it was"
3537 					"discovered on some list?\n",
3538 					cmd, cmd->sync_cmd, cmd->scmd);
3539 
3540 				list_del_init(&cmd->list);
3541 			}
3542 			defer_index++;
3543 			list_add_tail(&cmd->list,
3544 				&instance->internal_reset_pending_q);
3545 		}
3546 	}
3547 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3548 }
3549 
3550 
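/**
 * process_fw_state_change_wq -	Second half of the internal adapter reset
 * @work:			Work item embedded in the adapter soft state
 *
 * Scheduled by the ISR path once a firmware fault is detected: resets the
 * adapter, waits for it to become ready, re-initializes MFI and finally
 * re-issues the commands that were deferred at fault time.
 */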
3551 static void
3552 process_fw_state_change_wq(struct work_struct *work)
3553 {
3554 	struct megasas_instance *instance =
3555 		container_of(work, struct megasas_instance, work_init);
3556 	u32 wait;
3557 	unsigned long flags;
3558 
	if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
				atomic_read(&instance->adprecovery));
		return;
	}
3564 
3565 	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3566 		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3567 					"state, restarting it...\n");
3568 
3569 		instance->instancet->disable_intr(instance);
3570 		atomic_set(&instance->fw_outstanding, 0);
3571 
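		/*
		 * megasas_isr() bails out while fw_reset_no_pci_access is
		 * set, so the reset sequence below owns the registers.
		 */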
3572 		atomic_set(&instance->fw_reset_no_pci_access, 1);
3573 		instance->instancet->adp_reset(instance, instance->reg_set);
3574 		atomic_set(&instance->fw_reset_no_pci_access, 0);
3575 
3576 		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3577 					"initiating next stage...\n");
3578 
3579 		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3580 					"state 2 starting...\n");
3581 
		/* Wait for about 30 seconds before starting the second init */
		for (wait = 0; wait < 30; wait++)
			msleep(1000);
3586 
3587 		if (megasas_transition_to_ready(instance, 1)) {
3588 			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3589 
3590 			atomic_set(&instance->fw_reset_no_pci_access, 1);
3591 			megaraid_sas_kill_hba(instance);
			return;
3593 		}
3594 
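		/*
		 * xscale-family adapters had their consumer index parked at
		 * the reset-in-progress signature by the ISR path; bring it
		 * back in line with the producer.  Other MFI adapters simply
		 * restart with an empty reply queue.
		 */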
3595 		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3596 			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3597 			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3598 			) {
3599 			*instance->consumer = *instance->producer;
3600 		} else {
3601 			*instance->consumer = 0;
3602 			*instance->producer = 0;
3603 		}
3604 
3605 		megasas_issue_init_mfi(instance);
3606 
3607 		spin_lock_irqsave(&instance->hba_lock, flags);
3608 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3609 		spin_unlock_irqrestore(&instance->hba_lock, flags);
3610 		instance->instancet->enable_intr(instance);
3611 
3612 		megasas_issue_pending_cmds_again(instance);
3613 		instance->issuepend_done = 1;
3614 	}
3615 }
3616 
3617 /**
3618  * megasas_deplete_reply_queue -	Processes all completed commands
3619  * @instance:				Adapter soft state
3620  * @alt_status:				Alternate status to be returned to
3621  *					SCSI mid-layer instead of the status
3622  *					returned by the FW
3623  * Note: this must be called with hba lock held
3624  */
3625 static int
3626 megasas_deplete_reply_queue(struct megasas_instance *instance,
3627 					u8 alt_status)
3628 {
3629 	u32 mfiStatus;
3630 	u32 fw_state;
3631 
3632 	if ((mfiStatus = instance->instancet->check_reset(instance,
3633 					instance->reg_set)) == 1) {
3634 		return IRQ_HANDLED;
3635 	}
3636 
3637 	if ((mfiStatus = instance->instancet->clear_intr(
3638 						instance->reg_set)
3639 						) == 0) {
3640 		/* Hardware may not set outbound_intr_status in MSI-X mode */
3641 		if (!instance->msix_vectors)
3642 			return IRQ_NONE;
3643 	}
3644 
3645 	instance->mfiStatus = mfiStatus;
3646 
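	/*
	 * If the FW signalled a state change and is in FAULT with OCR
	 * allowed, defer the outstanding commands and kick the internal
	 * reset state machine from the work queue.
	 */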
3647 	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3648 		fw_state = instance->instancet->read_fw_status_reg(
3649 				instance->reg_set) & MFI_STATE_MASK;
3650 
3651 		if (fw_state != MFI_STATE_FAULT) {
3652 			dev_notice(&instance->pdev->dev, "fw state:%x\n",
3653 						fw_state);
3654 		}
3655 
3656 		if ((fw_state == MFI_STATE_FAULT) &&
3657 				(instance->disableOnlineCtrlReset == 0)) {
3658 			dev_notice(&instance->pdev->dev, "wait adp restart\n");
3659 
3660 			if ((instance->pdev->device ==
3661 					PCI_DEVICE_ID_LSI_SAS1064R) ||
3662 				(instance->pdev->device ==
3663 					PCI_DEVICE_ID_DELL_PERC5) ||
3664 				(instance->pdev->device ==
3665 					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3666 
3667 				*instance->consumer =
3668 					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3669 			}
3670 
3671 
3672 			instance->instancet->disable_intr(instance);
3673 			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3674 			instance->issuepend_done = 0;
3675 
3676 			atomic_set(&instance->fw_outstanding, 0);
3677 			megasas_internal_reset_defer_cmds(instance);
3678 
3679 			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3680 					fw_state, atomic_read(&instance->adprecovery));
3681 
3682 			schedule_work(&instance->work_init);
3683 			return IRQ_HANDLED;
3684 
3685 		} else {
3686 			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3687 				fw_state, instance->disableOnlineCtrlReset);
3688 		}
3689 	}
3690 
3691 	tasklet_schedule(&instance->isr_tasklet);
3692 	return IRQ_HANDLED;
3693 }
3694 /**
 * megasas_isr - isr entry point
 * @irq:	IRQ number
 * @devp:	Per-vector interrupt context (struct megasas_irq_context)
 */
3697 static irqreturn_t megasas_isr(int irq, void *devp)
3698 {
3699 	struct megasas_irq_context *irq_context = devp;
3700 	struct megasas_instance *instance = irq_context->instance;
3701 	unsigned long flags;
3702 	irqreturn_t rc;
3703 
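	/*
	 * While an adapter reset is in flight the registers must not be
	 * touched; claim the interrupt and let the reset path finish.
	 */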
3704 	if (atomic_read(&instance->fw_reset_no_pci_access))
3705 		return IRQ_HANDLED;
3706 
3707 	spin_lock_irqsave(&instance->hba_lock, flags);
3708 	rc = megasas_deplete_reply_queue(instance, DID_OK);
3709 	spin_unlock_irqrestore(&instance->hba_lock, flags);
3710 
3711 	return rc;
3712 }
3713 
3714 /**
3715  * megasas_transition_to_ready -	Move the FW to READY state
 * @instance:				Adapter soft state
 * @ocr:				Set when called as part of an adapter
 *					reset; a faulted FW is then waited out
 *					instead of failing immediately
 *
 * During initialization, the FW can be in any one of several possible
 * states. If the FW is in the operational or waiting-for-handshake state,
 * the driver must take steps to bring it to the ready state. Otherwise,
 * it simply waits for the FW to reach the ready state.
3722  */
3723 int
3724 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3725 {
3726 	int i;
3727 	u8 max_wait;
3728 	u32 fw_state;
3729 	u32 cur_state;
3730 	u32 abs_state, curr_abs_state;
3731 
3732 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3733 	fw_state = abs_state & MFI_STATE_MASK;
3734 
3735 	if (fw_state != MFI_STATE_READY)
3736 		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3737 		       " state\n");
3738 
3739 	while (fw_state != MFI_STATE_READY) {
3740 
3741 		switch (fw_state) {
3742 
3743 		case MFI_STATE_FAULT:
3744 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3745 			if (ocr) {
3746 				max_wait = MEGASAS_RESET_WAIT_TIME;
3747 				cur_state = MFI_STATE_FAULT;
3748 				break;
3749 			} else
3750 				return -ENODEV;
3751 
3752 		case MFI_STATE_WAIT_HANDSHAKE:
3753 			/*
3754 			 * Set the CLR bit in inbound doorbell
3755 			 */
3756 			if ((instance->pdev->device ==
3757 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3758 				(instance->pdev->device ==
3759 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3760 				(instance->adapter_type != MFI_SERIES))
3761 				writel(
3762 				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3763 				  &instance->reg_set->doorbell);
3764 			else
3765 				writel(
3766 				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3767 					&instance->reg_set->inbound_doorbell);
3768 
3769 			max_wait = MEGASAS_RESET_WAIT_TIME;
3770 			cur_state = MFI_STATE_WAIT_HANDSHAKE;
3771 			break;
3772 
3773 		case MFI_STATE_BOOT_MESSAGE_PENDING:
3774 			if ((instance->pdev->device ==
3775 			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3776 				(instance->pdev->device ==
3777 				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3778 				(instance->adapter_type != MFI_SERIES))
3779 				writel(MFI_INIT_HOTPLUG,
3780 				       &instance->reg_set->doorbell);
3781 			else
3782 				writel(MFI_INIT_HOTPLUG,
3783 					&instance->reg_set->inbound_doorbell);
3784 
3785 			max_wait = MEGASAS_RESET_WAIT_TIME;
3786 			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3787 			break;
3788 
3789 		case MFI_STATE_OPERATIONAL:
3790 			/*
3791 			 * Bring it to READY state; assuming max wait 10 secs
3792 			 */
3793 			instance->instancet->disable_intr(instance);
3794 			if ((instance->pdev->device ==
3795 				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3796 				(instance->pdev->device ==
3797 				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
3798 				(instance->adapter_type != MFI_SERIES)) {
3799 				writel(MFI_RESET_FLAGS,
3800 					&instance->reg_set->doorbell);
3801 
3802 				if (instance->adapter_type != MFI_SERIES) {
3803 					for (i = 0; i < (10 * 1000); i += 20) {
3804 						if (readl(
3805 							    &instance->
3806 							    reg_set->
3807 							    doorbell) & 1)
3808 							msleep(20);
3809 						else
3810 							break;
3811 					}
3812 				}
3813 			} else
3814 				writel(MFI_RESET_FLAGS,
3815 					&instance->reg_set->inbound_doorbell);
3816 
3817 			max_wait = MEGASAS_RESET_WAIT_TIME;
3818 			cur_state = MFI_STATE_OPERATIONAL;
3819 			break;
3820 
3821 		case MFI_STATE_UNDEFINED:
3822 			/*
			 * This state should not last for more than max_wait secs
3824 			 */
3825 			max_wait = MEGASAS_RESET_WAIT_TIME;
3826 			cur_state = MFI_STATE_UNDEFINED;
3827 			break;
3828 
3829 		case MFI_STATE_BB_INIT:
3830 			max_wait = MEGASAS_RESET_WAIT_TIME;
3831 			cur_state = MFI_STATE_BB_INIT;
3832 			break;
3833 
3834 		case MFI_STATE_FW_INIT:
3835 			max_wait = MEGASAS_RESET_WAIT_TIME;
3836 			cur_state = MFI_STATE_FW_INIT;
3837 			break;
3838 
3839 		case MFI_STATE_FW_INIT_2:
3840 			max_wait = MEGASAS_RESET_WAIT_TIME;
3841 			cur_state = MFI_STATE_FW_INIT_2;
3842 			break;
3843 
3844 		case MFI_STATE_DEVICE_SCAN:
3845 			max_wait = MEGASAS_RESET_WAIT_TIME;
3846 			cur_state = MFI_STATE_DEVICE_SCAN;
3847 			break;
3848 
3849 		case MFI_STATE_FLUSH_CACHE:
3850 			max_wait = MEGASAS_RESET_WAIT_TIME;
3851 			cur_state = MFI_STATE_FLUSH_CACHE;
3852 			break;
3853 
3854 		default:
3855 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3856 			       fw_state);
3857 			return -ENODEV;
3858 		}
3859 
3860 		/*
3861 		 * The cur_state should not last for more than max_wait secs
3862 		 */
3863 		for (i = 0; i < (max_wait * 1000); i++) {
3864 			curr_abs_state = instance->instancet->
3865 				read_fw_status_reg(instance->reg_set);
3866 
			if (abs_state == curr_abs_state)
				msleep(1);
			else
				break;
3871 		}
3872 
3873 		/*
3874 		 * Return error if fw_state hasn't changed after max_wait
3875 		 */
3876 		if (curr_abs_state == abs_state) {
3877 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3878 			       "in %d secs\n", fw_state, max_wait);
3879 			return -ENODEV;
3880 		}
3881 
3882 		abs_state = curr_abs_state;
3883 		fw_state = curr_abs_state & MFI_STATE_MASK;
3884 	}
3885 	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3886 
3887 	return 0;
3888 }
3889 
3890 /**
3891  * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
3892  * @instance:				Adapter soft state
3893  */
3894 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3895 {
3896 	int i;
3897 	u16 max_cmd = instance->max_mfi_cmds;
3898 	struct megasas_cmd *cmd;
3899 
3900 	if (!instance->frame_dma_pool)
3901 		return;
3902 
3903 	/*
3904 	 * Return all frames to pool
3905 	 */
3906 	for (i = 0; i < max_cmd; i++) {
3907 
3908 		cmd = instance->cmd_list[i];
3909 
3910 		if (cmd->frame)
3911 			dma_pool_free(instance->frame_dma_pool, cmd->frame,
3912 				      cmd->frame_phys_addr);
3913 
3914 		if (cmd->sense)
3915 			dma_pool_free(instance->sense_dma_pool, cmd->sense,
3916 				      cmd->sense_phys_addr);
3917 	}
3918 
3919 	/*
3920 	 * Now destroy the pool itself
3921 	 */
3922 	dma_pool_destroy(instance->frame_dma_pool);
3923 	dma_pool_destroy(instance->sense_dma_pool);
3924 
3925 	instance->frame_dma_pool = NULL;
3926 	instance->sense_dma_pool = NULL;
3927 }
3928 
3929 /**
3930  * megasas_create_frame_pool -	Creates DMA pool for cmd frames
3931  * @instance:			Adapter soft state
3932  *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling the MFI frame and the SG list that immediately follows the frame.
 * This function creates those DMA memory buffers for each command packet
 * using the DMA pool facility.
3937  */
3938 static int megasas_create_frame_pool(struct megasas_instance *instance)
3939 {
3940 	int i;
3941 	u16 max_cmd;
3942 	u32 sge_sz;
3943 	u32 frame_count;
3944 	struct megasas_cmd *cmd;
3945 
3946 	max_cmd = instance->max_mfi_cmds;
3947 
3948 	/*
3949 	 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3950 	 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3951 	 */
3952 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3953 	    sizeof(struct megasas_sge32);
3954 
3955 	if (instance->flag_ieee)
3956 		sge_sz = sizeof(struct megasas_sge_skinny);
3957 
3958 	/*
3959 	 * For MFI controllers.
3960 	 * max_num_sge = 60
3961 	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
3962 	 * Total 960 byte (15 MFI frame of 64 byte)
3963 	 *
3964 	 * Fusion adapter require only 3 extra frame.
3965 	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3966 	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
3967 	 * Total 192 byte (3 MFI frame of 64 byte)
3968 	 */
3969 	frame_count = (instance->adapter_type == MFI_SERIES) ?
3970 			(15 + 1) : (3 + 1);
3971 	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
3972 	/*
3973 	 * Use DMA pool facility provided by PCI layer
3974 	 */
3975 	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
3976 					&instance->pdev->dev,
3977 					instance->mfi_frame_size, 256, 0);
3978 
3979 	if (!instance->frame_dma_pool) {
3980 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3981 		return -ENOMEM;
3982 	}
3983 
3984 	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
3985 						   &instance->pdev->dev, 128,
3986 						   4, 0);
3987 
3988 	if (!instance->sense_dma_pool) {
3989 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3990 
3991 		dma_pool_destroy(instance->frame_dma_pool);
3992 		instance->frame_dma_pool = NULL;
3993 
3994 		return -ENOMEM;
3995 	}
3996 
3997 	/*
3998 	 * Allocate and attach a frame to each of the commands in cmd_list.
	 * By using cmd->index as the context instead of &cmd, we can always
	 * use a 32-bit context regardless of the architecture.
4001 	 */
4002 	for (i = 0; i < max_cmd; i++) {
4003 
4004 		cmd = instance->cmd_list[i];
4005 
4006 		cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
4007 					    GFP_KERNEL, &cmd->frame_phys_addr);
4008 
4009 		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4010 					    GFP_KERNEL, &cmd->sense_phys_addr);
4011 
4012 		/*
4013 		 * megasas_teardown_frame_pool() takes care of freeing
4014 		 * whatever has been allocated
4015 		 */
4016 		if (!cmd->frame || !cmd->sense) {
4017 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4018 			megasas_teardown_frame_pool(instance);
4019 			return -ENOMEM;
4020 		}
4021 
4022 		memset(cmd->frame, 0, instance->mfi_frame_size);
4023 		cmd->frame->io.context = cpu_to_le32(cmd->index);
4024 		cmd->frame->io.pad_0 = 0;
4025 		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4026 			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4027 	}
4028 
4029 	return 0;
4030 }
4031 
4032 /**
4033  * megasas_free_cmds -	Free all the cmds in the free cmd pool
4034  * @instance:		Adapter soft state
4035  */
4036 void megasas_free_cmds(struct megasas_instance *instance)
4037 {
4038 	int i;
4039 
4040 	/* First free the MFI frame pool */
4041 	megasas_teardown_frame_pool(instance);
4042 
4043 	/* Free all the commands in the cmd_list */
4044 	for (i = 0; i < instance->max_mfi_cmds; i++)
4045 
4046 		kfree(instance->cmd_list[i]);
4047 
4048 	/* Free the cmd_list buffer itself */
4049 	kfree(instance->cmd_list);
4050 	instance->cmd_list = NULL;
4051 
4052 	INIT_LIST_HEAD(&instance->cmd_pool);
4053 }
4054 
4055 /**
4056  * megasas_alloc_cmds -	Allocates the command packets
4057  * @instance:		Adapter soft state
4058  *
 * Each command that is issued to the FW, whether an IO command from the OS
 * or an internal command like an IOCTL, is wrapped in a local data structure
 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
 * actually issued to the FW.
 *
 * Each frame has a 32-bit field called context (tag). This context is used
 * to get back the megasas_cmd from the frame when a frame gets completed in
 * the ISR. Typically the address of the megasas_cmd itself would be used as
 * the context. But we wanted to keep the differences between 32 and 64 bit
 * systems to the minimum. We always use 32-bit integers for the context. In
 * this driver, the 32-bit values are indices into the cmd_list array.
4070  * This array is used only to look up the megasas_cmd given the context. The
4071  * free commands themselves are maintained in a linked list called cmd_pool.
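 * Given a completed context value ctx, the owning command is therefore
 * simply instance->cmd_list[ctx].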
4072  */
4073 int megasas_alloc_cmds(struct megasas_instance *instance)
4074 {
4075 	int i;
4076 	int j;
4077 	u16 max_cmd;
4078 	struct megasas_cmd *cmd;
4079 
4080 	max_cmd = instance->max_mfi_cmds;
4081 
4082 	/*
4083 	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4084 	 * Allocate the dynamic array first and then allocate individual
4085 	 * commands.
4086 	 */
	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *),
				     GFP_KERNEL);

	if (!instance->cmd_list) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
		return -ENOMEM;
	}
4095 
4096 	for (i = 0; i < max_cmd; i++) {
4097 		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4098 						GFP_KERNEL);
4099 
4100 		if (!instance->cmd_list[i]) {
4101 
4102 			for (j = 0; j < i; j++)
4103 				kfree(instance->cmd_list[j]);
4104 
4105 			kfree(instance->cmd_list);
4106 			instance->cmd_list = NULL;
4107 
4108 			return -ENOMEM;
4109 		}
4110 	}
4111 
4112 	for (i = 0; i < max_cmd; i++) {
4113 		cmd = instance->cmd_list[i];
4114 		memset(cmd, 0, sizeof(struct megasas_cmd));
4115 		cmd->index = i;
4116 		cmd->scmd = NULL;
4117 		cmd->instance = instance;
4118 
4119 		list_add_tail(&cmd->list, &instance->cmd_pool);
4120 	}
4121 
4122 	/*
4123 	 * Create a frame pool and assign one frame to each cmd
4124 	 */
	if (megasas_create_frame_pool(instance)) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
		megasas_free_cmds(instance);
		return -ENOMEM;
	}
4129 
4130 	return 0;
4131 }
4132 
4133 /*
4134  * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4135  * @instance:				Adapter soft state
4136  *
 * Returns INITIATE_OCR only for Fusion adapters, and only when driver
 * load/unload is not in progress and the FW is not already under OCR.
 * MFI adapters get KILL_ADAPTER; otherwise IGNORE_TIMEOUT is returned.
4139  */
4140 inline int
4141 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4142 
4143 	if (instance->adapter_type == MFI_SERIES)
4144 		return KILL_ADAPTER;
4145 	else if (instance->unload ||
4146 			test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4147 		return IGNORE_TIMEOUT;
4148 	else
4149 		return INITIATE_OCR;
4150 }
4151 
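/*
 * megasas_get_pd_info -	Fetch interface type of one physical device
 * @instance:			Adapter soft state
 * @sdev:			SCSI device the query is issued for
 *
 * Issues MR_DCMD_PD_GET_INFO and caches the reported interface type in
 * the device's private data.
 */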
4152 static void
4153 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4154 {
4155 	int ret;
4156 	struct megasas_cmd *cmd;
4157 	struct megasas_dcmd_frame *dcmd;
4158 
4159 	struct MR_PRIV_DEVICE *mr_device_priv_data;
4160 	u16 device_id = 0;
4161 
4162 	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4163 	cmd = megasas_get_cmd(instance);
4164 
4165 	if (!cmd) {
4166 		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4167 		return;
4168 	}
4169 
4170 	dcmd = &cmd->frame->dcmd;
4171 
4172 	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4173 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4174 
4175 	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4176 	dcmd->cmd = MFI_CMD_DCMD;
4177 	dcmd->cmd_status = 0xFF;
4178 	dcmd->sge_count = 1;
4179 	dcmd->flags = MFI_FRAME_DIR_READ;
4180 	dcmd->timeout = 0;
4181 	dcmd->pad_0 = 0;
4182 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4183 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4184 
4185 	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4186 				 sizeof(struct MR_PD_INFO));
4187 
4188 	if ((instance->adapter_type != MFI_SERIES) &&
4189 	    !instance->mask_interrupts)
4190 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4191 	else
4192 		ret = megasas_issue_polled(instance, cmd);
4193 
4194 	switch (ret) {
4195 	case DCMD_SUCCESS:
4196 		mr_device_priv_data = sdev->hostdata;
4197 		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4198 		mr_device_priv_data->interface_type =
4199 				instance->pd_info->state.ddf.pdType.intf;
4200 		break;
4201 
4202 	case DCMD_TIMEOUT:
4203 
4204 		switch (dcmd_timeout_ocr_possible(instance)) {
4205 		case INITIATE_OCR:
4206 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4207 			megasas_reset_fusion(instance->host,
4208 				MFI_IO_TIMEOUT_OCR);
4209 			break;
4210 		case KILL_ADAPTER:
4211 			megaraid_sas_kill_hba(instance);
4212 			break;
4213 		case IGNORE_TIMEOUT:
4214 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4215 				__func__, __LINE__);
4216 			break;
4217 		}
4218 
4219 		break;
4220 	}
4221 
4222 	if (ret != DCMD_TIMEOUT)
4223 		megasas_return_cmd(instance, cmd);
4224 
4225 	return;
4226 }
4227 /*
 * megasas_get_pd_list -	Returns FW's pd_list structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD
 * list structure.  This information is mainly used to find out the
 * physical drives (system PDs) exposed to the host by the FW.
4235  */
4236 static int
4237 megasas_get_pd_list(struct megasas_instance *instance)
4238 {
4239 	int ret = 0, pd_index = 0;
4240 	struct megasas_cmd *cmd;
4241 	struct megasas_dcmd_frame *dcmd;
4242 	struct MR_PD_LIST *ci;
4243 	struct MR_PD_ADDRESS *pd_addr;
4244 	dma_addr_t ci_h = 0;
4245 
4246 	if (instance->pd_list_not_supported) {
4247 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4248 		"not supported by firmware\n");
4249 		return ret;
4250 	}
4251 
4252 	ci = instance->pd_list_buf;
4253 	ci_h = instance->pd_list_buf_h;
4254 
4255 	cmd = megasas_get_cmd(instance);
4256 
4257 	if (!cmd) {
4258 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4259 		return -ENOMEM;
4260 	}
4261 
4262 	dcmd = &cmd->frame->dcmd;
4263 
4264 	memset(ci, 0, sizeof(*ci));
4265 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4266 
4267 	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4268 	dcmd->mbox.b[1] = 0;
4269 	dcmd->cmd = MFI_CMD_DCMD;
4270 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4271 	dcmd->sge_count = 1;
4272 	dcmd->flags = MFI_FRAME_DIR_READ;
4273 	dcmd->timeout = 0;
4274 	dcmd->pad_0 = 0;
4275 	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4276 	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4277 
4278 	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4279 				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4280 
4281 	if ((instance->adapter_type != MFI_SERIES) &&
4282 	    !instance->mask_interrupts)
4283 		ret = megasas_issue_blocked_cmd(instance, cmd,
4284 			MFI_IO_TIMEOUT_SECS);
4285 	else
4286 		ret = megasas_issue_polled(instance, cmd);
4287 
4288 	switch (ret) {
4289 	case DCMD_FAILED:
4290 		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4291 			"failed/not supported by firmware\n");
4292 
4293 		if (instance->adapter_type != MFI_SERIES)
4294 			megaraid_sas_kill_hba(instance);
4295 		else
4296 			instance->pd_list_not_supported = 1;
4297 		break;
4298 	case DCMD_TIMEOUT:
4299 
4300 		switch (dcmd_timeout_ocr_possible(instance)) {
4301 		case INITIATE_OCR:
4302 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4303 			/*
4304 			 * DCMD failed from AEN path.
4305 			 * AEN path already hold reset_mutex to avoid PCI access
4306 			 * while OCR is in progress.
4307 			 */
4308 			mutex_unlock(&instance->reset_mutex);
4309 			megasas_reset_fusion(instance->host,
4310 						MFI_IO_TIMEOUT_OCR);
4311 			mutex_lock(&instance->reset_mutex);
4312 			break;
4313 		case KILL_ADAPTER:
4314 			megaraid_sas_kill_hba(instance);
4315 			break;
4316 		case IGNORE_TIMEOUT:
4317 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4318 				__func__, __LINE__);
4319 			break;
4320 		}
4321 
4322 		break;
4323 
4324 	case DCMD_SUCCESS:
4325 		pd_addr = ci->addr;
4326 
4327 		if ((le32_to_cpu(ci->count) >
4328 			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4329 			break;
4330 
4331 		memset(instance->local_pd_list, 0,
4332 				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4333 
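		/*
		 * Index the local table by FW device ID, mark every reported
		 * device as a system PD and then publish the snapshot.
		 */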
4334 		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4335 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4336 					le16_to_cpu(pd_addr->deviceId);
4337 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4338 					pd_addr->scsiDevType;
4339 			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4340 					MR_PD_STATE_SYSTEM;
4341 			pd_addr++;
4342 		}
4343 
4344 		memcpy(instance->pd_list, instance->local_pd_list,
4345 			sizeof(instance->pd_list));
4346 		break;
4347 
4348 	}
4349 
4350 	if (ret != DCMD_TIMEOUT)
4351 		megasas_return_cmd(instance, cmd);
4352 
4353 	return ret;
4354 }
4355 
4356 /*
 * megasas_get_ld_list -	Returns FW's ld_list structure
 * @instance:			Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
 * list.  This information is mainly used to find out the target IDs of
 * the LDs exported by the FW.
4364  */
4365 static int
4366 megasas_get_ld_list(struct megasas_instance *instance)
4367 {
4368 	int ret = 0, ld_index = 0, ids = 0;
4369 	struct megasas_cmd *cmd;
4370 	struct megasas_dcmd_frame *dcmd;
4371 	struct MR_LD_LIST *ci;
4372 	dma_addr_t ci_h = 0;
4373 	u32 ld_count;
4374 
4375 	ci = instance->ld_list_buf;
4376 	ci_h = instance->ld_list_buf_h;
4377 
4378 	cmd = megasas_get_cmd(instance);
4379 
4380 	if (!cmd) {
4381 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4382 		return -ENOMEM;
4383 	}
4384 
4385 	dcmd = &cmd->frame->dcmd;
4386 
4387 	memset(ci, 0, sizeof(*ci));
4388 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4389 
4390 	if (instance->supportmax256vd)
4391 		dcmd->mbox.b[0] = 1;
4392 	dcmd->cmd = MFI_CMD_DCMD;
4393 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4394 	dcmd->sge_count = 1;
4395 	dcmd->flags = MFI_FRAME_DIR_READ;
4396 	dcmd->timeout = 0;
4397 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4398 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4399 	dcmd->pad_0  = 0;
4400 
4401 	megasas_set_dma_settings(instance, dcmd, ci_h,
4402 				 sizeof(struct MR_LD_LIST));
4403 
4404 	if ((instance->adapter_type != MFI_SERIES) &&
4405 	    !instance->mask_interrupts)
4406 		ret = megasas_issue_blocked_cmd(instance, cmd,
4407 			MFI_IO_TIMEOUT_SECS);
4408 	else
4409 		ret = megasas_issue_polled(instance, cmd);
4410 
4411 	ld_count = le32_to_cpu(ci->ldCount);
4412 
4413 	switch (ret) {
4414 	case DCMD_FAILED:
4415 		megaraid_sas_kill_hba(instance);
4416 		break;
4417 	case DCMD_TIMEOUT:
4418 
4419 		switch (dcmd_timeout_ocr_possible(instance)) {
4420 		case INITIATE_OCR:
4421 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4422 			/*
4423 			 * DCMD failed from AEN path.
4424 			 * AEN path already hold reset_mutex to avoid PCI access
4425 			 * while OCR is in progress.
4426 			 */
4427 			mutex_unlock(&instance->reset_mutex);
4428 			megasas_reset_fusion(instance->host,
4429 						MFI_IO_TIMEOUT_OCR);
4430 			mutex_lock(&instance->reset_mutex);
4431 			break;
4432 		case KILL_ADAPTER:
4433 			megaraid_sas_kill_hba(instance);
4434 			break;
4435 		case IGNORE_TIMEOUT:
4436 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4437 				__func__, __LINE__);
4438 			break;
4439 		}
4440 
4441 		break;
4442 
4443 	case DCMD_SUCCESS:
4444 		if (ld_count > instance->fw_supported_vd_count)
4445 			break;
4446 
4447 		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4448 
4449 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4450 			if (ci->ldList[ld_index].state != 0) {
4451 				ids = ci->ldList[ld_index].ref.targetId;
4452 				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4453 			}
4454 		}
4455 
4456 		break;
4457 	}
4458 
4459 	if (ret != DCMD_TIMEOUT)
4460 		megasas_return_cmd(instance, cmd);
4461 
4462 	return ret;
4463 }
4464 
4465 /**
4466  * megasas_ld_list_query -	Returns FW's ld_list structure
 * @instance:				Adapter soft state
 * @query_type:				Query type sent in the DCMD mailbox
 *
 * Issues an internal command (DCMD) to get the FW's logical drive target
 * ID list.  Falls back to megasas_get_ld_list() if the FW does not
 * support this DCMD.
4473  */
4474 static int
4475 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4476 {
4477 	int ret = 0, ld_index = 0, ids = 0;
4478 	struct megasas_cmd *cmd;
4479 	struct megasas_dcmd_frame *dcmd;
4480 	struct MR_LD_TARGETID_LIST *ci;
4481 	dma_addr_t ci_h = 0;
4482 	u32 tgtid_count;
4483 
4484 	ci = instance->ld_targetid_list_buf;
4485 	ci_h = instance->ld_targetid_list_buf_h;
4486 
4487 	cmd = megasas_get_cmd(instance);
4488 
4489 	if (!cmd) {
4490 		dev_warn(&instance->pdev->dev,
4491 		         "megasas_ld_list_query: Failed to get cmd\n");
4492 		return -ENOMEM;
4493 	}
4494 
4495 	dcmd = &cmd->frame->dcmd;
4496 
4497 	memset(ci, 0, sizeof(*ci));
4498 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4499 
4500 	dcmd->mbox.b[0] = query_type;
4501 	if (instance->supportmax256vd)
4502 		dcmd->mbox.b[2] = 1;
4503 
4504 	dcmd->cmd = MFI_CMD_DCMD;
4505 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4506 	dcmd->sge_count = 1;
4507 	dcmd->flags = MFI_FRAME_DIR_READ;
4508 	dcmd->timeout = 0;
4509 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4510 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4511 	dcmd->pad_0  = 0;
4512 
4513 	megasas_set_dma_settings(instance, dcmd, ci_h,
4514 				 sizeof(struct MR_LD_TARGETID_LIST));
4515 
4516 	if ((instance->adapter_type != MFI_SERIES) &&
4517 	    !instance->mask_interrupts)
4518 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4519 	else
4520 		ret = megasas_issue_polled(instance, cmd);
4521 
4522 	switch (ret) {
4523 	case DCMD_FAILED:
4524 		dev_info(&instance->pdev->dev,
4525 			"DCMD not supported by firmware - %s %d\n",
4526 				__func__, __LINE__);
4527 		ret = megasas_get_ld_list(instance);
4528 		break;
4529 	case DCMD_TIMEOUT:
4530 		switch (dcmd_timeout_ocr_possible(instance)) {
4531 		case INITIATE_OCR:
4532 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4533 			/*
4534 			 * DCMD failed from AEN path.
4535 			 * AEN path already hold reset_mutex to avoid PCI access
4536 			 * while OCR is in progress.
4537 			 */
4538 			mutex_unlock(&instance->reset_mutex);
4539 			megasas_reset_fusion(instance->host,
4540 						MFI_IO_TIMEOUT_OCR);
4541 			mutex_lock(&instance->reset_mutex);
4542 			break;
4543 		case KILL_ADAPTER:
4544 			megaraid_sas_kill_hba(instance);
4545 			break;
4546 		case IGNORE_TIMEOUT:
4547 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4548 				__func__, __LINE__);
4549 			break;
4550 		}
4551 
4552 		break;
4553 	case DCMD_SUCCESS:
4554 		tgtid_count = le32_to_cpu(ci->count);
4555 
4556 		if ((tgtid_count > (instance->fw_supported_vd_count)))
4557 			break;
4558 
4559 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4560 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4561 			ids = ci->targetId[ld_index];
4562 			instance->ld_ids[ids] = ci->targetId[ld_index];
4563 		}
4564 
4565 		break;
4566 	}
4567 
4568 	if (ret != DCMD_TIMEOUT)
4569 		megasas_return_cmd(instance, cmd);
4570 
4571 	return ret;
4572 }
4573 
4574 /*
 * megasas_update_ext_vd_details -	Update details w.r.t. Extended VDs
 * @instance:				Controller's soft state
 */
4578 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4579 {
4580 	struct fusion_context *fusion;
4581 	u32 ventura_map_sz = 0;
4582 
4583 	fusion = instance->ctrl_context;
	/* MFI based controllers have no extended VD details; nothing to do */
4585 	if (!fusion)
4586 		return;
4587 
4588 	instance->supportmax256vd =
4589 		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4590 	/* Below is additional check to address future FW enhancement */
4591 	if (instance->ctrl_info_buf->max_lds > 64)
4592 		instance->supportmax256vd = 1;
4593 
4594 	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4595 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4596 	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4597 					* MEGASAS_MAX_DEV_PER_CHANNEL;
4598 	if (instance->supportmax256vd) {
4599 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4600 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4601 	} else {
4602 		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4603 		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4604 	}
4605 
4606 	dev_info(&instance->pdev->dev,
4607 		"firmware type\t: %s\n",
4608 		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4609 		"Legacy(64 VD) firmware");
4610 
4611 	if (instance->max_raid_mapsize) {
4612 		ventura_map_sz = instance->max_raid_mapsize *
4613 						MR_MIN_MAP_SIZE; /* 64k */
4614 		fusion->current_map_sz = ventura_map_sz;
4615 		fusion->max_map_sz = ventura_map_sz;
4616 	} else {
4617 		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
4618 					(sizeof(struct MR_LD_SPAN_MAP) *
4619 					(instance->fw_supported_vd_count - 1));
4620 		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
4621 
4622 		fusion->max_map_sz =
4623 			max(fusion->old_map_sz, fusion->new_map_sz);
4624 
4625 		if (instance->supportmax256vd)
4626 			fusion->current_map_sz = fusion->new_map_sz;
4627 		else
4628 			fusion->current_map_sz = fusion->old_map_sz;
4629 	}
4630 	/* irrespective of FW raid maps, driver raid map is constant */
4631 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4632 }
4633 
4634 /**
 * megasas_get_ctrl_info -	Returns FW's controller structure
4636  * @instance:				Adapter soft state
4637  *
4638  * Issues an internal command (DCMD) to get the FW's controller structure.
4639  * This information is mainly used to find out the maximum IO transfer per
4640  * command supported by the FW.
4641  */
4642 int
4643 megasas_get_ctrl_info(struct megasas_instance *instance)
4644 {
4645 	int ret = 0;
4646 	struct megasas_cmd *cmd;
4647 	struct megasas_dcmd_frame *dcmd;
4648 	struct megasas_ctrl_info *ci;
4649 	dma_addr_t ci_h = 0;
4650 
4651 	ci = instance->ctrl_info_buf;
4652 	ci_h = instance->ctrl_info_buf_h;
4653 
4654 	cmd = megasas_get_cmd(instance);
4655 
4656 	if (!cmd) {
4657 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4658 		return -ENOMEM;
4659 	}
4660 
4661 	dcmd = &cmd->frame->dcmd;
4662 
4663 	memset(ci, 0, sizeof(*ci));
4664 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4665 
4666 	dcmd->cmd = MFI_CMD_DCMD;
4667 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4668 	dcmd->sge_count = 1;
4669 	dcmd->flags = MFI_FRAME_DIR_READ;
4670 	dcmd->timeout = 0;
4671 	dcmd->pad_0 = 0;
4672 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4673 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4674 	dcmd->mbox.b[0] = 1;
4675 
4676 	megasas_set_dma_settings(instance, dcmd, ci_h,
4677 				 sizeof(struct megasas_ctrl_info));
4678 
4679 	if ((instance->adapter_type != MFI_SERIES) &&
4680 	    !instance->mask_interrupts)
4681 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4682 	else
4683 		ret = megasas_issue_polled(instance, cmd);
4684 
4685 	switch (ret) {
4686 	case DCMD_SUCCESS:
4687 		/* Save required controller information in
4688 		 * CPU endianness format.
4689 		 */
4690 		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4691 		le32_to_cpus((u32 *)&ci->adapterOperations2);
4692 		le32_to_cpus((u32 *)&ci->adapterOperations3);
4693 		le16_to_cpus((u16 *)&ci->adapter_operations4);
4694 
		/* Update the latest Ext VD info.
		 * From the init path, store current firmware details.
		 * From the OCR path, detect any firmware property changes,
		 * e.g. in case of a firmware upgrade without a system reboot.
4699 		 */
4700 		megasas_update_ext_vd_details(instance);
4701 		instance->use_seqnum_jbod_fp =
4702 			ci->adapterOperations3.useSeqNumJbodFP;
4703 		instance->support_morethan256jbod =
4704 			ci->adapter_operations4.support_pd_map_target_id;
4705 
		/* Check whether controller is iMR or MR */
4707 		instance->is_imr = (ci->memory_size ? 0 : 1);
4708 		dev_info(&instance->pdev->dev,
4709 			"controller type\t: %s(%dMB)\n",
4710 			instance->is_imr ? "iMR" : "MR",
4711 			le16_to_cpu(ci->memory_size));
4712 
4713 		instance->disableOnlineCtrlReset =
4714 			ci->properties.OnOffProperties.disableOnlineCtrlReset;
4715 		instance->secure_jbod_support =
4716 			ci->adapterOperations3.supportSecurityonJBOD;
4717 		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4718 			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4719 		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4720 			instance->secure_jbod_support ? "Yes" : "No");
4721 		break;
4722 
4723 	case DCMD_TIMEOUT:
4724 		switch (dcmd_timeout_ocr_possible(instance)) {
4725 		case INITIATE_OCR:
4726 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4727 			megasas_reset_fusion(instance->host,
4728 				MFI_IO_TIMEOUT_OCR);
4729 			break;
4730 		case KILL_ADAPTER:
4731 			megaraid_sas_kill_hba(instance);
4732 			break;
4733 		case IGNORE_TIMEOUT:
4734 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4735 				__func__, __LINE__);
4736 			break;
4737 		}
4738 	case DCMD_FAILED:
4739 		megaraid_sas_kill_hba(instance);
4740 		break;
4741 
4742 	}
4743 
4744 	megasas_return_cmd(instance, cmd);
4745 
4746 
4747 	return ret;
4748 }
4749 
4750 /*
4751  * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
4752  *					to firmware
4753  *
4754  * @instance:				Adapter soft state
4755  * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
4756 					MR_CRASH_BUF_TURN_OFF = 0
4757 					MR_CRASH_BUF_TURN_ON = 1
4758  * @return 0 on success non-zero on failure.
4759  * Issues an internal command (DCMD) to set parameters for crash dump feature.
4760  * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4761  * that driver supports crash dump feature. This DCMD will be sent only if
4762  * crash dump feature is supported by the FW.
4763  *
4764  */
4765 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4766 	u8 crash_buf_state)
4767 {
4768 	int ret = 0;
4769 	struct megasas_cmd *cmd;
4770 	struct megasas_dcmd_frame *dcmd;
4771 
4772 	cmd = megasas_get_cmd(instance);
4773 
4774 	if (!cmd) {
4775 		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4776 		return -ENOMEM;
4777 	}
4778 
4779 
4780 	dcmd = &cmd->frame->dcmd;
4781 
4782 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4783 	dcmd->mbox.b[0] = crash_buf_state;
4784 	dcmd->cmd = MFI_CMD_DCMD;
4785 	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4786 	dcmd->sge_count = 1;
4787 	dcmd->flags = MFI_FRAME_DIR_NONE;
4788 	dcmd->timeout = 0;
4789 	dcmd->pad_0 = 0;
4790 	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4791 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4792 
4793 	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
4794 				 CRASH_DMA_BUF_SIZE);
4795 
4796 	if ((instance->adapter_type != MFI_SERIES) &&
4797 	    !instance->mask_interrupts)
4798 		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4799 	else
4800 		ret = megasas_issue_polled(instance, cmd);
4801 
4802 	if (ret == DCMD_TIMEOUT) {
4803 		switch (dcmd_timeout_ocr_possible(instance)) {
4804 		case INITIATE_OCR:
4805 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4806 			megasas_reset_fusion(instance->host,
4807 					MFI_IO_TIMEOUT_OCR);
4808 			break;
4809 		case KILL_ADAPTER:
4810 			megaraid_sas_kill_hba(instance);
4811 			break;
4812 		case IGNORE_TIMEOUT:
4813 			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4814 				__func__, __LINE__);
4815 			break;
4816 		}
4817 	} else
4818 		megasas_return_cmd(instance, cmd);
4819 
4820 	return ret;
4821 }
4822 
4823 /**
4824  * megasas_issue_init_mfi -	Initializes the FW
4825  * @instance:		Adapter soft state
4826  *
4827  * Issues the INIT MFI cmd
4828  */
4829 static int
4830 megasas_issue_init_mfi(struct megasas_instance *instance)
4831 {
4832 	__le32 context;
4833 	struct megasas_cmd *cmd;
4834 	struct megasas_init_frame *init_frame;
4835 	struct megasas_init_queue_info *initq_info;
4836 	dma_addr_t init_frame_h;
4837 	dma_addr_t initq_info_h;
4838 
4839 	/*
	 * Prepare an init frame. Note the init frame points to a queue info
	 * structure. Each frame has an SGL allocated after the first 64 bytes.
	 * For this frame - since we don't need any SGL - we use the SGL's
	 * space as the queue info structure.
4844 	 *
4845 	 * We will not get a NULL command below. We just created the pool.
4846 	 */
4847 	cmd = megasas_get_cmd(instance);
4848 
4849 	init_frame = (struct megasas_init_frame *)cmd->frame;
4850 	initq_info = (struct megasas_init_queue_info *)
4851 		((unsigned long)init_frame + 64);
4852 
4853 	init_frame_h = cmd->frame_phys_addr;
4854 	initq_info_h = init_frame_h + 64;
4855 
4856 	context = init_frame->context;
4857 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4858 	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4859 	init_frame->context = context;
4860 
4861 	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4862 	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4863 
4864 	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4865 	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4866 
4867 	init_frame->cmd = MFI_CMD_INIT;
4868 	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4869 	init_frame->queue_info_new_phys_addr_lo =
4870 		cpu_to_le32(lower_32_bits(initq_info_h));
4871 	init_frame->queue_info_new_phys_addr_hi =
4872 		cpu_to_le32(upper_32_bits(initq_info_h));
4873 
4874 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4875 
4876 	/*
4877 	 * disable the intr before firing the init frame to FW
4878 	 */
4879 	instance->instancet->disable_intr(instance);
4880 
4881 	/*
4882 	 * Issue the init frame in polled mode
4883 	 */
4884 
4885 	if (megasas_issue_polled(instance, cmd)) {
4886 		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4887 		megasas_return_cmd(instance, cmd);
4888 		goto fail_fw_init;
4889 	}
4890 
4891 	megasas_return_cmd(instance, cmd);
4892 
4893 	return 0;
4894 
4895 fail_fw_init:
4896 	return -EINVAL;
4897 }
4898 
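/**
 * megasas_init_adapter_mfi -	Initializes an MFI-series adapter
 * @instance:			Adapter soft state
 *
 * Sizes the command pool from the FW status register, allocates the
 * command and reply queues and issues the INIT MFI frame.
 */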
4899 static u32
4900 megasas_init_adapter_mfi(struct megasas_instance *instance)
4901 {
4902 	struct megasas_register_set __iomem *reg_set;
4903 	u32 context_sz;
4904 	u32 reply_q_sz;
4905 
4906 	reg_set = instance->reg_set;
4907 
4908 	/*
4909 	 * Get various operational parameters from status register
4910 	 */
4911 	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4912 	/*
4913 	 * Reduce the max supported cmds by 1. This is to ensure that the
4914 	 * reply_q_sz (1 more than the max cmd that driver may send)
4915 	 * does not exceed max cmds that the FW can support
4916 	 */
4917 	instance->max_fw_cmds = instance->max_fw_cmds-1;
4918 	instance->max_mfi_cmds = instance->max_fw_cmds;
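	/* Bits [23:16] of the FW status register advertise the max SGE count */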
4919 	instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4920 					0x10;
4921 	/*
4922 	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4923 	 * are reserved for IOCTL + driver's internal DCMDs.
4924 	 */
4925 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4926 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4927 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4928 			MEGASAS_SKINNY_INT_CMDS);
4929 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4930 	} else {
4931 		instance->max_scsi_cmds = (instance->max_fw_cmds -
4932 			MEGASAS_INT_CMDS);
4933 		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4934 	}
4935 
4936 	instance->cur_can_queue = instance->max_scsi_cmds;
4937 	/*
4938 	 * Create a pool of commands
4939 	 */
4940 	if (megasas_alloc_cmds(instance))
4941 		goto fail_alloc_cmds;
4942 
4943 	/*
4944 	 * Allocate memory for reply queue. Length of reply queue should
4945 	 * be _one_ more than the maximum commands handled by the firmware.
4946 	 *
	 * Note: When FW completes commands, it places the corresponding context
4948 	 * values in this circular reply queue. This circular queue is a fairly
4949 	 * typical producer-consumer queue. FW is the producer (of completed
4950 	 * commands) and the driver is the consumer.
4951 	 */
4952 	context_sz = sizeof(u32);
4953 	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4954 
4955 	instance->reply_queue = pci_alloc_consistent(instance->pdev,
4956 						     reply_q_sz,
4957 						     &instance->reply_queue_h);
4958 
4959 	if (!instance->reply_queue) {
4960 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4961 		goto fail_reply_queue;
4962 	}
4963 
4964 	if (megasas_issue_init_mfi(instance))
4965 		goto fail_fw_init;
4966 
4967 	if (megasas_get_ctrl_info(instance)) {
4968 		dev_err(&instance->pdev->dev, "(%d): Could get controller info "
4969 			"Fail from %s %d\n", instance->unique_id,
4970 			__func__, __LINE__);
4971 		goto fail_fw_init;
4972 	}
4973 
	instance->fw_support_ieee =
		(instance->instancet->read_fw_status_reg(reg_set) &
		0x04000000);
4978 
4979 	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4980 			instance->fw_support_ieee);
4981 
4982 	if (instance->fw_support_ieee)
4983 		instance->flag_ieee = 1;
4984 
4985 	return 0;
4986 
4987 fail_fw_init:
4988 
4989 	pci_free_consistent(instance->pdev, reply_q_sz,
4990 			    instance->reply_queue, instance->reply_queue_h);
4991 fail_reply_queue:
4992 	megasas_free_cmds(instance);
4993 
4994 fail_alloc_cmds:
4995 	return 1;
4996 }
4997 
4998 /*
4999  * megasas_setup_irqs_ioapic -		register legacy interrupts.
5000  * @instance:				Adapter soft state
5001  *
5002  * Do not enable interrupt, only setup ISRs.
5003  *
5004  * Return 0 on success.
5005  */
5006 static int
5007 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5008 {
5009 	struct pci_dev *pdev;
5010 
5011 	pdev = instance->pdev;
5012 	instance->irq_context[0].instance = instance;
5013 	instance->irq_context[0].MSIxIndex = 0;
5014 	if (request_irq(pci_irq_vector(pdev, 0),
5015 			instance->instancet->service_isr, IRQF_SHARED,
5016 			"megasas", &instance->irq_context[0])) {
5017 		dev_err(&instance->pdev->dev,
5018 				"Failed to register IRQ from %s %d\n",
5019 				__func__, __LINE__);
5020 		return -1;
5021 	}
5022 	return 0;
5023 }
5024 
5025 /**
5026  * megasas_setup_irqs_msix -		register MSI-x interrupts.
5027  * @instance:				Adapter soft state
5028  * @is_probe:				Driver probe check
5029  *
 * Do not enable interrupts, only set up the ISRs.
5031  *
5032  * Return 0 on success.
5033  */
5034 static int
5035 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5036 {
5037 	int i, j;
5038 	struct pci_dev *pdev;
5039 
5040 	pdev = instance->pdev;
5041 
5042 	/* Try MSI-x */
5043 	for (i = 0; i < instance->msix_vectors; i++) {
5044 		instance->irq_context[i].instance = instance;
5045 		instance->irq_context[i].MSIxIndex = i;
5046 		if (request_irq(pci_irq_vector(pdev, i),
5047 			instance->instancet->service_isr, 0, "megasas",
5048 			&instance->irq_context[i])) {
5049 			dev_err(&instance->pdev->dev,
5050 				"Failed to register IRQ for vector %d.\n", i);
5051 			for (j = 0; j < i; j++)
5052 				free_irq(pci_irq_vector(pdev, j),
5053 					 &instance->irq_context[j]);
5054 			/* Retry irq register for IO_APIC*/
5055 			instance->msix_vectors = 0;
5056 			if (is_probe) {
5057 				pci_free_irq_vectors(instance->pdev);
5058 				return megasas_setup_irqs_ioapic(instance);
5059 			} else {
5060 				return -1;
5061 			}
5062 		}
5063 	}
5064 	return 0;
5065 }
5066 
5067 /*
5068  * megasas_destroy_irqs-		unregister interrupts.
5069  * @instance:				Adapter soft state
5070  * return:				void
5071  */
5072 static void
megasas_destroy_irqs(struct megasas_instance *instance)
{
5075 	int i;
5076 
5077 	if (instance->msix_vectors)
5078 		for (i = 0; i < instance->msix_vectors; i++) {
5079 			free_irq(pci_irq_vector(instance->pdev, i),
5080 				 &instance->irq_context[i]);
5081 		}
5082 	else
5083 		free_irq(pci_irq_vector(instance->pdev, 0),
5084 			 &instance->irq_context[0]);
5085 }
5086 
5087 /**
5088  * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5089  * @instance:				Adapter soft state
 *
 * Disables the JBOD sequence-number fast path on any failure.
5093  */
5094 void
5095 megasas_setup_jbod_map(struct megasas_instance *instance)
5096 {
5097 	int i;
5098 	struct fusion_context *fusion = instance->ctrl_context;
5099 	u32 pd_seq_map_sz;
5100 
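	/*
	 * One MR_PD_CFG_SEQ entry per physical device; the first entry is
	 * already part of MR_PD_CFG_SEQ_NUM_SYNC, hence the "- 1".
	 */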
5101 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5102 		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5103 
5104 	if (reset_devices || !fusion ||
5105 		!instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
5106 		dev_info(&instance->pdev->dev,
5107 			"Jbod map is not supported %s %d\n",
5108 			__func__, __LINE__);
5109 		instance->use_seqnum_jbod_fp = false;
5110 		return;
5111 	}
5112 
5113 	if (fusion->pd_seq_sync[0])
5114 		goto skip_alloc;
5115 
5116 	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5117 		fusion->pd_seq_sync[i] = dma_alloc_coherent
5118 			(&instance->pdev->dev, pd_seq_map_sz,
5119 			&fusion->pd_seq_phys[i], GFP_KERNEL);
5120 		if (!fusion->pd_seq_sync[i]) {
5121 			dev_err(&instance->pdev->dev,
5122 				"Failed to allocate memory from %s %d\n",
5123 				__func__, __LINE__);
5124 			if (i == 1) {
5125 				dma_free_coherent(&instance->pdev->dev,
5126 					pd_seq_map_sz, fusion->pd_seq_sync[0],
5127 					fusion->pd_seq_phys[0]);
5128 				fusion->pd_seq_sync[0] = NULL;
5129 			}
5130 			instance->use_seqnum_jbod_fp = false;
5131 			return;
5132 		}
5133 	}
5134 
5135 skip_alloc:
5136 	if (!megasas_sync_pd_seq_num(instance, false) &&
5137 		!megasas_sync_pd_seq_num(instance, true))
5138 		instance->use_seqnum_jbod_fp = true;
5139 	else
5140 		instance->use_seqnum_jbod_fp = false;
5141 }
5142 
5143 /**
5144  * megasas_init_fw -	Initializes the FW
5145  * @instance:		Adapter soft state
5146  *
5147  * This is the main function for initializing firmware
5148  */
5149 
5150 static int megasas_init_fw(struct megasas_instance *instance)
5151 {
5152 	u32 max_sectors_1;
5153 	u32 max_sectors_2, tmp_sectors, msix_enable;
5154 	u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5155 	resource_size_t base_addr;
5156 	struct megasas_register_set __iomem *reg_set;
5157 	struct megasas_ctrl_info *ctrl_info = NULL;
5158 	unsigned long bar_list;
5159 	int i, j, loop, fw_msix_count = 0;
5160 	struct IOV_111 *iovPtr;
5161 	struct fusion_context *fusion;
5162 
5163 	fusion = instance->ctrl_context;
5164 
5165 	/* Find first memory bar */
5166 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5167 	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5168 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5169 					 "megasas: LSI")) {
5170 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5171 		return -EBUSY;
5172 	}
5173 
5174 	base_addr = pci_resource_start(instance->pdev, instance->bar);
5175 	instance->reg_set = ioremap_nocache(base_addr, 8192);
5176 
5177 	if (!instance->reg_set) {
5178 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5179 		goto fail_ioremap;
5180 	}
5181 
5182 	reg_set = instance->reg_set;
5183 
5184 	if (instance->adapter_type != MFI_SERIES)
5185 		instance->instancet = &megasas_instance_template_fusion;
5186 	else {
5187 		switch (instance->pdev->device) {
5188 		case PCI_DEVICE_ID_LSI_SAS1078R:
5189 		case PCI_DEVICE_ID_LSI_SAS1078DE:
5190 			instance->instancet = &megasas_instance_template_ppc;
5191 			break;
5192 		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5193 		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5194 			instance->instancet = &megasas_instance_template_gen2;
5195 			break;
5196 		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5197 		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5198 			instance->instancet = &megasas_instance_template_skinny;
5199 			break;
5200 		case PCI_DEVICE_ID_LSI_SAS1064R:
5201 		case PCI_DEVICE_ID_DELL_PERC5:
5202 		default:
5203 			instance->instancet = &megasas_instance_template_xscale;
5204 			instance->pd_list_not_supported = 1;
5205 			break;
5206 		}
5207 	}
5208 
5209 	if (megasas_transition_to_ready(instance, 0)) {
5210 		atomic_set(&instance->fw_reset_no_pci_access, 1);
5211 		instance->instancet->adp_reset
5212 			(instance, instance->reg_set);
5213 		atomic_set(&instance->fw_reset_no_pci_access, 0);
5214 		dev_info(&instance->pdev->dev,
5215 			"FW restarted successfully from %s!\n",
5216 			__func__);
5217 
		/* Wait for about 30 seconds before retrying */
5219 		ssleep(30);
5220 
5221 		if (megasas_transition_to_ready(instance, 0))
5222 			goto fail_ready_state;
5223 	}
5224 
5225 	megasas_init_ctrl_params(instance);
5226 
5227 	if (megasas_set_dma_mask(instance))
5228 		goto fail_ready_state;
5229 
5230 	if (megasas_alloc_ctrl_mem(instance))
5231 		goto fail_alloc_dma_buf;
5232 
5233 	if (megasas_alloc_ctrl_dma_buffers(instance))
5234 		goto fail_alloc_dma_buf;
5235 
5236 	fusion = instance->ctrl_context;
5237 
5238 	if (instance->adapter_type == VENTURA_SERIES) {
5239 		scratch_pad_3 =
5240 			readl(&instance->reg_set->outbound_scratch_pad_3);
5241 		instance->max_raid_mapsize = ((scratch_pad_3 >>
5242 			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5243 			MR_MAX_RAID_MAP_SIZE_MASK);
5244 	}
5245 
	/*
	 * Check if MSI-X is supported while in ready state
	 * (bit 26 of the outbound FW status register)
	 */
5247 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5248 		       0x4000000) >> 0x1a;
5249 	if (msix_enable && !msix_disable) {
5250 		int irq_flags = PCI_IRQ_MSIX;
5251 
5252 		scratch_pad_2 = readl
5253 			(&instance->reg_set->outbound_scratch_pad_2);
5254 		/* Check max MSI-X vectors */
5255 		if (fusion) {
5256 			if (instance->adapter_type == THUNDERBOLT_SERIES) {
5257 				/* Thunderbolt Series*/
5258 				instance->msix_vectors = (scratch_pad_2
5259 					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5260 				fw_msix_count = instance->msix_vectors;
5261 			} else { /* Invader series supports more than 8 MSI-x vectors*/
5262 				instance->msix_vectors = ((scratch_pad_2
5263 					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5264 					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
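				/*
				 * More than 16 reply queues implies the
				 * combined reply queue scheme; reply post
				 * host index 0 then uses the supplemental
				 * register (see the msix_combined handling
				 * further below).
				 */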
5265 				if (instance->msix_vectors > 16)
5266 					instance->msix_combined = true;
5267 
5268 				if (rdpq_enable)
5269 					instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5270 								1 : 0;
5271 				fw_msix_count = instance->msix_vectors;
5272 				/* Save 1-15 reply post index address to local memory
5273 				 * Index 0 is already saved from reg offset
5274 				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5275 				 */
5276 				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5277 					instance->reply_post_host_index_addr[loop] =
5278 						(u32 __iomem *)
5279 						((u8 __iomem *)instance->reg_set +
5280 						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5281 						+ (loop * 0x10));
5282 				}
5283 			}
5284 			if (msix_vectors)
5285 				instance->msix_vectors = min(msix_vectors,
5286 					instance->msix_vectors);
5287 		} else /* MFI adapters */
5288 			instance->msix_vectors = 1;
5289 		/* Don't bother allocating more MSI-X vectors than cpus */
5290 		instance->msix_vectors = min(instance->msix_vectors,
5291 					     (unsigned int)num_online_cpus());
5292 		if (smp_affinity_enable)
5293 			irq_flags |= PCI_IRQ_AFFINITY;
5294 		i = pci_alloc_irq_vectors(instance->pdev, 1,
5295 					  instance->msix_vectors, irq_flags);
5296 		if (i > 0)
5297 			instance->msix_vectors = i;
5298 		else
5299 			instance->msix_vectors = 0;
5300 	}
5301 	/*
	 * MSI-X host index 0 is common for all adapters.
	 * It is used by all MPT based adapters.
5304 	 */
5305 	if (instance->msix_combined) {
5306 		instance->reply_post_host_index_addr[0] =
5307 				(u32 *)((u8 *)instance->reg_set +
5308 				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5309 	} else {
5310 		instance->reply_post_host_index_addr[0] =
5311 			(u32 *)((u8 *)instance->reg_set +
5312 			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5313 	}
5314 
5315 	if (!instance->msix_vectors) {
5316 		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5317 		if (i < 0)
5318 			goto fail_setup_irqs;
5319 	}
5320 
5321 	dev_info(&instance->pdev->dev,
5322 		"firmware supports msix\t: (%d)", fw_msix_count);
5323 	dev_info(&instance->pdev->dev,
5324 		"current msix/online cpus\t: (%d/%d)\n",
5325 		instance->msix_vectors, (unsigned int)num_online_cpus());
5326 	dev_info(&instance->pdev->dev,
5327 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5328 
5329 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5330 		(unsigned long)instance);
5331 
5332 	/*
	 * Below are the default values for legacy firmware
	 * (non-fusion based controllers).
5335 	 */
5336 	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5337 	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5338 	/* Get operational params, sge flags, send init cmd to controller */
5339 	if (instance->instancet->init_adapter(instance))
5340 		goto fail_init_adapter;
5341 
5342 	if (instance->adapter_type == VENTURA_SERIES) {
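		/*
		 * FW reports the supported NVMe page size as a shift value in
		 * scratch pad 4; use it only when it is at least the default
		 * shift, otherwise nvme_page_size keeps its default value.
		 */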
5343 		scratch_pad_4 =
5344 			readl(&instance->reg_set->outbound_scratch_pad_4);
5345 		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5346 			MR_DEFAULT_NVME_PAGE_SHIFT)
5347 			instance->nvme_page_size =
5348 				(1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5349 
5350 		dev_info(&instance->pdev->dev,
5351 			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5352 	}
5353 
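	/*
	 * Register IRQ handlers: per-vector MSI-X handlers when MSI-X is
	 * active, otherwise a single legacy/IO-APIC handler.
	 */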
5354 	if (instance->msix_vectors ?
5355 		megasas_setup_irqs_msix(instance, 1) :
5356 		megasas_setup_irqs_ioapic(instance))
5357 		goto fail_init_adapter;
5358 
5359 	instance->instancet->enable_intr(instance);
5360 
5361 	dev_info(&instance->pdev->dev, "INIT adapter done\n");
5362 
5363 	megasas_setup_jbod_map(instance);
5364 
	/*
	 * For passthrough, the following call retrieves the PD list.
	 */
5368 	memset(instance->pd_list, 0,
5369 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5370 	if (megasas_get_pd_list(instance) < 0) {
5371 		dev_err(&instance->pdev->dev, "failed to get PD list\n");
5372 		goto fail_get_ld_pd_list;
5373 	}
5374 
5375 	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5376 
5377 	/* stream detection initialization */
5378 	if (instance->adapter_type == VENTURA_SERIES) {
5379 		fusion->stream_detect_by_ld =
5380 			kzalloc(sizeof(struct LD_STREAM_DETECT *)
5381 			* MAX_LOGICAL_DRIVES_EXT,
5382 			GFP_KERNEL);
5383 		if (!fusion->stream_detect_by_ld) {
5384 			dev_err(&instance->pdev->dev,
5385 				"unable to allocate stream detection for pool of LDs\n");
5386 			goto fail_get_ld_pd_list;
5387 		}
5388 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5389 			fusion->stream_detect_by_ld[i] =
5390 				kmalloc(sizeof(struct LD_STREAM_DETECT),
5391 				GFP_KERNEL);
5392 			if (!fusion->stream_detect_by_ld[i]) {
5393 				dev_err(&instance->pdev->dev,
5394 					"unable to allocate stream detect by LD\n ");
5395 				for (j = 0; j < i; ++j)
5396 					kfree(fusion->stream_detect_by_ld[j]);
5397 				kfree(fusion->stream_detect_by_ld);
5398 				fusion->stream_detect_by_ld = NULL;
5399 				goto fail_get_ld_pd_list;
5400 			}
5401 			fusion->stream_detect_by_ld[i]->mru_bit_map
5402 				= MR_STREAM_BITMAP;
5403 		}
5404 	}
5405 
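	/* Discover the LDs currently exposed to the host */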
5406 	if (megasas_ld_list_query(instance,
5407 				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5408 		goto fail_get_ld_pd_list;
5409 
5410 	/*
	 * Compute the max allowed sectors per IO: the controller info has two
	 * limits on max sectors. The driver should use the minimum of the two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmware (< FW ver 30) didn't report the information
	 * needed to calculate max_sectors_1, so that number always ended up as zero.
5418 	 */
5419 	tmp_sectors = 0;
5420 	ctrl_info = instance->ctrl_info_buf;
5421 
5422 	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5423 		le16_to_cpu(ctrl_info->max_strips_per_io);
5424 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5425 
5426 	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5427 
5428 	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5429 	instance->passive = ctrl_info->cluster.passive;
5430 	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5431 	instance->UnevenSpanSupport =
5432 		ctrl_info->adapterOperations2.supportUnevenSpans;
5433 	if (instance->UnevenSpanSupport) {
5434 		struct fusion_context *fusion = instance->ctrl_context;
5435 		if (MR_ValidateMapInfo(instance))
5436 			fusion->fast_path_io = 1;
5437 		else
5438 			fusion->fast_path_io = 0;
5439 
5440 	}
5441 	if (ctrl_info->host_interface.SRIOV) {
5442 		instance->requestorId = ctrl_info->iov.requestorId;
5443 		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5444 			if (!ctrl_info->adapterOperations2.activePassive)
5445 			    instance->PlasmaFW111 = 1;
5446 
5447 			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5448 			    instance->PlasmaFW111 ? "1.11" : "new");
5449 
5450 			if (instance->PlasmaFW111) {
5451 			    iovPtr = (struct IOV_111 *)
5452 				((unsigned char *)ctrl_info + IOV_111_OFFSET);
5453 			    instance->requestorId = iovPtr->requestorId;
5454 			}
5455 		}
5456 		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5457 			instance->requestorId);
5458 	}
5459 
5460 	instance->crash_dump_fw_support =
5461 		ctrl_info->adapterOperations3.supportCrashDump;
5462 	instance->crash_dump_drv_support =
5463 		(instance->crash_dump_fw_support &&
5464 		instance->crash_dump_buf);
5465 	if (instance->crash_dump_drv_support)
		megasas_set_crash_dump_params(instance,
			MR_CRASH_BUF_TURN_OFF);
	else {
5470 		if (instance->crash_dump_buf)
5471 			pci_free_consistent(instance->pdev,
5472 				CRASH_DMA_BUF_SIZE,
5473 				instance->crash_dump_buf,
5474 				instance->crash_dump_h);
5475 		instance->crash_dump_buf = NULL;
5476 	}
5477 
5478 
5479 	dev_info(&instance->pdev->dev,
5480 		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5481 		le16_to_cpu(ctrl_info->pci.vendor_id),
5482 		le16_to_cpu(ctrl_info->pci.device_id),
5483 		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5484 		le16_to_cpu(ctrl_info->pci.sub_device_id));
5485 	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
5486 		instance->UnevenSpanSupport ? "yes" : "no");
5487 	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
5488 		instance->crash_dump_drv_support ? "yes" : "no");
5489 	dev_info(&instance->pdev->dev, "jbod sync map		: %s\n",
5490 		instance->use_seqnum_jbod_fp ? "yes" : "no");
5491 
5492 
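	/*
	 * Derive the driver's own sector limit from the scatter-gather
	 * capacity (each SGE covers SGE_BUFFER_SIZE bytes) and clamp it to
	 * the FW-reported limit computed above.
	 */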
5493 	instance->max_sectors_per_req = instance->max_num_sge *
5494 						SGE_BUFFER_SIZE / 512;
5495 	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5496 		instance->max_sectors_per_req = tmp_sectors;
5497 
5498 	/* Check for valid throttlequeuedepth module parameter */
5499 	if (throttlequeuedepth &&
5500 			throttlequeuedepth <= instance->max_scsi_cmds)
5501 		instance->throttlequeuedepth = throttlequeuedepth;
5502 	else
5503 		instance->throttlequeuedepth =
5504 				MEGASAS_THROTTLE_QUEUE_DEPTH;
5505 
5506 	if ((resetwaittime < 1) ||
5507 	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5508 		resetwaittime = MEGASAS_RESET_WAIT_TIME;
5509 
5510 	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5511 		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5512 
5513 	/* Launch SR-IOV heartbeat timer */
5514 	if (instance->requestorId) {
5515 		if (!megasas_sriov_start_heartbeat(instance, 1))
5516 			megasas_start_timer(instance);
5517 		else
5518 			instance->skip_heartbeat_timer_del = 1;
5519 	}
5520 
5521 	return 0;
5522 
5523 fail_get_ld_pd_list:
5524 	instance->instancet->disable_intr(instance);
5525 fail_init_adapter:
5526 	megasas_destroy_irqs(instance);
5527 fail_setup_irqs:
5528 	if (instance->msix_vectors)
5529 		pci_free_irq_vectors(instance->pdev);
5530 	instance->msix_vectors = 0;
5531 fail_alloc_dma_buf:
5532 	megasas_free_ctrl_dma_buffers(instance);
5533 	megasas_free_ctrl_mem(instance);
5534 fail_ready_state:
5535 	iounmap(instance->reg_set);
5536 
5537 fail_ioremap:
5538 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5539 
5540 	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5541 		__func__, __LINE__);
5542 	return -EINVAL;
5543 }
5544 
5545 /**
5546  * megasas_release_mfi -	Reverses the FW initialization
5547  * @instance:			Adapter soft state
5548  */
5549 static void megasas_release_mfi(struct megasas_instance *instance)
5550 {
	u32 reply_q_sz = sizeof(u32) * (instance->max_mfi_cmds + 1);
5552 
5553 	if (instance->reply_queue)
5554 		pci_free_consistent(instance->pdev, reply_q_sz,
5555 			    instance->reply_queue, instance->reply_queue_h);
5556 
5557 	megasas_free_cmds(instance);
5558 
5559 	iounmap(instance->reg_set);
5560 
5561 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5562 }
5563 
5564 /**
5565  * megasas_get_seq_num -	Gets latest event sequence numbers
5566  * @instance:			Adapter soft state
5567  * @eli:			FW event log sequence numbers information
5568  *
5569  * FW maintains a log of all events in a non-volatile area. Upper layers would
5570  * usually find out the latest sequence number of the events, the seq number at
5571  * the boot etc. They would "read" all the events below the latest seq number
5572  * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
5573  * number), they would subsribe to AEN (asynchronous event notification) and
5574  * wait for the events to happen.
5575  */
5576 static int
5577 megasas_get_seq_num(struct megasas_instance *instance,
5578 		    struct megasas_evt_log_info *eli)
5579 {
5580 	struct megasas_cmd *cmd;
5581 	struct megasas_dcmd_frame *dcmd;
5582 	struct megasas_evt_log_info *el_info;
5583 	dma_addr_t el_info_h = 0;
5584 
5585 	cmd = megasas_get_cmd(instance);
5586 
5587 	if (!cmd) {
5588 		return -ENOMEM;
5589 	}
5590 
5591 	dcmd = &cmd->frame->dcmd;
5592 	el_info = pci_alloc_consistent(instance->pdev,
5593 				       sizeof(struct megasas_evt_log_info),
5594 				       &el_info_h);
5595 
5596 	if (!el_info) {
5597 		megasas_return_cmd(instance, cmd);
5598 		return -ENOMEM;
5599 	}
5600 
5601 	memset(el_info, 0, sizeof(*el_info));
5602 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5603 
5604 	dcmd->cmd = MFI_CMD_DCMD;
5605 	dcmd->cmd_status = 0x0;
5606 	dcmd->sge_count = 1;
5607 	dcmd->flags = MFI_FRAME_DIR_READ;
5608 	dcmd->timeout = 0;
5609 	dcmd->pad_0 = 0;
5610 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5611 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5612 
5613 	megasas_set_dma_settings(instance, dcmd, el_info_h,
5614 				 sizeof(struct megasas_evt_log_info));
5615 
5616 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5617 		DCMD_SUCCESS) {
5618 		/*
		 * Copy the data back into the caller's buffer
5620 		 */
5621 		eli->newest_seq_num = el_info->newest_seq_num;
5622 		eli->oldest_seq_num = el_info->oldest_seq_num;
5623 		eli->clear_seq_num = el_info->clear_seq_num;
5624 		eli->shutdown_seq_num = el_info->shutdown_seq_num;
5625 		eli->boot_seq_num = el_info->boot_seq_num;
5626 	} else
5627 		dev_err(&instance->pdev->dev, "DCMD failed "
5628 			"from %s\n", __func__);
5629 
5630 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5631 			    el_info, el_info_h);
5632 
5633 	megasas_return_cmd(instance, cmd);
5634 
5635 	return 0;
5636 }
5637 
5638 /**
5639  * megasas_register_aen -	Registers for asynchronous event notification
5640  * @instance:			Adapter soft state
5641  * @seq_num:			The starting sequence number
5642  * @class_locale:		Class of the event
5643  *
5644  * This function subscribes for AEN for events beyond the @seq_num. It requests
5645  * to be notified if and only if the event is of type @class_locale
5646  */
5647 static int
5648 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5649 		     u32 class_locale_word)
5650 {
5651 	int ret_val;
5652 	struct megasas_cmd *cmd;
5653 	struct megasas_dcmd_frame *dcmd;
5654 	union megasas_evt_class_locale curr_aen;
5655 	union megasas_evt_class_locale prev_aen;
5656 
5657 	/*
	 * If there is an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is a superset of both
	 * the old and the current one, and re-issue it to the FW.
5668 	 */
5669 
5670 	curr_aen.word = class_locale_word;
5671 
5672 	if (instance->aen_cmd) {
5673 
5674 		prev_aen.word =
5675 			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5676 
5677 		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
5678 		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
5679 			dev_info(&instance->pdev->dev,
5680 				 "%s %d out of range class %d send by application\n",
5681 				 __func__, __LINE__, curr_aen.members.class);
5682 			return 0;
5683 		}
5684 
5685 		/*
5686 		 * A class whose enum value is smaller is inclusive of all
5687 		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then new registration requests for higher
5689 		 * classes need not be sent to FW. They are automatically
5690 		 * included.
5691 		 *
5692 		 * Locale numbers don't have such hierarchy. They are bitmap
5693 		 * values
5694 		 */
5695 		if ((prev_aen.members.class <= curr_aen.members.class) &&
5696 		    !((prev_aen.members.locale & curr_aen.members.locale) ^
5697 		      curr_aen.members.locale)) {
5698 			/*
5699 			 * Previously issued event registration includes
5700 			 * current request. Nothing to do.
5701 			 */
5702 			return 0;
5703 		} else {
5704 			curr_aen.members.locale |= prev_aen.members.locale;
5705 
5706 			if (prev_aen.members.class < curr_aen.members.class)
5707 				curr_aen.members.class = prev_aen.members.class;
5708 
5709 			instance->aen_cmd->abort_aen = 1;
5710 			ret_val = megasas_issue_blocked_abort_cmd(instance,
5711 								  instance->
5712 								  aen_cmd, 30);
5713 
5714 			if (ret_val) {
5715 				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5716 				       "previous AEN command\n");
5717 				return ret_val;
5718 			}
5719 		}
5720 	}
5721 
5722 	cmd = megasas_get_cmd(instance);
5723 
5724 	if (!cmd)
5725 		return -ENOMEM;
5726 
5727 	dcmd = &cmd->frame->dcmd;
5728 
5729 	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5730 
5731 	/*
5732 	 * Prepare DCMD for aen registration
5733 	 */
5734 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5735 
5736 	dcmd->cmd = MFI_CMD_DCMD;
5737 	dcmd->cmd_status = 0x0;
5738 	dcmd->sge_count = 1;
5739 	dcmd->flags = MFI_FRAME_DIR_READ;
5740 	dcmd->timeout = 0;
5741 	dcmd->pad_0 = 0;
5742 	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5743 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
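	/* mbox.w[0]: starting event sequence number, mbox.w[1]: class/locale filter */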
5744 	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5745 	instance->last_seq_num = seq_num;
5746 	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5747 
5748 	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
5749 				 sizeof(struct megasas_evt_detail));
5750 
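	/*
	 * If another AEN registration slipped in while this frame was being
	 * prepared, keep that one and drop ours.
	 */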
5751 	if (instance->aen_cmd != NULL) {
5752 		megasas_return_cmd(instance, cmd);
5753 		return 0;
5754 	}
5755 
5756 	/*
5757 	 * Store reference to the cmd used to register for AEN. When an
5758 	 * application wants us to register for AEN, we have to abort this
5759 	 * cmd and re-register with a new EVENT LOCALE supplied by that app
5760 	 */
5761 	instance->aen_cmd = cmd;
5762 
5763 	/*
5764 	 * Issue the aen registration frame
5765 	 */
5766 	instance->instancet->issue_dcmd(instance, cmd);
5767 
5768 	return 0;
5769 }
5770 
5771 /* megasas_get_target_prop - Send DCMD with below details to firmware.
5772  *
 * This DCMD fetches a few properties of the LD/system PD defined
 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
 *
 * The DCMD is sent by the driver whenever a new target is added to the OS.
5777  *
5778  * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
5779  * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
5780  *                       0 = system PD, 1 = LD.
5781  * dcmd.mbox.s[1]      - TargetID for LD/system PD.
5782  * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
5783  *
5784  * @instance:		Adapter soft state
5785  * @sdev:		OS provided scsi device
5786  *
 * Returns 0 on success, non-zero on failure.
5788  */
5789 static int
5790 megasas_get_target_prop(struct megasas_instance *instance,
5791 			struct scsi_device *sdev)
5792 {
5793 	int ret;
5794 	struct megasas_cmd *cmd;
5795 	struct megasas_dcmd_frame *dcmd;
5796 	u16 targetId = (sdev->channel % 2) + sdev->id;
5797 
5798 	cmd = megasas_get_cmd(instance);
5799 
5800 	if (!cmd) {
5801 		dev_err(&instance->pdev->dev,
5802 			"Failed to get cmd %s\n", __func__);
5803 		return -ENOMEM;
5804 	}
5805 
5806 	dcmd = &cmd->frame->dcmd;
5807 
5808 	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5809 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5810 	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5811 
5812 	dcmd->mbox.s[1] = cpu_to_le16(targetId);
5813 	dcmd->cmd = MFI_CMD_DCMD;
5814 	dcmd->cmd_status = 0xFF;
5815 	dcmd->sge_count = 1;
5816 	dcmd->flags = MFI_FRAME_DIR_READ;
5817 	dcmd->timeout = 0;
5818 	dcmd->pad_0 = 0;
5819 	dcmd->data_xfer_len =
5820 		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5821 	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5822 
5823 	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
5824 				 sizeof(struct MR_TARGET_PROPERTIES));
5825 
5826 	if ((instance->adapter_type != MFI_SERIES) &&
5827 	    !instance->mask_interrupts)
5828 		ret = megasas_issue_blocked_cmd(instance,
5829 						cmd, MFI_IO_TIMEOUT_SECS);
5830 	else
5831 		ret = megasas_issue_polled(instance, cmd);
5832 
5833 	switch (ret) {
5834 	case DCMD_TIMEOUT:
5835 		switch (dcmd_timeout_ocr_possible(instance)) {
5836 		case INITIATE_OCR:
5837 			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5838 			megasas_reset_fusion(instance->host,
5839 					     MFI_IO_TIMEOUT_OCR);
5840 			break;
5841 		case KILL_ADAPTER:
5842 			megaraid_sas_kill_hba(instance);
5843 			break;
5844 		case IGNORE_TIMEOUT:
5845 			dev_info(&instance->pdev->dev,
5846 				 "Ignore DCMD timeout: %s %d\n",
5847 				 __func__, __LINE__);
5848 			break;
5849 		}
5850 		break;
5851 
5852 	default:
5853 		megasas_return_cmd(instance, cmd);
5854 	}
5855 	if (ret != DCMD_SUCCESS)
5856 		dev_err(&instance->pdev->dev,
5857 			"return from %s %d return value %d\n",
5858 			__func__, __LINE__, ret);
5859 
5860 	return ret;
5861 }
5862 
5863 /**
5864  * megasas_start_aen -	Subscribes to AEN during driver load time
5865  * @instance:		Adapter soft state
5866  */
5867 static int megasas_start_aen(struct megasas_instance *instance)
5868 {
5869 	struct megasas_evt_log_info eli;
5870 	union megasas_evt_class_locale class_locale;
5871 
5872 	/*
5873 	 * Get the latest sequence number from FW
5874 	 */
5875 	memset(&eli, 0, sizeof(eli));
5876 
5877 	if (megasas_get_seq_num(instance, &eli))
5878 		return -1;
5879 
5880 	/*
5881 	 * Register AEN with FW for latest sequence number plus 1
5882 	 */
5883 	class_locale.members.reserved = 0;
5884 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
5885 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
5886 
5887 	return megasas_register_aen(instance,
5888 			le32_to_cpu(eli.newest_seq_num) + 1,
5889 			class_locale.word);
5890 }
5891 
5892 /**
5893  * megasas_io_attach -	Attaches this driver to SCSI mid-layer
5894  * @instance:		Adapter soft state
5895  */
5896 static int megasas_io_attach(struct megasas_instance *instance)
5897 {
5898 	struct Scsi_Host *host = instance->host;
5899 
5900 	/*
5901 	 * Export parameters required by SCSI mid-layer
5902 	 */
5903 	host->unique_id = instance->unique_id;
5904 	host->can_queue = instance->max_scsi_cmds;
5905 	host->this_id = instance->init_id;
5906 	host->sg_tablesize = instance->max_num_sge;
5907 
5908 	if (instance->fw_support_ieee)
5909 		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5910 
5911 	/*
5912 	 * Check if the module parameter value for max_sectors can be used
5913 	 */
5914 	if (max_sectors && max_sectors < instance->max_sectors_per_req)
5915 		instance->max_sectors_per_req = max_sectors;
5916 	else {
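		/*
		 * GEN2 controllers may honor a user-requested max_sectors
		 * above the FW-derived limit, as long as it does not exceed
		 * MEGASAS_MAX_SECTORS.
		 */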
5917 		if (max_sectors) {
5918 			if (((instance->pdev->device ==
5919 				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5920 				(instance->pdev->device ==
5921 				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5922 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
5923 				instance->max_sectors_per_req = max_sectors;
5924 			} else {
5925 			dev_info(&instance->pdev->dev, "max_sectors should be > 0"
5926 				"and <= %d (or < 1MB for GEN2 controller)\n",
5927 				instance->max_sectors_per_req);
5928 			}
5929 		}
5930 	}
5931 
5932 	host->max_sectors = instance->max_sectors_per_req;
5933 	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5934 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5935 	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5936 	host->max_lun = MEGASAS_MAX_LUN;
5937 	host->max_cmd_len = 16;
5938 
5939 	/*
5940 	 * Notify the mid-layer about the new controller
5941 	 */
5942 	if (scsi_add_host(host, &instance->pdev->dev)) {
5943 		dev_err(&instance->pdev->dev,
5944 			"Failed to add host from %s %d\n",
5945 			__func__, __LINE__);
5946 		return -ENODEV;
5947 	}
5948 
5949 	return 0;
5950 }
5951 
5952 /**
5953  * megasas_set_dma_mask -	Set DMA mask for supported controllers
5954  *
5955  * @instance:		Adapter soft state
5956  * Description:
5957  *
5958  * For Ventura, driver/FW will operate in 64bit DMA addresses.
5959  *
 * For Invader -
 *	By default, driver/FW operate with 32 bit DMA addresses
 *	for consistent DMA mapping, but if the 32 bit consistent
 *	DMA mask fails, the driver tries a 64 bit consistent
 *	mask, provided the FW is truly 64 bit DMA capable.
 *
 * For older controllers (Thunderbolt and MFI based adapters) -
 *	driver/FW operate with 32 bit consistent DMA addresses.
5968  */
5969 static int
5970 megasas_set_dma_mask(struct megasas_instance *instance)
5971 {
5972 	u64 consistent_mask;
5973 	struct pci_dev *pdev;
5974 	u32 scratch_pad_2;
5975 
5976 	pdev = instance->pdev;
5977 	consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
5978 				DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
5979 
5980 	if (IS_DMA64) {
5981 		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
5982 		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5983 			goto fail_set_dma_mask;
5984 
5985 		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
5986 		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
5987 		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
5988 			/*
			 * If the 32 bit DMA mask fails, then try the 64 bit
			 * mask if the FW is capable of handling 64 bit DMA.
5991 			 */
5992 			scratch_pad_2 = readl
5993 				(&instance->reg_set->outbound_scratch_pad_2);
5994 
5995 			if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
5996 				goto fail_set_dma_mask;
5997 			else if (dma_set_mask_and_coherent(&pdev->dev,
5998 							   DMA_BIT_MASK(64)))
5999 				goto fail_set_dma_mask;
6000 		}
6001 	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6002 		goto fail_set_dma_mask;
6003 
6004 	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6005 		instance->consistent_mask_64bit = false;
6006 	else
6007 		instance->consistent_mask_64bit = true;
6008 
6009 	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6010 		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
6011 		 (instance->consistent_mask_64bit ? "64" : "32"));
6012 
6013 	return 0;
6014 
6015 fail_set_dma_mask:
6016 	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6017 	return -1;
6018 
6019 }
6020 
6021 /*
6022  * megasas_set_adapter_type -	Set adapter type.
6023  *				Supported controllers can be divided in
6024  *				4 categories-  enum MR_ADAPTER_TYPE {
6025  *							MFI_SERIES = 1,
6026  *							THUNDERBOLT_SERIES = 2,
6027  *							INVADER_SERIES = 3,
6028  *							VENTURA_SERIES = 4,
6029  *						};
6030  * @instance:			Adapter soft state
6031  * return:			void
6032  */
6033 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6034 {
6035 	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6036 	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6037 		instance->adapter_type = MFI_SERIES;
6038 	} else {
6039 		switch (instance->pdev->device) {
6040 		case PCI_DEVICE_ID_LSI_VENTURA:
6041 		case PCI_DEVICE_ID_LSI_CRUSADER:
6042 		case PCI_DEVICE_ID_LSI_HARPOON:
6043 		case PCI_DEVICE_ID_LSI_TOMCAT:
6044 		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6045 		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6046 			instance->adapter_type = VENTURA_SERIES;
6047 			break;
6048 		case PCI_DEVICE_ID_LSI_FUSION:
6049 		case PCI_DEVICE_ID_LSI_PLASMA:
6050 			instance->adapter_type = THUNDERBOLT_SERIES;
6051 			break;
6052 		case PCI_DEVICE_ID_LSI_INVADER:
6053 		case PCI_DEVICE_ID_LSI_INTRUDER:
6054 		case PCI_DEVICE_ID_LSI_INTRUDER_24:
6055 		case PCI_DEVICE_ID_LSI_CUTLASS_52:
6056 		case PCI_DEVICE_ID_LSI_CUTLASS_53:
6057 		case PCI_DEVICE_ID_LSI_FURY:
6058 			instance->adapter_type = INVADER_SERIES;
6059 			break;
6060 		default: /* For all other supported controllers */
6061 			instance->adapter_type = MFI_SERIES;
6062 			break;
6063 		}
6064 	}
6065 }
6066 
6067 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6068 {
6069 	instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6070 						  &instance->producer_h);
6071 	instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
6072 						  &instance->consumer_h);
6073 
6074 	if (!instance->producer || !instance->consumer) {
6075 		dev_err(&instance->pdev->dev,
6076 			"Failed to allocate memory for producer, consumer\n");
6077 		return -1;
6078 	}
6079 
6080 	*instance->producer = 0;
6081 	*instance->consumer = 0;
6082 	return 0;
6083 }
6084 
6085 /**
6086  * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
6087  *				structures which are not common across MFI
6088  *				adapters and fusion adapters.
6089  *				For MFI based adapters, allocate producer and
6090  *				consumer buffers. For fusion adapters, allocate
6091  *				memory for fusion context.
6092  * @instance:			Adapter soft state
6093  * return:			0 for SUCCESS
6094  */
6095 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6096 {
6097 	switch (instance->adapter_type) {
6098 	case MFI_SERIES:
6099 		if (megasas_alloc_mfi_ctrl_mem(instance))
6100 			return -ENOMEM;
6101 		break;
6102 	case VENTURA_SERIES:
6103 	case THUNDERBOLT_SERIES:
6104 	case INVADER_SERIES:
6105 		if (megasas_alloc_fusion_context(instance))
6106 			return -ENOMEM;
6107 		break;
6108 	}
6109 
6110 	return 0;
6111 }
6112 
6113 /*
6114  * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
6115  *				producer, consumer buffers for MFI adapters
6116  *
6117  * @instance -			Adapter soft instance
6118  *
6119  */
6120 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6121 {
6122 	if (instance->adapter_type == MFI_SERIES) {
6123 		if (instance->producer)
6124 			pci_free_consistent(instance->pdev, sizeof(u32),
6125 					    instance->producer,
6126 					    instance->producer_h);
6127 		if (instance->consumer)
6128 			pci_free_consistent(instance->pdev, sizeof(u32),
6129 					    instance->consumer,
6130 					    instance->consumer_h);
6131 	} else {
6132 		megasas_free_fusion_context(instance);
6133 	}
6134 }
6135 
6136 /**
6137  * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
6138  *					driver load time
6139  *
6140  * @instance-				Adapter soft instance
 * @return-				0 for SUCCESS
6142  */
6143 static inline
6144 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6145 {
6146 	struct pci_dev *pdev = instance->pdev;
6147 	struct fusion_context *fusion = instance->ctrl_context;
6148 
6149 	instance->evt_detail =
6150 		pci_alloc_consistent(pdev,
6151 				     sizeof(struct megasas_evt_detail),
6152 				     &instance->evt_detail_h);
6153 
6154 	if (!instance->evt_detail) {
6155 		dev_err(&instance->pdev->dev,
6156 			"Failed to allocate event detail buffer\n");
6157 		return -ENOMEM;
6158 	}
6159 
6160 	if (fusion) {
6161 		fusion->ioc_init_request =
6162 			dma_alloc_coherent(&pdev->dev,
6163 					   sizeof(struct MPI2_IOC_INIT_REQUEST),
6164 					   &fusion->ioc_init_request_phys,
6165 					   GFP_KERNEL);
6166 
6167 		if (!fusion->ioc_init_request) {
6168 			dev_err(&pdev->dev,
6169 				"Failed to allocate PD list buffer\n");
6170 			return -ENOMEM;
6171 		}
6172 	}
6173 
6174 	instance->pd_list_buf =
6175 		pci_alloc_consistent(pdev,
6176 				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6177 				     &instance->pd_list_buf_h);
6178 
6179 	if (!instance->pd_list_buf) {
6180 		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6181 		return -ENOMEM;
6182 	}
6183 
6184 	instance->ctrl_info_buf =
6185 		pci_alloc_consistent(pdev,
6186 				     sizeof(struct megasas_ctrl_info),
6187 				     &instance->ctrl_info_buf_h);
6188 
6189 	if (!instance->ctrl_info_buf) {
6190 		dev_err(&pdev->dev,
6191 			"Failed to allocate controller info buffer\n");
6192 		return -ENOMEM;
6193 	}
6194 
6195 	instance->ld_list_buf =
6196 		pci_alloc_consistent(pdev,
6197 				     sizeof(struct MR_LD_LIST),
6198 				     &instance->ld_list_buf_h);
6199 
6200 	if (!instance->ld_list_buf) {
6201 		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6202 		return -ENOMEM;
6203 	}
6204 
6205 	instance->ld_targetid_list_buf =
6206 		pci_alloc_consistent(pdev,
6207 				     sizeof(struct MR_LD_TARGETID_LIST),
6208 				     &instance->ld_targetid_list_buf_h);
6209 
6210 	if (!instance->ld_targetid_list_buf) {
6211 		dev_err(&pdev->dev,
6212 			"Failed to allocate LD targetid list buffer\n");
6213 		return -ENOMEM;
6214 	}
6215 
6216 	if (!reset_devices) {
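		/*
		 * The buffers below are optional: they are skipped in a
		 * kdump (reset_devices) kernel, and allocation failures are
		 * reported but not treated as fatal.
		 */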
6217 		instance->system_info_buf =
6218 			pci_alloc_consistent(pdev,
6219 					     sizeof(struct MR_DRV_SYSTEM_INFO),
6220 					     &instance->system_info_h);
6221 		instance->pd_info =
6222 			pci_alloc_consistent(pdev,
6223 					     sizeof(struct MR_PD_INFO),
6224 					     &instance->pd_info_h);
6225 		instance->tgt_prop =
6226 			pci_alloc_consistent(pdev,
6227 					     sizeof(struct MR_TARGET_PROPERTIES),
6228 					     &instance->tgt_prop_h);
6229 		instance->crash_dump_buf =
6230 			pci_alloc_consistent(pdev,
6231 					     CRASH_DMA_BUF_SIZE,
6232 					     &instance->crash_dump_h);
6233 
6234 		if (!instance->system_info_buf)
6235 			dev_err(&instance->pdev->dev,
6236 				"Failed to allocate system info buffer\n");
6237 
6238 		if (!instance->pd_info)
6239 			dev_err(&instance->pdev->dev,
6240 				"Failed to allocate pd_info buffer\n");
6241 
6242 		if (!instance->tgt_prop)
6243 			dev_err(&instance->pdev->dev,
6244 				"Failed to allocate tgt_prop buffer\n");
6245 
6246 		if (!instance->crash_dump_buf)
6247 			dev_err(&instance->pdev->dev,
6248 				"Failed to allocate crash dump buffer\n");
6249 	}
6250 
6251 	return 0;
6252 }
6253 
6254 /*
6255  * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
6256  *					during driver load time
6257  *
6258  * @instance-				Adapter soft instance
6259  *
6260  */
6261 static inline
6262 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6263 {
6264 	struct pci_dev *pdev = instance->pdev;
6265 	struct fusion_context *fusion = instance->ctrl_context;
6266 
6267 	if (instance->evt_detail)
6268 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6269 				    instance->evt_detail,
6270 				    instance->evt_detail_h);
6271 
6272 	if (fusion && fusion->ioc_init_request)
6273 		dma_free_coherent(&pdev->dev,
6274 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
6275 				  fusion->ioc_init_request,
6276 				  fusion->ioc_init_request_phys);
6277 
6278 	if (instance->pd_list_buf)
6279 		pci_free_consistent(pdev,
6280 				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6281 				    instance->pd_list_buf,
6282 				    instance->pd_list_buf_h);
6283 
6284 	if (instance->ld_list_buf)
6285 		pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
6286 				    instance->ld_list_buf,
6287 				    instance->ld_list_buf_h);
6288 
6289 	if (instance->ld_targetid_list_buf)
6290 		pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
6291 				    instance->ld_targetid_list_buf,
6292 				    instance->ld_targetid_list_buf_h);
6293 
6294 	if (instance->ctrl_info_buf)
6295 		pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
6296 				    instance->ctrl_info_buf,
6297 				    instance->ctrl_info_buf_h);
6298 
6299 	if (instance->system_info_buf)
6300 		pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6301 				    instance->system_info_buf,
6302 				    instance->system_info_h);
6303 
6304 	if (instance->pd_info)
6305 		pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6306 				    instance->pd_info, instance->pd_info_h);
6307 
6308 	if (instance->tgt_prop)
6309 		pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6310 				    instance->tgt_prop, instance->tgt_prop_h);
6311 
6312 	if (instance->crash_dump_buf)
6313 		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6314 				    instance->crash_dump_buf,
6315 				    instance->crash_dump_h);
6316 }
6317 
6318 /*
6319  * megasas_init_ctrl_params -		Initialize controller's instance
6320  *					parameters before FW init
6321  * @instance -				Adapter soft instance
6322  * @return -				void
6323  */
6324 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6325 {
6326 	instance->fw_crash_state = UNAVAILABLE;
6327 
6328 	megasas_poll_wait_aen = 0;
6329 	instance->issuepend_done = 1;
6330 	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6331 
6332 	/*
6333 	 * Initialize locks and queues
6334 	 */
6335 	INIT_LIST_HEAD(&instance->cmd_pool);
6336 	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6337 
6338 	atomic_set(&instance->fw_outstanding, 0);
6339 
6340 	init_waitqueue_head(&instance->int_cmd_wait_q);
6341 	init_waitqueue_head(&instance->abort_cmd_wait_q);
6342 
6343 	spin_lock_init(&instance->crashdump_lock);
6344 	spin_lock_init(&instance->mfi_pool_lock);
6345 	spin_lock_init(&instance->hba_lock);
6346 	spin_lock_init(&instance->stream_lock);
6347 	spin_lock_init(&instance->completion_lock);
6348 
6349 	mutex_init(&instance->hba_mutex);
6350 	mutex_init(&instance->reset_mutex);
6351 
6352 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6353 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6354 		instance->flag_ieee = 1;
6355 
6356 	megasas_dbg_lvl = 0;
6357 	instance->flag = 0;
6358 	instance->unload = 1;
6359 	instance->last_time = 0;
6360 	instance->disableOnlineCtrlReset = 1;
6361 	instance->UnevenSpanSupport = 0;
6362 
6363 	if (instance->adapter_type != MFI_SERIES) {
6364 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6365 		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6366 	} else {
6367 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6368 	}
6369 }
6370 
6371 /**
6372  * megasas_probe_one -	PCI hotplug entry point
6373  * @pdev:		PCI device structure
6374  * @id:			PCI ids of supported hotplugged adapter
6375  */
6376 static int megasas_probe_one(struct pci_dev *pdev,
6377 			     const struct pci_device_id *id)
6378 {
6379 	int rval, pos;
6380 	struct Scsi_Host *host;
6381 	struct megasas_instance *instance;
6382 	u16 control = 0;
6383 
	/*
	 * Reset MSI-X in the kdump kernel: MSI-X may still be enabled from
	 * the crashed kernel, so clear the enable bit before the controller
	 * is reinitialized.
	 */
6385 	if (reset_devices) {
6386 		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6387 		if (pos) {
6388 			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6389 					     &control);
6390 			if (control & PCI_MSIX_FLAGS_ENABLE) {
6391 				dev_info(&pdev->dev, "resetting MSI-X\n");
6392 				pci_write_config_word(pdev,
6393 						      pos + PCI_MSIX_FLAGS,
6394 						      control &
6395 						      ~PCI_MSIX_FLAGS_ENABLE);
6396 			}
6397 		}
6398 	}
6399 
6400 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
6402 	 */
6403 	rval = pci_enable_device_mem(pdev);
6404 
6405 	if (rval) {
6406 		return rval;
6407 	}
6408 
6409 	pci_set_master(pdev);
6410 
6411 	host = scsi_host_alloc(&megasas_template,
6412 			       sizeof(struct megasas_instance));
6413 
6414 	if (!host) {
6415 		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6416 		goto fail_alloc_instance;
6417 	}
6418 
6419 	instance = (struct megasas_instance *)host->hostdata;
6420 	memset(instance, 0, sizeof(*instance));
6421 	atomic_set(&instance->fw_reset_no_pci_access, 0);
6422 
6423 	/*
6424 	 * Initialize PCI related and misc parameters
6425 	 */
6426 	instance->pdev = pdev;
6427 	instance->host = host;
6428 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6429 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6430 
6431 	megasas_set_adapter_type(instance);
6432 
6433 	/*
6434 	 * Initialize MFI Firmware
6435 	 */
6436 	if (megasas_init_fw(instance))
6437 		goto fail_init_mfi;
6438 
6439 	if (instance->requestorId) {
6440 		if (instance->PlasmaFW111) {
6441 			instance->vf_affiliation_111 =
6442 				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6443 						     &instance->vf_affiliation_111_h);
6444 			if (!instance->vf_affiliation_111)
6445 				dev_warn(&pdev->dev, "Can't allocate "
6446 				       "memory for VF affiliation buffer\n");
6447 		} else {
6448 			instance->vf_affiliation =
6449 				pci_alloc_consistent(pdev,
6450 						     (MAX_LOGICAL_DRIVES + 1) *
6451 						     sizeof(struct MR_LD_VF_AFFILIATION),
6452 						     &instance->vf_affiliation_h);
6453 			if (!instance->vf_affiliation)
6454 				dev_warn(&pdev->dev, "Can't allocate "
6455 				       "memory for VF affiliation buffer\n");
6456 		}
6457 	}
6458 
6459 	/*
6460 	 * Store instance in PCI softstate
6461 	 */
6462 	pci_set_drvdata(pdev, instance);
6463 
6464 	/*
6465 	 * Add this controller to megasas_mgmt_info structure so that it
6466 	 * can be exported to management applications
6467 	 */
6468 	megasas_mgmt_info.count++;
6469 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6470 	megasas_mgmt_info.max_index++;
6471 
6472 	/*
6473 	 * Register with SCSI mid-layer
6474 	 */
6475 	if (megasas_io_attach(instance))
6476 		goto fail_io_attach;
6477 
6478 	instance->unload = 0;
6479 	/*
6480 	 * Trigger SCSI to scan our drives
6481 	 */
6482 	scsi_scan_host(host);
6483 
6484 	/*
6485 	 * Initiate AEN (Asynchronous Event Notification)
6486 	 */
6487 	if (megasas_start_aen(instance)) {
6488 		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6489 		goto fail_start_aen;
6490 	}
6491 
6492 	/* Get current SR-IOV LD/VF affiliation */
6493 	if (instance->requestorId)
6494 		megasas_get_ld_vf_affiliation(instance, 1);
6495 
6496 	return 0;
6497 
6498 fail_start_aen:
6499 fail_io_attach:
6500 	megasas_mgmt_info.count--;
6501 	megasas_mgmt_info.max_index--;
6502 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6503 
6504 	instance->instancet->disable_intr(instance);
6505 	megasas_destroy_irqs(instance);
6506 
6507 	if (instance->adapter_type != MFI_SERIES)
6508 		megasas_release_fusion(instance);
6509 	else
6510 		megasas_release_mfi(instance);
6511 	if (instance->msix_vectors)
6512 		pci_free_irq_vectors(instance->pdev);
6513 fail_init_mfi:
6514 	scsi_host_put(host);
6515 
6516 fail_alloc_instance:
6517 	pci_disable_device(pdev);
6518 
6519 	return -ENODEV;
6520 }
6521 
6522 /**
6523  * megasas_flush_cache -	Requests FW to flush all its caches
6524  * @instance:			Adapter soft state
6525  */
6526 static void megasas_flush_cache(struct megasas_instance *instance)
6527 {
6528 	struct megasas_cmd *cmd;
6529 	struct megasas_dcmd_frame *dcmd;
6530 
6531 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6532 		return;
6533 
6534 	cmd = megasas_get_cmd(instance);
6535 
6536 	if (!cmd)
6537 		return;
6538 
6539 	dcmd = &cmd->frame->dcmd;
6540 
6541 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6542 
6543 	dcmd->cmd = MFI_CMD_DCMD;
6544 	dcmd->cmd_status = 0x0;
6545 	dcmd->sge_count = 0;
6546 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6547 	dcmd->timeout = 0;
6548 	dcmd->pad_0 = 0;
6549 	dcmd->data_xfer_len = 0;
6550 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6551 	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6552 
6553 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6554 			!= DCMD_SUCCESS) {
6555 		dev_err(&instance->pdev->dev,
6556 			"return from %s %d\n", __func__, __LINE__);
6557 		return;
6558 	}
6559 
6560 	megasas_return_cmd(instance, cmd);
6561 }
6562 
6563 /**
6564  * megasas_shutdown_controller -	Instructs FW to shutdown the controller
6565  * @instance:				Adapter soft state
6566  * @opcode:				Shutdown/Hibernate
6567  */
6568 static void megasas_shutdown_controller(struct megasas_instance *instance,
6569 					u32 opcode)
6570 {
6571 	struct megasas_cmd *cmd;
6572 	struct megasas_dcmd_frame *dcmd;
6573 
6574 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6575 		return;
6576 
6577 	cmd = megasas_get_cmd(instance);
6578 
6579 	if (!cmd)
6580 		return;
6581 
6582 	if (instance->aen_cmd)
6583 		megasas_issue_blocked_abort_cmd(instance,
6584 			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6585 	if (instance->map_update_cmd)
6586 		megasas_issue_blocked_abort_cmd(instance,
6587 			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6588 	if (instance->jbod_seq_cmd)
6589 		megasas_issue_blocked_abort_cmd(instance,
6590 			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6591 
6592 	dcmd = &cmd->frame->dcmd;
6593 
6594 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6595 
6596 	dcmd->cmd = MFI_CMD_DCMD;
6597 	dcmd->cmd_status = 0x0;
6598 	dcmd->sge_count = 0;
6599 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6600 	dcmd->timeout = 0;
6601 	dcmd->pad_0 = 0;
6602 	dcmd->data_xfer_len = 0;
6603 	dcmd->opcode = cpu_to_le32(opcode);
6604 
6605 	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6606 			!= DCMD_SUCCESS) {
6607 		dev_err(&instance->pdev->dev,
6608 			"return from %s %d\n", __func__, __LINE__);
6609 		return;
6610 	}
6611 
6612 	megasas_return_cmd(instance, cmd);
6613 }
6614 
6615 #ifdef CONFIG_PM
6616 /**
6617  * megasas_suspend -	driver suspend entry point
6618  * @pdev:		PCI device structure
6619  * @state:		PCI power state to suspend routine
6620  */
6621 static int
6622 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6623 {
6624 	struct Scsi_Host *host;
6625 	struct megasas_instance *instance;
6626 
6627 	instance = pci_get_drvdata(pdev);
6628 	host = instance->host;
6629 	instance->unload = 1;
6630 
6631 	/* Shutdown SR-IOV heartbeat timer */
6632 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6633 		del_timer_sync(&instance->sriov_heartbeat_timer);
6634 
6635 	megasas_flush_cache(instance);
6636 	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6637 
	/* cancel the delayed work if it is still queued */
6639 	if (instance->ev != NULL) {
6640 		struct megasas_aen_event *ev = instance->ev;
6641 		cancel_delayed_work_sync(&ev->hotplug_work);
6642 		instance->ev = NULL;
6643 	}
6644 
6645 	tasklet_kill(&instance->isr_tasklet);
6646 
6647 	pci_set_drvdata(instance->pdev, instance);
6648 	instance->instancet->disable_intr(instance);
6649 
6650 	megasas_destroy_irqs(instance);
6651 
6652 	if (instance->msix_vectors)
6653 		pci_free_irq_vectors(instance->pdev);
6654 
6655 	pci_save_state(pdev);
6656 	pci_disable_device(pdev);
6657 
6658 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
6659 
6660 	return 0;
6661 }
6662 
6663 /**
6664  * megasas_resume-      driver resume entry point
6665  * @pdev:               PCI device structure
6666  */
6667 static int
6668 megasas_resume(struct pci_dev *pdev)
6669 {
6670 	int rval;
6671 	struct Scsi_Host *host;
6672 	struct megasas_instance *instance;
6673 	int irq_flags = PCI_IRQ_LEGACY;
6674 
6675 	instance = pci_get_drvdata(pdev);
6676 	host = instance->host;
6677 	pci_set_power_state(pdev, PCI_D0);
6678 	pci_enable_wake(pdev, PCI_D0, 0);
6679 	pci_restore_state(pdev);
6680 
6681 	/*
	 * PCI prepping: enable device, set bus mastering and DMA mask
6683 	 */
6684 	rval = pci_enable_device_mem(pdev);
6685 
6686 	if (rval) {
6687 		dev_err(&pdev->dev, "Enable device failed\n");
6688 		return rval;
6689 	}
6690 
6691 	pci_set_master(pdev);
6692 
6693 	/*
6694 	 * We expect the FW state to be READY
6695 	 */
6696 	if (megasas_transition_to_ready(instance, 0))
6697 		goto fail_ready_state;
6698 
6699 	if (megasas_set_dma_mask(instance))
6700 		goto fail_set_dma_mask;
6701 
6702 	/*
6703 	 * Initialize MFI Firmware
6704 	 */
6705 
6706 	atomic_set(&instance->fw_outstanding, 0);
6707 
6708 	/* Now re-enable MSI-X */
6709 	if (instance->msix_vectors) {
6710 		irq_flags = PCI_IRQ_MSIX;
6711 		if (smp_affinity_enable)
6712 			irq_flags |= PCI_IRQ_AFFINITY;
6713 	}
6714 	rval = pci_alloc_irq_vectors(instance->pdev, 1,
6715 				     instance->msix_vectors ?
6716 				     instance->msix_vectors : 1, irq_flags);
6717 	if (rval < 0)
6718 		goto fail_reenable_msix;
6719 
6720 	if (instance->adapter_type != MFI_SERIES) {
6721 		megasas_reset_reply_desc(instance);
6722 		if (megasas_ioc_init_fusion(instance)) {
6723 			megasas_free_cmds(instance);
6724 			megasas_free_cmds_fusion(instance);
6725 			goto fail_init_mfi;
6726 		}
6727 		if (!megasas_get_map_info(instance))
6728 			megasas_sync_map_info(instance);
6729 	} else {
6730 		*instance->producer = 0;
6731 		*instance->consumer = 0;
6732 		if (megasas_issue_init_mfi(instance))
6733 			goto fail_init_mfi;
6734 	}
6735 
6736 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6737 		     (unsigned long)instance);
6738 
6739 	if (instance->msix_vectors ?
6740 			megasas_setup_irqs_msix(instance, 0) :
6741 			megasas_setup_irqs_ioapic(instance))
6742 		goto fail_init_mfi;
6743 
6744 	/* Re-launch SR-IOV heartbeat timer */
6745 	if (instance->requestorId) {
6746 		if (!megasas_sriov_start_heartbeat(instance, 0))
6747 			megasas_start_timer(instance);
6748 		else {
6749 			instance->skip_heartbeat_timer_del = 1;
6750 			goto fail_init_mfi;
6751 		}
6752 	}
6753 
6754 	instance->instancet->enable_intr(instance);
6755 	megasas_setup_jbod_map(instance);
6756 	instance->unload = 0;
6757 
6758 	/*
6759 	 * Initiate AEN (Asynchronous Event Notification)
6760 	 */
6761 	if (megasas_start_aen(instance))
6762 		dev_err(&instance->pdev->dev, "Start AEN failed\n");
6763 
6764 	return 0;
6765 
6766 fail_init_mfi:
6767 	megasas_free_ctrl_dma_buffers(instance);
6768 	megasas_free_ctrl_mem(instance);
6769 	scsi_host_put(host);
6770 
6771 fail_reenable_msix:
6772 fail_set_dma_mask:
6773 fail_ready_state:
6774 
6775 	pci_disable_device(pdev);
6776 
6777 	return -ENODEV;
6778 }
6779 #else
6780 #define megasas_suspend	NULL
6781 #define megasas_resume	NULL
6782 #endif
6783 
6784 static inline int
6785 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6786 {
6787 	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6788 	int i;
6789 
6790 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6791 		return 1;
6792 
6793 	for (i = 0; i < wait_time; i++) {
6794 		if (atomic_read(&instance->adprecovery)	== MEGASAS_HBA_OPERATIONAL)
6795 			break;
6796 
6797 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6798 			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6799 
6800 		msleep(1000);
6801 	}
6802 
6803 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6804 		dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6805 			__func__);
6806 		return 1;
6807 	}
6808 
6809 	return 0;
6810 }
6811 
6812 /**
6813  * megasas_detach_one -	PCI hot"un"plug entry point
6814  * @pdev:		PCI device structure
6815  */
6816 static void megasas_detach_one(struct pci_dev *pdev)
6817 {
6818 	int i;
6819 	struct Scsi_Host *host;
6820 	struct megasas_instance *instance;
6821 	struct fusion_context *fusion;
6822 	u32 pd_seq_map_sz;
6823 
6824 	instance = pci_get_drvdata(pdev);
6825 	instance->unload = 1;
6826 	host = instance->host;
6827 	fusion = instance->ctrl_context;
6828 
6829 	/* Shutdown SR-IOV heartbeat timer */
6830 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6831 		del_timer_sync(&instance->sriov_heartbeat_timer);
6832 
6833 	if (instance->fw_crash_state != UNAVAILABLE)
6834 		megasas_free_host_crash_buffer(instance);
6835 	scsi_remove_host(instance->host);
6836 
6837 	if (megasas_wait_for_adapter_operational(instance))
6838 		goto skip_firing_dcmds;
6839 
6840 	megasas_flush_cache(instance);
6841 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6842 
6843 skip_firing_dcmds:
	/* cancel the delayed work if it is still queued */
6845 	if (instance->ev != NULL) {
6846 		struct megasas_aen_event *ev = instance->ev;
6847 		cancel_delayed_work_sync(&ev->hotplug_work);
6848 		instance->ev = NULL;
6849 	}
6850 
6851 	/* cancel all wait events */
6852 	wake_up_all(&instance->int_cmd_wait_q);
6853 
6854 	tasklet_kill(&instance->isr_tasklet);
6855 
6856 	/*
6857 	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be a sparse array.
6859 	 */
6860 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6861 		if (megasas_mgmt_info.instance[i] == instance) {
6862 			megasas_mgmt_info.count--;
6863 			megasas_mgmt_info.instance[i] = NULL;
6864 
6865 			break;
6866 		}
6867 	}
6868 
6869 	instance->instancet->disable_intr(instance);
6870 
6871 	megasas_destroy_irqs(instance);
6872 
6873 	if (instance->msix_vectors)
6874 		pci_free_irq_vectors(instance->pdev);
6875 
6876 	if (instance->adapter_type == VENTURA_SERIES) {
6877 		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6878 			kfree(fusion->stream_detect_by_ld[i]);
6879 		kfree(fusion->stream_detect_by_ld);
6880 		fusion->stream_detect_by_ld = NULL;
6881 	}
6882 
6883 
6884 	if (instance->adapter_type != MFI_SERIES) {
6885 		megasas_release_fusion(instance);
		pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
			(sizeof(struct MR_PD_CFG_SEQ) *
			 (MAX_PHYSICAL_DEVICES - 1));
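		/* Free both copies of the RAID/driver/PD-sequence maps */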
6889 		for (i = 0; i < 2 ; i++) {
6890 			if (fusion->ld_map[i])
6891 				dma_free_coherent(&instance->pdev->dev,
6892 						  fusion->max_map_sz,
6893 						  fusion->ld_map[i],
6894 						  fusion->ld_map_phys[i]);
6895 			if (fusion->ld_drv_map[i]) {
6896 				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
6897 					vfree(fusion->ld_drv_map[i]);
6898 				else
6899 					free_pages((ulong)fusion->ld_drv_map[i],
6900 						   fusion->drv_map_pages);
6901 			}
6902 
6903 			if (fusion->pd_seq_sync[i])
6904 				dma_free_coherent(&instance->pdev->dev,
6905 					pd_seq_map_sz,
6906 					fusion->pd_seq_sync[i],
6907 					fusion->pd_seq_phys[i]);
6908 		}
6909 	} else {
6910 		megasas_release_mfi(instance);
6911 	}
6912 
6913 	if (instance->vf_affiliation)
6914 		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6915 				    sizeof(struct MR_LD_VF_AFFILIATION),
6916 				    instance->vf_affiliation,
6917 				    instance->vf_affiliation_h);
6918 
6919 	if (instance->vf_affiliation_111)
6920 		pci_free_consistent(pdev,
6921 				    sizeof(struct MR_LD_VF_AFFILIATION_111),
6922 				    instance->vf_affiliation_111,
6923 				    instance->vf_affiliation_111_h);
6924 
6925 	if (instance->hb_host_mem)
6926 		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6927 				    instance->hb_host_mem,
6928 				    instance->hb_host_mem_h);
6929 
6930 	megasas_free_ctrl_dma_buffers(instance);
6931 
6932 	megasas_free_ctrl_mem(instance);
6933 
6934 	scsi_host_put(host);
6935 
6936 	pci_disable_device(pdev);
6937 }
6938 
6939 /**
6940  * megasas_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
6942  */
6943 static void megasas_shutdown(struct pci_dev *pdev)
6944 {
6945 	struct megasas_instance *instance = pci_get_drvdata(pdev);
6946 
6947 	instance->unload = 1;
6948 
6949 	if (megasas_wait_for_adapter_operational(instance))
6950 		goto skip_firing_dcmds;
6951 
6952 	megasas_flush_cache(instance);
6953 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6954 
6955 skip_firing_dcmds:
6956 	instance->instancet->disable_intr(instance);
6957 	megasas_destroy_irqs(instance);
6958 
6959 	if (instance->msix_vectors)
6960 		pci_free_irq_vectors(instance->pdev);
6961 }
6962 
6963 /**
6964  * megasas_mgmt_open -	char node "open" entry point
6965  */
6966 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6967 {
6968 	/*
6969 	 * Allow only those users with admin rights
6970 	 */
6971 	if (!capable(CAP_SYS_ADMIN))
6972 		return -EACCES;
6973 
6974 	return 0;
6975 }
6976 
6977 /**
6978  * megasas_mgmt_fasync -	Async notifier registration from applications
6979  *
6980  * This function adds the calling process to a driver global queue. When an
6981  * event occurs, SIGIO will be sent to all processes in this queue.
6982  */
6983 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6984 {
6985 	int rc;
6986 
6987 	mutex_lock(&megasas_async_queue_mutex);
6988 
6989 	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6990 
6991 	mutex_unlock(&megasas_async_queue_mutex);
6992 
6993 	if (rc >= 0) {
6994 		/* For sanity check when we get ioctl */
6995 		filep->private_data = filep;
6996 		return 0;
6997 	}
6998 
6999 	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7000 
7001 	return rc;
7002 }
7003 
7004 /**
7005  * megasas_mgmt_poll -  char node "poll" entry point
7006  * */
7007 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
7008 {
7009 	unsigned int mask;
7010 	unsigned long flags;
7011 
7012 	poll_wait(file, &megasas_poll_wait, wait);
7013 	spin_lock_irqsave(&poll_aen_lock, flags);
7014 	if (megasas_poll_wait_aen)
7015 		mask = (POLLIN | POLLRDNORM);
7016 	else
7017 		mask = 0;
7018 	megasas_poll_wait_aen = 0;
7019 	spin_unlock_irqrestore(&poll_aen_lock, flags);
7020 	return mask;
7021 }
7022 
7023 /*
7024  * megasas_set_crash_dump_params_ioctl:
7025  *		Send CRASH_DUMP_MODE DCMD to all controllers
7026  * @cmd:	MFI command frame
7027  */
7028 
7029 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7030 {
7031 	struct megasas_instance *local_instance;
7032 	int i, error = 0;
7033 	int crash_support;
7034 
7035 	crash_support = cmd->frame->dcmd.mbox.w[0];
7036 
7037 	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7038 		local_instance = megasas_mgmt_info.instance[i];
7039 		if (local_instance && local_instance->crash_dump_drv_support) {
7040 			if ((atomic_read(&local_instance->adprecovery) ==
7041 				MEGASAS_HBA_OPERATIONAL) &&
7042 				!megasas_set_crash_dump_params(local_instance,
7043 					crash_support)) {
7044 				local_instance->crash_dump_app_support =
7045 					crash_support;
7046 				dev_info(&local_instance->pdev->dev,
7047 					"Application firmware crash "
7048 					"dump mode set success\n");
7049 				error = 0;
7050 			} else {
7051 				dev_info(&local_instance->pdev->dev,
7052 					"Application firmware crash "
7053 					"dump mode set failed\n");
7054 				error = -1;
7055 			}
7056 		}
7057 	}
7058 	return error;
7059 }
7060 
7061 /**
7062  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
7063  * @instance:			Adapter soft state
7064  * @argp:			User's ioctl packet
7065  */
7066 static int
7067 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7068 		      struct megasas_iocpacket __user * user_ioc,
7069 		      struct megasas_iocpacket *ioc)
7070 {
7071 	struct megasas_sge64 *kern_sge64 = NULL;
7072 	struct megasas_sge32 *kern_sge32 = NULL;
7073 	struct megasas_cmd *cmd;
7074 	void *kbuff_arr[MAX_IOCTL_SGE];
7075 	dma_addr_t buf_handle = 0;
7076 	int error = 0, i;
7077 	void *sense = NULL;
7078 	dma_addr_t sense_handle;
7079 	unsigned long *sense_ptr;
7080 	u32 opcode = 0;
7081 
7082 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
7083 
7084 	if (ioc->sge_count > MAX_IOCTL_SGE) {
		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
7086 		       ioc->sge_count, MAX_IOCTL_SGE);
7087 		return -EINVAL;
7088 	}
7089 
7090 	if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
7091 		dev_err(&instance->pdev->dev,
7092 			"Received invalid ioctl command 0x%x\n",
7093 			ioc->frame.hdr.cmd);
7094 		return -ENOTSUPP;
7095 	}
7096 
7097 	cmd = megasas_get_cmd(instance);
7098 	if (!cmd) {
7099 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7100 		return -ENOMEM;
7101 	}
7102 
7103 	/*
7104 	 * User's IOCTL packet has 2 frames (maximum). Copy those two
7105 	 * frames into our cmd's frames. cmd->frame's context will get
7106 	 * overwritten when we copy from user's frames. So set that value
7107 	 * alone separately
7108 	 */
7109 	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7110 	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7111 	cmd->frame->hdr.pad_0 = 0;
7112 
7113 	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7114 
7115 	if (instance->consistent_mask_64bit)
7116 		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7117 				       MFI_FRAME_SENSE64));
7118 	else
7119 		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
7120 					       MFI_FRAME_SENSE64));
7121 
7122 	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7123 		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7124 
7125 	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7126 		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
7127 			megasas_return_cmd(instance, cmd);
7128 			return -1;
7129 		}
7130 	}
7131 
7132 	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
7133 		error = megasas_set_crash_dump_params_ioctl(cmd);
7134 		megasas_return_cmd(instance, cmd);
7135 		return error;
7136 	}
7137 
7138 	/*
7139 	 * The management interface between applications and the fw uses
	 * MFI frames. E.g., RAID configuration changes, LD property changes,
	 * etc. are accomplished through different kinds of MFI frames. The
7142 	 * driver needs to care only about substituting user buffers with
7143 	 * kernel buffers in SGLs. The location of SGL is embedded in the
7144 	 * struct iocpacket itself.
7145 	 */
7146 	if (instance->consistent_mask_64bit)
7147 		kern_sge64 = (struct megasas_sge64 *)
7148 			((unsigned long)cmd->frame + ioc->sgl_off);
7149 	else
7150 		kern_sge32 = (struct megasas_sge32 *)
7151 			((unsigned long)cmd->frame + ioc->sgl_off);
7152 
7153 	/*
7154 	 * For each user buffer, create a mirror buffer and copy in
7155 	 */
7156 	for (i = 0; i < ioc->sge_count; i++) {
7157 		if (!ioc->sgl[i].iov_len)
7158 			continue;
7159 
7160 		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
7161 						    ioc->sgl[i].iov_len,
7162 						    &buf_handle, GFP_KERNEL);
7163 		if (!kbuff_arr[i]) {
7164 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
7165 			       "kernel SGL buffer for IOCTL\n");
7166 			error = -ENOMEM;
7167 			goto out;
7168 		}
7169 
7170 		/*
7171 		 * We don't change the dma_coherent_mask, so
7172 		 * pci_alloc_consistent only returns 32bit addresses
7173 		 */
7174 		if (instance->consistent_mask_64bit) {
7175 			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
7176 			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7177 		} else {
7178 			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7179 			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7180 		}
7181 
7182 		/*
7183 		 * We created a kernel buffer corresponding to the
7184 		 * user buffer. Now copy in from the user buffer
7185 		 */
7186 		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
7187 				   (u32) (ioc->sgl[i].iov_len))) {
7188 			error = -EFAULT;
7189 			goto out;
7190 		}
7191 	}
7192 
7193 	if (ioc->sense_len) {
7194 		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7195 					     &sense_handle, GFP_KERNEL);
7196 		if (!sense) {
7197 			error = -ENOMEM;
7198 			goto out;
7199 		}
7200 
7201 		sense_ptr =
7202 		(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7203 		if (instance->consistent_mask_64bit)
7204 			*sense_ptr = cpu_to_le64(sense_handle);
7205 		else
7206 			*sense_ptr = cpu_to_le32(sense_handle);
7207 	}
7208 
7209 	/*
7210 	 * Set the sync_cmd flag so that the ISR knows not to complete this
7211 	 * cmd to the SCSI mid-layer
7212 	 */
7213 	cmd->sync_cmd = 1;
7214 	if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
7215 		cmd->sync_cmd = 0;
7216 		dev_err(&instance->pdev->dev,
7217 			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
7218 			__func__, __LINE__, cmd->frame->hdr.cmd, opcode,
7219 			cmd->cmd_status_drv);
7220 		return -EBUSY;
7221 	}
7222 
7223 	cmd->sync_cmd = 0;
7224 
7225 	if (instance->unload == 1) {
		dev_info(&instance->pdev->dev, "Driver unload is in progress, "
			"don't submit data to application\n");
7228 		goto out;
7229 	}
7230 	/*
7231 	 * copy out the kernel buffers to user buffers
7232 	 */
7233 	for (i = 0; i < ioc->sge_count; i++) {
7234 		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7235 				 ioc->sgl[i].iov_len)) {
7236 			error = -EFAULT;
7237 			goto out;
7238 		}
7239 	}
7240 
7241 	/*
7242 	 * copy out the sense
7243 	 */
7244 	if (ioc->sense_len) {
7245 		/*
7246 		 * sense_ptr points to the location that has the user
7247 		 * sense buffer address
7248 		 */
7249 		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7250 				ioc->sense_off);
7251 
7252 		if (copy_to_user((void __user *)((unsigned long)
7253 				 get_unaligned((unsigned long *)sense_ptr)),
7254 				 sense, ioc->sense_len)) {
			dev_err(&instance->pdev->dev,
				"Failed to copy sense data out to user\n");
7257 			error = -EFAULT;
7258 			goto out;
7259 		}
7260 	}
7261 
7262 	/*
7263 	 * copy the status codes returned by the fw
7264 	 */
7265 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7266 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7267 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7268 		error = -EFAULT;
7269 	}
7270 
7271 out:
7272 	if (sense) {
7273 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7274 				    sense, sense_handle);
7275 	}
7276 
7277 	for (i = 0; i < ioc->sge_count; i++) {
7278 		if (kbuff_arr[i]) {
7279 			if (instance->consistent_mask_64bit)
7280 				dma_free_coherent(&instance->pdev->dev,
7281 					le32_to_cpu(kern_sge64[i].length),
7282 					kbuff_arr[i],
7283 					le64_to_cpu(kern_sge64[i].phys_addr));
7284 			else
7285 				dma_free_coherent(&instance->pdev->dev,
7286 					le32_to_cpu(kern_sge32[i].length),
7287 					kbuff_arr[i],
7288 					le32_to_cpu(kern_sge32[i].phys_addr));
7289 			kbuff_arr[i] = NULL;
7290 		}
7291 	}
7292 
7293 	megasas_return_cmd(instance, cmd);
7294 	return error;
7295 }
7296 
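/**
 * megasas_mgmt_ioctl_fw -	MEGASAS_IOC_FIRMWARE ioctl handler
 * @file:	char node file
 * @arg:	user pointer to a struct megasas_iocpacket
 *
 * Copies in the ioctl packet, looks up the adapter instance, waits for any
 * ongoing reset to complete and then issues the request to the firmware via
 * megasas_mgmt_fw_ioctl() while holding the per-instance ioctl semaphore.
 */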
7297 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7298 {
7299 	struct megasas_iocpacket __user *user_ioc =
7300 	    (struct megasas_iocpacket __user *)arg;
7301 	struct megasas_iocpacket *ioc;
7302 	struct megasas_instance *instance;
7303 	int error;
7304 	int i;
7305 	unsigned long flags;
7306 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7307 
7308 	ioc = memdup_user(user_ioc, sizeof(*ioc));
7309 	if (IS_ERR(ioc))
7310 		return PTR_ERR(ioc);
7311 
7312 	instance = megasas_lookup_instance(ioc->host_no);
7313 	if (!instance) {
7314 		error = -ENODEV;
7315 		goto out_kfree_ioc;
7316 	}
7317 
7318 	/* Adjust ioctl wait time for VF mode */
7319 	if (instance->requestorId)
7320 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7321 
7322 	/* Block ioctls in VF mode */
7323 	if (instance->requestorId && !allow_vf_ioctls) {
7324 		error = -ENODEV;
7325 		goto out_kfree_ioc;
7326 	}
7327 
7328 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7329 		dev_err(&instance->pdev->dev, "Controller in crit error\n");
7330 		error = -ENODEV;
7331 		goto out_kfree_ioc;
7332 	}
7333 
7334 	if (instance->unload == 1) {
7335 		error = -ENODEV;
7336 		goto out_kfree_ioc;
7337 	}
7338 
7339 	if (down_interruptible(&instance->ioctl_sem)) {
7340 		error = -ERESTARTSYS;
7341 		goto out_kfree_ioc;
7342 	}
7343 
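	/*
	 * If an adapter reset is in progress, wait (up to wait_time seconds)
	 * for the HBA to become operational again before issuing the ioctl.
	 */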
7344 	for (i = 0; i < wait_time; i++) {
7345 
7346 		spin_lock_irqsave(&instance->hba_lock, flags);
7347 		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7348 			spin_unlock_irqrestore(&instance->hba_lock, flags);
7349 			break;
7350 		}
7351 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7352 
7353 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev,
				   "waiting for controller reset to finish\n");
7356 		}
7357 
7358 		msleep(1000);
7359 	}
7360 
7361 	spin_lock_irqsave(&instance->hba_lock, flags);
7362 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7363 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7364 
7365 		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7366 		error = -ENODEV;
7367 		goto out_up;
7368 	}
7369 	spin_unlock_irqrestore(&instance->hba_lock, flags);
7370 
7371 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7372 out_up:
7373 	up(&instance->ioctl_sem);
7374 
7375 out_kfree_ioc:
7376 	kfree(ioc);
7377 	return error;
7378 }
7379 
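/**
 * megasas_mgmt_ioctl_aen -	MEGASAS_IOC_GET_AEN ioctl handler
 * @file:	char node file
 * @arg:	user pointer to a struct megasas_aen
 *
 * Registers an asynchronous event notification (AEN) with the firmware for
 * the sequence number and class/locale requested by the application.
 */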
7380 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7381 {
7382 	struct megasas_instance *instance;
7383 	struct megasas_aen aen;
7384 	int error;
7385 	int i;
7386 	unsigned long flags;
7387 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7388 
7389 	if (file->private_data != file) {
7390 		printk(KERN_DEBUG "megasas: fasync_helper was not "
7391 		       "called first\n");
7392 		return -EINVAL;
7393 	}
7394 
7395 	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7396 		return -EFAULT;
7397 
7398 	instance = megasas_lookup_instance(aen.host_no);
7399 
7400 	if (!instance)
7401 		return -ENODEV;
7402 
7403 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7404 		return -ENODEV;
7405 	}
7406 
7407 	if (instance->unload == 1) {
7408 		return -ENODEV;
7409 	}
7410 
7411 	for (i = 0; i < wait_time; i++) {
7412 
7413 		spin_lock_irqsave(&instance->hba_lock, flags);
7414 		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7415 			spin_unlock_irqrestore(&instance->hba_lock,
7416 						flags);
7417 			break;
7418 		}
7419 
7420 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7421 
7422 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev,
				   "waiting for controller reset to finish\n");
7425 		}
7426 
7427 		msleep(1000);
7428 	}
7429 
7430 	spin_lock_irqsave(&instance->hba_lock, flags);
7431 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7432 		spin_unlock_irqrestore(&instance->hba_lock, flags);
7433 		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7434 		return -ENODEV;
7435 	}
7436 	spin_unlock_irqrestore(&instance->hba_lock, flags);
7437 
7438 	mutex_lock(&instance->reset_mutex);
7439 	error = megasas_register_aen(instance, aen.seq_num,
7440 				     aen.class_locale_word);
7441 	mutex_unlock(&instance->reset_mutex);
7442 	return error;
7443 }
7444 
7445 /**
7446  * megasas_mgmt_ioctl -	char node ioctl entry point
7447  */
7448 static long
7449 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7450 {
7451 	switch (cmd) {
7452 	case MEGASAS_IOC_FIRMWARE:
7453 		return megasas_mgmt_ioctl_fw(file, arg);
7454 
7455 	case MEGASAS_IOC_GET_AEN:
7456 		return megasas_mgmt_ioctl_aen(file, arg);
7457 	}
7458 
7459 	return -ENOTTY;
7460 }
7461 
7462 #ifdef CONFIG_COMPAT
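/*
 * 32-bit compat path: convert a struct compat_megasas_iocpacket from a
 * 32-bit application into the native layout in a compat user-space area
 * and hand it to megasas_mgmt_ioctl_fw().
 */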
7463 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7464 {
7465 	struct compat_megasas_iocpacket __user *cioc =
7466 	    (struct compat_megasas_iocpacket __user *)arg;
7467 	struct megasas_iocpacket __user *ioc =
7468 	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7469 	int i;
7470 	int error = 0;
7471 	compat_uptr_t ptr;
7472 	u32 local_sense_off;
7473 	u32 local_sense_len;
7474 	u32 user_sense_off;
7475 
7476 	if (clear_user(ioc, sizeof(*ioc)))
7477 		return -EFAULT;
7478 
7479 	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7480 	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7481 	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7482 	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7483 	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7484 	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7485 		return -EFAULT;
7486 
7487 	/*
7488 	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7489 	 * sense_len is not null, so prepare the 64bit value under
7490 	 * the same condition.
7491 	 */
7492 	if (get_user(local_sense_off, &ioc->sense_off) ||
7493 		get_user(local_sense_len, &ioc->sense_len) ||
7494 		get_user(user_sense_off, &cioc->sense_off))
7495 		return -EFAULT;
7496 
7497 	if (local_sense_len) {
7498 		void __user **sense_ioc_ptr =
7499 			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7500 		compat_uptr_t *sense_cioc_ptr =
7501 			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7502 		if (get_user(ptr, sense_cioc_ptr) ||
7503 		    put_user(compat_ptr(ptr), sense_ioc_ptr))
7504 			return -EFAULT;
7505 	}
7506 
7507 	for (i = 0; i < MAX_IOCTL_SGE; i++) {
7508 		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7509 		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7510 		    copy_in_user(&ioc->sgl[i].iov_len,
7511 				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7512 			return -EFAULT;
7513 	}
7514 
7515 	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7516 
7517 	if (copy_in_user(&cioc->frame.hdr.cmd_status,
7518 			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7519 		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7520 		return -EFAULT;
7521 	}
7522 	return error;
7523 }
7524 
7525 static long
7526 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7527 			  unsigned long arg)
7528 {
7529 	switch (cmd) {
7530 	case MEGASAS_IOC_FIRMWARE32:
7531 		return megasas_mgmt_compat_ioctl_fw(file, arg);
7532 	case MEGASAS_IOC_GET_AEN:
7533 		return megasas_mgmt_ioctl_aen(file, arg);
7534 	}
7535 
7536 	return -ENOTTY;
7537 }
7538 #endif
7539 
7540 /*
7541  * File operations structure for management interface
7542  */
7543 static const struct file_operations megasas_mgmt_fops = {
7544 	.owner = THIS_MODULE,
7545 	.open = megasas_mgmt_open,
7546 	.fasync = megasas_mgmt_fasync,
7547 	.unlocked_ioctl = megasas_mgmt_ioctl,
7548 	.poll = megasas_mgmt_poll,
7549 #ifdef CONFIG_COMPAT
7550 	.compat_ioctl = megasas_mgmt_compat_ioctl,
7551 #endif
7552 	.llseek = noop_llseek,
7553 };
7554 
7555 /*
7556  * PCI hotplug support registration structure
7557  */
7558 static struct pci_driver megasas_pci_driver = {
7559 
7560 	.name = "megaraid_sas",
7561 	.id_table = megasas_pci_table,
7562 	.probe = megasas_probe_one,
7563 	.remove = megasas_detach_one,
7564 	.suspend = megasas_suspend,
7565 	.resume = megasas_resume,
7566 	.shutdown = megasas_shutdown,
7567 };
7568 
7569 /*
7570  * Sysfs driver attributes
7571  */
7572 static ssize_t version_show(struct device_driver *dd, char *buf)
7573 {
7574 	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7575 			MEGASAS_VERSION);
7576 }
7577 static DRIVER_ATTR_RO(version);
7578 
7579 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7580 {
7581 	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7582 		MEGASAS_RELDATE);
7583 }
7584 static DRIVER_ATTR_RO(release_date);
7585 
7586 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7587 {
7588 	return sprintf(buf, "%u\n", support_poll_for_event);
7589 }
7590 static DRIVER_ATTR_RO(support_poll_for_event);
7591 
7592 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7593 {
7594 	return sprintf(buf, "%u\n", support_device_change);
7595 }
7596 static DRIVER_ATTR_RO(support_device_change);
7597 
7598 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7599 {
7600 	return sprintf(buf, "%u\n", megasas_dbg_lvl);
7601 }
7602 
7603 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7604 			     size_t count)
7605 {
7606 	int retval = count;
7607 
7608 	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7609 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7610 		retval = -EINVAL;
7611 	}
7612 	return retval;
7613 }
7614 static DRIVER_ATTR_RW(dbg_lvl);
7615 
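/*
 * Remove a SCSI device previously obtained via scsi_device_lookup() and
 * drop the reference that the lookup took.
 */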
7616 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7617 {
7618 	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7619 	scsi_remove_device(sdev);
7620 	scsi_device_put(sdev);
7621 }
7622 
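/**
 * megasas_aen_polling -	Deferred AEN processing
 * @work:	work item embedded in struct megasas_aen_event
 *
 * Decodes the latest firmware event, refreshes the PD/LD lists as needed,
 * adds or removes the affected SCSI devices and re-registers an AEN with
 * the firmware for the next sequence number.
 */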
7623 static void
7624 megasas_aen_polling(struct work_struct *work)
7625 {
7626 	struct megasas_aen_event *ev =
7627 		container_of(work, struct megasas_aen_event, hotplug_work.work);
7628 	struct megasas_instance *instance = ev->instance;
7629 	union megasas_evt_class_locale class_locale;
7630 	struct  Scsi_Host *host;
7631 	struct  scsi_device *sdev1;
7632 	u16     pd_index = 0;
7633 	u16	ld_index = 0;
7634 	int     i, j, doscan = 0;
7635 	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7636 	int error;
7637 	u8  dcmd_ret = DCMD_SUCCESS;
7638 
7639 	if (!instance) {
7640 		printk(KERN_ERR "invalid instance!\n");
7641 		kfree(ev);
7642 		return;
7643 	}
7644 
7645 	/* Adjust event workqueue thread wait time for VF mode */
7646 	if (instance->requestorId)
7647 		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7648 
7649 	/* Don't run the event workqueue thread if OCR is running */
7650 	mutex_lock(&instance->reset_mutex);
7651 
7652 	instance->ev = NULL;
7653 	host = instance->host;
7654 	if (instance->evt_detail) {
7655 		megasas_decode_evt(instance);
7656 
7657 		switch (le32_to_cpu(instance->evt_detail->code)) {
7658 
7659 		case MR_EVT_PD_INSERTED:
7660 		case MR_EVT_PD_REMOVED:
7661 			dcmd_ret = megasas_get_pd_list(instance);
7662 			if (dcmd_ret == DCMD_SUCCESS)
7663 				doscan = SCAN_PD_CHANNEL;
7664 			break;
7665 
7666 		case MR_EVT_LD_OFFLINE:
7667 		case MR_EVT_CFG_CLEARED:
7668 		case MR_EVT_LD_DELETED:
7669 		case MR_EVT_LD_CREATED:
7670 			if (!instance->requestorId ||
7671 				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7672 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7673 
7674 			if (dcmd_ret == DCMD_SUCCESS)
7675 				doscan = SCAN_VD_CHANNEL;
7676 
7677 			break;
7678 
7679 		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7680 		case MR_EVT_FOREIGN_CFG_IMPORTED:
7681 		case MR_EVT_LD_STATE_CHANGE:
7682 			dcmd_ret = megasas_get_pd_list(instance);
7683 
7684 			if (dcmd_ret != DCMD_SUCCESS)
7685 				break;
7686 
7687 			if (!instance->requestorId ||
7688 				(instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7689 				dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7690 
7691 			if (dcmd_ret != DCMD_SUCCESS)
7692 				break;
7693 
7694 			doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7695 			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7696 				instance->host->host_no);
7697 			break;
7698 
		case MR_EVT_CTRL_PROP_CHANGED:
			dcmd_ret = megasas_get_ctrl_info(instance);
			break;
7702 		default:
7703 			doscan = 0;
7704 			break;
7705 		}
7706 	} else {
7707 		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7708 		mutex_unlock(&instance->reset_mutex);
7709 		kfree(ev);
7710 		return;
7711 	}
7712 
7713 	mutex_unlock(&instance->reset_mutex);
7714 
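	/* Add or remove physical drives based on the refreshed PD list */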
7715 	if (doscan & SCAN_PD_CHANNEL) {
7716 		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7717 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7718 				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7719 				sdev1 = scsi_device_lookup(host, i, j, 0);
7720 				if (instance->pd_list[pd_index].driveState ==
7721 							MR_PD_STATE_SYSTEM) {
7722 					if (!sdev1)
7723 						scsi_add_device(host, i, j, 0);
7724 					else
7725 						scsi_device_put(sdev1);
7726 				} else {
7727 					if (sdev1)
7728 						megasas_remove_scsi_device(sdev1);
7729 				}
7730 			}
7731 		}
7732 	}
7733 
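	/* Add or remove logical drives based on the refreshed LD ID list */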
7734 	if (doscan & SCAN_VD_CHANNEL) {
7735 		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7736 			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7737 				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7738 				sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7739 				if (instance->ld_ids[ld_index] != 0xff) {
7740 					if (!sdev1)
7741 						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7742 					else
7743 						scsi_device_put(sdev1);
7744 				} else {
7745 					if (sdev1)
7746 						megasas_remove_scsi_device(sdev1);
7747 				}
7748 			}
7749 		}
7750 	}
7751 
7752 	if (dcmd_ret == DCMD_SUCCESS)
7753 		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7754 	else
7755 		seq_num = instance->last_seq_num;
7756 
7757 	/* Register AEN with FW for latest sequence number plus 1 */
7758 	class_locale.members.reserved = 0;
7759 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
7760 	class_locale.members.class = MR_EVT_CLASS_DEBUG;
7761 
7762 	if (instance->aen_cmd != NULL) {
7763 		kfree(ev);
7764 		return;
7765 	}
7766 
7767 	mutex_lock(&instance->reset_mutex);
7768 	error = megasas_register_aen(instance, seq_num,
7769 					class_locale.word);
7770 	if (error)
7771 		dev_err(&instance->pdev->dev,
7772 			"register aen failed error %x\n", error);
7773 
7774 	mutex_unlock(&instance->reset_mutex);
7775 	kfree(ev);
7776 }
7777 
7778 /**
7779  * megasas_init - Driver load entry point
7780  */
7781 static int __init megasas_init(void)
7782 {
7783 	int rval;
7784 
7785 	/*
	 * When booted in a kdump kernel, minimize the memory footprint by
	 * disabling a few features
7788 	 */
7789 	if (reset_devices) {
7790 		msix_vectors = 1;
7791 		rdpq_enable = 0;
7792 		dual_qdepth_disable = 1;
7793 	}
7794 
7795 	/*
7796 	 * Announce driver version and other information
7797 	 */
7798 	pr_info("megasas: %s\n", MEGASAS_VERSION);
7799 
7800 	spin_lock_init(&poll_aen_lock);
7801 
7802 	support_poll_for_event = 2;
7803 	support_device_change = 1;
7804 
7805 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7806 
7807 	/*
7808 	 * Register character device node
7809 	 */
7810 	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7811 
7812 	if (rval < 0) {
7813 		printk(KERN_DEBUG "megasas: failed to open device node\n");
7814 		return rval;
7815 	}
7816 
7817 	megasas_mgmt_majorno = rval;
7818 
7819 	/*
7820 	 * Register ourselves as PCI hotplug module
7821 	 */
7822 	rval = pci_register_driver(&megasas_pci_driver);
7823 
7824 	if (rval) {
		printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7826 		goto err_pcidrv;
7827 	}
7828 
7829 	rval = driver_create_file(&megasas_pci_driver.driver,
7830 				  &driver_attr_version);
7831 	if (rval)
7832 		goto err_dcf_attr_ver;
7833 
7834 	rval = driver_create_file(&megasas_pci_driver.driver,
7835 				  &driver_attr_release_date);
7836 	if (rval)
7837 		goto err_dcf_rel_date;
7838 
7839 	rval = driver_create_file(&megasas_pci_driver.driver,
7840 				&driver_attr_support_poll_for_event);
7841 	if (rval)
7842 		goto err_dcf_support_poll_for_event;
7843 
7844 	rval = driver_create_file(&megasas_pci_driver.driver,
7845 				  &driver_attr_dbg_lvl);
7846 	if (rval)
7847 		goto err_dcf_dbg_lvl;
7848 	rval = driver_create_file(&megasas_pci_driver.driver,
7849 				&driver_attr_support_device_change);
7850 	if (rval)
7851 		goto err_dcf_support_device_change;
7852 
7853 	return rval;
7854 
7855 err_dcf_support_device_change:
7856 	driver_remove_file(&megasas_pci_driver.driver,
7857 			   &driver_attr_dbg_lvl);
7858 err_dcf_dbg_lvl:
7859 	driver_remove_file(&megasas_pci_driver.driver,
7860 			&driver_attr_support_poll_for_event);
7861 err_dcf_support_poll_for_event:
7862 	driver_remove_file(&megasas_pci_driver.driver,
7863 			   &driver_attr_release_date);
7864 err_dcf_rel_date:
7865 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7866 err_dcf_attr_ver:
7867 	pci_unregister_driver(&megasas_pci_driver);
7868 err_pcidrv:
7869 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7870 	return rval;
7871 }
7872 
7873 /**
7874  * megasas_exit - Driver unload entry point
7875  */
7876 static void __exit megasas_exit(void)
7877 {
7878 	driver_remove_file(&megasas_pci_driver.driver,
7879 			   &driver_attr_dbg_lvl);
7880 	driver_remove_file(&megasas_pci_driver.driver,
7881 			&driver_attr_support_poll_for_event);
7882 	driver_remove_file(&megasas_pci_driver.driver,
7883 			&driver_attr_support_device_change);
7884 	driver_remove_file(&megasas_pci_driver.driver,
7885 			   &driver_attr_release_date);
7886 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7887 
7888 	pci_unregister_driver(&megasas_pci_driver);
7889 	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7890 }
7891 
7892 module_init(megasas_init);
7893 module_exit(megasas_exit);
7894