1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2009-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  FILE: megaraid_sas_fusion.c
21  *
22  *  Authors: Avago Technologies
23  *           Sumant Patro
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
45 #include <linux/fs.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsi_dbg.h>
56 #include <linux/dmi.h>
57 
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
60 
61 
62 extern void megasas_free_cmds(struct megasas_instance *instance);
63 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
64 					   *instance);
65 extern void
66 megasas_complete_cmd(struct megasas_instance *instance,
67 		     struct megasas_cmd *cmd, u8 alt_status);
68 int
69 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
70 	      int seconds);
71 
72 void
73 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
74 int megasas_alloc_cmds(struct megasas_instance *instance);
75 int
76 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
77 int
78 megasas_issue_polled(struct megasas_instance *instance,
79 		     struct megasas_cmd *cmd);
80 void
81 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
82 
83 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
84 void megaraid_sas_kill_hba(struct megasas_instance *instance);
85 
86 extern u32 megasas_dbg_lvl;
87 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
89 				  int initial);
90 void megasas_start_timer(struct megasas_instance *instance,
			 struct timer_list *timer,
			 void *fn, unsigned long interval);
93 extern struct megasas_mgmt_info megasas_mgmt_info;
94 extern int resetwaittime;
95 
96 
97 
98 /**
99  * megasas_enable_intr_fusion -	Enables interrupts
 * @instance:			Adapter soft state
101  */
102 void
103 megasas_enable_intr_fusion(struct megasas_instance *instance)
104 {
105 	struct megasas_register_set __iomem *regs;
106 	regs = instance->reg_set;
107 
108 	instance->mask_interrupts = 0;
109 	/* For Thunderbolt/Invader also clear intr on enable */
110 	writel(~0, &regs->outbound_intr_status);
111 	readl(&regs->outbound_intr_status);
112 
113 	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
114 
115 	/* Dummy readl to force pci flush */
116 	readl(&regs->outbound_intr_mask);
117 }
118 
119 /**
120  * megasas_disable_intr_fusion - Disables interrupt
 * @instance:			 Adapter soft state
122  */
123 void
124 megasas_disable_intr_fusion(struct megasas_instance *instance)
125 {
126 	u32 mask = 0xFFFFFFFF;
127 	u32 status;
128 	struct megasas_register_set __iomem *regs;
129 	regs = instance->reg_set;
130 	instance->mask_interrupts = 1;
131 
132 	writel(mask, &regs->outbound_intr_mask);
133 	/* Dummy readl to force pci flush */
134 	status = readl(&regs->outbound_intr_mask);
135 }
136 
137 int
138 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
139 {
140 	u32 status;
141 	/*
142 	 * Check if it is our interrupt
143 	 */
144 	status = readl(&regs->outbound_intr_status);
145 
146 	if (status & 1) {
147 		writel(status, &regs->outbound_intr_status);
148 		readl(&regs->outbound_intr_status);
149 		return 1;
150 	}
151 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
152 		return 0;
153 
154 	return 1;
155 }
156 
157 /**
158  * megasas_get_cmd_fusion -	Get a command from the free pool
 * @instance:		Adapter soft state
 * @blk_tag:		Block layer tag used to index the command list
 *
 * Returns the MPT command frame indexed by @blk_tag
162  */
163 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
164 						  *instance, u32 blk_tag)
165 {
166 	struct fusion_context *fusion;
167 
168 	fusion = instance->ctrl_context;
169 	return fusion->cmd_list[blk_tag];
170 }
171 
172 /**
173  * megasas_return_cmd_fusion -	Return a cmd to free command pool
174  * @instance:		Adapter soft state
175  * @cmd:		Command packet to be returned to free command pool
176  */
177 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
178 	struct megasas_cmd_fusion *cmd)
179 {
180 	cmd->scmd = NULL;
181 	memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
182 }
183 
184 /**
 * megasas_fire_cmd_fusion -	Sends command to the FW
 * @instance:		Adapter soft state
 * @req_desc:		Request descriptor to post to the FW
186  */
187 static void
188 megasas_fire_cmd_fusion(struct megasas_instance *instance,
189 		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
190 {
191 #if defined(writeq) && defined(CONFIG_64BIT)
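	/*
	 * On 64-bit builds the whole descriptor is posted with a single
	 * atomic writeq(), so no lock is needed around the MMIO write.
	 */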
192 	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
193 			le32_to_cpu(req_desc->u.low));
194 
195 	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
196 #else
197 	unsigned long flags;
198 
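	/*
	 * Without writeq(), the low and high halves of the descriptor are
	 * written as two 32-bit MMIO accesses; hba_lock keeps the pair from
	 * interleaving with a descriptor posted by another CPU.
	 */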
199 	spin_lock_irqsave(&instance->hba_lock, flags);
200 	writel(le32_to_cpu(req_desc->u.low),
201 		&instance->reg_set->inbound_low_queue_port);
202 	writel(le32_to_cpu(req_desc->u.high),
203 		&instance->reg_set->inbound_high_queue_port);
204 	spin_unlock_irqrestore(&instance->hba_lock, flags);
205 #endif
206 }
207 
208 
209 /**
210  * megasas_teardown_frame_pool_fusion -	Destroy the cmd frame DMA pool
211  * @instance:				Adapter soft state
212  */
213 static void megasas_teardown_frame_pool_fusion(
214 	struct megasas_instance *instance)
215 {
216 	int i;
217 	struct fusion_context *fusion = instance->ctrl_context;
218 
219 	u16 max_cmd = instance->max_fw_cmds;
220 
221 	struct megasas_cmd_fusion *cmd;
222 
223 	if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
224 		dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
225 		       "sense pool : %p\n", fusion->sg_dma_pool,
226 		       fusion->sense_dma_pool);
227 		return;
228 	}
229 
230 	/*
231 	 * Return all frames to pool
232 	 */
233 	for (i = 0; i < max_cmd; i++) {
234 
235 		cmd = fusion->cmd_list[i];
236 
237 		if (cmd->sg_frame)
238 			pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
239 				      cmd->sg_frame_phys_addr);
240 
241 		if (cmd->sense)
242 			pci_pool_free(fusion->sense_dma_pool, cmd->sense,
243 				      cmd->sense_phys_addr);
244 	}
245 
246 	/*
247 	 * Now destroy the pool itself
248 	 */
249 	pci_pool_destroy(fusion->sg_dma_pool);
250 	pci_pool_destroy(fusion->sense_dma_pool);
251 
252 	fusion->sg_dma_pool = NULL;
253 	fusion->sense_dma_pool = NULL;
254 }
255 
256 /**
257  * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
258  * @instance:		Adapter soft state
259  */
260 void
261 megasas_free_cmds_fusion(struct megasas_instance *instance)
262 {
263 	int i;
264 	struct fusion_context *fusion = instance->ctrl_context;
265 
266 	u32 max_cmds, req_sz, reply_sz, io_frames_sz;
267 
268 
269 	req_sz = fusion->request_alloc_sz;
270 	reply_sz = fusion->reply_alloc_sz;
271 	io_frames_sz = fusion->io_frames_alloc_sz;
272 
273 	max_cmds = instance->max_fw_cmds;
274 
275 	/* Free descriptors and request Frames memory */
276 	if (fusion->req_frames_desc)
277 		dma_free_coherent(&instance->pdev->dev, req_sz,
278 				  fusion->req_frames_desc,
279 				  fusion->req_frames_desc_phys);
280 
281 	if (fusion->reply_frames_desc) {
282 		pci_pool_free(fusion->reply_frames_desc_pool,
283 			      fusion->reply_frames_desc,
284 			      fusion->reply_frames_desc_phys);
285 		pci_pool_destroy(fusion->reply_frames_desc_pool);
286 	}
287 
288 	if (fusion->io_request_frames) {
289 		pci_pool_free(fusion->io_request_frames_pool,
290 			      fusion->io_request_frames,
291 			      fusion->io_request_frames_phys);
292 		pci_pool_destroy(fusion->io_request_frames_pool);
293 	}
294 
295 	/* Free the Fusion frame pool */
296 	megasas_teardown_frame_pool_fusion(instance);
297 
298 	/* Free all the commands in the cmd_list */
299 	for (i = 0; i < max_cmds; i++)
300 		kfree(fusion->cmd_list[i]);
301 
302 	/* Free the cmd_list buffer itself */
303 	kfree(fusion->cmd_list);
304 	fusion->cmd_list = NULL;
305 
306 }
307 
308 /**
309  * megasas_create_frame_pool_fusion -	Creates DMA pool for cmd frames
310  * @instance:			Adapter soft state
311  *
312  */
313 static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
314 {
315 	int i;
316 	u32 max_cmd;
317 	struct fusion_context *fusion;
318 	struct megasas_cmd_fusion *cmd;
319 
320 	fusion = instance->ctrl_context;
321 	max_cmd = instance->max_fw_cmds;
322 
323 
324 	/*
325 	 * Use DMA pool facility provided by PCI layer
326 	 */
327 
328 	fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev,
329 						instance->max_chain_frame_sz,
330 						4, 0);
331 	if (!fusion->sg_dma_pool) {
332 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
333 		return -ENOMEM;
334 	}
335 	fusion->sense_dma_pool = pci_pool_create("sense pool fusion",
336 						 instance->pdev,
337 						 SCSI_SENSE_BUFFERSIZE, 64, 0);
338 
339 	if (!fusion->sense_dma_pool) {
340 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
341 		pci_pool_destroy(fusion->sg_dma_pool);
342 		fusion->sg_dma_pool = NULL;
343 		return -ENOMEM;
344 	}
345 
346 	/*
347 	 * Allocate and attach a frame to each of the commands in cmd_list
348 	 */
349 	for (i = 0; i < max_cmd; i++) {
350 
351 		cmd = fusion->cmd_list[i];
352 
353 		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
354 					       GFP_KERNEL,
355 					       &cmd->sg_frame_phys_addr);
356 
357 		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
358 					    GFP_KERNEL, &cmd->sense_phys_addr);
359 		/*
360 		 * megasas_teardown_frame_pool_fusion() takes care of freeing
361 		 * whatever has been allocated
362 		 */
363 		if (!cmd->sg_frame || !cmd->sense) {
364 			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
365 			megasas_teardown_frame_pool_fusion(instance);
366 			return -ENOMEM;
367 		}
368 	}
369 	return 0;
370 }
371 
372 /**
373  * megasas_alloc_cmds_fusion -	Allocates the command packets
374  * @instance:		Adapter soft state
 *
 * Each frame has a 32-bit field called context. This context is used to get
 * back the megasas_cmd_fusion from the frame when a frame gets completed.
 * In this driver, the 32-bit values are the indices into the array cmd_list,
 * which is used only to look up the megasas_cmd_fusion for a given context;
 * free commands are fetched by indexing cmd_list with the block layer tag.
 *
 * cmds are formed in the io_request and sg_frame members of the
 * megasas_cmd_fusion. The context field is used to get a request descriptor
 * and is used as the SMID of the cmd.
 * SMID values range from 1 to max_fw_cmds.
387  */
388 int
389 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
390 {
391 	int i, j, count;
392 	u32 max_cmd, io_frames_sz;
393 	struct fusion_context *fusion;
394 	struct megasas_cmd_fusion *cmd;
395 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
396 	u32 offset;
397 	dma_addr_t io_req_base_phys;
398 	u8 *io_req_base;
399 
400 	fusion = instance->ctrl_context;
401 
402 	max_cmd = instance->max_fw_cmds;
403 
404 	fusion->req_frames_desc =
405 		dma_alloc_coherent(&instance->pdev->dev,
406 				   fusion->request_alloc_sz,
407 				   &fusion->req_frames_desc_phys, GFP_KERNEL);
408 
409 	if (!fusion->req_frames_desc) {
410 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
411 		       "request_frames\n");
412 		goto fail_req_desc;
413 	}
414 
415 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
416 	fusion->reply_frames_desc_pool =
417 		pci_pool_create("reply_frames pool", instance->pdev,
418 				fusion->reply_alloc_sz * count, 16, 0);
419 
420 	if (!fusion->reply_frames_desc_pool) {
421 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
422 		       "reply_frame pool\n");
423 		goto fail_reply_desc;
424 	}
425 
426 	fusion->reply_frames_desc =
427 		pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
428 			       &fusion->reply_frames_desc_phys);
429 	if (!fusion->reply_frames_desc) {
430 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
431 		       "reply_frame pool\n");
432 		pci_pool_destroy(fusion->reply_frames_desc_pool);
433 		goto fail_reply_desc;
434 	}
435 
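	/*
	 * Mark every reply descriptor as unused (all ones) so the reply
	 * handler can tell it has not yet been written by the FW.
	 */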
436 	reply_desc = fusion->reply_frames_desc;
437 	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
438 		reply_desc->Words = cpu_to_le64(ULLONG_MAX);
439 
440 	io_frames_sz = fusion->io_frames_alloc_sz;
441 
442 	fusion->io_request_frames_pool =
443 		pci_pool_create("io_request_frames pool", instance->pdev,
444 				fusion->io_frames_alloc_sz, 16, 0);
445 
446 	if (!fusion->io_request_frames_pool) {
447 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
448 		       "io_request_frame pool\n");
449 		goto fail_io_frames;
450 	}
451 
452 	fusion->io_request_frames =
453 		pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
454 			       &fusion->io_request_frames_phys);
455 	if (!fusion->io_request_frames) {
456 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
457 		       "io_request_frames frames\n");
458 		pci_pool_destroy(fusion->io_request_frames_pool);
459 		goto fail_io_frames;
460 	}
461 
462 	/*
463 	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
464 	 * Allocate the dynamic array first and then allocate individual
465 	 * commands.
466 	 */
467 	fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
468 				   * max_cmd, GFP_KERNEL);
469 
470 	if (!fusion->cmd_list) {
471 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
472 		       "memory for cmd_list_fusion\n");
473 		goto fail_cmd_list;
474 	}
475 
476 	max_cmd = instance->max_fw_cmds;
477 	for (i = 0; i < max_cmd; i++) {
478 		fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
479 					      GFP_KERNEL);
480 		if (!fusion->cmd_list[i]) {
481 			dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
482 
483 			for (j = 0; j < i; j++)
484 				kfree(fusion->cmd_list[j]);
485 
486 			kfree(fusion->cmd_list);
487 			fusion->cmd_list = NULL;
488 			goto fail_cmd_list;
489 		}
490 	}
491 
	/* The first 256 bytes (SMID 0) are not used. Don't add them to cmd list */
493 	io_req_base = fusion->io_request_frames +
494 		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
495 	io_req_base_phys = fusion->io_request_frames_phys +
496 		MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
497 
	/*
	 * Initialize every command in cmd_list.
	 * SMID 0 is reserved, so SMIDs (cmd->index) start from 1.
	 */
503 	for (i = 0; i < max_cmd; i++) {
504 		cmd = fusion->cmd_list[i];
505 		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
506 		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
507 		cmd->index = i + 1;
508 		cmd->scmd = NULL;
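		/*
		 * Commands beyond max_scsi_cmds are reserved for internal
		 * DCMDs/IOCTLs; only those get a valid sync_cmd_idx.
		 */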
509 		cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
510 				(i - instance->max_scsi_cmds) :
511 				(u32)ULONG_MAX; /* Set to Invalid */
512 		cmd->instance = instance;
513 		cmd->io_request =
514 			(struct MPI2_RAID_SCSI_IO_REQUEST *)
515 		  (io_req_base + offset);
516 		memset(cmd->io_request, 0,
517 		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
518 		cmd->io_request_phys_addr = io_req_base_phys + offset;
519 	}
520 
521 	/*
522 	 * Create a frame pool and assign one frame to each cmd
523 	 */
524 	if (megasas_create_frame_pool_fusion(instance)) {
525 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
526 		megasas_free_cmds_fusion(instance);
527 		goto fail_req_desc;
528 	}
529 
530 	return 0;
531 
532 fail_cmd_list:
533 	pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
534 		      fusion->io_request_frames_phys);
535 	pci_pool_destroy(fusion->io_request_frames_pool);
fail_io_frames:
	/* reply_frames_desc was allocated from the DMA pool; free it there */
	pci_pool_free(fusion->reply_frames_desc_pool,
		      fusion->reply_frames_desc,
		      fusion->reply_frames_desc_phys);
	pci_pool_destroy(fusion->reply_frames_desc_pool);
544 
545 fail_reply_desc:
546 	dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
547 			  fusion->req_frames_desc,
548 			  fusion->req_frames_desc_phys);
549 fail_req_desc:
550 	return -ENOMEM;
551 }
552 
553 /**
 * wait_and_poll -	Polls for completion of a previously issued command
 * @instance:			Adapter soft state
 * @cmd:			Command packet that was issued
 * @seconds:			Maximum time to poll, in seconds
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
559  */
560 int
561 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
562 	int seconds)
563 {
564 	int i;
565 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
566 	struct fusion_context *fusion;
567 
568 	u32 msecs = seconds * 1000;
569 
570 	fusion = instance->ctrl_context;
571 	/*
572 	 * Wait for cmd_status to change
573 	 */
574 	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
575 		rmb();
576 		msleep(20);
577 	}
578 
579 	if (frame_hdr->cmd_status == 0xff)
580 		return -ETIME;
581 
582 	return (frame_hdr->cmd_status == MFI_STAT_OK) ?
583 		0 : 1;
584 }
585 
586 /**
587  * megasas_ioc_init_fusion -	Initializes the FW
588  * @instance:		Adapter soft state
589  *
590  * Issues the IOC Init cmd
591  */
592 int
593 megasas_ioc_init_fusion(struct megasas_instance *instance)
594 {
595 	struct megasas_init_frame *init_frame;
596 	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
597 	dma_addr_t	ioc_init_handle;
598 	struct megasas_cmd *cmd;
599 	u8 ret;
600 	struct fusion_context *fusion;
601 	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
602 	int i;
603 	struct megasas_header *frame_hdr;
604 	const char *sys_info;
605 	MFI_CAPABILITIES *drv_ops;
606 
607 	fusion = instance->ctrl_context;
608 
609 	cmd = megasas_get_cmd(instance);
610 
611 	if (!cmd) {
612 		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
613 		ret = 1;
614 		goto fail_get_cmd;
615 	}
616 
617 	IOCInitMessage =
618 	  dma_alloc_coherent(&instance->pdev->dev,
619 			     sizeof(struct MPI2_IOC_INIT_REQUEST),
620 			     &ioc_init_handle, GFP_KERNEL);
621 
622 	if (!IOCInitMessage) {
623 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
624 		       "IOCInitMessage\n");
625 		ret = 1;
626 		goto fail_fw_init;
627 	}
628 
629 	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
630 
631 	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
632 	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
633 	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
634 	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
635 	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
636 
637 	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
638 	IOCInitMessage->ReplyDescriptorPostQueueAddress	= cpu_to_le64(fusion->reply_frames_desc_phys);
639 	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
640 	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
641 	init_frame = (struct megasas_init_frame *)cmd->frame;
642 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
643 
644 	frame_hdr = &cmd->frame->hdr;
645 	frame_hdr->cmd_status = 0xFF;
646 	frame_hdr->flags = cpu_to_le16(
647 		le16_to_cpu(frame_hdr->flags) |
648 		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
649 
650 	init_frame->cmd	= MFI_CMD_INIT;
651 	init_frame->cmd_status = 0xFF;
652 
653 	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
654 
655 	/* driver support Extended MSIX */
656 	if (fusion->adapter_type == INVADER_SERIES)
657 		drv_ops->mfi_capabilities.support_additional_msix = 1;
658 	/* driver supports HA / Remote LUN over Fast Path interface */
659 	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
660 
661 	drv_ops->mfi_capabilities.support_max_255lds = 1;
662 	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
663 	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
664 
665 	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
666 		drv_ops->mfi_capabilities.support_ext_io_size = 1;
667 
668 	/* Convert capability to LE32 */
669 	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
670 
671 	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
672 	if (instance->system_info_buf && sys_info) {
673 		memcpy(instance->system_info_buf->systemId, sys_info,
674 			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
675 		instance->system_info_buf->systemIdLength =
676 			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
677 		init_frame->system_info_lo = instance->system_info_h;
678 		init_frame->system_info_hi = 0;
679 	}
680 
681 	init_frame->queue_info_new_phys_addr_hi =
682 		cpu_to_le32(upper_32_bits(ioc_init_handle));
683 	init_frame->queue_info_new_phys_addr_lo =
684 		cpu_to_le32(lower_32_bits(ioc_init_handle));
685 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
686 
687 	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
688 	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
689 	req_desc.MFAIo.RequestFlags =
690 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
691 		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
692 
693 	/*
694 	 * disable the intr before firing the init frame
695 	 */
696 	instance->instancet->disable_intr(instance);
697 
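	/*
	 * Wait up to 10 seconds for the FW to clear the doorbell handshake
	 * bit before posting the IOC INIT descriptor.
	 */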
698 	for (i = 0; i < (10 * 1000); i += 20) {
699 		if (readl(&instance->reg_set->doorbell) & 1)
700 			msleep(20);
701 		else
702 			break;
703 	}
704 
705 	megasas_fire_cmd_fusion(instance, &req_desc);
706 
707 	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
708 
709 	frame_hdr = &cmd->frame->hdr;
710 	if (frame_hdr->cmd_status != 0) {
711 		ret = 1;
712 		goto fail_fw_init;
713 	}
	dev_info(&instance->pdev->dev, "Init cmd success\n");
715 
716 	ret = 0;
717 
718 fail_fw_init:
719 	megasas_return_cmd(instance, cmd);
720 	if (IOCInitMessage)
721 		dma_free_coherent(&instance->pdev->dev,
722 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
723 				  IOCInitMessage, ioc_init_handle);
724 fail_get_cmd:
725 	return ret;
726 }
727 
728 /**
729  * megasas_sync_pd_seq_num -	JBOD SEQ MAP
 * @instance:		Adapter soft state
 * @pend:		set to 1 for a pended JBOD map command.
 *
 * Issue the JBOD map DCMD to the firmware. For a pended command, fire it
 * and return without waiting; the FW completes it when the map changes.
 * Otherwise issue the command and wait for its completion.
736  */
737 int
738 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
739 	int ret = 0;
740 	u32 pd_seq_map_sz;
741 	struct megasas_cmd *cmd;
742 	struct megasas_dcmd_frame *dcmd;
743 	struct fusion_context *fusion = instance->ctrl_context;
744 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
745 	dma_addr_t pd_seq_h;
746 
747 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
748 	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
749 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
750 			(sizeof(struct MR_PD_CFG_SEQ) *
751 			(MAX_PHYSICAL_DEVICES - 1));
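	/*
	 * pd_seq_sync[]/pd_seq_phys[] form a ping-pong pair indexed by the
	 * low bit of pd_seq_map_id; the size covers sequence data for all
	 * MAX_PHYSICAL_DEVICES entries (the struct already holds one).
	 */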
752 
753 	cmd = megasas_get_cmd(instance);
754 	if (!cmd) {
755 		dev_err(&instance->pdev->dev,
756 			"Could not get mfi cmd. Fail from %s %d\n",
757 			__func__, __LINE__);
758 		return -ENOMEM;
759 	}
760 
761 	dcmd = &cmd->frame->dcmd;
762 
763 	memset(pd_sync, 0, pd_seq_map_sz);
764 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
765 	dcmd->cmd = MFI_CMD_DCMD;
766 	dcmd->cmd_status = 0xFF;
767 	dcmd->sge_count = 1;
768 	dcmd->timeout = 0;
769 	dcmd->pad_0 = 0;
770 	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
771 	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
772 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
773 	dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
774 
775 	if (pend) {
776 		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
777 		dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
778 		instance->jbod_seq_cmd = cmd;
779 		instance->instancet->issue_dcmd(instance, cmd);
780 		return 0;
781 	}
782 
783 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
784 
785 	/* Below code is only for non pended DCMD */
786 	if (instance->ctrl_context && !instance->mask_interrupts)
787 		ret = megasas_issue_blocked_cmd(instance, cmd, 60);
788 	else
789 		ret = megasas_issue_polled(instance, cmd);
790 
791 	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
792 		dev_warn(&instance->pdev->dev,
793 			"driver supports max %d JBOD, but FW reports %d\n",
794 			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
795 		ret = -EINVAL;
796 	}
797 
798 	if (!ret)
799 		instance->pd_seq_map_id++;
800 
801 	megasas_return_cmd(instance, cmd);
802 	return ret;
803 }
804 
805 /*
 * megasas_get_ld_map_info -	Returns FW's ld_map structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's LD/RAID map
 * structure.  This information is used to validate the map and to
 * decide whether fast path I/O can be used.
812  * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
813  * dcmd.mbox.b[0]	- number of LDs being sync'd
814  * dcmd.mbox.b[1]	- 0 - complete command immediately.
815  *			- 1 - pend till config change
816  * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
817  *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
818  *				uses extended struct MR_FW_RAID_MAP_EXT
819  */
820 static int
821 megasas_get_ld_map_info(struct megasas_instance *instance)
822 {
823 	int ret = 0;
824 	struct megasas_cmd *cmd;
825 	struct megasas_dcmd_frame *dcmd;
826 	void *ci;
827 	dma_addr_t ci_h = 0;
828 	u32 size_map_info;
829 	struct fusion_context *fusion;
830 
831 	cmd = megasas_get_cmd(instance);
832 
833 	if (!cmd) {
834 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
835 		return -ENOMEM;
836 	}
837 
838 	fusion = instance->ctrl_context;
839 
840 	if (!fusion) {
841 		megasas_return_cmd(instance, cmd);
842 		return -ENXIO;
843 	}
844 
845 	dcmd = &cmd->frame->dcmd;
846 
847 	size_map_info = fusion->current_map_sz;
848 
849 	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
850 	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
851 
852 	if (!ci) {
853 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
854 		megasas_return_cmd(instance, cmd);
855 		return -ENOMEM;
856 	}
857 
858 	memset(ci, 0, fusion->max_map_sz);
859 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
860 #if VD_EXT_DEBUG
861 	dev_dbg(&instance->pdev->dev,
862 		"%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
863 		__func__, cpu_to_le32(size_map_info));
864 #endif
865 	dcmd->cmd = MFI_CMD_DCMD;
866 	dcmd->cmd_status = 0xFF;
867 	dcmd->sge_count = 1;
868 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
869 	dcmd->timeout = 0;
870 	dcmd->pad_0 = 0;
871 	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
872 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
873 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
874 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
875 
876 	if (instance->ctrl_context && !instance->mask_interrupts)
877 		ret = megasas_issue_blocked_cmd(instance, cmd,
878 			MEGASAS_BLOCKED_CMD_TIMEOUT);
879 	else
880 		ret = megasas_issue_polled(instance, cmd);
881 
882 	megasas_return_cmd(instance, cmd);
883 
884 	return ret;
885 }
886 
887 u8
888 megasas_get_map_info(struct megasas_instance *instance)
889 {
890 	struct fusion_context *fusion = instance->ctrl_context;
891 
892 	fusion->fast_path_io = 0;
893 	if (!megasas_get_ld_map_info(instance)) {
894 		if (MR_ValidateMapInfo(instance)) {
895 			fusion->fast_path_io = 1;
896 			return 0;
897 		}
898 	}
899 	return 1;
900 }
901 
902 /*
 * megasas_sync_map_info -	Sends LD sync info to the FW
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) that reports the driver's LD target
 * IDs and sequence numbers to the FW.  The FW pends the command and
 * completes it when the RAID map changes.
909  */
910 int
911 megasas_sync_map_info(struct megasas_instance *instance)
912 {
913 	int ret = 0, i;
914 	struct megasas_cmd *cmd;
915 	struct megasas_dcmd_frame *dcmd;
916 	u32 size_sync_info, num_lds;
917 	struct fusion_context *fusion;
918 	struct MR_LD_TARGET_SYNC *ci = NULL;
919 	struct MR_DRV_RAID_MAP_ALL *map;
920 	struct MR_LD_RAID  *raid;
921 	struct MR_LD_TARGET_SYNC *ld_sync;
922 	dma_addr_t ci_h = 0;
923 	u32 size_map_info;
924 
925 	cmd = megasas_get_cmd(instance);
926 
927 	if (!cmd) {
928 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
929 		return -ENOMEM;
930 	}
931 
932 	fusion = instance->ctrl_context;
933 
934 	if (!fusion) {
935 		megasas_return_cmd(instance, cmd);
936 		return 1;
937 	}
938 
939 	map = fusion->ld_drv_map[instance->map_id & 1];
940 
941 	num_lds = le16_to_cpu(map->raidMap.ldCount);
942 
943 	dcmd = &cmd->frame->dcmd;
944 
945 	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
946 
947 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
948 
949 	ci = (struct MR_LD_TARGET_SYNC *)
950 	  fusion->ld_map[(instance->map_id - 1) & 1];
951 	memset(ci, 0, fusion->max_map_sz);
952 
953 	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
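	/*
	 * The map buffer not currently in use, index ((map_id - 1) & 1), is
	 * borrowed to carry the per-LD target sync data.
	 */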
954 
955 	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
956 
957 	for (i = 0; i < num_lds; i++, ld_sync++) {
958 		raid = MR_LdRaidGet(i, map);
959 		ld_sync->targetId = MR_GetLDTgtId(i, map);
960 		ld_sync->seqNum = raid->seqNum;
961 	}
962 
963 	size_map_info = fusion->current_map_sz;
964 
965 	dcmd->cmd = MFI_CMD_DCMD;
966 	dcmd->cmd_status = 0xFF;
967 	dcmd->sge_count = 1;
968 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
969 	dcmd->timeout = 0;
970 	dcmd->pad_0 = 0;
971 	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
972 	dcmd->mbox.b[0] = num_lds;
973 	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
974 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
975 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
976 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
977 
978 	instance->map_update_cmd = cmd;
979 
980 	instance->instancet->issue_dcmd(instance, cmd);
981 
982 	return ret;
983 }
984 
985 /*
 * megasas_display_intel_branding - Display branding string
987  * @instance: per adapter object
988  *
989  * Return nothing.
990  */
991 static void
992 megasas_display_intel_branding(struct megasas_instance *instance)
993 {
994 	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
995 		return;
996 
997 	switch (instance->pdev->device) {
998 	case PCI_DEVICE_ID_LSI_INVADER:
999 		switch (instance->pdev->subsystem_device) {
1000 		case MEGARAID_INTEL_RS3DC080_SSDID:
1001 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1002 				instance->host->host_no,
1003 				MEGARAID_INTEL_RS3DC080_BRANDING);
1004 			break;
1005 		case MEGARAID_INTEL_RS3DC040_SSDID:
1006 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1007 				instance->host->host_no,
1008 				MEGARAID_INTEL_RS3DC040_BRANDING);
1009 			break;
1010 		case MEGARAID_INTEL_RS3SC008_SSDID:
1011 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1012 				instance->host->host_no,
1013 				MEGARAID_INTEL_RS3SC008_BRANDING);
1014 			break;
1015 		case MEGARAID_INTEL_RS3MC044_SSDID:
1016 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1017 				instance->host->host_no,
1018 				MEGARAID_INTEL_RS3MC044_BRANDING);
1019 			break;
1020 		default:
1021 			break;
1022 		}
1023 		break;
1024 	case PCI_DEVICE_ID_LSI_FURY:
1025 		switch (instance->pdev->subsystem_device) {
1026 		case MEGARAID_INTEL_RS3WC080_SSDID:
1027 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1028 				instance->host->host_no,
1029 				MEGARAID_INTEL_RS3WC080_BRANDING);
1030 			break;
1031 		case MEGARAID_INTEL_RS3WC040_SSDID:
1032 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1033 				instance->host->host_no,
1034 				MEGARAID_INTEL_RS3WC040_BRANDING);
1035 			break;
1036 		default:
1037 			break;
1038 		}
1039 		break;
1040 	case PCI_DEVICE_ID_LSI_CUTLASS_52:
1041 	case PCI_DEVICE_ID_LSI_CUTLASS_53:
1042 		switch (instance->pdev->subsystem_device) {
1043 		case MEGARAID_INTEL_RMS3BC160_SSDID:
1044 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1045 				instance->host->host_no,
1046 				MEGARAID_INTEL_RMS3BC160_BRANDING);
1047 			break;
1048 		default:
1049 			break;
1050 		}
1051 		break;
1052 	default:
1053 		break;
1054 	}
1055 }
1056 
1057 /**
1058  * megasas_init_adapter_fusion -	Initializes the FW
1059  * @instance:		Adapter soft state
1060  *
1061  * This is the main function for initializing firmware.
1062  */
1063 u32
1064 megasas_init_adapter_fusion(struct megasas_instance *instance)
1065 {
1066 	struct megasas_register_set __iomem *reg_set;
1067 	struct fusion_context *fusion;
1068 	u32 max_cmd, scratch_pad_2;
1069 	int i = 0, count;
1070 
1071 	fusion = instance->ctrl_context;
1072 
1073 	reg_set = instance->reg_set;
1074 
1075 	/*
1076 	 * Get various operational parameters from status register
1077 	 */
1078 	instance->max_fw_cmds =
1079 		instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
1080 	instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
1081 
1082 	/*
1083 	 * Reduce the max supported cmds by 1. This is to ensure that the
1084 	 * reply_q_sz (1 more than the max cmd that driver may send)
1085 	 * does not exceed max cmds that the FW can support
1086 	 */
1087 	instance->max_fw_cmds = instance->max_fw_cmds-1;
1088 
1089 	/*
	 * Only Driver's internal DCMDs and IOCTL DCMDs need to have MFI frames
1091 	 */
1092 	instance->max_mfi_cmds =
1093 		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1094 
1095 	max_cmd = instance->max_fw_cmds;
1096 
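	/*
	 * Reply queue depth: (max_cmd + 1) rounded up to a multiple of 16,
	 * then doubled.
	 */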
1097 	fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1098 
1099 	fusion->request_alloc_sz =
1100 		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
1101 	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1102 		*(fusion->reply_q_depth);
1103 	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1104 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
1105 		 (max_cmd + 1)); /* Extra 1 for SMID 0 */
1106 
1107 	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
	/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
	 * firmware supports an extended IO chain frame which is 4 times
	 * larger than the legacy one.
	 * Legacy firmware - frame size is (8 * 128) = 1K
	 * 1M IO firmware  - frame size is (8 * 128 * 4) = 4K
	 */
1114 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1115 		instance->max_chain_frame_sz =
1116 			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1117 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1118 	else
1119 		instance->max_chain_frame_sz =
1120 			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1121 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1122 
1123 	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1124 		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1125 			instance->max_chain_frame_sz,
1126 			MEGASAS_CHAIN_FRAME_SZ_MIN);
1127 		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1128 	}
1129 
1130 	fusion->max_sge_in_main_msg =
1131 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1132 			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1133 
1134 	fusion->max_sge_in_chain =
1135 		instance->max_chain_frame_sz
1136 			/ sizeof(union MPI2_SGE_IO_UNION);
1137 
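	/*
	 * max_num_sge: SGEs in the main message plus those in the chain
	 * frame, minus two reserved entries (apparently one main-message
	 * slot is consumed by the chain element), rounded down to a power
	 * of two.
	 */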
1138 	instance->max_num_sge =
1139 		rounddown_pow_of_two(fusion->max_sge_in_main_msg
1140 			+ fusion->max_sge_in_chain - 2);
1141 
1142 	/* Used for pass thru MFI frame (DCMD) */
1143 	fusion->chain_offset_mfi_pthru =
1144 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1145 
1146 	fusion->chain_offset_io_request =
1147 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1148 		 sizeof(union MPI2_SGE_IO_UNION))/16;
1149 
1150 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1151 	for (i = 0 ; i < count; i++)
1152 		fusion->last_reply_idx[i] = 0;
1153 
1154 	/*
	 * For fusion adapters, 3 commands are reserved for IOCTLs and 5
	 * commands for the driver's internal DCMDs.
1157 	 */
1158 	instance->max_scsi_cmds = instance->max_fw_cmds -
1159 				(MEGASAS_FUSION_INTERNAL_CMDS +
1160 				MEGASAS_FUSION_IOCTL_CMDS);
1161 	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1162 
1163 	/*
1164 	 * Allocate memory for descriptors
1165 	 * Create a pool of commands
1166 	 */
1167 	if (megasas_alloc_cmds(instance))
1168 		goto fail_alloc_mfi_cmds;
1169 	if (megasas_alloc_cmds_fusion(instance))
1170 		goto fail_alloc_cmds;
1171 
1172 	if (megasas_ioc_init_fusion(instance))
1173 		goto fail_ioc_init;
1174 
1175 	megasas_display_intel_branding(instance);
1176 	if (megasas_get_ctrl_info(instance)) {
1177 		dev_err(&instance->pdev->dev,
1178 			"Could not get controller info. Fail from %s %d\n",
1179 			__func__, __LINE__);
1180 		goto fail_ioc_init;
1181 	}
1182 
1183 	instance->flag_ieee = 1;
1184 	fusion->fast_path_io = 0;
1185 
1186 	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1187 	for (i = 0; i < 2; i++) {
1188 		fusion->ld_map[i] = NULL;
1189 		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
1190 			fusion->drv_map_pages);
1191 		if (!fusion->ld_drv_map[i]) {
1192 			dev_err(&instance->pdev->dev, "Could not allocate "
1193 				"memory for local map info for %d pages\n",
1194 				fusion->drv_map_pages);
1195 			if (i == 1)
1196 				free_pages((ulong)fusion->ld_drv_map[0],
1197 					fusion->drv_map_pages);
1198 			goto fail_ioc_init;
1199 		}
1200 		memset(fusion->ld_drv_map[i], 0,
1201 			((1 << PAGE_SHIFT) << fusion->drv_map_pages));
1202 	}
1203 
1204 	for (i = 0; i < 2; i++) {
1205 		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1206 						       fusion->max_map_sz,
1207 						       &fusion->ld_map_phys[i],
1208 						       GFP_KERNEL);
1209 		if (!fusion->ld_map[i]) {
1210 			dev_err(&instance->pdev->dev, "Could not allocate memory "
1211 			       "for map info\n");
1212 			goto fail_map_info;
1213 		}
1214 	}
1215 
1216 	if (!megasas_get_map_info(instance))
1217 		megasas_sync_map_info(instance);
1218 
1219 	return 0;
1220 
1221 fail_map_info:
1222 	if (i == 1)
1223 		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
1224 				  fusion->ld_map[0], fusion->ld_map_phys[0]);
1225 fail_ioc_init:
1226 	megasas_free_cmds_fusion(instance);
1227 fail_alloc_cmds:
1228 	megasas_free_cmds(instance);
1229 fail_alloc_mfi_cmds:
1230 	return 1;
1231 }
1232 
1233 /**
1234  * map_cmd_status -	Maps FW cmd status to OS cmd status
1235  * @cmd :		Pointer to cmd
1236  * @status :		status of cmd returned by FW
1237  * @ext_status :	ext status of cmd returned by FW
1238  */
1239 
1240 void
1241 map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1242 {
1243 
1244 	switch (status) {
1245 
1246 	case MFI_STAT_OK:
1247 		cmd->scmd->result = DID_OK << 16;
1248 		break;
1249 
1250 	case MFI_STAT_SCSI_IO_FAILED:
1251 	case MFI_STAT_LD_INIT_IN_PROGRESS:
1252 		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
1253 		break;
1254 
1255 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1256 
1257 		cmd->scmd->result = (DID_OK << 16) | ext_status;
1258 		if (ext_status == SAM_STAT_CHECK_CONDITION) {
1259 			memset(cmd->scmd->sense_buffer, 0,
1260 			       SCSI_SENSE_BUFFERSIZE);
1261 			memcpy(cmd->scmd->sense_buffer, cmd->sense,
1262 			       SCSI_SENSE_BUFFERSIZE);
1263 			cmd->scmd->result |= DRIVER_SENSE << 24;
1264 		}
1265 		break;
1266 
1267 	case MFI_STAT_LD_OFFLINE:
1268 	case MFI_STAT_DEVICE_NOT_FOUND:
1269 		cmd->scmd->result = DID_BAD_TARGET << 16;
1270 		break;
1271 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1272 		cmd->scmd->result = DID_IMM_RETRY << 16;
1273 		break;
1274 	default:
1275 		dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
1276 		cmd->scmd->result = DID_ERROR << 16;
1277 		break;
1278 	}
1279 }
1280 
1281 /**
 * megasas_make_sgl_fusion -	Prepares IEEE SGL (chain of 64-bit SGEs)
1283  * @instance:		Adapter soft state
1284  * @scp:		SCSI command from the mid-layer
1285  * @sgl_ptr:		SGL to be filled in
1286  * @cmd:		cmd we are working on
1287  *
1288  * If successful, this function returns the number of SG elements.
1289  */
1290 static int
1291 megasas_make_sgl_fusion(struct megasas_instance *instance,
1292 			struct scsi_cmnd *scp,
1293 			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1294 			struct megasas_cmd_fusion *cmd)
1295 {
1296 	int i, sg_processed, sge_count;
1297 	struct scatterlist *os_sgl;
1298 	struct fusion_context *fusion;
1299 
1300 	fusion = instance->ctrl_context;
1301 
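	/*
	 * For Invader series controllers, pre-clear the flags of the last
	 * SGE slot in the main message, apparently so stale end-of-list or
	 * chain flags are never seen there.
	 */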
1302 	if (fusion->adapter_type == INVADER_SERIES) {
1303 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1304 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1305 		sgl_ptr_end->Flags = 0;
1306 	}
1307 
1308 	sge_count = scsi_dma_map(scp);
1309 
1310 	BUG_ON(sge_count < 0);
1311 
1312 	if (sge_count > instance->max_num_sge || !sge_count)
1313 		return sge_count;
1314 
1315 	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1316 		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1317 		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1318 		sgl_ptr->Flags = 0;
1319 		if (fusion->adapter_type == INVADER_SERIES)
1320 			if (i == sge_count - 1)
1321 				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1322 		sgl_ptr++;
1323 
1324 		sg_processed = i + 1;
1325 
1326 		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
1327 		    (sge_count > fusion->max_sge_in_main_msg)) {
1328 
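			/*
			 * The remaining SGEs do not fit in the main message:
			 * this slot becomes a chain element pointing at the
			 * command's preallocated sg_frame, and SGE filling
			 * continues there.
			 */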
1329 			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1330 			if (fusion->adapter_type == INVADER_SERIES) {
1331 				if ((le16_to_cpu(cmd->io_request->IoFlags) &
1332 					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1333 					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1334 					cmd->io_request->ChainOffset =
1335 						fusion->
1336 						chain_offset_io_request;
1337 				else
1338 					cmd->io_request->ChainOffset = 0;
1339 			} else
1340 				cmd->io_request->ChainOffset =
1341 					fusion->chain_offset_io_request;
1342 
1343 			sg_chain = sgl_ptr;
1344 			/* Prepare chain element */
1345 			sg_chain->NextChainOffset = 0;
1346 			if (fusion->adapter_type == INVADER_SERIES)
1347 				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1348 			else
1349 				sg_chain->Flags =
1350 					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1351 					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1352 			sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1353 			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
1354 
1355 			sgl_ptr =
1356 			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1357 			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1358 		}
1359 	}
1360 
1361 	return sge_count;
1362 }
1363 
1364 /**
 * megasas_set_pd_lba -	Sets PD LBA
 * @io_request:	MPT frame whose CDB is to be built
 * @cdb_len:		CDB length
 * @io_info:		IO information (start block, number of blocks, ...)
 * @scp:		SCSI command from the mid-layer
 * @local_map_ptr:	Raid map
 * @ref_tag:		Primary reference tag for T10 PI
 *
 * Used to set the PD LBA in CDB for FP IOs
1371  */
1372 void
1373 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1374 		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1375 		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1376 {
1377 	struct MR_LD_RAID *raid;
1378 	u32 ld;
1379 	u64 start_blk = io_info->pdBlock;
1380 	u8 *cdb = io_request->CDB.CDB32;
1381 	u32 num_blocks = io_info->numBlocks;
1382 	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1383 
1384 	/* Check if T10 PI (DIF) is enabled for this LD */
1385 	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1386 	raid = MR_LdRaidGet(ld, local_map_ptr);
1387 	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1388 		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1389 		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1390 		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
1391 
1392 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1393 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1394 		else
1395 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1396 		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
1397 
1398 		/* LBA */
1399 		cdb[12] = (u8)((start_blk >> 56) & 0xff);
1400 		cdb[13] = (u8)((start_blk >> 48) & 0xff);
1401 		cdb[14] = (u8)((start_blk >> 40) & 0xff);
1402 		cdb[15] = (u8)((start_blk >> 32) & 0xff);
1403 		cdb[16] = (u8)((start_blk >> 24) & 0xff);
1404 		cdb[17] = (u8)((start_blk >> 16) & 0xff);
1405 		cdb[18] = (u8)((start_blk >> 8) & 0xff);
1406 		cdb[19] = (u8)(start_blk & 0xff);
1407 
1408 		/* Logical block reference tag */
1409 		io_request->CDB.EEDP32.PrimaryReferenceTag =
1410 			cpu_to_be32(ref_tag);
1411 		io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1412 		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1413 
1414 		/* Transfer length */
1415 		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1416 		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1417 		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1418 		cdb[31] = (u8)(num_blocks & 0xff);
1419 
1420 		/* set SCSI IO EEDPFlags */
1421 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1422 			io_request->EEDPFlags = cpu_to_le16(
1423 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
1424 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1425 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1426 				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1427 				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1428 		} else {
1429 			io_request->EEDPFlags = cpu_to_le16(
1430 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1431 				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1432 		}
1433 		io_request->Control |= cpu_to_le32((0x4 << 26));
1434 		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1435 	} else {
1436 		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
1437 		if (((cdb_len == 12) || (cdb_len == 16)) &&
1438 		    (start_blk <= 0xffffffff)) {
1439 			if (cdb_len == 16) {
1440 				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1441 				flagvals = cdb[1];
1442 				groupnum = cdb[14];
1443 				control = cdb[15];
1444 			} else {
1445 				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1446 				flagvals = cdb[1];
1447 				groupnum = cdb[10];
1448 				control = cdb[11];
1449 			}
1450 
1451 			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1452 
1453 			cdb[0] = opcode;
1454 			cdb[1] = flagvals;
1455 			cdb[6] = groupnum;
1456 			cdb[9] = control;
1457 
1458 			/* Transfer length */
1459 			cdb[8] = (u8)(num_blocks & 0xff);
1460 			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1461 
1462 			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1463 			cdb_len = 10;
1464 		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1465 			/* Convert to 16 byte CDB for large LBA's */
1466 			switch (cdb_len) {
1467 			case 6:
1468 				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1469 				control = cdb[5];
1470 				break;
1471 			case 10:
1472 				opcode =
1473 					cdb[0] == READ_10 ? READ_16 : WRITE_16;
1474 				flagvals = cdb[1];
1475 				groupnum = cdb[6];
1476 				control = cdb[9];
1477 				break;
1478 			case 12:
1479 				opcode =
1480 					cdb[0] == READ_12 ? READ_16 : WRITE_16;
1481 				flagvals = cdb[1];
1482 				groupnum = cdb[10];
1483 				control = cdb[11];
1484 				break;
1485 			}
1486 
1487 			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1488 
1489 			cdb[0] = opcode;
1490 			cdb[1] = flagvals;
1491 			cdb[14] = groupnum;
1492 			cdb[15] = control;
1493 
1494 			/* Transfer length */
1495 			cdb[13] = (u8)(num_blocks & 0xff);
1496 			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1497 			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1498 			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1499 
1500 			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
1501 			cdb_len = 16;
1502 		}
1503 
1504 		/* Normal case, just load LBA here */
1505 		switch (cdb_len) {
1506 		case 6:
1507 		{
1508 			u8 val = cdb[1] & 0xE0;
1509 			cdb[3] = (u8)(start_blk & 0xff);
1510 			cdb[2] = (u8)((start_blk >> 8) & 0xff);
1511 			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
1512 			break;
1513 		}
1514 		case 10:
1515 			cdb[5] = (u8)(start_blk & 0xff);
1516 			cdb[4] = (u8)((start_blk >> 8) & 0xff);
1517 			cdb[3] = (u8)((start_blk >> 16) & 0xff);
1518 			cdb[2] = (u8)((start_blk >> 24) & 0xff);
1519 			break;
1520 		case 12:
1521 			cdb[5]    = (u8)(start_blk & 0xff);
1522 			cdb[4]    = (u8)((start_blk >> 8) & 0xff);
1523 			cdb[3]    = (u8)((start_blk >> 16) & 0xff);
1524 			cdb[2]    = (u8)((start_blk >> 24) & 0xff);
1525 			break;
1526 		case 16:
1527 			cdb[9]    = (u8)(start_blk & 0xff);
1528 			cdb[8]    = (u8)((start_blk >> 8) & 0xff);
1529 			cdb[7]    = (u8)((start_blk >> 16) & 0xff);
1530 			cdb[6]    = (u8)((start_blk >> 24) & 0xff);
1531 			cdb[5]    = (u8)((start_blk >> 32) & 0xff);
1532 			cdb[4]    = (u8)((start_blk >> 40) & 0xff);
1533 			cdb[3]    = (u8)((start_blk >> 48) & 0xff);
1534 			cdb[2]    = (u8)((start_blk >> 56) & 0xff);
1535 			break;
1536 		}
1537 	}
1538 }
1539 
1540 /**
1541  * megasas_build_ldio_fusion -	Prepares IOs to devices
1542  * @instance:		Adapter soft state
1543  * @scp:		SCSI command
1544  * @cmd:		Command to be prepared
1545  *
 * Prepares the io_request and chain elements (sg_frame) for IO.
 * The IO can be for PD (Fast Path) or LD.
1548  */
1549 void
1550 megasas_build_ldio_fusion(struct megasas_instance *instance,
1551 			  struct scsi_cmnd *scp,
1552 			  struct megasas_cmd_fusion *cmd)
1553 {
1554 	u8 fp_possible;
1555 	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1556 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1557 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1558 	struct IO_REQUEST_INFO io_info;
1559 	struct fusion_context *fusion;
1560 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1561 	u8 *raidLUN;
1562 
1563 	device_id = MEGASAS_DEV_INDEX(scp);
1564 
1565 	fusion = instance->ctrl_context;
1566 
1567 	io_request = cmd->io_request;
1568 	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1569 	io_request->RaidContext.status = 0;
1570 	io_request->RaidContext.exStatus = 0;
1571 
1572 	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1573 
1574 	start_lba_lo = 0;
1575 	start_lba_hi = 0;
1576 	fp_possible = 0;
1577 
1578 	/*
1579 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1580 	 */
1581 	if (scp->cmd_len == 6) {
1582 		datalength = (u32) scp->cmnd[4];
1583 		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1584 			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1585 
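		/* A 6-byte CDB carries only a 21-bit LBA */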
1586 		start_lba_lo &= 0x1FFFFF;
1587 	}
1588 
1589 	/*
1590 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1591 	 */
1592 	else if (scp->cmd_len == 10) {
1593 		datalength = (u32) scp->cmnd[8] |
1594 			((u32) scp->cmnd[7] << 8);
1595 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1596 			((u32) scp->cmnd[3] << 16) |
1597 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1598 	}
1599 
1600 	/*
1601 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1602 	 */
1603 	else if (scp->cmd_len == 12) {
1604 		datalength = ((u32) scp->cmnd[6] << 24) |
1605 			((u32) scp->cmnd[7] << 16) |
1606 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1607 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1608 			((u32) scp->cmnd[3] << 16) |
1609 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1610 	}
1611 
1612 	/*
1613 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1614 	 */
1615 	else if (scp->cmd_len == 16) {
1616 		datalength = ((u32) scp->cmnd[10] << 24) |
1617 			((u32) scp->cmnd[11] << 16) |
1618 			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1619 		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
1620 			((u32) scp->cmnd[7] << 16) |
1621 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1622 
1623 		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
1624 			((u32) scp->cmnd[3] << 16) |
1625 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1626 	}
1627 
1628 	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1629 	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1630 	io_info.numBlocks = datalength;
1631 	io_info.ldTgtId = device_id;
1632 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
1633 
1634 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1635 		io_info.isRead = 1;
1636 
1637 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1638 
1639 	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1640 		instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
1641 		io_request->RaidContext.regLockFlags  = 0;
1642 		fp_possible = 0;
1643 	} else {
1644 		if (MR_BuildRaidContext(instance, &io_info,
1645 					&io_request->RaidContext,
1646 					local_map_ptr, &raidLUN))
1647 			fp_possible = io_info.fpOkForIo;
1648 	}
1649 
	/*
	 * Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
	 * id by default, not CPU group id, otherwise all MSI-X queues won't
	 * be utilized.
	 */
1653 	cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1654 		raw_smp_processor_id() % instance->msix_vectors : 0;
1655 
1656 	if (fp_possible) {
1657 		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1658 				   local_map_ptr, start_lba_lo);
1659 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1660 		cmd->request_desc->SCSIIO.RequestFlags =
1661 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
1662 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663 		if (fusion->adapter_type == INVADER_SERIES) {
1664 			if (io_request->RaidContext.regLockFlags ==
1665 			    REGION_TYPE_UNUSED)
1666 				cmd->request_desc->SCSIIO.RequestFlags =
1667 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1668 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1669 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1670 			io_request->RaidContext.nseg = 0x1;
1671 			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1672 			io_request->RaidContext.regLockFlags |=
1673 			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1674 			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
1675 		}
1676 		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1677 		    (io_info.isRead)) {
1678 			io_info.devHandle =
1679 				get_updated_dev_handle(instance,
1680 					&fusion->load_balance_info[device_id],
1681 					&io_info);
1682 			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1683 			cmd->pd_r1_lb = io_info.pd_after_lb;
1684 		} else
1685 			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1686 
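		/*
		 * When the map reports more than one valid device handle for
		 * this arm, alternate between the handles so fast path I/O is
		 * spread across both paths (assuming raidLUN[0] == 1 marks
		 * such multi-path devices).
		 */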
1687 		if ((raidLUN[0] == 1) &&
1688 			(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
1689 			instance->dev_handle = !(instance->dev_handle);
1690 			io_info.devHandle =
1691 				local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
1692 		}
1693 
1694 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1695 		io_request->DevHandle = io_info.devHandle;
1696 		/* populate the LUN field */
1697 		memcpy(io_request->LUN, raidLUN, 8);
1698 	} else {
1699 		io_request->RaidContext.timeoutValue =
1700 			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1701 		cmd->request_desc->SCSIIO.RequestFlags =
1702 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1703 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1704 		if (fusion->adapter_type == INVADER_SERIES) {
1705 			if (io_request->RaidContext.regLockFlags ==
1706 			    REGION_TYPE_UNUSED)
1707 				cmd->request_desc->SCSIIO.RequestFlags =
1708 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1709 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1710 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1711 			io_request->RaidContext.regLockFlags |=
1712 				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1713 				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1714 			io_request->RaidContext.nseg = 0x1;
1715 		}
1716 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1717 		io_request->DevHandle = cpu_to_le16(device_id);
1718 	} /* Not FP */
1719 }
1720 
/**
 * megasas_build_ld_nonrw_fusion - prepares non-RW I/Os for a virtual disk
 * @instance:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Prepares the io_request frame for non-RW I/O cmds for a VD.
 */
1729 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1730 			  struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
1731 {
1732 	u32 device_id;
1733 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1734 	u16 pd_index = 0;
1735 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1736 	struct fusion_context *fusion = instance->ctrl_context;
1737 	u8                          span, physArm;
1738 	__le16                      devHandle;
1739 	u32                         ld, arRef, pd;
1740 	struct MR_LD_RAID                  *raid;
1741 	struct RAID_CONTEXT                *pRAID_Context;
1742 	u8 fp_possible = 1;
1743 
1744 	io_request = cmd->io_request;
1745 	device_id = MEGASAS_DEV_INDEX(scmd);
1746 	pd_index = MEGASAS_PD_INDEX(scmd);
1747 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1748 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1749 	/* get RAID_Context pointer */
1750 	pRAID_Context = &io_request->RaidContext;
1751 	/* Check with FW team */
1752 	pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1753 	pRAID_Context->regLockRowLBA    = 0;
1754 	pRAID_Context->regLockLength    = 0;
1755 
	if (fusion->fast_path_io &&
	    (device_id < instance->fw_supported_vd_count)) {
1758 
1759 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1760 		if (ld >= instance->fw_supported_vd_count)
1761 			fp_possible = 0;
1762 
1763 		raid = MR_LdRaidGet(ld, local_map_ptr);
1764 		if (!(raid->capability.fpNonRWCapable))
1765 			fp_possible = 0;
1766 	} else
1767 		fp_possible = 0;
1768 
1769 	if (!fp_possible) {
1770 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1771 		io_request->DevHandle = cpu_to_le16(device_id);
1772 		io_request->LUN[1] = scmd->device->lun;
1773 		pRAID_Context->timeoutValue =
1774 			cpu_to_le16 (scmd->request->timeout / HZ);
1775 		cmd->request_desc->SCSIIO.RequestFlags =
1776 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1777 			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1778 	} else {
1779 
1780 		/* set RAID context values */
1781 		pRAID_Context->configSeqNum = raid->seqNum;
1782 		pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1783 		pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1784 
1785 		/* get the DevHandle for the PD (since this is
1786 		   fpNonRWCapable, this is a single disk RAID0) */
1787 		span = physArm = 0;
1788 		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
1789 		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
1790 		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
1791 
1792 		/* build request descriptor */
1793 		cmd->request_desc->SCSIIO.RequestFlags =
1794 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1795 			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1796 		cmd->request_desc->SCSIIO.DevHandle = devHandle;
1797 
1798 		/* populate the LUN field */
1799 		memcpy(io_request->LUN, raid->LUN, 8);
1800 
1801 		/* build the raidScsiIO structure */
1802 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1803 		io_request->DevHandle = devHandle;
1804 	}
1805 }
1806 
/**
 * megasas_build_syspd_fusion - prepares RW/non-RW I/Os for a system PD
 * @instance:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared
 * @fp_possible:	whether the I/O can use the fast path or must go through firmware
 *
 * Prepares the io_request frame for RW/non-RW I/O cmds for system PDs.
 */
1816 static void
1817 megasas_build_syspd_fusion(struct megasas_instance *instance,
1818 	struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
1819 {
1820 	u32 device_id;
1821 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1822 	u16 pd_index = 0;
1823 	u16 os_timeout_value;
1824 	u16 timeout_limit;
1825 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1826 	struct RAID_CONTEXT	*pRAID_Context;
1827 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1828 	struct fusion_context *fusion = instance->ctrl_context;
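	/*
	 * pd_seq_sync is double buffered; (pd_seq_map_id - 1) & 1 selects the
	 * buffer from the last completed JBOD sequence-number sync.
	 */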
1829 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
1830 
1831 	device_id = MEGASAS_DEV_INDEX(scmd);
1832 	pd_index = MEGASAS_PD_INDEX(scmd);
1833 	os_timeout_value = scmd->request->timeout / HZ;
1834 
1835 	io_request = cmd->io_request;
1836 	/* get RAID_Context pointer */
1837 	pRAID_Context = &io_request->RaidContext;
1838 	pRAID_Context->regLockFlags = 0;
1839 	pRAID_Context->regLockRowLBA = 0;
1840 	pRAID_Context->regLockLength = 0;
1841 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1842 	io_request->LUN[1] = scmd->device->lun;
1843 	pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1844 		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1845 
1846 	/* If FW supports PD sequence number */
1847 	if (instance->use_seqnum_jbod_fp &&
1848 		instance->pd_list[pd_index].driveType == TYPE_DISK) {
		/* TgtId must be offset by (MAX_PHYSICAL_DEVICES - 1) because
		 * the JBOD sequence number is an index below the RAID map.
		 */
1852 		pRAID_Context->VirtualDiskTgtId =
1853 			cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
1854 		pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
1855 		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
1856 		pRAID_Context->regLockFlags |=
1857 			(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
1858 	} else if (fusion->fast_path_io) {
1859 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1860 		pRAID_Context->configSeqNum = 0;
1861 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1862 		io_request->DevHandle =
1863 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1864 	} else {
1865 		/* Want to send all IO via FW path */
1866 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1867 		pRAID_Context->configSeqNum = 0;
1868 		io_request->DevHandle = cpu_to_le16(0xFFFF);
1869 	}
1870 
1871 	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1872 	cmd->request_desc->SCSIIO.MSIxIndex =
1873 		instance->msix_vectors ?
1874 		(raw_smp_processor_id() % instance->msix_vectors) : 0;
1875 
1876 
1877 	if (!fp_possible) {
1878 		/* system pd firmware path */
1879 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1880 		cmd->request_desc->SCSIIO.RequestFlags =
1881 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1882 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1883 		pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
1884 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1885 	} else {
1886 		/* system pd Fast Path */
1887 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1888 		timeout_limit = (scmd->device->type == TYPE_DISK) ?
1889 				255 : 0xFFFF;
1890 		pRAID_Context->timeoutValue =
1891 			cpu_to_le16((os_timeout_value > timeout_limit) ?
1892 			timeout_limit : os_timeout_value);
1893 		if (fusion->adapter_type == INVADER_SERIES) {
1894 			pRAID_Context->Type = MPI2_TYPE_CUDA;
1895 			pRAID_Context->nseg = 0x1;
1896 			io_request->IoFlags |=
1897 				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1898 		}
1899 		cmd->request_desc->SCSIIO.RequestFlags =
1900 			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1901 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1902 	}
1903 }
1904 
/**
 * megasas_build_io_fusion -	Prepares IOs to devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Invokes helper functions to prepare request frames
 * and sets flags appropriate for IO/Non-IO cmd.
 * Returns 0 on success, 1 if the SGE count exceeds the supported maximum.
 */
1914 int
1915 megasas_build_io_fusion(struct megasas_instance *instance,
1916 			struct scsi_cmnd *scp,
1917 			struct megasas_cmd_fusion *cmd)
1918 {
1919 	u16 sge_count;
1920 	u8  cmd_type;
1921 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
1922 
1923 	/* Zero out some fields so they don't get reused */
1924 	memset(io_request->LUN, 0x0, 8);
1925 	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
1926 	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
1927 	io_request->EEDPFlags = 0;
1928 	io_request->Control = 0;
1929 	io_request->EEDPBlockSize = 0;
1930 	io_request->ChainOffset = 0;
1931 	io_request->RaidContext.RAIDFlags = 0;
1932 	io_request->RaidContext.Type = 0;
1933 	io_request->RaidContext.nseg = 0;
1934 
1935 	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
	/*
	 * Just the CDB length; the rest of the Flags are zero.
	 * This will be modified for FP in build_ldio_fusion.
	 */
1940 	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
1941 
1942 	switch (cmd_type = megasas_cmd_type(scp)) {
1943 	case READ_WRITE_LDIO:
1944 		megasas_build_ldio_fusion(instance, scp, cmd);
1945 		break;
1946 	case NON_READ_WRITE_LDIO:
1947 		megasas_build_ld_nonrw_fusion(instance, scp, cmd);
1948 		break;
1949 	case READ_WRITE_SYSPDIO:
1950 	case NON_READ_WRITE_SYSPDIO:
1951 		if (instance->secure_jbod_support &&
1952 			(cmd_type == NON_READ_WRITE_SYSPDIO))
1953 			megasas_build_syspd_fusion(instance, scp, cmd, 0);
1954 		else
1955 			megasas_build_syspd_fusion(instance, scp, cmd, 1);
1956 		break;
1957 	default:
1958 		break;
1959 	}
1960 
1961 	/*
1962 	 * Construct SGL
1963 	 */
1964 
1965 	sge_count =
1966 		megasas_make_sgl_fusion(instance, scp,
1967 					(struct MPI25_IEEE_SGE_CHAIN64 *)
1968 					&io_request->SGL, cmd);
1969 
1970 	if (sge_count > instance->max_num_sge) {
1971 		dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
1972 		       "max (0x%x) allowed\n", sge_count,
1973 		       instance->max_num_sge);
1974 		return 1;
1975 	}
1976 
	/* numSGE stores the lower 8 bits of sge_count;
	 * numSGEExt stores the upper 8 bits of sge_count.
	 */
1980 	io_request->RaidContext.numSGE = sge_count;
1981 	io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
1982 
1983 	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1984 
1985 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1986 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
1987 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1988 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
1989 
1990 	io_request->SGLOffset0 =
1991 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
1992 
1993 	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
1994 	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
1995 
1996 	cmd->scmd = scp;
1997 	scp->SCp.ptr = (char *)cmd;
1998 
1999 	return 0;
2000 }
2001 
2002 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2003 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2004 {
2005 	u8 *p;
2006 	struct fusion_context *fusion;
2007 
2008 	if (index >= instance->max_fw_cmds) {
		dev_err(&instance->pdev->dev, "Invalid SMID (0x%x) request for "
		       "descriptor for scsi%d\n", index,
2011 			instance->host->host_no);
2012 		return NULL;
2013 	}
2014 	fusion = instance->ctrl_context;
	p = fusion->req_frames_desc +
		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
2017 
2018 	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2019 }
2020 
/**
 * megasas_build_and_issue_cmd_fusion - Main routine for building and
 *                                      issuing a non-IOCTL cmd
 * @instance:			Adapter soft state
 * @scmd:			pointer to scsi cmd from OS
 */
2027 static u32
2028 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2029 				   struct scsi_cmnd *scmd)
2030 {
2031 	struct megasas_cmd_fusion *cmd;
2032 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2033 	u32 index;
2034 	struct fusion_context *fusion;
2035 
2036 	fusion = instance->ctrl_context;
2037 
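	/*
	 * SCSI I/O MPT frames are indexed by the block layer tag, so the
	 * frame-to-request mapping is one to one.
	 */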
2038 	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2039 
2040 	index = cmd->index;
2041 
2042 	req_desc = megasas_get_request_descriptor(instance, index-1);
2043 	if (!req_desc)
2044 		return 1;
2045 
2046 	req_desc->Words = 0;
2047 	cmd->request_desc = req_desc;
2048 
2049 	if (megasas_build_io_fusion(instance, scmd, cmd)) {
2050 		megasas_return_cmd_fusion(instance, cmd);
2051 		dev_err(&instance->pdev->dev, "Error building command\n");
2052 		cmd->request_desc = NULL;
2053 		return 1;
2054 	}
2055 
2056 	req_desc = cmd->request_desc;
2057 	req_desc->SCSIIO.SMID = cpu_to_le16(index);
2058 
2059 	if (cmd->io_request->ChainOffset != 0 &&
2060 	    cmd->io_request->ChainOffset != 0xF)
2061 		dev_err(&instance->pdev->dev, "The chain offset value is not "
2062 		       "correct : %x\n", cmd->io_request->ChainOffset);
2063 
2064 	/*
2065 	 * Issue the command to the FW
2066 	 */
2067 	atomic_inc(&instance->fw_outstanding);
2068 
2069 	megasas_fire_cmd_fusion(instance, req_desc);
2070 
2071 	return 0;
2072 }
2073 
/**
 * complete_cmd_fusion -	Completes commands
 * @instance:			Adapter soft state
 * @MSIxIndex:			MSI-X (reply queue) index to service
 *
 * Completes all commands that are in the reply descriptor queue.
 */
2079 int
2080 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2081 {
2082 	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
2083 	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
2084 	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
2085 	struct fusion_context *fusion;
2086 	struct megasas_cmd *cmd_mfi;
2087 	struct megasas_cmd_fusion *cmd_fusion;
2088 	u16 smid, num_completed;
2089 	u8 reply_descript_type;
2090 	u32 status, extStatus, device_id;
2091 	union desc_value d_val;
2092 	struct LD_LOAD_BALANCE_INFO *lbinfo;
2093 	int threshold_reply_count = 0;
2094 	struct scsi_cmnd *scmd_local = NULL;
2095 
2096 	fusion = instance->ctrl_context;
2097 
2098 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
2099 		return IRQ_HANDLED;
2100 
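	/*
	 * Each MSI-X vector owns a contiguous slice of the reply descriptor
	 * pool; last_reply_idx[] is the per-queue consumer index.
	 */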
2101 	desc = fusion->reply_frames_desc;
2102 	desc += ((MSIxIndex * fusion->reply_alloc_sz)/
2103 		 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
2104 		fusion->last_reply_idx[MSIxIndex];
2105 
2106 	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2107 
2108 	d_val.word = desc->Words;
2109 
2110 	reply_descript_type = reply_desc->ReplyFlags &
2111 		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2112 
2113 	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2114 		return IRQ_NONE;
2115 
2116 	num_completed = 0;
2117 
2118 	while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
2119 	       d_val.u.high != cpu_to_le32(UINT_MAX)) {
2120 		smid = le16_to_cpu(reply_desc->SMID);
2121 
2122 		cmd_fusion = fusion->cmd_list[smid - 1];
2123 
2124 		scsi_io_req =
2125 			(struct MPI2_RAID_SCSI_IO_REQUEST *)
2126 		  cmd_fusion->io_request;
2127 
2128 		if (cmd_fusion->scmd)
2129 			cmd_fusion->scmd->SCp.ptr = NULL;
2130 
2131 		scmd_local = cmd_fusion->scmd;
2132 		status = scsi_io_req->RaidContext.status;
2133 		extStatus = scsi_io_req->RaidContext.exStatus;
2134 
2135 		switch (scsi_io_req->Function) {
2136 		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
2137 			/* Update load balancing info */
2138 			device_id = MEGASAS_DEV_INDEX(scmd_local);
2139 			lbinfo = &fusion->load_balance_info[device_id];
2140 			if (cmd_fusion->scmd->SCp.Status &
2141 			    MEGASAS_LOAD_BALANCE_FLAG) {
2142 				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
2143 				cmd_fusion->scmd->SCp.Status &=
2144 					~MEGASAS_LOAD_BALANCE_FLAG;
2145 			}
2146 			if (reply_descript_type ==
2147 			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2148 				if (megasas_dbg_lvl == 5)
2149 					dev_err(&instance->pdev->dev, "\nFAST Path "
2150 					       "IO Success\n");
2151 			}
2152 			/* Fall thru and complete IO */
2153 		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
2154 			/* Map the FW Cmd Status */
2155 			map_cmd_status(cmd_fusion, status, extStatus);
2156 			scsi_io_req->RaidContext.status = 0;
2157 			scsi_io_req->RaidContext.exStatus = 0;
2158 			megasas_return_cmd_fusion(instance, cmd_fusion);
2159 			scsi_dma_unmap(scmd_local);
2160 			scmd_local->scsi_done(scmd_local);
2161 			atomic_dec(&instance->fw_outstanding);
2162 
2163 			break;
2164 		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
2165 			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2166 
2167 			/* Poll mode. Dummy free.
2168 			 * In case of Interrupt mode, caller has reverse check.
2169 			 */
2170 			if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
2171 				cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
2172 				megasas_return_cmd(instance, cmd_mfi);
2173 			} else
2174 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2175 			break;
2176 		}
2177 
2178 		fusion->last_reply_idx[MSIxIndex]++;
2179 		if (fusion->last_reply_idx[MSIxIndex] >=
2180 		    fusion->reply_q_depth)
2181 			fusion->last_reply_idx[MSIxIndex] = 0;
2182 
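		/*
		 * Mark this descriptor as unused so a stale entry is not
		 * re-processed once the reply queue wraps around.
		 */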
2183 		desc->Words = cpu_to_le64(ULLONG_MAX);
2184 		num_completed++;
2185 		threshold_reply_count++;
2186 
2187 		/* Get the next reply descriptor */
2188 		if (!fusion->last_reply_idx[MSIxIndex])
2189 			desc = fusion->reply_frames_desc +
2190 				((MSIxIndex * fusion->reply_alloc_sz)/
2191 				 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
2192 		else
2193 			desc++;
2194 
2195 		reply_desc =
2196 		  (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2197 
2198 		d_val.word = desc->Words;
2199 
2200 		reply_descript_type = reply_desc->ReplyFlags &
2201 			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2202 
2203 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2204 			break;
2205 		/*
2206 		 * Write to reply post host index register after completing threshold
2207 		 * number of reply counts and still there are more replies in reply queue
2208 		 * pending to be completed
2209 		 */
2210 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2211 			if (fusion->adapter_type == INVADER_SERIES)
2212 				writel(((MSIxIndex & 0x7) << 24) |
2213 					fusion->last_reply_idx[MSIxIndex],
2214 					instance->reply_post_host_index_addr[MSIxIndex/8]);
2215 			else
2216 				writel((MSIxIndex << 24) |
2217 					fusion->last_reply_idx[MSIxIndex],
2218 					instance->reply_post_host_index_addr[0]);
2219 			threshold_reply_count = 0;
2220 		}
2221 	}
2222 
2223 	if (!num_completed)
2224 		return IRQ_NONE;
2225 
2226 	wmb();
2227 	if (fusion->adapter_type == INVADER_SERIES)
2228 		writel(((MSIxIndex & 0x7) << 24) |
2229 			fusion->last_reply_idx[MSIxIndex],
2230 			instance->reply_post_host_index_addr[MSIxIndex/8]);
2231 	else
2232 		writel((MSIxIndex << 24) |
2233 			fusion->last_reply_idx[MSIxIndex],
2234 			instance->reply_post_host_index_addr[0]);
2235 	megasas_check_and_restore_queue_depth(instance);
2236 	return IRQ_HANDLED;
2237 }
2238 
/**
 * megasas_complete_cmd_dpc_fusion -	Completes commands
 * @instance_addr:			Adapter soft state address
 *
 * Tasklet to complete cmds
 */
2245 void
2246 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
2247 {
2248 	struct megasas_instance *instance =
2249 		(struct megasas_instance *)instance_addr;
2250 	unsigned long flags;
2251 	u32 count, MSIxIndex;
2252 
2253 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2254 
	/* If we have already declared adapter dead, do not complete cmds */
2256 	spin_lock_irqsave(&instance->hba_lock, flags);
2257 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2258 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2259 		return;
2260 	}
2261 	spin_unlock_irqrestore(&instance->hba_lock, flags);
2262 
2263 	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
2264 		complete_cmd_fusion(instance, MSIxIndex);
2265 }
2266 
/**
 * megasas_isr_fusion - isr entry point
 * @irq:	irq number
 * @devp:	IRQ context (struct megasas_irq_context)
 */
2270 irqreturn_t megasas_isr_fusion(int irq, void *devp)
2271 {
2272 	struct megasas_irq_context *irq_context = devp;
2273 	struct megasas_instance *instance = irq_context->instance;
2274 	u32 mfiStatus, fw_state, dma_state;
2275 
2276 	if (instance->mask_interrupts)
2277 		return IRQ_NONE;
2278 
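	/*
	 * In legacy (non-MSI-X) mode the interrupt must be claimed by reading
	 * and clearing the doorbell; if nothing was pending, the interrupt
	 * was not raised by this adapter.
	 */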
2279 	if (!instance->msix_vectors) {
2280 		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
2281 		if (!mfiStatus)
2282 			return IRQ_NONE;
2283 	}
2284 
2285 	/* If we are resetting, bail */
2286 	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
2287 		instance->instancet->clear_intr(instance->reg_set);
2288 		return IRQ_HANDLED;
2289 	}
2290 
2291 	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
2292 		instance->instancet->clear_intr(instance->reg_set);
2293 		/* If we didn't complete any commands, check for FW fault */
2294 		fw_state = instance->instancet->read_fw_status_reg(
2295 			instance->reg_set) & MFI_STATE_MASK;
2296 		dma_state = instance->instancet->read_fw_status_reg
2297 			(instance->reg_set) & MFI_STATE_DMADONE;
2298 		if (instance->crash_dump_drv_support &&
2299 			instance->crash_dump_app_support) {
2300 			/* Start collecting crash, if DMA bit is done */
2301 			if ((fw_state == MFI_STATE_FAULT) && dma_state)
2302 				schedule_work(&instance->crash_init);
2303 			else if (fw_state == MFI_STATE_FAULT)
2304 				schedule_work(&instance->work_init);
2305 		} else if (fw_state == MFI_STATE_FAULT) {
			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt "
			       "for scsi%d\n", instance->host->host_no);
2308 			schedule_work(&instance->work_init);
2309 		}
2310 	}
2311 
2312 	return IRQ_HANDLED;
2313 }
2314 
/**
 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
 * @instance:			Adapter soft state
 * @mfi_cmd:			megasas_cmd pointer
 *
 */
2321 u8
2322 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2323 			struct megasas_cmd *mfi_cmd)
2324 {
2325 	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2326 	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
2327 	struct megasas_cmd_fusion *cmd;
2328 	struct fusion_context *fusion;
2329 	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2330 
2331 	fusion = instance->ctrl_context;
2332 
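	/*
	 * The first max_scsi_cmds MPT frames are reserved for SCSI I/O; MFI
	 * pass-through commands use the frames beyond that, indexed by the
	 * MFI command index.
	 */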
2333 	cmd = megasas_get_cmd_fusion(instance,
2334 			instance->max_scsi_cmds + mfi_cmd->index);
2335 
2336 	/*  Save the smid. To be used for returning the cmd */
2337 	mfi_cmd->context.smid = cmd->index;
2338 
	/*
	 * Cmds with MFI_FRAME_DONT_POST_IN_REPLY_QUEUE set are flagged as
	 * polled in the driver and the flag is checked on completion; for
	 * such cmds, megasas_complete_cmd is not called.
	 */
2344 
2345 	if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2346 		mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
2347 
2348 	io_req = cmd->io_request;
2349 
2350 	if (fusion->adapter_type == INVADER_SERIES) {
2351 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
2352 			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
2353 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2354 		sgl_ptr_end->Flags = 0;
2355 	}
2356 
2357 	mpi25_ieee_chain =
2358 	  (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2359 
2360 	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2361 	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
2362 				       SGL) / 4;
2363 	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
2364 
2365 	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
2366 
2367 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2368 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2369 
2370 	mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
2371 
2372 	return 0;
2373 }
2374 
/**
 * build_mpt_cmd - Calls helper function to build an MFI Pass thru cmd
 * @instance:			Adapter soft state
 * @cmd:			mfi cmd to build
 *
 */
2381 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2382 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2383 {
2384 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2385 	u16 index;
2386 
2387 	if (build_mpt_mfi_pass_thru(instance, cmd)) {
2388 		dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
2389 		return NULL;
2390 	}
2391 
2392 	index = cmd->context.smid;
2393 
2394 	req_desc = megasas_get_request_descriptor(instance, index - 1);
2395 
2396 	if (!req_desc)
2397 		return NULL;
2398 
2399 	req_desc->Words = 0;
2400 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2401 					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2402 
2403 	req_desc->SCSIIO.SMID = cpu_to_le16(index);
2404 
2405 	return req_desc;
2406 }
2407 
2408 /**
 * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd
2410  * @instance:			Adapter soft state
2411  * @cmd:			mfi cmd pointer
2412  *
2413  */
2414 void
2415 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2416 			  struct megasas_cmd *cmd)
2417 {
2418 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2419 
2420 	req_desc = build_mpt_cmd(instance, cmd);
2421 	if (!req_desc) {
2422 		dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
2423 		return;
2424 	}
2425 	megasas_fire_cmd_fusion(instance, req_desc);
2426 }
2427 
2428 /**
2429  * megasas_release_fusion -	Reverses the FW initialization
2430  * @instance:			Adapter soft state
2431  */
2432 void
2433 megasas_release_fusion(struct megasas_instance *instance)
2434 {
2435 	megasas_free_cmds(instance);
2436 	megasas_free_cmds_fusion(instance);
2437 
2438 	iounmap(instance->reg_set);
2439 
2440 	pci_release_selected_regions(instance->pdev, instance->bar);
2441 }
2442 
2443 /**
2444  * megasas_read_fw_status_reg_fusion - returns the current FW status value
2445  * @regs:			MFI register set
2446  */
2447 static u32
2448 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
2449 {
2450 	return readl(&(regs)->outbound_scratch_pad);
2451 }
2452 
/**
 * megasas_alloc_host_crash_buffer -	Allocate host buffers for crash dump collection from firmware
 * @instance:				Controller's soft instance
 *
 * The number of successfully allocated buffers is recorded in drv_buf_alloc.
 */
2458 static void
2459 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2460 {
2461 	unsigned int i;
2462 
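	/*
	 * Allocate up to MAX_CRASH_DUMP_SIZE chunks, each large enough to
	 * hold one CRASH_DMA_BUF_SIZE copy of the firmware crash dump buffer.
	 */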
2463 	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
2464 	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
2465 		instance->crash_buf[i] = (void	*)__get_free_pages(GFP_KERNEL,
2466 				instance->crash_buf_pages);
2467 		if (!instance->crash_buf[i]) {
2468 			dev_info(&instance->pdev->dev, "Firmware crash dump "
2469 				"memory allocation failed at index %d\n", i);
2470 			break;
2471 		}
2472 		memset(instance->crash_buf[i], 0,
2473 			((1 << PAGE_SHIFT) << instance->crash_buf_pages));
2474 	}
2475 	instance->drv_buf_alloc = i;
2476 }
2477 
/**
 * megasas_free_host_crash_buffer -	Free the host buffers used for crash dump collection from firmware
 * @instance:				Controller's soft instance
 */
2482 void
2483 megasas_free_host_crash_buffer(struct megasas_instance *instance)
2484 {
	unsigned int i;

2487 	for (i = 0; i < instance->drv_buf_alloc; i++) {
2488 		if (instance->crash_buf[i])
2489 			free_pages((ulong)instance->crash_buf[i],
2490 					instance->crash_buf_pages);
2491 	}
2492 	instance->drv_buf_index = 0;
2493 	instance->drv_buf_alloc = 0;
2494 	instance->fw_crash_state = UNAVAILABLE;
2495 	instance->fw_crash_buffer_size = 0;
2496 }
2497 
/**
 * megasas_adp_reset_fusion -	For controller reset
 * @instance:			Adapter soft state
 * @regs:			MFI register set
 */
2502 static int
2503 megasas_adp_reset_fusion(struct megasas_instance *instance,
2504 			 struct megasas_register_set __iomem *regs)
2505 {
2506 	u32 host_diag, abs_state, retry;
2507 
2508 	/* Now try to reset the chip */
2509 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2510 	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2511 	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2512 	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2513 	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2514 	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2515 	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
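	/*
	 * The write-sequence key values above unlock the host diagnostic
	 * register so that the reset adapter bit can be written.
	 */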
2516 
2517 	/* Check that the diag write enable (DRWE) bit is on */
2518 	host_diag = readl(&instance->reg_set->fusion_host_diag);
2519 	retry = 0;
2520 	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2521 		msleep(100);
2522 		host_diag = readl(&instance->reg_set->fusion_host_diag);
2523 		if (retry++ == 100) {
2524 			dev_warn(&instance->pdev->dev,
2525 				"Host diag unlock failed from %s %d\n",
2526 				__func__, __LINE__);
2527 			break;
2528 		}
2529 	}
2530 	if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2531 		return -1;
2532 
2533 	/* Send chip reset command */
2534 	writel(host_diag | HOST_DIAG_RESET_ADAPTER,
2535 		&instance->reg_set->fusion_host_diag);
2536 	msleep(3000);
2537 
2538 	/* Make sure reset adapter bit is cleared */
2539 	host_diag = readl(&instance->reg_set->fusion_host_diag);
2540 	retry = 0;
2541 	while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2542 		msleep(100);
2543 		host_diag = readl(&instance->reg_set->fusion_host_diag);
2544 		if (retry++ == 1000) {
2545 			dev_warn(&instance->pdev->dev,
2546 				"Diag reset adapter never cleared %s %d\n",
2547 				__func__, __LINE__);
2548 			break;
2549 		}
2550 	}
2551 	if (host_diag & HOST_DIAG_RESET_ADAPTER)
2552 		return -1;
2553 
2554 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
2555 			& MFI_STATE_MASK;
2556 	retry = 0;
2557 
2558 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2559 		msleep(100);
2560 		abs_state = instance->instancet->
2561 			read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2562 	}
2563 	if (abs_state <= MFI_STATE_FW_INIT) {
2564 		dev_warn(&instance->pdev->dev,
2565 			"fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
2566 			abs_state, __func__, __LINE__);
2567 		return -1;
2568 	}
2569 
2570 	return 0;
2571 }
2572 
/**
 * megasas_check_reset_fusion -	For controller reset check
 * @instance:			Adapter soft state
 * @regs:			MFI register set
 */
2577 static int
2578 megasas_check_reset_fusion(struct megasas_instance *instance,
2579 			   struct megasas_register_set __iomem *regs)
2580 {
2581 	return 0;
2582 }
2583 
2584 /* This function waits for outstanding commands on fusion to complete */
2585 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2586 					int iotimeout, int *convert)
2587 {
2588 	int i, outstanding, retval = 0, hb_seconds_missed = 0;
2589 	u32 fw_state;
2590 
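	/* Poll roughly once per second, for up to resetwaittime seconds. */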
2591 	for (i = 0; i < resetwaittime; i++) {
2592 		/* Check if firmware is in fault state */
2593 		fw_state = instance->instancet->read_fw_status_reg(
2594 			instance->reg_set) & MFI_STATE_MASK;
2595 		if (fw_state == MFI_STATE_FAULT) {
2596 			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
2597 			       " will reset adapter scsi%d.\n",
2598 				instance->host->host_no);
2599 			retval = 1;
2600 			goto out;
2601 		}
2602 		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
2603 		if (instance->requestorId && !iotimeout) {
2604 			retval = 1;
2605 			goto out;
2606 		}
2607 
2608 		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2609 		if (instance->requestorId && iotimeout) {
2610 			if (instance->hb_host_mem->HB.fwCounter !=
2611 			    instance->hb_host_mem->HB.driverCounter) {
2612 				instance->hb_host_mem->HB.driverCounter =
2613 					instance->hb_host_mem->HB.fwCounter;
2614 				hb_seconds_missed = 0;
2615 			} else {
2616 				hb_seconds_missed++;
2617 				if (hb_seconds_missed ==
2618 				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
					dev_warn(&instance->pdev->dev, "SR-IOV: "
					       "Heartbeat never completed "
					       "while polling during I/O "
					       "timeout handling for "
					       "scsi%d.\n",
2624 					       instance->host->host_no);
2625 					       *convert = 1;
2626 					       retval = 1;
2627 					       goto out;
2628 				}
2629 			}
2630 		}
2631 
2632 		outstanding = atomic_read(&instance->fw_outstanding);
2633 		if (!outstanding)
2634 			goto out;
2635 
2636 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2638 			       "commands to complete for scsi%d\n", i,
2639 			       outstanding, instance->host->host_no);
2640 			megasas_complete_cmd_dpc_fusion(
2641 				(unsigned long)instance);
2642 		}
2643 		msleep(1000);
2644 	}
2645 
2646 	if (atomic_read(&instance->fw_outstanding)) {
2647 		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2648 		       "will reset adapter scsi%d.\n",
2649 		       instance->host->host_no);
2650 		retval = 1;
2651 	}
2652 out:
2653 	return retval;
2654 }
2655 
2656 void  megasas_reset_reply_desc(struct megasas_instance *instance)
2657 {
2658 	int i, count;
2659 	struct fusion_context *fusion;
2660 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2661 
2662 	fusion = instance->ctrl_context;
2663 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2664 	for (i = 0 ; i < count ; i++)
2665 		fusion->last_reply_idx[i] = 0;
2666 	reply_desc = fusion->reply_frames_desc;
2667 	for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
2668 		reply_desc->Words = cpu_to_le64(ULLONG_MAX);
2669 }
2670 
/*
 * megasas_refire_mgmt_cmd -	Re-fire management commands
 * @instance:			Controller's soft instance
 */
2675 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
2676 {
2677 	int j;
2678 	struct megasas_cmd_fusion *cmd_fusion;
2679 	struct fusion_context *fusion;
2680 	struct megasas_cmd *cmd_mfi;
2681 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2682 	u16 smid;
2683 
2684 	fusion = instance->ctrl_context;
2685 
	/* Re-fire management commands.
	 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
	 */
2689 	for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
2690 		cmd_fusion = fusion->cmd_list[j];
2691 		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2692 		smid = le16_to_cpu(cmd_mfi->context.smid);
2693 
2694 		if (!smid)
2695 			continue;
2696 		req_desc = megasas_get_request_descriptor
2697 					(instance, smid - 1);
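		/*
		 * RAID/JBOD map DCMDs are not re-fired; they are returned
		 * and re-issued as part of the reset path instead.
		 */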
2698 		if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
2699 				cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
2700 				 (cmd_mfi->frame->dcmd.opcode !=
2701 				cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
2702 			megasas_fire_cmd_fusion(instance, req_desc);
2703 		else
2704 			megasas_return_cmd(instance, cmd_mfi);
2705 	}
2706 }
2707 
2708 /* Check for a second path that is currently UP */
2709 int megasas_check_mpio_paths(struct megasas_instance *instance,
2710 	struct scsi_cmnd *scmd)
2711 {
2712 	int i, j, retval = (DID_RESET << 16);
2713 
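	/*
	 * If another adapter instance exposes the same target over a second
	 * path that is currently up, return DID_NO_CONNECT so the command is
	 * not retried on this path; otherwise return DID_RESET so it is
	 * retried.
	 */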
2714 	if (instance->mpio && instance->requestorId) {
2715 		for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
2716 			for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
2717 				if (megasas_mgmt_info.instance[i] &&
2718 				    (megasas_mgmt_info.instance[i] != instance) &&
2719 				    megasas_mgmt_info.instance[i]->mpio &&
2720 				    megasas_mgmt_info.instance[i]->requestorId
2721 				    &&
2722 				    (megasas_mgmt_info.instance[i]->ld_ids[j]
2723 				     == scmd->device->id)) {
2724 					    retval = (DID_NO_CONNECT << 16);
2725 					    goto out;
2726 				}
2727 	}
2728 out:
2729 	return retval;
2730 }
2731 
2732 /* Core fusion reset function */
2733 int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2734 {
2735 	int retval = SUCCESS, i, convert = 0;
2736 	struct megasas_instance *instance;
2737 	struct megasas_cmd_fusion *cmd_fusion;
2738 	struct fusion_context *fusion;
2739 	u32 abs_state, status_reg, reset_adapter;
2740 	u32 io_timeout_in_crash_mode = 0;
2741 	struct scsi_cmnd *scmd_local = NULL;
2742 
2743 	instance = (struct megasas_instance *)shost->hostdata;
2744 	fusion = instance->ctrl_context;
2745 
2746 	mutex_lock(&instance->reset_mutex);
2747 
2748 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2749 		dev_warn(&instance->pdev->dev, "Hardware critical error, "
2750 		       "returning FAILED for scsi%d.\n",
2751 			instance->host->host_no);
2752 		mutex_unlock(&instance->reset_mutex);
2753 		return FAILED;
2754 	}
2755 	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
2756 	abs_state = status_reg & MFI_STATE_MASK;
2757 
2758 	/* IO timeout detected, forcibly put FW in FAULT state */
2759 	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
2760 		instance->crash_dump_app_support && iotimeout) {
		dev_info(&instance->pdev->dev, "IO timeout detected, "
			"forcing firmware into FAULT state\n");
2763 		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2764 		status_reg = readl(&instance->reg_set->doorbell);
2765 		writel(status_reg | MFI_STATE_FORCE_OCR,
2766 			&instance->reg_set->doorbell);
2767 		readl(&instance->reg_set->doorbell);
2768 		mutex_unlock(&instance->reset_mutex);
2769 		do {
2770 			ssleep(3);
2771 			io_timeout_in_crash_mode++;
2772 			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
2773 				"seconds for crash dump collection and OCR "
2774 				"to be done\n", (io_timeout_in_crash_mode * 3));
2775 		} while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
2776 			(io_timeout_in_crash_mode < 80));
2777 
2778 		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
2779 			dev_info(&instance->pdev->dev, "OCR done for IO "
2780 				"timeout case\n");
2781 			retval = SUCCESS;
2782 		} else {
			dev_info(&instance->pdev->dev, "Controller is not "
				"operational after a 240 second wait for the IO "
				"timeout case in FW crash dump mode; doing "
				"OCR/kill adapter\n");
2787 			retval = megasas_reset_fusion(shost, 0);
2788 		}
2789 		return retval;
2790 	}
2791 
2792 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
2793 		del_timer_sync(&instance->sriov_heartbeat_timer);
2794 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2795 	instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
2796 	instance->instancet->disable_intr(instance);
2797 	msleep(1000);
2798 
2799 	/* First try waiting for commands to complete */
2800 	if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
2801 						&convert)) {
2802 		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2803 		dev_warn(&instance->pdev->dev, "resetting fusion "
2804 		       "adapter scsi%d.\n", instance->host->host_no);
2805 		if (convert)
2806 			iotimeout = 0;
2807 
2808 		/* Now return commands back to the OS */
2809 		for (i = 0 ; i < instance->max_scsi_cmds; i++) {
2810 			cmd_fusion = fusion->cmd_list[i];
2811 			scmd_local = cmd_fusion->scmd;
2812 			if (cmd_fusion->scmd) {
2813 				scmd_local->result =
2814 					megasas_check_mpio_paths(instance,
2815 							scmd_local);
2816 				megasas_return_cmd_fusion(instance, cmd_fusion);
2817 				scsi_dma_unmap(scmd_local);
2818 				scmd_local->scsi_done(scmd_local);
2819 				atomic_dec(&instance->fw_outstanding);
2820 			}
2821 		}
2822 
2823 		status_reg = instance->instancet->read_fw_status_reg(
2824 			instance->reg_set);
2825 		abs_state = status_reg & MFI_STATE_MASK;
2826 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
2827 		if (instance->disableOnlineCtrlReset ||
2828 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2829 			/* Reset not supported, kill adapter */
2830 			dev_warn(&instance->pdev->dev, "Reset not supported"
2831 			       ", killing adapter scsi%d.\n",
2832 				instance->host->host_no);
2833 			megaraid_sas_kill_hba(instance);
2834 			instance->skip_heartbeat_timer_del = 1;
2835 			retval = FAILED;
2836 			goto out;
2837 		}
2838 
2839 		/* Let SR-IOV VF & PF sync up if there was a HB failure */
2840 		if (instance->requestorId && !iotimeout) {
2841 			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
2842 			/* Look for a late HB update after VF settle time */
2843 			if (abs_state == MFI_STATE_OPERATIONAL &&
2844 			    (instance->hb_host_mem->HB.fwCounter !=
2845 			     instance->hb_host_mem->HB.driverCounter)) {
2846 					instance->hb_host_mem->HB.driverCounter =
2847 						instance->hb_host_mem->HB.fwCounter;
					dev_warn(&instance->pdev->dev, "SR-IOV: "
					       "Late FW heartbeat update for "
2850 					       "scsi%d.\n",
2851 					       instance->host->host_no);
2852 			} else {
2853 				/* In VF mode, first poll for FW ready */
2854 				for (i = 0;
2855 				     i < (MEGASAS_RESET_WAIT_TIME * 1000);
2856 				     i += 20) {
2857 					status_reg =
2858 						instance->instancet->
2859 						read_fw_status_reg(
2860 							instance->reg_set);
2861 					abs_state = status_reg &
2862 						MFI_STATE_MASK;
2863 					if (abs_state == MFI_STATE_READY) {
2864 						dev_warn(&instance->pdev->dev,
						       "SR-IOV: FW was found "
2866 						       "to be in ready state "
2867 						       "for scsi%d.\n",
2868 						       instance->host->host_no);
2869 						break;
2870 					}
2871 					msleep(20);
2872 				}
2873 				if (abs_state != MFI_STATE_READY) {
2874 					dev_warn(&instance->pdev->dev, "SR-IOV: "
2875 					       "FW not in ready state after %d"
2876 					       " seconds for scsi%d, status_reg = "
2877 					       "0x%x.\n",
2878 					       MEGASAS_RESET_WAIT_TIME,
2879 					       instance->host->host_no,
2880 					       status_reg);
2881 					megaraid_sas_kill_hba(instance);
2882 					instance->skip_heartbeat_timer_del = 1;
2883 					instance->adprecovery =
2884 						MEGASAS_HW_CRITICAL_ERROR;
2885 					retval = FAILED;
2886 					goto out;
2887 				}
2888 			}
2889 		}
2890 
2891 		/* Now try to reset the chip */
2892 		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
2893 
2894 			if (instance->instancet->adp_reset
2895 				(instance, instance->reg_set))
2896 				continue;
2897 
2898 			/* Wait for FW to become ready */
2899 			if (megasas_transition_to_ready(instance, 1)) {
2900 				dev_warn(&instance->pdev->dev, "Failed to "
2901 				       "transition controller to ready "
2902 				       "for scsi%d.\n",
2903 				       instance->host->host_no);
2904 				continue;
2905 			}
2906 
2907 			megasas_reset_reply_desc(instance);
2908 			if (megasas_ioc_init_fusion(instance)) {
2909 				dev_warn(&instance->pdev->dev,
2910 				       "megasas_ioc_init_fusion() failed!"
2911 				       " for scsi%d\n",
2912 				       instance->host->host_no);
2913 				continue;
2914 			}
2915 
2916 			megasas_refire_mgmt_cmd(instance);
2917 
2918 			if (megasas_get_ctrl_info(instance)) {
2919 				dev_info(&instance->pdev->dev,
2920 					"Failed from %s %d\n",
2921 					__func__, __LINE__);
2922 				megaraid_sas_kill_hba(instance);
2923 				retval = FAILED;
2924 			}
2925 			/* Reset load balance info */
2926 			memset(fusion->load_balance_info, 0,
2927 			       sizeof(struct LD_LOAD_BALANCE_INFO)
2928 			       *MAX_LOGICAL_DRIVES_EXT);
2929 
2930 			if (!megasas_get_map_info(instance))
2931 				megasas_sync_map_info(instance);
2932 
2933 			megasas_setup_jbod_map(instance);
2934 
2935 			clear_bit(MEGASAS_FUSION_IN_RESET,
2936 				  &instance->reset_flags);
2937 			instance->instancet->enable_intr(instance);
2938 			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2939 
2940 			/* Restart SR-IOV heartbeat */
2941 			if (instance->requestorId) {
2942 				if (!megasas_sriov_start_heartbeat(instance, 0))
2943 					megasas_start_timer(instance,
2944 							    &instance->sriov_heartbeat_timer,
2945 							    megasas_sriov_heartbeat_handler,
2946 							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2947 				else
2948 					instance->skip_heartbeat_timer_del = 1;
2949 			}
2950 
2951 			/* Adapter reset completed successfully */
2952 			dev_warn(&instance->pdev->dev, "Reset "
2953 			       "successful for scsi%d.\n",
2954 				instance->host->host_no);
2955 
2956 			if (instance->crash_dump_drv_support &&
2957 				instance->crash_dump_app_support)
2958 				megasas_set_crash_dump_params(instance,
2959 					MR_CRASH_BUF_TURN_ON);
2960 			else
2961 				megasas_set_crash_dump_params(instance,
2962 					MR_CRASH_BUF_TURN_OFF);
2963 
2964 			retval = SUCCESS;
2965 			goto out;
2966 		}
2967 		/* Reset failed, kill the adapter */
2968 		dev_warn(&instance->pdev->dev, "Reset failed, killing "
2969 		       "adapter scsi%d.\n", instance->host->host_no);
2970 		megaraid_sas_kill_hba(instance);
2971 		instance->skip_heartbeat_timer_del = 1;
2972 		retval = FAILED;
2973 	} else {
2974 		/* For VF: Restart HB timer if we didn't OCR */
2975 		if (instance->requestorId) {
2976 			megasas_start_timer(instance,
2977 					    &instance->sriov_heartbeat_timer,
2978 					    megasas_sriov_heartbeat_handler,
2979 					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2980 		}
2981 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2982 		instance->instancet->enable_intr(instance);
2983 		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2984 	}
2985 out:
2986 	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2987 	mutex_unlock(&instance->reset_mutex);
2988 	return retval;
2989 }
2990 
2991 /* Fusion Crash dump collection work queue */
2992 void  megasas_fusion_crash_dump_wq(struct work_struct *work)
2993 {
2994 	struct megasas_instance *instance =
2995 		container_of(work, struct megasas_instance, crash_init);
2996 	u32 status_reg;
2997 	u8 partial_copy = 0;
2998 
2999 
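	/*
	 * Crash dump handshake, as implemented below: firmware fills the
	 * 1 MB DMA crash buffer, the driver copies each chunk into a host
	 * buffer and acknowledges by writing the scratch pad back with
	 * MFI_STATE_DMADONE cleared; MFI_STATE_CRASH_DUMP_DONE indicates the
	 * dump is complete.
	 */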
3000 	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3001 
3002 	/*
3003 	 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
3004 	 * to host crash buffers
3005 	 */
3006 	if (instance->drv_buf_index == 0) {
3007 		/* Buffer is already allocated for old Crash dump.
3008 		 * Do OCR and do not wait for crash dump collection
3009 		 */
3010 		if (instance->drv_buf_alloc) {
3011 			dev_info(&instance->pdev->dev, "earlier crash dump is "
3012 				"not yet copied by application, ignoring this "
3013 				"crash dump and initiating OCR\n");
3014 			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3015 			writel(status_reg,
3016 				&instance->reg_set->outbound_scratch_pad);
3017 			readl(&instance->reg_set->outbound_scratch_pad);
3018 			return;
3019 		}
3020 		megasas_alloc_host_crash_buffer(instance);
3021 		dev_info(&instance->pdev->dev, "Number of host crash buffers "
3022 			"allocated: %d\n", instance->drv_buf_alloc);
3023 	}
3024 
	/*
	 * If the driver has already allocated the maximum number of buffers
	 * it can and FW still has more crash dump data, the remaining data
	 * is ignored.
	 */
3030 	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
3031 		dev_info(&instance->pdev->dev, "Driver is done copying "
3032 			"the buffer: %d\n", instance->drv_buf_alloc);
3033 		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3034 		partial_copy = 1;
3035 	} else {
3036 		memcpy(instance->crash_buf[instance->drv_buf_index],
3037 			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
3038 		instance->drv_buf_index++;
3039 		status_reg &= ~MFI_STATE_DMADONE;
3040 	}
3041 
3042 	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
			"of copied buffers: %d\n", instance->drv_buf_index);
3045 		instance->fw_crash_buffer_size =  instance->drv_buf_index;
3046 		instance->fw_crash_state = AVAILABLE;
3047 		instance->drv_buf_index = 0;
3048 		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3049 		readl(&instance->reg_set->outbound_scratch_pad);
3050 		if (!partial_copy)
3051 			megasas_reset_fusion(instance->host, 0);
3052 	} else {
3053 		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3054 		readl(&instance->reg_set->outbound_scratch_pad);
3055 	}
3056 }
3057 
3058 
3059 /* Fusion OCR work queue */
3060 void megasas_fusion_ocr_wq(struct work_struct *work)
3061 {
3062 	struct megasas_instance *instance =
3063 		container_of(work, struct megasas_instance, work_init);
3064 
3065 	megasas_reset_fusion(instance->host, 0);
3066 }
3067 
3068 struct megasas_instance_template megasas_instance_template_fusion = {
3069 	.enable_intr = megasas_enable_intr_fusion,
3070 	.disable_intr = megasas_disable_intr_fusion,
3071 	.clear_intr = megasas_clear_intr_fusion,
3072 	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
3073 	.adp_reset = megasas_adp_reset_fusion,
3074 	.check_reset = megasas_check_reset_fusion,
3075 	.service_isr = megasas_isr_fusion,
3076 	.tasklet = megasas_complete_cmd_dpc_fusion,
3077 	.init_adapter = megasas_init_adapter_fusion,
3078 	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
3079 	.issue_dcmd = megasas_issue_dcmd_fusion,
3080 };
3081