1 /*
2  *  Linux MegaRAID driver for SAS based RAID controllers
3  *
4  *  Copyright (c) 2009-2013  LSI Corporation
5  *  Copyright (c) 2013-2014  Avago Technologies
6  *
7  *  This program is free software; you can redistribute it and/or
8  *  modify it under the terms of the GNU General Public License
9  *  as published by the Free Software Foundation; either version 2
10  *  of the License, or (at your option) any later version.
11  *
12  *  This program is distributed in the hope that it will be useful,
13  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *  GNU General Public License for more details.
16  *
17  *  You should have received a copy of the GNU General Public License
18  *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  *
20  *  FILE: megaraid_sas_fusion.c
21  *
22  *  Authors: Avago Technologies
23  *           Sumant Patro
24  *           Adam Radford
25  *           Kashyap Desai <kashyap.desai@avagotech.com>
26  *           Sumit Saxena <sumit.saxena@avagotech.com>
27  *
28  *  Send feedback to: megaraidlinux.pdl@avagotech.com
29  *
30  *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31  *  San Jose, California 95131
32  */
33 
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
45 #include <linux/fs.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50 
51 #include <scsi/scsi.h>
52 #include <scsi/scsi_cmnd.h>
53 #include <scsi/scsi_device.h>
54 #include <scsi/scsi_host.h>
55 #include <scsi/scsi_dbg.h>
56 #include <linux/dmi.h>
57 
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
60 
61 
62 extern void megasas_free_cmds(struct megasas_instance *instance);
63 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
64 					   *instance);
65 extern void
66 megasas_complete_cmd(struct megasas_instance *instance,
67 		     struct megasas_cmd *cmd, u8 alt_status);
68 int
69 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
70 	      int seconds);
71 
72 void
73 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
74 int megasas_alloc_cmds(struct megasas_instance *instance);
75 int
76 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
77 int
78 megasas_issue_polled(struct megasas_instance *instance,
79 		     struct megasas_cmd *cmd);
80 void
81 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
82 
83 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
84 void megaraid_sas_kill_hba(struct megasas_instance *instance);
85 
86 extern u32 megasas_dbg_lvl;
87 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
88 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
89 				  int initial);
90 void megasas_start_timer(struct megasas_instance *instance,
91 			struct timer_list *timer,
92 			 void *fn, unsigned long interval);
93 extern struct megasas_mgmt_info megasas_mgmt_info;
94 extern unsigned int resetwaittime;
95 extern unsigned int dual_qdepth_disable;
96 static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
97 static void megasas_free_reply_fusion(struct megasas_instance *instance);
98 
99 
100 
/**
 * megasas_enable_intr_fusion -	Enables interrupts
 * @instance:			Adapter soft state
 */
105 void
106 megasas_enable_intr_fusion(struct megasas_instance *instance)
107 {
108 	struct megasas_register_set __iomem *regs;
109 	regs = instance->reg_set;
110 
111 	instance->mask_interrupts = 0;
112 	/* For Thunderbolt/Invader also clear intr on enable */
113 	writel(~0, &regs->outbound_intr_status);
114 	readl(&regs->outbound_intr_status);
115 
116 	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
117 
118 	/* Dummy readl to force pci flush */
119 	readl(&regs->outbound_intr_mask);
120 }
121 
/**
 * megasas_disable_intr_fusion - Disables interrupt
 * @instance:			 Adapter soft state
 */
126 void
127 megasas_disable_intr_fusion(struct megasas_instance *instance)
128 {
129 	u32 mask = 0xFFFFFFFF;
130 	u32 status;
131 	struct megasas_register_set __iomem *regs;
132 	regs = instance->reg_set;
133 	instance->mask_interrupts = 1;
134 
135 	writel(mask, &regs->outbound_intr_mask);
136 	/* Dummy readl to force pci flush */
137 	status = readl(&regs->outbound_intr_mask);
138 }
139 
140 int
141 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
142 {
143 	u32 status;
144 	/*
145 	 * Check if it is our interrupt
146 	 */
147 	status = readl(&regs->outbound_intr_status);
148 
149 	if (status & 1) {
150 		writel(status, &regs->outbound_intr_status);
151 		readl(&regs->outbound_intr_status);
152 		return 1;
153 	}
154 	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
155 		return 0;
156 
157 	return 1;
158 }
159 
/**
 * megasas_get_cmd_fusion -	Get a command from the free pool
 * @instance:		Adapter soft state
 * @blk_tag:		Block layer tag, used as an index into the command list
 *
 * Returns a blk_tag indexed mpt frame
 */
166 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
167 						  *instance, u32 blk_tag)
168 {
169 	struct fusion_context *fusion;
170 
171 	fusion = instance->ctrl_context;
172 	return fusion->cmd_list[blk_tag];
173 }
174 
175 /**
176  * megasas_return_cmd_fusion -	Return a cmd to free command pool
177  * @instance:		Adapter soft state
178  * @cmd:		Command packet to be returned to free command pool
179  */
180 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
181 	struct megasas_cmd_fusion *cmd)
182 {
183 	cmd->scmd = NULL;
184 	memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
185 }
186 
/**
 * megasas_fire_cmd_fusion -	Sends command to the FW
 * @instance:		Adapter soft state
 * @req_desc:		Request descriptor to be posted to the firmware
 */
190 static void
191 megasas_fire_cmd_fusion(struct megasas_instance *instance,
192 		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
193 {
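	/*
	 * When writeq() is available on a 64-bit build, the 64-bit request
	 * descriptor is posted in a single MMIO write. Otherwise the low and
	 * high 32-bit halves are written separately under hba_lock so the two
	 * writes cannot interleave with another CPU's submission.
	 */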
194 #if defined(writeq) && defined(CONFIG_64BIT)
195 	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
196 			le32_to_cpu(req_desc->u.low));
197 
198 	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
199 #else
200 	unsigned long flags;
201 
202 	spin_lock_irqsave(&instance->hba_lock, flags);
203 	writel(le32_to_cpu(req_desc->u.low),
204 		&instance->reg_set->inbound_low_queue_port);
205 	writel(le32_to_cpu(req_desc->u.high),
206 		&instance->reg_set->inbound_high_queue_port);
207 	mmiowb();
208 	spin_unlock_irqrestore(&instance->hba_lock, flags);
209 #endif
210 }
211 
/**
 * megasas_fusion_update_can_queue -	Do all adapter queue depth related calculations here
 * @instance:				Adapter soft state
 * @fw_boot_context:			Whether this function is called during probe or after OCR
 *
 * This function is only for fusion controllers.
 * Update the host can_queue if the firmware has downgraded its maximum
 * supported commands. The firmware upgrade case is skipped because the
 * underlying firmware has more resources than are exposed to the OS.
 */
223 static void
224 megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
225 {
226 	u16 cur_max_fw_cmds = 0;
227 	u16 ldio_threshold = 0;
228 	struct megasas_register_set __iomem *reg_set;
229 
230 	reg_set = instance->reg_set;
231 
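	/*
	 * The low 16 bits of scratch pad 3 report the extended (dual) queue
	 * depth. If that reads zero, or dual queue depth is disabled via the
	 * module parameter, fall back to the queue depth advertised in the
	 * firmware status register.
	 */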
232 	cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
233 
234 	if (dual_qdepth_disable || !cur_max_fw_cmds)
235 		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
236 	else
237 		ldio_threshold =
238 			(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
239 
240 	dev_info(&instance->pdev->dev,
241 			"Current firmware maximum commands: %d\t LDIO threshold: %d\n",
242 			cur_max_fw_cmds, ldio_threshold);
243 
244 	if (fw_boot_context == OCR_CONTEXT) {
245 		cur_max_fw_cmds = cur_max_fw_cmds - 1;
246 		if (cur_max_fw_cmds <= instance->max_fw_cmds) {
247 			instance->cur_can_queue =
248 				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
249 						MEGASAS_FUSION_IOCTL_CMDS);
250 			instance->host->can_queue = instance->cur_can_queue;
251 			instance->ldio_threshold = ldio_threshold;
252 		}
253 	} else {
254 		instance->max_fw_cmds = cur_max_fw_cmds;
255 		instance->ldio_threshold = ldio_threshold;
256 
257 		if (!instance->is_rdpq)
258 			instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
259 
260 		if (reset_devices)
261 			instance->max_fw_cmds = min(instance->max_fw_cmds,
262 						(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
263 		/*
264 		* Reduce the max supported cmds by 1. This is to ensure that the
265 		* reply_q_sz (1 more than the max cmd that driver may send)
266 		* does not exceed max cmds that the FW can support
267 		*/
268 		instance->max_fw_cmds = instance->max_fw_cmds-1;
269 
270 		instance->max_scsi_cmds = instance->max_fw_cmds -
271 				(MEGASAS_FUSION_INTERNAL_CMDS +
272 				MEGASAS_FUSION_IOCTL_CMDS);
273 		instance->cur_can_queue = instance->max_scsi_cmds;
274 	}
275 }
276 /**
277  * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
278  * @instance:		Adapter soft state
279  */
280 void
281 megasas_free_cmds_fusion(struct megasas_instance *instance)
282 {
283 	int i;
284 	struct fusion_context *fusion = instance->ctrl_context;
285 	struct megasas_cmd_fusion *cmd;
286 
287 	/* SG, Sense */
288 	for (i = 0; i < instance->max_fw_cmds; i++) {
289 		cmd = fusion->cmd_list[i];
290 		if (cmd) {
291 			if (cmd->sg_frame)
292 				pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
293 				      cmd->sg_frame_phys_addr);
294 			if (cmd->sense)
295 				pci_pool_free(fusion->sense_dma_pool, cmd->sense,
296 				      cmd->sense_phys_addr);
297 		}
298 	}
299 
300 	if (fusion->sg_dma_pool) {
301 		pci_pool_destroy(fusion->sg_dma_pool);
302 		fusion->sg_dma_pool = NULL;
303 	}
304 	if (fusion->sense_dma_pool) {
305 		pci_pool_destroy(fusion->sense_dma_pool);
306 		fusion->sense_dma_pool = NULL;
307 	}
308 
309 
310 	/* Reply Frame, Desc*/
311 	if (instance->is_rdpq)
312 		megasas_free_rdpq_fusion(instance);
313 	else
314 		megasas_free_reply_fusion(instance);
315 
316 	/* Request Frame, Desc*/
317 	if (fusion->req_frames_desc)
318 		dma_free_coherent(&instance->pdev->dev,
319 			fusion->request_alloc_sz, fusion->req_frames_desc,
320 			fusion->req_frames_desc_phys);
321 	if (fusion->io_request_frames)
322 		pci_pool_free(fusion->io_request_frames_pool,
323 			fusion->io_request_frames,
324 			fusion->io_request_frames_phys);
325 	if (fusion->io_request_frames_pool) {
326 		pci_pool_destroy(fusion->io_request_frames_pool);
327 		fusion->io_request_frames_pool = NULL;
328 	}
329 
330 
331 	/* cmd_list */
332 	for (i = 0; i < instance->max_fw_cmds; i++)
333 		kfree(fusion->cmd_list[i]);
334 
335 	kfree(fusion->cmd_list);
336 }
337 
/**
 * megasas_create_sg_sense_fusion -	Creates DMA pools for SG frames and sense buffers
 * @instance:			Adapter soft state
 *
 */
343 static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
344 {
345 	int i;
346 	u32 max_cmd;
347 	struct fusion_context *fusion;
348 	struct megasas_cmd_fusion *cmd;
349 
350 	fusion = instance->ctrl_context;
351 	max_cmd = instance->max_fw_cmds;
352 
353 
354 	fusion->sg_dma_pool =
355 			pci_pool_create("mr_sg", instance->pdev,
356 				instance->max_chain_frame_sz, 4, 0);
357 	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
358 	fusion->sense_dma_pool =
359 			pci_pool_create("mr_sense", instance->pdev,
360 				SCSI_SENSE_BUFFERSIZE, 64, 0);
361 
362 	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
363 		dev_err(&instance->pdev->dev,
364 			"Failed from %s %d\n",  __func__, __LINE__);
365 		return -ENOMEM;
366 	}
367 
368 	/*
369 	 * Allocate and attach a frame to each of the commands in cmd_list
370 	 */
371 	for (i = 0; i < max_cmd; i++) {
372 		cmd = fusion->cmd_list[i];
373 		cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
374 					GFP_KERNEL, &cmd->sg_frame_phys_addr);
375 
376 		cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
377 					GFP_KERNEL, &cmd->sense_phys_addr);
378 		if (!cmd->sg_frame || !cmd->sense) {
379 			dev_err(&instance->pdev->dev,
380 				"Failed from %s %d\n",  __func__, __LINE__);
381 			return -ENOMEM;
382 		}
383 	}
384 	return 0;
385 }
386 
387 int
388 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
389 {
390 	u32 max_cmd, i;
391 	struct fusion_context *fusion;
392 
393 	fusion = instance->ctrl_context;
394 
395 	max_cmd = instance->max_fw_cmds;
396 
397 	/*
398 	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
399 	 * Allocate the dynamic array first and then allocate individual
400 	 * commands.
401 	 */
402 	fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
403 						GFP_KERNEL);
404 	if (!fusion->cmd_list) {
405 		dev_err(&instance->pdev->dev,
406 			"Failed from %s %d\n",  __func__, __LINE__);
407 		return -ENOMEM;
408 	}
409 
410 	for (i = 0; i < max_cmd; i++) {
411 		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
412 					      GFP_KERNEL);
413 		if (!fusion->cmd_list[i]) {
414 			dev_err(&instance->pdev->dev,
415 				"Failed from %s %d\n",  __func__, __LINE__);
416 			return -ENOMEM;
417 		}
418 	}
419 	return 0;
420 }
421 int
422 megasas_alloc_request_fusion(struct megasas_instance *instance)
423 {
424 	struct fusion_context *fusion;
425 
426 	fusion = instance->ctrl_context;
427 
428 	fusion->req_frames_desc =
429 		dma_alloc_coherent(&instance->pdev->dev,
430 			fusion->request_alloc_sz,
431 			&fusion->req_frames_desc_phys, GFP_KERNEL);
432 	if (!fusion->req_frames_desc) {
433 		dev_err(&instance->pdev->dev,
434 			"Failed from %s %d\n",  __func__, __LINE__);
435 		return -ENOMEM;
436 	}
437 
438 	fusion->io_request_frames_pool =
439 			pci_pool_create("mr_ioreq", instance->pdev,
440 				fusion->io_frames_alloc_sz, 16, 0);
441 
442 	if (!fusion->io_request_frames_pool) {
443 		dev_err(&instance->pdev->dev,
444 			"Failed from %s %d\n",  __func__, __LINE__);
445 		return -ENOMEM;
446 	}
447 
448 	fusion->io_request_frames =
449 			pci_pool_alloc(fusion->io_request_frames_pool,
450 				GFP_KERNEL, &fusion->io_request_frames_phys);
451 	if (!fusion->io_request_frames) {
452 		dev_err(&instance->pdev->dev,
453 			"Failed from %s %d\n",  __func__, __LINE__);
454 		return -ENOMEM;
455 	}
456 	return 0;
457 }
458 
459 int
460 megasas_alloc_reply_fusion(struct megasas_instance *instance)
461 {
462 	int i, count;
463 	struct fusion_context *fusion;
464 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
465 	fusion = instance->ctrl_context;
466 
467 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
468 	fusion->reply_frames_desc_pool =
469 			pci_pool_create("mr_reply", instance->pdev,
470 				fusion->reply_alloc_sz * count, 16, 0);
471 
472 	if (!fusion->reply_frames_desc_pool) {
473 		dev_err(&instance->pdev->dev,
474 			"Failed from %s %d\n",  __func__, __LINE__);
475 		return -ENOMEM;
476 	}
477 
478 	fusion->reply_frames_desc[0] =
479 		pci_pool_alloc(fusion->reply_frames_desc_pool,
480 			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
481 	if (!fusion->reply_frames_desc[0]) {
482 		dev_err(&instance->pdev->dev,
483 			"Failed from %s %d\n",  __func__, __LINE__);
484 		return -ENOMEM;
485 	}
486 	reply_desc = fusion->reply_frames_desc[0];
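	/* Mark every reply descriptor as unused (all 1s) so that stale
	 * entries are never mistaken for valid completions in the ISR.
	 */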
487 	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
488 		reply_desc->Words = cpu_to_le64(ULLONG_MAX);
489 
	/* This is not RDPQ mode, but the driver still populates the
	 * reply_frames_desc array so that the same MSI-x index can be used
	 * in the ISR path.
	 */
493 	for (i = 0; i < (count - 1); i++)
494 		fusion->reply_frames_desc[i + 1] =
495 			fusion->reply_frames_desc[i] +
496 			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
497 
498 	return 0;
499 }
500 
501 int
502 megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
503 {
504 	int i, j, count;
505 	struct fusion_context *fusion;
506 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
507 
508 	fusion = instance->ctrl_context;
509 
510 	fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
511 				sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
512 				&fusion->rdpq_phys);
513 	if (!fusion->rdpq_virt) {
514 		dev_err(&instance->pdev->dev,
515 			"Failed from %s %d\n",  __func__, __LINE__);
516 		return -ENOMEM;
517 	}
518 
519 	memset(fusion->rdpq_virt, 0,
520 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
521 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
522 	fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
523 							 instance->pdev, fusion->reply_alloc_sz, 16, 0);
524 
525 	if (!fusion->reply_frames_desc_pool) {
526 		dev_err(&instance->pdev->dev,
527 			"Failed from %s %d\n",  __func__, __LINE__);
528 		return -ENOMEM;
529 	}
530 
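	/*
	 * In RDPQ mode each MSI-x vector gets its own reply queue; the base
	 * address of every queue is passed to the firmware through the RDPQ
	 * array entries filled in below.
	 */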
531 	for (i = 0; i < count; i++) {
532 		fusion->reply_frames_desc[i] =
533 				pci_pool_alloc(fusion->reply_frames_desc_pool,
534 					GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
535 		if (!fusion->reply_frames_desc[i]) {
536 			dev_err(&instance->pdev->dev,
537 				"Failed from %s %d\n",  __func__, __LINE__);
538 			return -ENOMEM;
539 		}
540 
		fusion->rdpq_virt[i].RDPQBaseAddress =
			cpu_to_le64(fusion->reply_frames_desc_phys[i]);
543 
544 		reply_desc = fusion->reply_frames_desc[i];
545 		for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
546 			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
547 	}
548 	return 0;
549 }
550 
551 static void
megasas_free_rdpq_fusion(struct megasas_instance *instance)
{
554 	int i;
555 	struct fusion_context *fusion;
556 
557 	fusion = instance->ctrl_context;
558 
559 	for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
560 		if (fusion->reply_frames_desc[i])
561 			pci_pool_free(fusion->reply_frames_desc_pool,
562 				fusion->reply_frames_desc[i],
563 				fusion->reply_frames_desc_phys[i]);
564 	}
565 
566 	if (fusion->reply_frames_desc_pool)
567 		pci_pool_destroy(fusion->reply_frames_desc_pool);
568 
569 	if (fusion->rdpq_virt)
570 		pci_free_consistent(instance->pdev,
571 			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
572 			fusion->rdpq_virt, fusion->rdpq_phys);
573 }
574 
575 static void
megasas_free_reply_fusion(struct megasas_instance *instance)
{
578 	struct fusion_context *fusion;
579 
580 	fusion = instance->ctrl_context;
581 
582 	if (fusion->reply_frames_desc[0])
583 		pci_pool_free(fusion->reply_frames_desc_pool,
584 			fusion->reply_frames_desc[0],
585 			fusion->reply_frames_desc_phys[0]);
586 
587 	if (fusion->reply_frames_desc_pool)
588 		pci_pool_destroy(fusion->reply_frames_desc_pool);
589 
590 }
591 
592 
/**
 * megasas_alloc_cmds_fusion -	Allocates the command packets
 * @instance:		Adapter soft state
 *
 *
 * Each frame has a 32-bit field called context. This context is used to get
 * back the megasas_cmd_fusion from the frame when a frame gets completed.
 * In this driver, the 32 bit values are the indices into the cmd_list array.
 * This array is used only to look up the megasas_cmd_fusion given the context.
 * The free commands themselves are maintained in a linked list called cmd_pool.
 *
 * cmds are formed in the io_request and sg_frame members of the
 * megasas_cmd_fusion. The context field is used to get a request descriptor
 * and is used as the SMID of the cmd.
 * SMID value range is from 1 to max_fw_cmds.
 */
609 int
610 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
611 {
612 	int i;
613 	struct fusion_context *fusion;
614 	struct megasas_cmd_fusion *cmd;
615 	u32 offset;
616 	dma_addr_t io_req_base_phys;
617 	u8 *io_req_base;
618 
619 
620 	fusion = instance->ctrl_context;
621 
622 	if (megasas_alloc_cmdlist_fusion(instance))
623 		goto fail_exit;
624 
625 	if (megasas_alloc_request_fusion(instance))
626 		goto fail_exit;
627 
628 	if (instance->is_rdpq) {
629 		if (megasas_alloc_rdpq_fusion(instance))
630 			goto fail_exit;
631 	} else
632 		if (megasas_alloc_reply_fusion(instance))
633 			goto fail_exit;
634 
635 
636 	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
637 	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
638 	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
639 
640 	/*
641 	 * Add all the commands to command pool (fusion->cmd_pool)
642 	 */
643 
644 	/* SMID 0 is reserved. Set SMID/index from 1 */
645 	for (i = 0; i < instance->max_fw_cmds; i++) {
646 		cmd = fusion->cmd_list[i];
647 		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
648 		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
649 		cmd->index = i + 1;
650 		cmd->scmd = NULL;
651 		cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
652 				(i - instance->max_scsi_cmds) :
653 				(u32)ULONG_MAX; /* Set to Invalid */
654 		cmd->instance = instance;
655 		cmd->io_request =
656 			(struct MPI2_RAID_SCSI_IO_REQUEST *)
657 		  (io_req_base + offset);
658 		memset(cmd->io_request, 0,
659 		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
660 		cmd->io_request_phys_addr = io_req_base_phys + offset;
661 	}
662 
663 	if (megasas_create_sg_sense_fusion(instance))
664 		goto fail_exit;
665 
666 	return 0;
667 
668 fail_exit:
669 	megasas_free_cmds_fusion(instance);
670 	return -ENOMEM;
671 }
672 
/**
 * wait_and_poll -	Issues a polling command
 * @instance:			Adapter soft state
 * @cmd:			Command packet to be issued
 * @seconds:			Maximum time to poll, in seconds
 *
 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
 */
680 int
681 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
682 	int seconds)
683 {
684 	int i;
685 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
686 	struct fusion_context *fusion;
687 
688 	u32 msecs = seconds * 1000;
689 
690 	fusion = instance->ctrl_context;
691 	/*
692 	 * Wait for cmd_status to change
693 	 */
694 	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
695 		rmb();
696 		msleep(20);
697 	}
698 
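	/* A cmd_status still at 0xFF (MFI_STAT_INVALID_STATUS) after the full
	 * poll interval means the firmware never updated the frame.
	 */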
699 	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
700 		return DCMD_TIMEOUT;
701 	else if (frame_hdr->cmd_status == MFI_STAT_OK)
702 		return DCMD_SUCCESS;
703 	else
704 		return DCMD_FAILED;
705 }
706 
707 /**
708  * megasas_ioc_init_fusion -	Initializes the FW
709  * @instance:		Adapter soft state
710  *
711  * Issues the IOC Init cmd
712  */
713 int
714 megasas_ioc_init_fusion(struct megasas_instance *instance)
715 {
716 	struct megasas_init_frame *init_frame;
717 	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
718 	dma_addr_t	ioc_init_handle;
719 	struct megasas_cmd *cmd;
720 	u8 ret, cur_rdpq_mode;
721 	struct fusion_context *fusion;
722 	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
723 	int i;
724 	struct megasas_header *frame_hdr;
725 	const char *sys_info;
726 	MFI_CAPABILITIES *drv_ops;
727 	u32 scratch_pad_2;
728 
729 	fusion = instance->ctrl_context;
730 
731 	cmd = megasas_get_cmd(instance);
732 
733 	if (!cmd) {
734 		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
735 		ret = 1;
736 		goto fail_get_cmd;
737 	}
738 
739 	scratch_pad_2 = readl
740 		(&instance->reg_set->outbound_scratch_pad_2);
741 
742 	cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
743 
744 	if (instance->is_rdpq && !cur_rdpq_mode) {
745 		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
746 			" from RDPQ mode to non RDPQ mode\n");
747 		ret = 1;
748 		goto fail_fw_init;
749 	}
750 
751 	instance->fw_sync_cache_support = (scratch_pad_2 &
752 		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
753 	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
754 		 instance->fw_sync_cache_support ? "Yes" : "No");
755 
756 	IOCInitMessage =
757 	  dma_alloc_coherent(&instance->pdev->dev,
758 			     sizeof(struct MPI2_IOC_INIT_REQUEST),
759 			     &ioc_init_handle, GFP_KERNEL);
760 
761 	if (!IOCInitMessage) {
762 		dev_err(&instance->pdev->dev, "Could not allocate memory for "
763 		       "IOCInitMessage\n");
764 		ret = 1;
765 		goto fail_fw_init;
766 	}
767 
768 	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
769 
770 	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
771 	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
772 	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
773 	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
774 	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
775 
776 	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
777 	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
778 			cpu_to_le64(fusion->rdpq_phys) :
779 			cpu_to_le64(fusion->reply_frames_desc_phys[0]);
780 	IOCInitMessage->MsgFlags = instance->is_rdpq ?
781 			MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
782 	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
783 	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
784 	init_frame = (struct megasas_init_frame *)cmd->frame;
785 	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
786 
787 	frame_hdr = &cmd->frame->hdr;
788 	frame_hdr->cmd_status = 0xFF;
789 	frame_hdr->flags = cpu_to_le16(
790 		le16_to_cpu(frame_hdr->flags) |
791 		MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
792 
793 	init_frame->cmd	= MFI_CMD_INIT;
794 	init_frame->cmd_status = 0xFF;
795 
796 	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
797 
798 	/* driver support Extended MSIX */
799 	if (fusion->adapter_type == INVADER_SERIES)
800 		drv_ops->mfi_capabilities.support_additional_msix = 1;
801 	/* driver supports HA / Remote LUN over Fast Path interface */
802 	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
803 
804 	drv_ops->mfi_capabilities.support_max_255lds = 1;
805 	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
806 	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
807 
808 	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
809 		drv_ops->mfi_capabilities.support_ext_io_size = 1;
810 
811 	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
812 	if (!dual_qdepth_disable)
813 		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
814 
815 	drv_ops->mfi_capabilities.support_qd_throttling = 1;
816 	/* Convert capability to LE32 */
817 	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
818 
819 	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
820 	if (instance->system_info_buf && sys_info) {
821 		memcpy(instance->system_info_buf->systemId, sys_info,
822 			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
823 		instance->system_info_buf->systemIdLength =
824 			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
825 		init_frame->system_info_lo = instance->system_info_h;
826 		init_frame->system_info_hi = 0;
827 	}
828 
829 	init_frame->queue_info_new_phys_addr_hi =
830 		cpu_to_le32(upper_32_bits(ioc_init_handle));
831 	init_frame->queue_info_new_phys_addr_lo =
832 		cpu_to_le32(lower_32_bits(ioc_init_handle));
833 	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
834 
835 	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
836 	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
837 	req_desc.MFAIo.RequestFlags =
838 		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
839 		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
840 
841 	/*
842 	 * disable the intr before firing the init frame
843 	 */
844 	instance->instancet->disable_intr(instance);
845 
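	/* Wait up to ~10 seconds, in 20 ms steps, for bit 0 of the doorbell
	 * register to clear before posting the IOC INIT frame.
	 */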
846 	for (i = 0; i < (10 * 1000); i += 20) {
847 		if (readl(&instance->reg_set->doorbell) & 1)
848 			msleep(20);
849 		else
850 			break;
851 	}
852 
853 	megasas_fire_cmd_fusion(instance, &req_desc);
854 
855 	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
856 
857 	frame_hdr = &cmd->frame->hdr;
858 	if (frame_hdr->cmd_status != 0) {
859 		ret = 1;
860 		goto fail_fw_init;
861 	}
862 	dev_info(&instance->pdev->dev, "Init cmd success\n");
863 
864 	ret = 0;
865 
866 fail_fw_init:
867 	megasas_return_cmd(instance, cmd);
868 	if (IOCInitMessage)
869 		dma_free_coherent(&instance->pdev->dev,
870 				  sizeof(struct MPI2_IOC_INIT_REQUEST),
871 				  IOCInitMessage, ioc_init_handle);
872 fail_get_cmd:
873 	return ret;
874 }
875 
/**
 * megasas_sync_pd_seq_num -	JBOD SEQ MAP
 * @instance:		Adapter soft state
 * @pend:		set to 1 for a pended JBOD map command.
 *
 * Issue the JBOD map DCMD to the firmware. For a pended command, issue it
 * and return without waiting. For the first (non-pended) JBOD map request,
 * issue the command and wait for its completion.
 */
885 int
megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend)
{
887 	int ret = 0;
888 	u32 pd_seq_map_sz;
889 	struct megasas_cmd *cmd;
890 	struct megasas_dcmd_frame *dcmd;
891 	struct fusion_context *fusion = instance->ctrl_context;
892 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
893 	dma_addr_t pd_seq_h;
894 
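	/* Two copies of the sequence number buffer are maintained; the low
	 * bit of pd_seq_map_id selects which copy services this request.
	 */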
895 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
896 	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
897 	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
898 			(sizeof(struct MR_PD_CFG_SEQ) *
899 			(MAX_PHYSICAL_DEVICES - 1));
900 
901 	cmd = megasas_get_cmd(instance);
902 	if (!cmd) {
903 		dev_err(&instance->pdev->dev,
904 			"Could not get mfi cmd. Fail from %s %d\n",
905 			__func__, __LINE__);
906 		return -ENOMEM;
907 	}
908 
909 	dcmd = &cmd->frame->dcmd;
910 
911 	memset(pd_sync, 0, pd_seq_map_sz);
912 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
913 	dcmd->cmd = MFI_CMD_DCMD;
914 	dcmd->cmd_status = 0xFF;
915 	dcmd->sge_count = 1;
916 	dcmd->timeout = 0;
917 	dcmd->pad_0 = 0;
918 	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
919 	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
920 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
921 	dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
922 
923 	if (pend) {
924 		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
925 		dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
926 		instance->jbod_seq_cmd = cmd;
927 		instance->instancet->issue_dcmd(instance, cmd);
928 		return 0;
929 	}
930 
931 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
932 
	/* The code below is only for a non-pended DCMD */
934 	if (instance->ctrl_context && !instance->mask_interrupts)
935 		ret = megasas_issue_blocked_cmd(instance, cmd,
936 			MFI_IO_TIMEOUT_SECS);
937 	else
938 		ret = megasas_issue_polled(instance, cmd);
939 
940 	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
941 		dev_warn(&instance->pdev->dev,
942 			"driver supports max %d JBOD, but FW reports %d\n",
943 			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
944 		ret = -EINVAL;
945 	}
946 
947 	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
948 		megaraid_sas_kill_hba(instance);
949 
950 	if (ret == DCMD_SUCCESS)
951 		instance->pd_seq_map_id++;
952 
953 	megasas_return_cmd(instance, cmd);
954 	return ret;
955 }
956 
/*
 * megasas_get_ld_map_info -	Returns FW's ld_map structure
 * @instance:				Adapter soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller LD
 * map structure.  This information is mainly used to decide whether
 * fast path IO can be used for a given LD.
 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
 * dcmd.mbox.b[0]	- number of LDs being sync'd
 * dcmd.mbox.b[1]	- 0 - complete command immediately.
 *			- 1 - pend till config change
 * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
 *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
 *				uses extended struct MR_FW_RAID_MAP_EXT
 */
972 static int
973 megasas_get_ld_map_info(struct megasas_instance *instance)
974 {
975 	int ret = 0;
976 	struct megasas_cmd *cmd;
977 	struct megasas_dcmd_frame *dcmd;
978 	void *ci;
979 	dma_addr_t ci_h = 0;
980 	u32 size_map_info;
981 	struct fusion_context *fusion;
982 
983 	cmd = megasas_get_cmd(instance);
984 
985 	if (!cmd) {
986 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
987 		return -ENOMEM;
988 	}
989 
990 	fusion = instance->ctrl_context;
991 
992 	if (!fusion) {
993 		megasas_return_cmd(instance, cmd);
994 		return -ENXIO;
995 	}
996 
997 	dcmd = &cmd->frame->dcmd;
998 
999 	size_map_info = fusion->current_map_sz;
1000 
1001 	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
1002 	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
1003 
1004 	if (!ci) {
1005 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
1006 		megasas_return_cmd(instance, cmd);
1007 		return -ENOMEM;
1008 	}
1009 
1010 	memset(ci, 0, fusion->max_map_sz);
1011 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1012 #if VD_EXT_DEBUG
1013 	dev_dbg(&instance->pdev->dev,
1014 		"%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
1015 		__func__, cpu_to_le32(size_map_info));
1016 #endif
1017 	dcmd->cmd = MFI_CMD_DCMD;
1018 	dcmd->cmd_status = 0xFF;
1019 	dcmd->sge_count = 1;
1020 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
1021 	dcmd->timeout = 0;
1022 	dcmd->pad_0 = 0;
1023 	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1024 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1025 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1026 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1027 
1028 	if (instance->ctrl_context && !instance->mask_interrupts)
1029 		ret = megasas_issue_blocked_cmd(instance, cmd,
1030 			MFI_IO_TIMEOUT_SECS);
1031 	else
1032 		ret = megasas_issue_polled(instance, cmd);
1033 
1034 	if (ret == DCMD_TIMEOUT && instance->ctrl_context)
1035 		megaraid_sas_kill_hba(instance);
1036 
1037 	megasas_return_cmd(instance, cmd);
1038 
1039 	return ret;
1040 }
1041 
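/**
 * megasas_get_map_info -	Fetch and validate the FW RAID map
 * @instance:		Adapter soft state
 *
 * Returns 0 and enables fast path IO if a valid map was obtained and
 * validated, 1 otherwise.
 */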
1042 u8
1043 megasas_get_map_info(struct megasas_instance *instance)
1044 {
1045 	struct fusion_context *fusion = instance->ctrl_context;
1046 
1047 	fusion->fast_path_io = 0;
1048 	if (!megasas_get_ld_map_info(instance)) {
1049 		if (MR_ValidateMapInfo(instance)) {
1050 			fusion->fast_path_io = 1;
1051 			return 0;
1052 		}
1053 	}
1054 	return 1;
1055 }
1056 
/*
 * megasas_sync_map_info -	Sends the LD target sync map to the FW
 * @instance:				Adapter soft state
 *
 * Issues a pended internal command (DCMD) that hands the per-LD target id
 * and sequence number list to the firmware; the firmware completes this
 * command when the RAID map changes, which triggers a map update in the
 * driver.
 */
1065 int
1066 megasas_sync_map_info(struct megasas_instance *instance)
1067 {
1068 	int ret = 0, i;
1069 	struct megasas_cmd *cmd;
1070 	struct megasas_dcmd_frame *dcmd;
1071 	u32 size_sync_info, num_lds;
1072 	struct fusion_context *fusion;
1073 	struct MR_LD_TARGET_SYNC *ci = NULL;
1074 	struct MR_DRV_RAID_MAP_ALL *map;
1075 	struct MR_LD_RAID  *raid;
1076 	struct MR_LD_TARGET_SYNC *ld_sync;
1077 	dma_addr_t ci_h = 0;
1078 	u32 size_map_info;
1079 
1080 	cmd = megasas_get_cmd(instance);
1081 
1082 	if (!cmd) {
1083 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
1084 		return -ENOMEM;
1085 	}
1086 
1087 	fusion = instance->ctrl_context;
1088 
1089 	if (!fusion) {
1090 		megasas_return_cmd(instance, cmd);
1091 		return 1;
1092 	}
1093 
1094 	map = fusion->ld_drv_map[instance->map_id & 1];
1095 
1096 	num_lds = le16_to_cpu(map->raidMap.ldCount);
1097 
1098 	dcmd = &cmd->frame->dcmd;
1099 
1100 	size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
1101 
1102 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1103 
1104 	ci = (struct MR_LD_TARGET_SYNC *)
1105 	  fusion->ld_map[(instance->map_id - 1) & 1];
1106 	memset(ci, 0, fusion->max_map_sz);
1107 
1108 	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
1109 
1110 	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
1111 
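	/* Build one target id/sequence number pair per configured LD from the
	 * currently active driver RAID map.
	 */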
1112 	for (i = 0; i < num_lds; i++, ld_sync++) {
1113 		raid = MR_LdRaidGet(i, map);
1114 		ld_sync->targetId = MR_GetLDTgtId(i, map);
1115 		ld_sync->seqNum = raid->seqNum;
1116 	}
1117 
1118 	size_map_info = fusion->current_map_sz;
1119 
1120 	dcmd->cmd = MFI_CMD_DCMD;
1121 	dcmd->cmd_status = 0xFF;
1122 	dcmd->sge_count = 1;
1123 	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
1124 	dcmd->timeout = 0;
1125 	dcmd->pad_0 = 0;
1126 	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1127 	dcmd->mbox.b[0] = num_lds;
1128 	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1129 	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1130 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1131 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1132 
1133 	instance->map_update_cmd = cmd;
1134 
1135 	instance->instancet->issue_dcmd(instance, cmd);
1136 
1137 	return ret;
1138 }
1139 
/*
 * megasas_display_intel_branding - Display branding string
 * @instance: per adapter object
 *
 * Return nothing.
 */
1146 static void
1147 megasas_display_intel_branding(struct megasas_instance *instance)
1148 {
1149 	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1150 		return;
1151 
1152 	switch (instance->pdev->device) {
1153 	case PCI_DEVICE_ID_LSI_INVADER:
1154 		switch (instance->pdev->subsystem_device) {
1155 		case MEGARAID_INTEL_RS3DC080_SSDID:
1156 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1157 				instance->host->host_no,
1158 				MEGARAID_INTEL_RS3DC080_BRANDING);
1159 			break;
1160 		case MEGARAID_INTEL_RS3DC040_SSDID:
1161 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1162 				instance->host->host_no,
1163 				MEGARAID_INTEL_RS3DC040_BRANDING);
1164 			break;
1165 		case MEGARAID_INTEL_RS3SC008_SSDID:
1166 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1167 				instance->host->host_no,
1168 				MEGARAID_INTEL_RS3SC008_BRANDING);
1169 			break;
1170 		case MEGARAID_INTEL_RS3MC044_SSDID:
1171 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1172 				instance->host->host_no,
1173 				MEGARAID_INTEL_RS3MC044_BRANDING);
1174 			break;
1175 		default:
1176 			break;
1177 		}
1178 		break;
1179 	case PCI_DEVICE_ID_LSI_FURY:
1180 		switch (instance->pdev->subsystem_device) {
1181 		case MEGARAID_INTEL_RS3WC080_SSDID:
1182 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1183 				instance->host->host_no,
1184 				MEGARAID_INTEL_RS3WC080_BRANDING);
1185 			break;
1186 		case MEGARAID_INTEL_RS3WC040_SSDID:
1187 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1188 				instance->host->host_no,
1189 				MEGARAID_INTEL_RS3WC040_BRANDING);
1190 			break;
1191 		default:
1192 			break;
1193 		}
1194 		break;
1195 	case PCI_DEVICE_ID_LSI_CUTLASS_52:
1196 	case PCI_DEVICE_ID_LSI_CUTLASS_53:
1197 		switch (instance->pdev->subsystem_device) {
1198 		case MEGARAID_INTEL_RMS3BC160_SSDID:
1199 			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1200 				instance->host->host_no,
1201 				MEGARAID_INTEL_RMS3BC160_BRANDING);
1202 			break;
1203 		default:
1204 			break;
1205 		}
1206 		break;
1207 	default:
1208 		break;
1209 	}
1210 }
1211 
1212 /**
1213  * megasas_init_adapter_fusion -	Initializes the FW
1214  * @instance:		Adapter soft state
1215  *
1216  * This is the main function for initializing firmware.
1217  */
1218 u32
1219 megasas_init_adapter_fusion(struct megasas_instance *instance)
1220 {
1221 	struct megasas_register_set __iomem *reg_set;
1222 	struct fusion_context *fusion;
1223 	u32 max_cmd, scratch_pad_2;
1224 	int i = 0, count;
1225 
1226 	fusion = instance->ctrl_context;
1227 
1228 	reg_set = instance->reg_set;
1229 
1230 	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1231 
1232 	/*
1233 	 * Reduce the max supported cmds by 1. This is to ensure that the
1234 	 * reply_q_sz (1 more than the max cmd that driver may send)
1235 	 * does not exceed max cmds that the FW can support
1236 	 */
1237 	instance->max_fw_cmds = instance->max_fw_cmds-1;
1238 
1239 	/*
1240 	 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1241 	 */
1242 	instance->max_mfi_cmds =
1243 		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1244 
1245 	max_cmd = instance->max_fw_cmds;
1246 
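	/* Reply queue depth: (max_cmd + 1) rounded up to a multiple of 16,
	 * then doubled.
	 */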
1247 	fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1248 
1249 	fusion->request_alloc_sz =
1250 		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
1251 	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1252 		*(fusion->reply_q_depth);
1253 	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1254 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
1255 		 (max_cmd + 1)); /* Extra 1 for SMID 0 */
1256 
1257 	scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
	/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, the
	 * firmware supports an extended IO chain frame which is 4 times
	 * larger than the legacy firmware's.
	 * Legacy firmware - frame size is (8 * 128) = 1K
	 * 1M IO firmware  - frame size is (8 * 128 * 4) = 4K
	 */
1264 	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1265 		instance->max_chain_frame_sz =
1266 			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1267 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1268 	else
1269 		instance->max_chain_frame_sz =
1270 			((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1271 			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1272 
1273 	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1274 		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1275 			instance->max_chain_frame_sz,
1276 			MEGASAS_CHAIN_FRAME_SZ_MIN);
1277 		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1278 	}
1279 
1280 	fusion->max_sge_in_main_msg =
1281 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1282 			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1283 
1284 	fusion->max_sge_in_chain =
1285 		instance->max_chain_frame_sz
1286 			/ sizeof(union MPI2_SGE_IO_UNION);
1287 
1288 	instance->max_num_sge =
1289 		rounddown_pow_of_two(fusion->max_sge_in_main_msg
1290 			+ fusion->max_sge_in_chain - 2);
1291 
1292 	/* Used for pass thru MFI frame (DCMD) */
1293 	fusion->chain_offset_mfi_pthru =
1294 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1295 
1296 	fusion->chain_offset_io_request =
1297 		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1298 		 sizeof(union MPI2_SGE_IO_UNION))/16;
1299 
1300 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1301 	for (i = 0 ; i < count; i++)
1302 		fusion->last_reply_idx[i] = 0;
1303 
1304 	/*
1305 	 * For fusion adapters, 3 commands for IOCTL and 5 commands
1306 	 * for driver's internal DCMDs.
1307 	 */
1308 	instance->max_scsi_cmds = instance->max_fw_cmds -
1309 				(MEGASAS_FUSION_INTERNAL_CMDS +
1310 				MEGASAS_FUSION_IOCTL_CMDS);
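	/* Limit concurrently outstanding IOCTLs to the MFI frames reserved
	 * for them above.
	 */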
1311 	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1312 
1313 	/*
1314 	 * Allocate memory for descriptors
1315 	 * Create a pool of commands
1316 	 */
1317 	if (megasas_alloc_cmds(instance))
1318 		goto fail_alloc_mfi_cmds;
1319 	if (megasas_alloc_cmds_fusion(instance))
1320 		goto fail_alloc_cmds;
1321 
1322 	if (megasas_ioc_init_fusion(instance))
1323 		goto fail_ioc_init;
1324 
1325 	megasas_display_intel_branding(instance);
1326 	if (megasas_get_ctrl_info(instance)) {
1327 		dev_err(&instance->pdev->dev,
1328 			"Could not get controller info. Fail from %s %d\n",
1329 			__func__, __LINE__);
1330 		goto fail_ioc_init;
1331 	}
1332 
1333 	instance->flag_ieee = 1;
1334 	fusion->fast_path_io = 0;
1335 
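	/* Keep two copies each of the driver-local RAID map (plain pages) and
	 * the firmware RAID map (DMA coherent memory) so map updates can be
	 * double buffered via map_id.
	 */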
1336 	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1337 	for (i = 0; i < 2; i++) {
1338 		fusion->ld_map[i] = NULL;
1339 		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
1340 			fusion->drv_map_pages);
1341 		if (!fusion->ld_drv_map[i]) {
1342 			dev_err(&instance->pdev->dev, "Could not allocate "
1343 				"memory for local map info for %d pages\n",
1344 				fusion->drv_map_pages);
1345 			if (i == 1)
1346 				free_pages((ulong)fusion->ld_drv_map[0],
1347 					fusion->drv_map_pages);
1348 			goto fail_ioc_init;
1349 		}
1350 		memset(fusion->ld_drv_map[i], 0,
1351 			((1 << PAGE_SHIFT) << fusion->drv_map_pages));
1352 	}
1353 
1354 	for (i = 0; i < 2; i++) {
1355 		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1356 						       fusion->max_map_sz,
1357 						       &fusion->ld_map_phys[i],
1358 						       GFP_KERNEL);
1359 		if (!fusion->ld_map[i]) {
1360 			dev_err(&instance->pdev->dev, "Could not allocate memory "
1361 			       "for map info\n");
1362 			goto fail_map_info;
1363 		}
1364 	}
1365 
1366 	if (!megasas_get_map_info(instance))
1367 		megasas_sync_map_info(instance);
1368 
1369 	return 0;
1370 
1371 fail_map_info:
1372 	if (i == 1)
1373 		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
1374 				  fusion->ld_map[0], fusion->ld_map_phys[0]);
1375 fail_ioc_init:
1376 	megasas_free_cmds_fusion(instance);
1377 fail_alloc_cmds:
1378 	megasas_free_cmds(instance);
1379 fail_alloc_mfi_cmds:
1380 	return 1;
1381 }
1382 
1383 /**
1384  * map_cmd_status -	Maps FW cmd status to OS cmd status
1385  * @cmd :		Pointer to cmd
1386  * @status :		status of cmd returned by FW
1387  * @ext_status :	ext status of cmd returned by FW
1388  */
1389 
1390 void
1391 map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1392 {
1393 
1394 	switch (status) {
1395 
1396 	case MFI_STAT_OK:
1397 		cmd->scmd->result = DID_OK << 16;
1398 		break;
1399 
1400 	case MFI_STAT_SCSI_IO_FAILED:
1401 	case MFI_STAT_LD_INIT_IN_PROGRESS:
1402 		cmd->scmd->result = (DID_ERROR << 16) | ext_status;
1403 		break;
1404 
1405 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1406 
1407 		cmd->scmd->result = (DID_OK << 16) | ext_status;
1408 		if (ext_status == SAM_STAT_CHECK_CONDITION) {
1409 			memset(cmd->scmd->sense_buffer, 0,
1410 			       SCSI_SENSE_BUFFERSIZE);
1411 			memcpy(cmd->scmd->sense_buffer, cmd->sense,
1412 			       SCSI_SENSE_BUFFERSIZE);
1413 			cmd->scmd->result |= DRIVER_SENSE << 24;
1414 		}
1415 		break;
1416 
1417 	case MFI_STAT_LD_OFFLINE:
1418 	case MFI_STAT_DEVICE_NOT_FOUND:
1419 		cmd->scmd->result = DID_BAD_TARGET << 16;
1420 		break;
1421 	case MFI_STAT_CONFIG_SEQ_MISMATCH:
1422 		cmd->scmd->result = DID_IMM_RETRY << 16;
1423 		break;
1424 	default:
1425 		dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
1426 		cmd->scmd->result = DID_ERROR << 16;
1427 		break;
1428 	}
1429 }
1430 
/**
 * megasas_make_sgl_fusion -	Prepares IEEE SGEs (scatter gather list)
 * @instance:		Adapter soft state
 * @scp:		SCSI command from the mid-layer
 * @sgl_ptr:		SGL to be filled in
 * @cmd:		cmd we are working on
 *
 * If successful, this function returns the number of SG elements.
 */
1440 static int
1441 megasas_make_sgl_fusion(struct megasas_instance *instance,
1442 			struct scsi_cmnd *scp,
1443 			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1444 			struct megasas_cmd_fusion *cmd)
1445 {
1446 	int i, sg_processed, sge_count;
1447 	struct scatterlist *os_sgl;
1448 	struct fusion_context *fusion;
1449 
1450 	fusion = instance->ctrl_context;
1451 
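	/* Invader series: clear the Flags of the last SGE slot in the main
	 * message up front; that slot is reused as the chain element when the
	 * SG list spills into the chain frame.
	 */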
1452 	if (fusion->adapter_type == INVADER_SERIES) {
1453 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1454 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1455 		sgl_ptr_end->Flags = 0;
1456 	}
1457 
1458 	sge_count = scsi_dma_map(scp);
1459 
1460 	BUG_ON(sge_count < 0);
1461 
1462 	if (sge_count > instance->max_num_sge || !sge_count)
1463 		return sge_count;
1464 
1465 	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1466 		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1467 		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1468 		sgl_ptr->Flags = 0;
1469 		if (fusion->adapter_type == INVADER_SERIES)
1470 			if (i == sge_count - 1)
1471 				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1472 		sgl_ptr++;
1473 
1474 		sg_processed = i + 1;
1475 
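		/* Once the last main-message slot is reached and more SGEs
		 * remain, turn that slot into a chain element pointing at
		 * cmd->sg_frame and continue the list there.
		 */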
1476 		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
1477 		    (sge_count > fusion->max_sge_in_main_msg)) {
1478 
1479 			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1480 			if (fusion->adapter_type == INVADER_SERIES) {
1481 				if ((le16_to_cpu(cmd->io_request->IoFlags) &
1482 					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1483 					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1484 					cmd->io_request->ChainOffset =
1485 						fusion->
1486 						chain_offset_io_request;
1487 				else
1488 					cmd->io_request->ChainOffset = 0;
1489 			} else
1490 				cmd->io_request->ChainOffset =
1491 					fusion->chain_offset_io_request;
1492 
1493 			sg_chain = sgl_ptr;
1494 			/* Prepare chain element */
1495 			sg_chain->NextChainOffset = 0;
1496 			if (fusion->adapter_type == INVADER_SERIES)
1497 				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1498 			else
1499 				sg_chain->Flags =
1500 					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1501 					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1502 			sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1503 			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
1504 
1505 			sgl_ptr =
1506 			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1507 			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1508 		}
1509 	}
1510 
1511 	return sge_count;
1512 }
1513 
/**
 * megasas_set_pd_lba -	Sets PD LBA
 * @io_request:		IO request frame whose CDB and EEDP fields are built
 * @cdb_len:		cdb length
 * @io_info:		IO information (start block, number of blocks, LD target id)
 * @scp:		SCSI command from the mid-layer
 * @local_map_ptr:	Pointer to the driver's RAID map
 * @ref_tag:		Logical block reference tag used for T10 PI (DIF)
 *
 * Used to set the PD LBA in CDB for FP IOs
 */
1522 void
1523 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1524 		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1525 		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1526 {
1527 	struct MR_LD_RAID *raid;
1528 	u32 ld;
1529 	u64 start_blk = io_info->pdBlock;
1530 	u8 *cdb = io_request->CDB.CDB32;
1531 	u32 num_blocks = io_info->numBlocks;
1532 	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1533 
1534 	/* Check if T10 PI (DIF) is enabled for this LD */
1535 	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1536 	raid = MR_LdRaidGet(ld, local_map_ptr);
1537 	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1538 		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1539 		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1540 		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
1541 
1542 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1543 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1544 		else
1545 			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1546 		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
1547 
1548 		/* LBA */
1549 		cdb[12] = (u8)((start_blk >> 56) & 0xff);
1550 		cdb[13] = (u8)((start_blk >> 48) & 0xff);
1551 		cdb[14] = (u8)((start_blk >> 40) & 0xff);
1552 		cdb[15] = (u8)((start_blk >> 32) & 0xff);
1553 		cdb[16] = (u8)((start_blk >> 24) & 0xff);
1554 		cdb[17] = (u8)((start_blk >> 16) & 0xff);
1555 		cdb[18] = (u8)((start_blk >> 8) & 0xff);
1556 		cdb[19] = (u8)(start_blk & 0xff);
1557 
1558 		/* Logical block reference tag */
1559 		io_request->CDB.EEDP32.PrimaryReferenceTag =
1560 			cpu_to_be32(ref_tag);
1561 		io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1562 		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1563 
1564 		/* Transfer length */
1565 		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1566 		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1567 		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1568 		cdb[31] = (u8)(num_blocks & 0xff);
1569 
1570 		/* set SCSI IO EEDPFlags */
1571 		if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1572 			io_request->EEDPFlags = cpu_to_le16(
1573 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
1574 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1575 				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1576 				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1577 				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1578 		} else {
1579 			io_request->EEDPFlags = cpu_to_le16(
1580 				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1581 				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1582 		}
1583 		io_request->Control |= cpu_to_le32((0x4 << 26));
1584 		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1585 	} else {
1586 		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
1587 		if (((cdb_len == 12) || (cdb_len == 16)) &&
1588 		    (start_blk <= 0xffffffff)) {
1589 			if (cdb_len == 16) {
1590 				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1591 				flagvals = cdb[1];
1592 				groupnum = cdb[14];
1593 				control = cdb[15];
1594 			} else {
1595 				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1596 				flagvals = cdb[1];
1597 				groupnum = cdb[10];
1598 				control = cdb[11];
1599 			}
1600 
1601 			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1602 
1603 			cdb[0] = opcode;
1604 			cdb[1] = flagvals;
1605 			cdb[6] = groupnum;
1606 			cdb[9] = control;
1607 
1608 			/* Transfer length */
1609 			cdb[8] = (u8)(num_blocks & 0xff);
1610 			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1611 
1612 			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1613 			cdb_len = 10;
1614 		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1615 			/* Convert to 16 byte CDB for large LBA's */
1616 			switch (cdb_len) {
1617 			case 6:
1618 				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1619 				control = cdb[5];
1620 				break;
1621 			case 10:
1622 				opcode =
1623 					cdb[0] == READ_10 ? READ_16 : WRITE_16;
1624 				flagvals = cdb[1];
1625 				groupnum = cdb[6];
1626 				control = cdb[9];
1627 				break;
1628 			case 12:
1629 				opcode =
1630 					cdb[0] == READ_12 ? READ_16 : WRITE_16;
1631 				flagvals = cdb[1];
1632 				groupnum = cdb[10];
1633 				control = cdb[11];
1634 				break;
1635 			}
1636 
1637 			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1638 
1639 			cdb[0] = opcode;
1640 			cdb[1] = flagvals;
1641 			cdb[14] = groupnum;
1642 			cdb[15] = control;
1643 
1644 			/* Transfer length */
1645 			cdb[13] = (u8)(num_blocks & 0xff);
1646 			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
1647 			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
1648 			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
1649 
1650 			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
1651 			cdb_len = 16;
1652 		}
1653 
1654 		/* Normal case, just load LBA here */
1655 		switch (cdb_len) {
1656 		case 6:
1657 		{
1658 			u8 val = cdb[1] & 0xE0;
1659 			cdb[3] = (u8)(start_blk & 0xff);
1660 			cdb[2] = (u8)((start_blk >> 8) & 0xff);
1661 			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
1662 			break;
1663 		}
1664 		case 10:
1665 			cdb[5] = (u8)(start_blk & 0xff);
1666 			cdb[4] = (u8)((start_blk >> 8) & 0xff);
1667 			cdb[3] = (u8)((start_blk >> 16) & 0xff);
1668 			cdb[2] = (u8)((start_blk >> 24) & 0xff);
1669 			break;
1670 		case 12:
1671 			cdb[5]    = (u8)(start_blk & 0xff);
1672 			cdb[4]    = (u8)((start_blk >> 8) & 0xff);
1673 			cdb[3]    = (u8)((start_blk >> 16) & 0xff);
1674 			cdb[2]    = (u8)((start_blk >> 24) & 0xff);
1675 			break;
1676 		case 16:
1677 			cdb[9]    = (u8)(start_blk & 0xff);
1678 			cdb[8]    = (u8)((start_blk >> 8) & 0xff);
1679 			cdb[7]    = (u8)((start_blk >> 16) & 0xff);
1680 			cdb[6]    = (u8)((start_blk >> 24) & 0xff);
1681 			cdb[5]    = (u8)((start_blk >> 32) & 0xff);
1682 			cdb[4]    = (u8)((start_blk >> 40) & 0xff);
1683 			cdb[3]    = (u8)((start_blk >> 48) & 0xff);
1684 			cdb[2]    = (u8)((start_blk >> 56) & 0xff);
1685 			break;
1686 		}
1687 	}
1688 }
1689 
1690 /**
1691  * megasas_build_ldio_fusion -	Prepares IOs to devices
1692  * @instance:		Adapter soft state
1693  * @scp:		SCSI command
1694  * @cmd:		Command to be prepared
1695  *
1696  * Prepares the io_request and chain elements (sg_frame) for IO
1697  * The IO can be for PD (Fast Path) or LD
1698  */
1699 void
1700 megasas_build_ldio_fusion(struct megasas_instance *instance,
1701 			  struct scsi_cmnd *scp,
1702 			  struct megasas_cmd_fusion *cmd)
1703 {
1704 	u8 fp_possible;
1705 	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
1706 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1707 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1708 	struct IO_REQUEST_INFO io_info;
1709 	struct fusion_context *fusion;
1710 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1711 	u8 *raidLUN;
1712 
1713 	device_id = MEGASAS_DEV_INDEX(scp);
1714 
1715 	fusion = instance->ctrl_context;
1716 
1717 	io_request = cmd->io_request;
1718 	io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
1719 	io_request->RaidContext.status = 0;
1720 	io_request->RaidContext.exStatus = 0;
1721 
1722 	req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
1723 
1724 	start_lba_lo = 0;
1725 	start_lba_hi = 0;
1726 	fp_possible = 0;
1727 
1728 	/*
1729 	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1730 	 */
1731 	if (scp->cmd_len == 6) {
1732 		datalength = (u32) scp->cmnd[4];
1733 		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
1734 			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
1735 
1736 		start_lba_lo &= 0x1FFFFF;
1737 	}
1738 
1739 	/*
1740 	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1741 	 */
1742 	else if (scp->cmd_len == 10) {
1743 		datalength = (u32) scp->cmnd[8] |
1744 			((u32) scp->cmnd[7] << 8);
1745 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1746 			((u32) scp->cmnd[3] << 16) |
1747 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1748 	}
1749 
1750 	/*
1751 	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1752 	 */
1753 	else if (scp->cmd_len == 12) {
1754 		datalength = ((u32) scp->cmnd[6] << 24) |
1755 			((u32) scp->cmnd[7] << 16) |
1756 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1757 		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
1758 			((u32) scp->cmnd[3] << 16) |
1759 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1760 	}
1761 
1762 	/*
1763 	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1764 	 */
1765 	else if (scp->cmd_len == 16) {
1766 		datalength = ((u32) scp->cmnd[10] << 24) |
1767 			((u32) scp->cmnd[11] << 16) |
1768 			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
1769 		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
1770 			((u32) scp->cmnd[7] << 16) |
1771 			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
1772 
1773 		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
1774 			((u32) scp->cmnd[3] << 16) |
1775 			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
1776 	}
1777 
1778 	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1779 	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
1780 	io_info.numBlocks = datalength;
1781 	io_info.ldTgtId = device_id;
1782 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
1783 
1784 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1785 		io_info.isRead = 1;
1786 
1787 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1788 
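	/*
	 * Decide whether this IO can use the fast path: targets beyond the
	 * FW-supported VD count, or adapters with fast path disabled, always
	 * take the firmware (LD) path; otherwise MR_BuildRaidContext()
	 * decides based on the RAID map.
	 */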
1789 	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1790 		instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
1791 		io_request->RaidContext.regLockFlags  = 0;
1792 		fp_possible = 0;
1793 	} else {
1794 		if (MR_BuildRaidContext(instance, &io_info,
1795 					&io_request->RaidContext,
1796 					local_map_ptr, &raidLUN))
1797 			fp_possible = io_info.fpOkForIo;
1798 	}
1799 
	/* Use raw_smp_processor_id() for now, until cmd->request->cpu holds
	   the CPU id by default (not the CPU group id); otherwise not all
	   MSI-X queues would be utilized. */
1803 	cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1804 		raw_smp_processor_id() % instance->msix_vectors : 0;
1805 
1806 	if (fp_possible) {
1807 		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1808 				   local_map_ptr, start_lba_lo);
1809 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1810 		cmd->request_desc->SCSIIO.RequestFlags =
1811 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
1812 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1813 		if (fusion->adapter_type == INVADER_SERIES) {
1814 			if (io_request->RaidContext.regLockFlags ==
1815 			    REGION_TYPE_UNUSED)
1816 				cmd->request_desc->SCSIIO.RequestFlags =
1817 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1818 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1819 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1820 			io_request->RaidContext.nseg = 0x1;
1821 			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1822 			io_request->RaidContext.regLockFlags |=
1823 			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1824 			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
1825 		}
1826 		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1827 		    (io_info.isRead)) {
1828 			io_info.devHandle =
1829 				get_updated_dev_handle(instance,
1830 					&fusion->load_balance_info[device_id],
1831 					&io_info);
1832 			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1833 			cmd->pd_r1_lb = io_info.pd_after_lb;
1834 		} else
1835 			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1836 
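		/*
		 * When raidLUN[0] is set and the arm exposes more than one
		 * valid device handle, toggle instance->dev_handle so that
		 * successive IOs alternate between the two handles.
		 */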
1837 		if ((raidLUN[0] == 1) &&
1838 			(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
1839 			instance->dev_handle = !(instance->dev_handle);
1840 			io_info.devHandle =
1841 				local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
1842 		}
1843 
1844 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1845 		io_request->DevHandle = io_info.devHandle;
1846 		/* populate the LUN field */
1847 		memcpy(io_request->LUN, raidLUN, 8);
1848 	} else {
1849 		io_request->RaidContext.timeoutValue =
1850 			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1851 		cmd->request_desc->SCSIIO.RequestFlags =
1852 			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1853 			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1854 		if (fusion->adapter_type == INVADER_SERIES) {
1855 			if (io_info.do_fp_rlbypass ||
1856 				(io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
1857 				cmd->request_desc->SCSIIO.RequestFlags =
1858 					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1859 					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1860 			io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1861 			io_request->RaidContext.regLockFlags |=
1862 				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1863 				 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1864 			io_request->RaidContext.nseg = 0x1;
1865 		}
1866 		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1867 		io_request->DevHandle = cpu_to_le16(device_id);
1868 	} /* Not FP */
1869 }
1870 
1871 /**
 * megasas_build_ld_nonrw_fusion - prepares non-RW IOs for a virtual disk
 * @instance:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Prepares the io_request frame for non-read/write IO cmds for a VD.
1878  */
1879 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
1880 			  struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
1881 {
1882 	u32 device_id;
1883 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1884 	u16 pd_index = 0;
1885 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1886 	struct fusion_context *fusion = instance->ctrl_context;
1887 	u8                          span, physArm;
1888 	__le16                      devHandle;
1889 	u32                         ld, arRef, pd;
1890 	struct MR_LD_RAID                  *raid;
1891 	struct RAID_CONTEXT                *pRAID_Context;
1892 	u8 fp_possible = 1;
1893 
1894 	io_request = cmd->io_request;
1895 	device_id = MEGASAS_DEV_INDEX(scmd);
1896 	pd_index = MEGASAS_PD_INDEX(scmd);
1897 	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1898 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1899 	/* get RAID_Context pointer */
1900 	pRAID_Context = &io_request->RaidContext;
1901 	/* Check with FW team */
1902 	pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
1903 	pRAID_Context->regLockRowLBA    = 0;
1904 	pRAID_Context->regLockLength    = 0;
1905 
1906 	if (fusion->fast_path_io && (
1907 		device_id < instance->fw_supported_vd_count)) {
1908 
1909 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1910 		if (ld >= instance->fw_supported_vd_count)
1911 			fp_possible = 0;
1912 
1913 		raid = MR_LdRaidGet(ld, local_map_ptr);
1914 		if (!(raid->capability.fpNonRWCapable))
1915 			fp_possible = 0;
1916 	} else
1917 		fp_possible = 0;
1918 
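	/*
	 * Firmware path: send the command as an LD IO addressed by target id.
	 * Fast path: address the single backing PD directly via its device
	 * handle.
	 */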
1919 	if (!fp_possible) {
1920 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1921 		io_request->DevHandle = cpu_to_le16(device_id);
1922 		io_request->LUN[1] = scmd->device->lun;
1923 		pRAID_Context->timeoutValue =
1924 			cpu_to_le16 (scmd->request->timeout / HZ);
1925 		cmd->request_desc->SCSIIO.RequestFlags =
1926 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1927 			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1928 	} else {
1929 
1930 		/* set RAID context values */
1931 		pRAID_Context->configSeqNum = raid->seqNum;
1932 		pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
1933 		pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
1934 
1935 		/* get the DevHandle for the PD (since this is
1936 		   fpNonRWCapable, this is a single disk RAID0) */
1937 		span = physArm = 0;
1938 		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
1939 		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
1940 		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
1941 
1942 		/* build request descriptor */
1943 		cmd->request_desc->SCSIIO.RequestFlags =
1944 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1945 			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1946 		cmd->request_desc->SCSIIO.DevHandle = devHandle;
1947 
1948 		/* populate the LUN field */
1949 		memcpy(io_request->LUN, raid->LUN, 8);
1950 
1951 		/* build the raidScsiIO structure */
1952 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1953 		io_request->DevHandle = devHandle;
1954 	}
1955 }
1956 
1957 /**
 * megasas_build_syspd_fusion - prepares RW/non-RW IOs for a system PD
 * @instance:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared
 * @fp_possible:	whether the IO may go via fast path or the firmware path
 *
 * Prepares the io_request frame for RW/non-RW IO cmds for system PDs.
1965  */
1966 static void
1967 megasas_build_syspd_fusion(struct megasas_instance *instance,
1968 	struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
1969 {
1970 	u32 device_id;
1971 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1972 	u16 pd_index = 0;
1973 	u16 os_timeout_value;
1974 	u16 timeout_limit;
1975 	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1976 	struct RAID_CONTEXT	*pRAID_Context;
1977 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1978 	struct fusion_context *fusion = instance->ctrl_context;
1979 	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
1980 
1981 	device_id = MEGASAS_DEV_INDEX(scmd);
1982 	pd_index = MEGASAS_PD_INDEX(scmd);
1983 	os_timeout_value = scmd->request->timeout / HZ;
1984 
1985 	io_request = cmd->io_request;
1986 	/* get RAID_Context pointer */
1987 	pRAID_Context = &io_request->RaidContext;
1988 	pRAID_Context->regLockFlags = 0;
1989 	pRAID_Context->regLockRowLBA = 0;
1990 	pRAID_Context->regLockLength = 0;
1991 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1992 	io_request->LUN[1] = scmd->device->lun;
1993 	pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1994 		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1995 
1996 	/* If FW supports PD sequence number */
1997 	if (instance->use_seqnum_jbod_fp &&
1998 		instance->pd_list[pd_index].driveType == TYPE_DISK) {
		/* TgtId must be offset by 255 (MAX_PHYSICAL_DEVICES - 1) as
		 * the JBOD sequence number map is indexed below the raid map.
		 */
2002 		pRAID_Context->VirtualDiskTgtId =
2003 			cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
2004 		pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
2005 		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2006 		pRAID_Context->regLockFlags |=
2007 			(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
2008 		pRAID_Context->Type = MPI2_TYPE_CUDA;
2009 		pRAID_Context->nseg = 0x1;
2010 	} else if (fusion->fast_path_io) {
2011 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2012 		pRAID_Context->configSeqNum = 0;
2013 		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2014 		io_request->DevHandle =
2015 			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
2016 	} else {
2017 		/* Want to send all IO via FW path */
2018 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2019 		pRAID_Context->configSeqNum = 0;
2020 		io_request->DevHandle = cpu_to_le16(0xFFFF);
2021 	}
2022 
2023 	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
2024 	cmd->request_desc->SCSIIO.MSIxIndex =
2025 		instance->msix_vectors ?
2026 		(raw_smp_processor_id() % instance->msix_vectors) : 0;
2027 
2028 
2029 	if (!fp_possible) {
2030 		/* system pd firmware path */
2031 		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2032 		cmd->request_desc->SCSIIO.RequestFlags =
2033 			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2034 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2035 		pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
2036 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
2037 	} else {
2038 		/* system pd Fast Path */
2039 		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2040 		timeout_limit = (scmd->device->type == TYPE_DISK) ?
2041 				255 : 0xFFFF;
2042 		pRAID_Context->timeoutValue =
2043 			cpu_to_le16((os_timeout_value > timeout_limit) ?
2044 			timeout_limit : os_timeout_value);
2045 		if (fusion->adapter_type == INVADER_SERIES)
2046 			io_request->IoFlags |=
2047 				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2048 
2049 		cmd->request_desc->SCSIIO.RequestFlags =
2050 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
2051 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2052 	}
2053 }
2054 
2055 /**
2056  * megasas_build_io_fusion -	Prepares IOs to devices
2057  * @instance:		Adapter soft state
2058  * @scp:		SCSI command
2059  * @cmd:		Command to be prepared
2060  *
2061  * Invokes helper functions to prepare request frames
2062  * and sets flags appropriate for IO/Non-IO cmd
2063  */
2064 int
2065 megasas_build_io_fusion(struct megasas_instance *instance,
2066 			struct scsi_cmnd *scp,
2067 			struct megasas_cmd_fusion *cmd)
2068 {
2069 	u16 sge_count;
2070 	u8  cmd_type;
2071 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
2072 
2073 	/* Zero out some fields so they don't get reused */
2074 	memset(io_request->LUN, 0x0, 8);
2075 	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
2076 	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
2077 	io_request->EEDPFlags = 0;
2078 	io_request->Control = 0;
2079 	io_request->EEDPBlockSize = 0;
2080 	io_request->ChainOffset = 0;
2081 	io_request->RaidContext.RAIDFlags = 0;
2082 	io_request->RaidContext.Type = 0;
2083 	io_request->RaidContext.nseg = 0;
2084 
2085 	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
2086 	/*
	 * Just the CDB length; the rest of the IoFlags are zero.
	 * This will be modified for FP in build_ldio_fusion.
2089 	 */
2090 	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
2091 
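	/*
	 * Dispatch on the command type: read/write LD IOs and non-RW LD
	 * commands have dedicated builders; system PD commands use the
	 * syspd builder, with non-RW syspd IOs forced to the firmware path
	 * when secure JBOD is supported.
	 */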
2092 	switch (cmd_type = megasas_cmd_type(scp)) {
2093 	case READ_WRITE_LDIO:
2094 		megasas_build_ldio_fusion(instance, scp, cmd);
2095 		break;
2096 	case NON_READ_WRITE_LDIO:
2097 		megasas_build_ld_nonrw_fusion(instance, scp, cmd);
2098 		break;
2099 	case READ_WRITE_SYSPDIO:
2100 	case NON_READ_WRITE_SYSPDIO:
2101 		if (instance->secure_jbod_support &&
2102 			(cmd_type == NON_READ_WRITE_SYSPDIO))
2103 			megasas_build_syspd_fusion(instance, scp, cmd, 0);
2104 		else
2105 			megasas_build_syspd_fusion(instance, scp, cmd, 1);
2106 		break;
2107 	default:
2108 		break;
2109 	}
2110 
2111 	/*
2112 	 * Construct SGL
2113 	 */
2114 
2115 	sge_count =
2116 		megasas_make_sgl_fusion(instance, scp,
2117 					(struct MPI25_IEEE_SGE_CHAIN64 *)
2118 					&io_request->SGL, cmd);
2119 
2120 	if (sge_count > instance->max_num_sge) {
2121 		dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
2122 		       "max (0x%x) allowed\n", sge_count,
2123 		       instance->max_num_sge);
2124 		return 1;
2125 	}
2126 
	/* numSGE stores the lower 8 bits of sge_count.
	 * numSGEExt stores the upper 8 bits of sge_count.
2129 	 */
2130 	io_request->RaidContext.numSGE = sge_count;
2131 	io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
2132 
2133 	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
2134 
2135 	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
2136 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
2137 	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
2138 		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
2139 
2140 	io_request->SGLOffset0 =
2141 		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
2142 
2143 	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
2144 	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
2145 
2146 	cmd->scmd = scp;
2147 	scp->SCp.ptr = (char *)cmd;
2148 
2149 	return 0;
2150 }
2151 
2152 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2153 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2154 {
2155 	u8 *p;
2156 	struct fusion_context *fusion;
2157 
2158 	if (index >= instance->max_fw_cmds) {
2159 		dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
2160 		       "descriptor for scsi%d\n", index,
2161 			instance->host->host_no);
2162 		return NULL;
2163 	}
2164 	fusion = instance->ctrl_context;
	p = fusion->req_frames_desc +
		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
2167 
2168 	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2169 }
2170 
2171 /**
 * megasas_build_and_issue_cmd_fusion - Main routine for building and
 *                                      issuing a non-IOCTL cmd
2174  * @instance:			Adapter soft state
2175  * @scmd:			pointer to scsi cmd from OS
2176  */
2177 static u32
2178 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2179 				   struct scsi_cmnd *scmd)
2180 {
2181 	struct megasas_cmd_fusion *cmd;
2182 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2183 	u32 index;
2184 	struct fusion_context *fusion;
2185 
2186 	fusion = instance->ctrl_context;
2187 
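	/*
	 * Throttle LD read/write IOs: if the number outstanding would exceed
	 * ldio_threshold, back off and let the midlayer retry.
	 */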
2188 	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
2189 		instance->ldio_threshold &&
2190 		(atomic_inc_return(&instance->ldio_outstanding) >
2191 		instance->ldio_threshold)) {
2192 		atomic_dec(&instance->ldio_outstanding);
2193 		return SCSI_MLQUEUE_DEVICE_BUSY;
2194 	}
2195 
2196 	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2197 
2198 	index = cmd->index;
2199 
2200 	req_desc = megasas_get_request_descriptor(instance, index-1);
2201 	if (!req_desc)
2202 		return SCSI_MLQUEUE_HOST_BUSY;
2203 
2204 	req_desc->Words = 0;
2205 	cmd->request_desc = req_desc;
2206 
2207 	if (megasas_build_io_fusion(instance, scmd, cmd)) {
2208 		megasas_return_cmd_fusion(instance, cmd);
2209 		dev_err(&instance->pdev->dev, "Error building command\n");
2210 		cmd->request_desc = NULL;
2211 		return SCSI_MLQUEUE_HOST_BUSY;
2212 	}
2213 
2214 	req_desc = cmd->request_desc;
2215 	req_desc->SCSIIO.SMID = cpu_to_le16(index);
2216 
2217 	if (cmd->io_request->ChainOffset != 0 &&
2218 	    cmd->io_request->ChainOffset != 0xF)
2219 		dev_err(&instance->pdev->dev, "The chain offset value is not "
2220 		       "correct : %x\n", cmd->io_request->ChainOffset);
2221 
2222 	/*
2223 	 * Issue the command to the FW
2224 	 */
2225 	atomic_inc(&instance->fw_outstanding);
2226 
2227 	megasas_fire_cmd_fusion(instance, req_desc);
2228 
2229 	return 0;
2230 }
2231 
2232 /**
 * complete_cmd_fusion -	Completes commands
 * @instance:			Adapter soft state
 * @MSIxIndex:			MSI-X (reply queue) index to service
 *
 * Completes all commands that are in the reply descriptor queue.
2236  */
2237 int
2238 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2239 {
2240 	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
2241 	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
2242 	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
2243 	struct fusion_context *fusion;
2244 	struct megasas_cmd *cmd_mfi;
2245 	struct megasas_cmd_fusion *cmd_fusion;
2246 	u16 smid, num_completed;
2247 	u8 reply_descript_type;
2248 	u32 status, extStatus, device_id;
2249 	union desc_value d_val;
2250 	struct LD_LOAD_BALANCE_INFO *lbinfo;
2251 	int threshold_reply_count = 0;
2252 	struct scsi_cmnd *scmd_local = NULL;
2253 	struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
2254 	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
2255 
2256 	fusion = instance->ctrl_context;
2257 
2258 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2259 		return IRQ_HANDLED;
2260 
2261 	desc = fusion->reply_frames_desc[MSIxIndex] +
2262 				fusion->last_reply_idx[MSIxIndex];
2263 
2264 	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2265 
2266 	d_val.word = desc->Words;
2267 
2268 	reply_descript_type = reply_desc->ReplyFlags &
2269 		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2270 
2271 	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2272 		return IRQ_NONE;
2273 
2274 	num_completed = 0;
2275 
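	/*
	 * Walk the reply descriptor ring for this MSI-X index, completing
	 * each posted descriptor until an unused (all 0xFF) entry is found.
	 */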
2276 	while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
2277 	       d_val.u.high != cpu_to_le32(UINT_MAX)) {
2278 		smid = le16_to_cpu(reply_desc->SMID);
2279 
2280 		cmd_fusion = fusion->cmd_list[smid - 1];
2281 
2282 		scsi_io_req =
2283 			(struct MPI2_RAID_SCSI_IO_REQUEST *)
2284 		  cmd_fusion->io_request;
2285 
2286 		if (cmd_fusion->scmd)
2287 			cmd_fusion->scmd->SCp.ptr = NULL;
2288 
2289 		scmd_local = cmd_fusion->scmd;
2290 		status = scsi_io_req->RaidContext.status;
2291 		extStatus = scsi_io_req->RaidContext.exStatus;
2292 
2293 		switch (scsi_io_req->Function) {
2294 		case MPI2_FUNCTION_SCSI_TASK_MGMT:
2295 			mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
2296 						cmd_fusion->io_request;
2297 			mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
2298 						&mr_tm_req->TmRequest;
2299 			dev_dbg(&instance->pdev->dev, "TM completion:"
2300 				"type: 0x%x TaskMID: 0x%x\n",
2301 				mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
2302 			complete(&cmd_fusion->done);
2303 			break;
2304 		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
2305 			/* Update load balancing info */
2306 			device_id = MEGASAS_DEV_INDEX(scmd_local);
2307 			lbinfo = &fusion->load_balance_info[device_id];
2308 			if (cmd_fusion->scmd->SCp.Status &
2309 			    MEGASAS_LOAD_BALANCE_FLAG) {
2310 				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
2311 				cmd_fusion->scmd->SCp.Status &=
2312 					~MEGASAS_LOAD_BALANCE_FLAG;
2313 			}
2314 			if (reply_descript_type ==
2315 			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2316 				if (megasas_dbg_lvl == 5)
2317 					dev_err(&instance->pdev->dev, "\nFAST Path "
2318 					       "IO Success\n");
2319 			}
2320 			/* Fall thru and complete IO */
2321 		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
2322 			/* Map the FW Cmd Status */
2323 			map_cmd_status(cmd_fusion, status, extStatus);
2324 			scsi_io_req->RaidContext.status = 0;
2325 			scsi_io_req->RaidContext.exStatus = 0;
2326 			if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
2327 				atomic_dec(&instance->ldio_outstanding);
2328 			megasas_return_cmd_fusion(instance, cmd_fusion);
2329 			scsi_dma_unmap(scmd_local);
2330 			scmd_local->scsi_done(scmd_local);
2331 			atomic_dec(&instance->fw_outstanding);
2332 
2333 			break;
2334 		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
2335 			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2336 
2337 			/* Poll mode. Dummy free.
2338 			 * In case of Interrupt mode, caller has reverse check.
2339 			 */
2340 			if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
2341 				cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
2342 				megasas_return_cmd(instance, cmd_mfi);
2343 			} else
2344 				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2345 			break;
2346 		}
2347 
2348 		fusion->last_reply_idx[MSIxIndex]++;
2349 		if (fusion->last_reply_idx[MSIxIndex] >=
2350 		    fusion->reply_q_depth)
2351 			fusion->last_reply_idx[MSIxIndex] = 0;
2352 
2353 		desc->Words = cpu_to_le64(ULLONG_MAX);
2354 		num_completed++;
2355 		threshold_reply_count++;
2356 
2357 		/* Get the next reply descriptor */
2358 		if (!fusion->last_reply_idx[MSIxIndex])
2359 			desc = fusion->reply_frames_desc[MSIxIndex];
2360 		else
2361 			desc++;
2362 
2363 		reply_desc =
2364 		  (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2365 
2366 		d_val.word = desc->Words;
2367 
2368 		reply_descript_type = reply_desc->ReplyFlags &
2369 			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2370 
2371 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2372 			break;
2373 		/*
2374 		 * Write to reply post host index register after completing threshold
2375 		 * number of reply counts and still there are more replies in reply queue
2376 		 * pending to be completed
2377 		 */
2378 		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2379 			if (fusion->adapter_type == INVADER_SERIES)
2380 				writel(((MSIxIndex & 0x7) << 24) |
2381 					fusion->last_reply_idx[MSIxIndex],
2382 					instance->reply_post_host_index_addr[MSIxIndex/8]);
2383 			else
2384 				writel((MSIxIndex << 24) |
2385 					fusion->last_reply_idx[MSIxIndex],
2386 					instance->reply_post_host_index_addr[0]);
2387 			threshold_reply_count = 0;
2388 		}
2389 	}
2390 
2391 	if (!num_completed)
2392 		return IRQ_NONE;
2393 
2394 	wmb();
2395 	if (fusion->adapter_type == INVADER_SERIES)
2396 		writel(((MSIxIndex & 0x7) << 24) |
2397 			fusion->last_reply_idx[MSIxIndex],
2398 			instance->reply_post_host_index_addr[MSIxIndex/8]);
2399 	else
2400 		writel((MSIxIndex << 24) |
2401 			fusion->last_reply_idx[MSIxIndex],
2402 			instance->reply_post_host_index_addr[0]);
2403 	megasas_check_and_restore_queue_depth(instance);
2404 	return IRQ_HANDLED;
2405 }
2406 
2407 /**
2408  * megasas_complete_cmd_dpc_fusion -	Completes command
2409  * @instance:			Adapter soft state
2410  *
2411  * Tasklet to complete cmds
2412  */
2413 void
2414 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
2415 {
2416 	struct megasas_instance *instance =
2417 		(struct megasas_instance *)instance_addr;
2418 	unsigned long flags;
2419 	u32 count, MSIxIndex;
2420 
2421 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2422 
	/* If we have already declared the adapter dead, do not complete cmds */
2424 	spin_lock_irqsave(&instance->hba_lock, flags);
2425 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2426 		spin_unlock_irqrestore(&instance->hba_lock, flags);
2427 		return;
2428 	}
2429 	spin_unlock_irqrestore(&instance->hba_lock, flags);
2430 
2431 	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
2432 		complete_cmd_fusion(instance, MSIxIndex);
2433 }
2434 
2435 /**
 * megasas_isr_fusion - ISR entry point
 * @irq:	IRQ number
 * @devp:	IRQ context (struct megasas_irq_context)
2437  */
2438 irqreturn_t megasas_isr_fusion(int irq, void *devp)
2439 {
2440 	struct megasas_irq_context *irq_context = devp;
2441 	struct megasas_instance *instance = irq_context->instance;
2442 	u32 mfiStatus, fw_state, dma_state;
2443 
2444 	if (instance->mask_interrupts)
2445 		return IRQ_NONE;
2446 
2447 	if (!instance->msix_vectors) {
2448 		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
2449 		if (!mfiStatus)
2450 			return IRQ_NONE;
2451 	}
2452 
2453 	/* If we are resetting, bail */
2454 	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
2455 		instance->instancet->clear_intr(instance->reg_set);
2456 		return IRQ_HANDLED;
2457 	}
2458 
2459 	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
2460 		instance->instancet->clear_intr(instance->reg_set);
2461 		/* If we didn't complete any commands, check for FW fault */
2462 		fw_state = instance->instancet->read_fw_status_reg(
2463 			instance->reg_set) & MFI_STATE_MASK;
2464 		dma_state = instance->instancet->read_fw_status_reg
2465 			(instance->reg_set) & MFI_STATE_DMADONE;
2466 		if (instance->crash_dump_drv_support &&
2467 			instance->crash_dump_app_support) {
2468 			/* Start collecting crash, if DMA bit is done */
2469 			if ((fw_state == MFI_STATE_FAULT) && dma_state)
2470 				schedule_work(&instance->crash_init);
2471 			else if (fw_state == MFI_STATE_FAULT) {
2472 				if (instance->unload == 0)
2473 					schedule_work(&instance->work_init);
2474 			}
2475 		} else if (fw_state == MFI_STATE_FAULT) {
2476 			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
2477 			       "for scsi%d\n", instance->host->host_no);
2478 			if (instance->unload == 0)
2479 				schedule_work(&instance->work_init);
2480 		}
2481 	}
2482 
2483 	return IRQ_HANDLED;
2484 }
2485 
2486 /**
 * build_mpt_mfi_pass_thru - builds a cmd for MFI pass-thru
 * @instance:			Adapter soft state
 * @mfi_cmd:			megasas_cmd pointer
2490  *
2491  */
2492 u8
2493 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2494 			struct megasas_cmd *mfi_cmd)
2495 {
2496 	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2497 	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
2498 	struct megasas_cmd_fusion *cmd;
2499 	struct fusion_context *fusion;
2500 	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2501 
2502 	fusion = instance->ctrl_context;
2503 
2504 	cmd = megasas_get_cmd_fusion(instance,
2505 			instance->max_scsi_cmds + mfi_cmd->index);
2506 
2507 	/*  Save the smid. To be used for returning the cmd */
2508 	mfi_cmd->context.smid = cmd->index;
2509 
2510 	/*
2511 	 * For cmds where the flag is set, store the flag and check
2512 	 * on completion. For cmds with this flag, don't call
2513 	 * megasas_complete_cmd
2514 	 */
2515 
2516 	if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
2517 		mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
2518 
2519 	io_req = cmd->io_request;
2520 
2521 	if (fusion->adapter_type == INVADER_SERIES) {
2522 		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
2523 			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
2524 		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2525 		sgl_ptr_end->Flags = 0;
2526 	}
2527 
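	/*
	 * The MFI frame itself is attached as a single IEEE SGE chain
	 * element pointing at the frame's physical address.
	 */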
2528 	mpi25_ieee_chain =
2529 	  (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2530 
2531 	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2532 	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
2533 				       SGL) / 4;
2534 	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
2535 
2536 	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
2537 
2538 	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2539 		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2540 
2541 	mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
2542 
2543 	return 0;
2544 }
2545 
2546 /**
 * build_mpt_cmd - Calls helper function to build an MPT cmd for an MFI pass-thru cmd
2548  * @instance:			Adapter soft state
2549  * @cmd:			mfi cmd to build
2550  *
2551  */
2552 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2553 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2554 {
2555 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2556 	u16 index;
2557 
2558 	if (build_mpt_mfi_pass_thru(instance, cmd)) {
2559 		dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
2560 		return NULL;
2561 	}
2562 
2563 	index = cmd->context.smid;
2564 
2565 	req_desc = megasas_get_request_descriptor(instance, index - 1);
2566 
2567 	if (!req_desc)
2568 		return NULL;
2569 
2570 	req_desc->Words = 0;
2571 	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2572 					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2573 
2574 	req_desc->SCSIIO.SMID = cpu_to_le16(index);
2575 
2576 	return req_desc;
2577 }
2578 
2579 /**
 * megasas_issue_dcmd_fusion - Issues an MFI pass-thru cmd
2581  * @instance:			Adapter soft state
2582  * @cmd:			mfi cmd pointer
2583  *
2584  */
2585 int
2586 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2587 			  struct megasas_cmd *cmd)
2588 {
2589 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2590 
2591 	req_desc = build_mpt_cmd(instance, cmd);
2592 	if (!req_desc) {
2593 		dev_info(&instance->pdev->dev, "Failed from %s %d\n",
2594 					__func__, __LINE__);
2595 		return DCMD_NOT_FIRED;
2596 	}
2597 
2598 	megasas_fire_cmd_fusion(instance, req_desc);
2599 	return DCMD_SUCCESS;
2600 }
2601 
2602 /**
2603  * megasas_release_fusion -	Reverses the FW initialization
2604  * @instance:			Adapter soft state
2605  */
2606 void
2607 megasas_release_fusion(struct megasas_instance *instance)
2608 {
2609 	megasas_free_cmds(instance);
2610 	megasas_free_cmds_fusion(instance);
2611 
2612 	iounmap(instance->reg_set);
2613 
2614 	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
2615 }
2616 
2617 /**
2618  * megasas_read_fw_status_reg_fusion - returns the current FW status value
2619  * @regs:			MFI register set
2620  */
2621 static u32
2622 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
2623 {
2624 	return readl(&(regs)->outbound_scratch_pad);
2625 }
2626 
2627 /**
 * megasas_alloc_host_crash_buffer -	Allocate host buffers for FW crash dump collection
 * @instance:				Controller's soft instance
 *
 * The number of buffers actually allocated is stored in instance->drv_buf_alloc.
2631  */
2632 static void
2633 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2634 {
2635 	unsigned int i;
2636 
2637 	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
2638 	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
2639 		instance->crash_buf[i] = (void	*)__get_free_pages(GFP_KERNEL,
2640 				instance->crash_buf_pages);
2641 		if (!instance->crash_buf[i]) {
2642 			dev_info(&instance->pdev->dev, "Firmware crash dump "
2643 				"memory allocation failed at index %d\n", i);
2644 			break;
2645 		}
2646 		memset(instance->crash_buf[i], 0,
2647 			((1 << PAGE_SHIFT) << instance->crash_buf_pages));
2648 	}
2649 	instance->drv_buf_alloc = i;
2650 }
2651 
2652 /**
 * megasas_free_host_crash_buffer -	Free the host buffers used for FW crash dump collection
2654  * @instance:				Controller's soft instance
2655  */
2656 void
2657 megasas_free_host_crash_buffer(struct megasas_instance *instance)
2658 {
	unsigned int i;
2661 	for (i = 0; i < instance->drv_buf_alloc; i++) {
2662 		if (instance->crash_buf[i])
2663 			free_pages((ulong)instance->crash_buf[i],
2664 					instance->crash_buf_pages);
2665 	}
2666 	instance->drv_buf_index = 0;
2667 	instance->drv_buf_alloc = 0;
2668 	instance->fw_crash_state = UNAVAILABLE;
2669 	instance->fw_crash_buffer_size = 0;
2670 }
2671 
2672 /**
 * megasas_adp_reset_fusion -	For controller reset
 * @instance:			Adapter soft state
 * @regs:			MFI register set
2675  */
2676 static int
2677 megasas_adp_reset_fusion(struct megasas_instance *instance,
2678 			 struct megasas_register_set __iomem *regs)
2679 {
2680 	u32 host_diag, abs_state, retry;
2681 
2682 	/* Now try to reset the chip */
2683 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2684 	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2685 	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2686 	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2687 	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2688 	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2689 	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
2690 
2691 	/* Check that the diag write enable (DRWE) bit is on */
2692 	host_diag = readl(&instance->reg_set->fusion_host_diag);
2693 	retry = 0;
2694 	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2695 		msleep(100);
2696 		host_diag = readl(&instance->reg_set->fusion_host_diag);
2697 		if (retry++ == 100) {
2698 			dev_warn(&instance->pdev->dev,
2699 				"Host diag unlock failed from %s %d\n",
2700 				__func__, __LINE__);
2701 			break;
2702 		}
2703 	}
2704 	if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2705 		return -1;
2706 
2707 	/* Send chip reset command */
2708 	writel(host_diag | HOST_DIAG_RESET_ADAPTER,
2709 		&instance->reg_set->fusion_host_diag);
2710 	msleep(3000);
2711 
2712 	/* Make sure reset adapter bit is cleared */
2713 	host_diag = readl(&instance->reg_set->fusion_host_diag);
2714 	retry = 0;
2715 	while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2716 		msleep(100);
2717 		host_diag = readl(&instance->reg_set->fusion_host_diag);
2718 		if (retry++ == 1000) {
2719 			dev_warn(&instance->pdev->dev,
2720 				"Diag reset adapter never cleared %s %d\n",
2721 				__func__, __LINE__);
2722 			break;
2723 		}
2724 	}
2725 	if (host_diag & HOST_DIAG_RESET_ADAPTER)
2726 		return -1;
2727 
2728 	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
2729 			& MFI_STATE_MASK;
2730 	retry = 0;
2731 
2732 	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2733 		msleep(100);
2734 		abs_state = instance->instancet->
2735 			read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2736 	}
2737 	if (abs_state <= MFI_STATE_FW_INIT) {
2738 		dev_warn(&instance->pdev->dev,
2739 			"fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
2740 			abs_state, __func__, __LINE__);
2741 		return -1;
2742 	}
2743 
2744 	return 0;
2745 }
2746 
2747 /**
 * megasas_check_reset_fusion -	For controller reset check
 * @instance:			Adapter soft state
 * @regs:			MFI register set
2750  */
2751 static int
2752 megasas_check_reset_fusion(struct megasas_instance *instance,
2753 			   struct megasas_register_set __iomem *regs)
2754 {
2755 	return 0;
2756 }
2757 
2758 /* This function waits for outstanding commands on fusion to complete */
2759 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2760 					int reason, int *convert)
2761 {
2762 	int i, outstanding, retval = 0, hb_seconds_missed = 0;
2763 	u32 fw_state;
2764 
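	/* Poll roughly once per second, for up to resetwaittime seconds. */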
2765 	for (i = 0; i < resetwaittime; i++) {
2766 		/* Check if firmware is in fault state */
2767 		fw_state = instance->instancet->read_fw_status_reg(
2768 			instance->reg_set) & MFI_STATE_MASK;
2769 		if (fw_state == MFI_STATE_FAULT) {
2770 			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
2771 			       " will reset adapter scsi%d.\n",
2772 				instance->host->host_no);
2773 			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
2774 			retval = 1;
2775 			goto out;
2776 		}
2777 
2778 		if (reason == MFI_IO_TIMEOUT_OCR) {
2779 			dev_info(&instance->pdev->dev,
2780 				"MFI IO is timed out, initiating OCR\n");
2781 			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
2782 			retval = 1;
2783 			goto out;
2784 		}
2785 
2786 		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
2787 		if (instance->requestorId && !reason) {
2788 			retval = 1;
2789 			goto out;
2790 		}
2791 
2792 		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
2793 		if (instance->requestorId && reason) {
2794 			if (instance->hb_host_mem->HB.fwCounter !=
2795 			    instance->hb_host_mem->HB.driverCounter) {
2796 				instance->hb_host_mem->HB.driverCounter =
2797 					instance->hb_host_mem->HB.fwCounter;
2798 				hb_seconds_missed = 0;
2799 			} else {
2800 				hb_seconds_missed++;
2801 				if (hb_seconds_missed ==
2802 				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
2803 					dev_warn(&instance->pdev->dev, "SR-IOV:"
2804 					       " Heartbeat never completed "
2805 					       " while polling during I/O "
2806 					       " timeout handling for "
2807 					       "scsi%d.\n",
2808 					       instance->host->host_no);
2809 					       *convert = 1;
2810 					       retval = 1;
2811 					       goto out;
2812 				}
2813 			}
2814 		}
2815 
2816 		outstanding = atomic_read(&instance->fw_outstanding);
2817 		if (!outstanding)
2818 			goto out;
2819 
2820 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2821 			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2822 			       "commands to complete for scsi%d\n", i,
2823 			       outstanding, instance->host->host_no);
2824 			megasas_complete_cmd_dpc_fusion(
2825 				(unsigned long)instance);
2826 		}
2827 		msleep(1000);
2828 	}
2829 
2830 	if (atomic_read(&instance->fw_outstanding)) {
2831 		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2832 		       "will reset adapter scsi%d.\n",
2833 		       instance->host->host_no);
2834 		*convert = 1;
2835 		retval = 1;
2836 	}
2837 out:
2838 	return retval;
2839 }
2840 
2841 void  megasas_reset_reply_desc(struct megasas_instance *instance)
2842 {
2843 	int i, j, count;
2844 	struct fusion_context *fusion;
2845 	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2846 
2847 	fusion = instance->ctrl_context;
2848 	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2849 	for (i = 0 ; i < count ; i++) {
2850 		fusion->last_reply_idx[i] = 0;
2851 		reply_desc = fusion->reply_frames_desc[i];
2852 		for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
2853 			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
2854 	}
2855 }
2856 
2857 /*
2858  * megasas_refire_mgmt_cmd :	Re-fire management commands
2859  * @instance:				Controller's soft instance
2860 */
2861 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
2862 {
2863 	int j;
2864 	struct megasas_cmd_fusion *cmd_fusion;
2865 	struct fusion_context *fusion;
2866 	struct megasas_cmd *cmd_mfi;
2867 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2868 	u16 smid;
2869 	bool refire_cmd = 0;
2870 
2871 	fusion = instance->ctrl_context;
2872 
2873 	/* Re-fire management commands.
	 * Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
2875 	 */
2876 	for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
2877 		cmd_fusion = fusion->cmd_list[j];
2878 		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2879 		smid = le16_to_cpu(cmd_mfi->context.smid);
2880 
2881 		if (!smid)
2882 			continue;
2883 		req_desc = megasas_get_request_descriptor
2884 					(instance, smid - 1);
2885 		refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
2886 				cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
2887 				 (cmd_mfi->frame->dcmd.opcode !=
2888 				cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
2889 				&& !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
2890 		if (refire_cmd)
2891 			megasas_fire_cmd_fusion(instance, req_desc);
2892 		else
2893 			megasas_return_cmd(instance, cmd_mfi);
2894 	}
2895 }
2896 
2897 /*
2898  * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
2899  * @instance: per adapter struct
 * @id: the id assigned by the OS
 * @channel: the channel assigned by the OS
 *
 * Returns SUCCESS if no IOs are pending to the SCSI device, else returns FAILED
2904  */
2905 
2906 static int megasas_track_scsiio(struct megasas_instance *instance,
2907 		int id, int channel)
2908 {
2909 	int i, found = 0;
2910 	struct megasas_cmd_fusion *cmd_fusion;
2911 	struct fusion_context *fusion;
2912 	fusion = instance->ctrl_context;
2913 
2914 	for (i = 0 ; i < instance->max_scsi_cmds; i++) {
2915 		cmd_fusion = fusion->cmd_list[i];
2916 		if (cmd_fusion->scmd &&
2917 			(cmd_fusion->scmd->device->id == id &&
2918 			cmd_fusion->scmd->device->channel == channel)) {
2919 			dev_info(&instance->pdev->dev,
2920 				"SCSI commands pending to target"
2921 				"channel %d id %d \tSMID: 0x%x\n",
2922 				channel, id, cmd_fusion->index);
2923 			scsi_print_command(cmd_fusion->scmd);
2924 			found = 1;
2925 			break;
2926 		}
2927 	}
2928 
2929 	return found ? FAILED : SUCCESS;
2930 }
2931 
2932 /**
2933  * megasas_tm_response_code - translation of device response code
 * @instance: per adapter object
2935  * @mpi_reply: MPI reply returned by firmware
2936  *
2937  * Return nothing.
2938  */
2939 static void
2940 megasas_tm_response_code(struct megasas_instance *instance,
2941 		struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
2942 {
2943 	char *desc;
2944 
2945 	switch (mpi_reply->ResponseCode) {
2946 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2947 		desc = "task management request completed";
2948 		break;
2949 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2950 		desc = "invalid frame";
2951 		break;
2952 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2953 		desc = "task management request not supported";
2954 		break;
2955 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2956 		desc = "task management request failed";
2957 		break;
2958 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2959 		desc = "task management request succeeded";
2960 		break;
2961 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2962 		desc = "invalid lun";
2963 		break;
2964 	case 0xA:
2965 		desc = "overlapped tag attempted";
2966 		break;
2967 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2968 		desc = "task queued, however not sent to target";
2969 		break;
2970 	default:
2971 		desc = "unknown";
2972 		break;
2973 	}
2974 	dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
2975 		mpi_reply->ResponseCode, desc);
2976 	dev_dbg(&instance->pdev->dev,
2977 		"TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
2978 		" 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
2979 		mpi_reply->TerminationCount, mpi_reply->DevHandle,
2980 		mpi_reply->Function, mpi_reply->TaskType,
2981 		mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
2982 }
2983 
2984 /**
 * megasas_issue_tm - main routine for sending tm requests
 * @instance: per adapter struct
 * @device_handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @smid_task: smid assigned to the task
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
 * Context: user
 *
 * MegaRaid uses the MPT interface for Task Management requests.
2996  * A generic API for sending task management requests to firmware.
2997  *
2998  * Return SUCCESS or FAILED.
2999  */
3000 static int
3001 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
3002 	uint channel, uint id, u16 smid_task, u8 type)
3003 {
3004 	struct MR_TASK_MANAGE_REQUEST *mr_request;
3005 	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
3006 	unsigned long timeleft;
3007 	struct megasas_cmd_fusion *cmd_fusion;
3008 	struct megasas_cmd *cmd_mfi;
3009 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3010 	struct fusion_context *fusion;
3011 	struct megasas_cmd_fusion *scsi_lookup;
3012 	int rc;
3013 	struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
3014 
3015 	fusion = instance->ctrl_context;
3016 
3017 	cmd_mfi = megasas_get_cmd(instance);
3018 
3019 	if (!cmd_mfi) {
3020 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3021 			__func__, __LINE__);
3022 		return -ENOMEM;
3023 	}
3024 
3025 	cmd_fusion = megasas_get_cmd_fusion(instance,
3026 			instance->max_scsi_cmds + cmd_mfi->index);
3027 
3028 	/*  Save the smid. To be used for returning the cmd */
3029 	cmd_mfi->context.smid = cmd_fusion->index;
3030 
3031 	req_desc = megasas_get_request_descriptor(instance,
3032 			(cmd_fusion->index - 1));
3033 	if (!req_desc) {
3034 		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3035 			__func__, __LINE__);
3036 		megasas_return_cmd(instance, cmd_mfi);
3037 		return -ENOMEM;
3038 	}
3039 
3040 	cmd_fusion->request_desc = req_desc;
3041 	req_desc->Words = 0;
3042 
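	/* Remember the command the TM request targets so completion of the
	 * aborted/reset IO can be verified afterwards.
	 */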
3043 	scsi_lookup = fusion->cmd_list[smid_task - 1];
3044 
3045 	mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
3046 	memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
3047 	mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
3048 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3049 	mpi_request->DevHandle = cpu_to_le16(device_handle);
3050 	mpi_request->TaskType = type;
3051 	mpi_request->TaskMID = cpu_to_le16(smid_task);
3052 	mpi_request->LUN[1] = 0;
3053 
3054 
3055 	req_desc = cmd_fusion->request_desc;
3056 	req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
3057 	req_desc->HighPriority.RequestFlags =
3058 		(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
3059 		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3060 	req_desc->HighPriority.MSIxIndex =  0;
3061 	req_desc->HighPriority.LMID = 0;
3062 	req_desc->HighPriority.Reserved1 = 0;
3063 
3064 	if (channel < MEGASAS_MAX_PD_CHANNELS)
3065 		mr_request->tmReqFlags.isTMForPD = 1;
3066 	else
3067 		mr_request->tmReqFlags.isTMForLD = 1;
3068 
3069 	init_completion(&cmd_fusion->done);
3070 	megasas_fire_cmd_fusion(instance, req_desc);
3071 
3072 	timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
3073 
3074 	if (!timeleft) {
3075 		dev_err(&instance->pdev->dev,
3076 			"task mgmt type 0x%x timed out\n", type);
3077 		cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
3078 		mutex_unlock(&instance->reset_mutex);
3079 		rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
3080 		mutex_lock(&instance->reset_mutex);
3081 		return rc;
3082 	}
3083 
3084 	mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
3085 	megasas_tm_response_code(instance, mpi_reply);
3086 
3087 	megasas_return_cmd(instance, cmd_mfi);
3088 	rc = SUCCESS;
3089 	switch (type) {
3090 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3091 		if (scsi_lookup->scmd == NULL)
3092 			break;
3093 		else {
3094 			instance->instancet->disable_intr(instance);
3095 			msleep(1000);
3096 			megasas_complete_cmd_dpc_fusion
3097 					((unsigned long)instance);
3098 			instance->instancet->enable_intr(instance);
3099 			if (scsi_lookup->scmd == NULL)
3100 				break;
3101 		}
3102 		rc = FAILED;
3103 		break;
3104 
3105 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3106 		if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
3107 			break;
3108 		instance->instancet->disable_intr(instance);
3109 		msleep(1000);
3110 		megasas_complete_cmd_dpc_fusion
3111 				((unsigned long)instance);
3112 		rc = megasas_track_scsiio(instance, id, channel);
3113 		instance->instancet->enable_intr(instance);
3114 
3115 		break;
3116 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3117 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3118 		break;
3119 	default:
3120 		rc = FAILED;
3121 		break;
3122 	}
3123 
3124 	return rc;
3125 
3126 }
3127 
3128 /*
 * megasas_fusion_smid_lookup : Look for the fusion command corresponding to a SCSI cmd
 * @scmd: SCSI command whose SMID is looked up
 *
 * Returns a non-zero index (SMID) if the command is found outstanding, else 0
3133  */
3134 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
3135 {
3136 	int i, ret = 0;
3137 	struct megasas_instance *instance;
3138 	struct megasas_cmd_fusion *cmd_fusion;
3139 	struct fusion_context *fusion;
3140 
3141 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3142 
3143 	fusion = instance->ctrl_context;
3144 
3145 	for (i = 0; i < instance->max_scsi_cmds; i++) {
3146 		cmd_fusion = fusion->cmd_list[i];
3147 		if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
3148 			scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
3149 				" SMID: %d\n", cmd_fusion->index);
3150 			ret = cmd_fusion->index;
3151 			break;
3152 		}
3153 	}
3154 
3155 	return ret;
3156 }
3157 
3158 /*
 * megasas_get_tm_devhandle - Get devhandle for TM request
 * @sdev:		     OS provided scsi device
 *
 * Returns:		     devhandle/targetID of SCSI device
 */
3164 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
3165 {
3166 	u16 pd_index = 0;
3167 	u32 device_id;
3168 	struct megasas_instance *instance;
3169 	struct fusion_context *fusion;
3170 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3171 	u16 devhandle = (u16)ULONG_MAX;
3172 
3173 	instance = (struct megasas_instance *)sdev->host->hostdata;
3174 	fusion = instance->ctrl_context;
3175 
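	/*
	 * For system PDs the device handle comes from the PD sequence sync
	 * map (JBOD map); for LDs the OS target id is used directly.
	 */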
3176 	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
3177 		if (instance->use_seqnum_jbod_fp) {
3178 				pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
3179 						sdev->id;
3180 				pd_sync = (void *)fusion->pd_seq_sync
3181 						[(instance->pd_seq_map_id - 1) & 1];
3182 				devhandle = pd_sync->seq[pd_index].devHandle;
3183 		} else
3184 			sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
3185 				" without JBOD MAP support from %s %d\n", __func__, __LINE__);
3186 	} else {
3187 		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
3188 				+ sdev->id;
3189 		devhandle = device_id;
3190 	}
3191 
3192 	return devhandle;
3193 }
3194 
3195 /*
3196  * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
 * @scmd: pointer to scsi command object
 *
 * Returns SUCCESS if the command was aborted, else FAILED
3200  */
3201 
3202 int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
3203 {
3204 	struct megasas_instance *instance;
3205 	u16 smid, devhandle;
3206 	struct fusion_context *fusion;
3207 	int ret;
3208 	struct MR_PRIV_DEVICE *mr_device_priv_data;
3209 	mr_device_priv_data = scmd->device->hostdata;
3210 
3211 
3212 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3213 	fusion = instance->ctrl_context;
3214 
3215 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3216 		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
3217 		"SCSI host:%d\n", instance->host->host_no);
3218 		ret = FAILED;
3219 		return ret;
3220 	}
3221 
3222 	if (!mr_device_priv_data) {
3223 		sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
3224 			"scmd(%p)\n", scmd);
3225 		scmd->result = DID_NO_CONNECT << 16;
3226 		ret = SUCCESS;
3227 		goto out;
3228 	}
3229 
3230 
3231 	if (!mr_device_priv_data->is_tm_capable) {
3232 		ret = FAILED;
3233 		goto out;
3234 	}
3235 
3236 	mutex_lock(&instance->reset_mutex);
3237 
3238 	smid = megasas_fusion_smid_lookup(scmd);
3239 
3240 	if (!smid) {
3241 		ret = SUCCESS;
3242 		scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
3243 			" issued is not found in oustanding commands\n");
3244 		mutex_unlock(&instance->reset_mutex);
3245 		goto out;
3246 	}
3247 
3248 	devhandle = megasas_get_tm_devhandle(scmd->device);
3249 
3250 	if (devhandle == (u16)ULONG_MAX) {
3251 		ret = SUCCESS;
3252 		sdev_printk(KERN_INFO, scmd->device,
3253 			"task abort issued for invalid devhandle\n");
3254 		mutex_unlock(&instance->reset_mutex);
3255 		goto out;
3256 	}
3257 	sdev_printk(KERN_INFO, scmd->device,
3258 		"attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
3259 		scmd, devhandle);
3260 
3261 	mr_device_priv_data->tm_busy = 1;
3262 	ret = megasas_issue_tm(instance, devhandle,
3263 			scmd->device->channel, scmd->device->id, smid,
3264 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
3265 	mr_device_priv_data->tm_busy = 0;
3266 
3267 	mutex_unlock(&instance->reset_mutex);
3268 out:
3269 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
3270 			((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3271 
3272 	return ret;
3273 }
3274 
3275 /*
3276  * megasas_reset_target_fusion : target reset function for fusion adapters
 * @scmd: SCSI command pointer
 *
 * Returns SUCCESS if all commands associated with the target were aborted, else FAILED
3280  */
3281 
3282 int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
3283 {
3284 
3285 	struct megasas_instance *instance;
3286 	int ret = FAILED;
3287 	u16 devhandle;
3288 	struct fusion_context *fusion;
3289 	struct MR_PRIV_DEVICE *mr_device_priv_data;
3290 	mr_device_priv_data = scmd->device->hostdata;
3291 
3292 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3293 	fusion = instance->ctrl_context;
3294 
3295 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
3296 		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
3297 		"SCSI host:%d\n", instance->host->host_no);
3298 		ret = FAILED;
3299 		return ret;
3300 	}
3301 
3302 	if (!mr_device_priv_data) {
3303 		sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
3304 			"scmd(%p)\n", scmd);
3305 		scmd->result = DID_NO_CONNECT << 16;
3306 		ret = SUCCESS;
3307 		goto out;
3308 	}
3309 
3310 
3311 	if (!mr_device_priv_data->is_tm_capable) {
3312 		ret = FAILED;
3313 		goto out;
3314 	}
3315 
3316 	mutex_lock(&instance->reset_mutex);
3317 	devhandle = megasas_get_tm_devhandle(scmd->device);
3318 
3319 	if (devhandle == (u16)ULONG_MAX) {
3320 		ret = SUCCESS;
3321 		sdev_printk(KERN_INFO, scmd->device,
3322 			"target reset issued for invalid devhandle\n");
3323 		mutex_unlock(&instance->reset_mutex);
3324 		goto out;
3325 	}
3326 
3327 	sdev_printk(KERN_INFO, scmd->device,
3328 		"attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
3329 		scmd, devhandle);
3330 	mr_device_priv_data->tm_busy = 1;
3331 	ret = megasas_issue_tm(instance, devhandle,
3332 			scmd->device->channel, scmd->device->id, 0,
3333 			MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
3334 	mr_device_priv_data->tm_busy = 0;
3335 	mutex_unlock(&instance->reset_mutex);
3336 out:
3337 	scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
3338 		(ret == SUCCESS) ? "SUCCESS" : "FAILED");
3339 
3340 	return ret;
3341 }
3342 
/* SR-IOV: get the other instance in the cluster, if any */
3344 struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
3345 {
3346 	int i;
3347 
3348 	for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
3349 		if (megasas_mgmt_info.instance[i] &&
3350 			(megasas_mgmt_info.instance[i] != instance) &&
3351 			 megasas_mgmt_info.instance[i]->requestorId &&
3352 			 megasas_mgmt_info.instance[i]->peerIsPresent &&
3353 			(memcmp((megasas_mgmt_info.instance[i]->clusterId),
3354 			instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
3355 			return megasas_mgmt_info.instance[i];
3356 	}
3357 	return NULL;
3358 }
3359 
3360 /* Check for a second path that is currently UP */
3361 int megasas_check_mpio_paths(struct megasas_instance *instance,
3362 	struct scsi_cmnd *scmd)
3363 {
3364 	struct megasas_instance *peer_instance = NULL;
3365 	int retval = (DID_RESET << 16);
3366 
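	/*
	 * Default to DID_RESET so the command is retried on this path; if an
	 * operational peer path exists, return DID_NO_CONNECT so the IO can be
	 * failed over to that path instead.
	 */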
3367 	if (instance->peerIsPresent) {
3368 		peer_instance = megasas_get_peer_instance(instance);
3369 		if ((peer_instance) &&
3370 			(atomic_read(&peer_instance->adprecovery) ==
3371 			MEGASAS_HBA_OPERATIONAL))
3372 			retval = (DID_NO_CONNECT << 16);
3373 	}
3374 	return retval;
3375 }
3376 
3377 /* Core fusion reset function */
3378 int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
3379 {
3380 	int retval = SUCCESS, i, convert = 0;
3381 	struct megasas_instance *instance;
3382 	struct megasas_cmd_fusion *cmd_fusion;
3383 	struct fusion_context *fusion;
3384 	u32 abs_state, status_reg, reset_adapter;
3385 	u32 io_timeout_in_crash_mode = 0;
3386 	struct scsi_cmnd *scmd_local = NULL;
3387 	struct scsi_device *sdev;
3388 
3389 	instance = (struct megasas_instance *)shost->hostdata;
3390 	fusion = instance->ctrl_context;
3391 
3392 	mutex_lock(&instance->reset_mutex);
3393 
3394 	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
3395 		dev_warn(&instance->pdev->dev, "Hardware critical error, "
3396 		       "returning FAILED for scsi%d.\n",
3397 			instance->host->host_no);
3398 		mutex_unlock(&instance->reset_mutex);
3399 		return FAILED;
3400 	}
3401 	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3402 	abs_state = status_reg & MFI_STATE_MASK;
3403 
3404 	/* IO timeout detected, forcibly put FW in FAULT state */
3405 	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
3406 		instance->crash_dump_app_support && reason) {
3407 		dev_info(&instance->pdev->dev, "IO/DCMD timeout detected, "
3408 			"forcing firmware into FAULT state\n");
3409 		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
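		/* Request an OCR by setting MFI_STATE_FORCE_OCR in the doorbell; the readl flushes the posted write */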
3410 		status_reg = readl(&instance->reg_set->doorbell);
3411 		writel(status_reg | MFI_STATE_FORCE_OCR,
3412 			&instance->reg_set->doorbell);
3413 		readl(&instance->reg_set->doorbell);
3414 		mutex_unlock(&instance->reset_mutex);
3415 		do {
3416 			ssleep(3);
3417 			io_timeout_in_crash_mode++;
3418 			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
3419 				"seconds for crash dump collection and OCR "
3420 				"to be done\n", (io_timeout_in_crash_mode * 3));
3421 		} while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
3422 			(io_timeout_in_crash_mode < 80));
3423 
3424 		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
3425 			dev_info(&instance->pdev->dev, "OCR done for IO "
3426 				"timeout case\n");
3427 			retval = SUCCESS;
3428 		} else {
3429 			dev_info(&instance->pdev->dev, "Controller is not "
3430 				"operational after a 240 second wait for the IO "
3431 				"timeout case in FW crash dump mode; doing "
3432 				"OCR/kill adapter\n");
3433 			retval = megasas_reset_fusion(shost, 0);
3434 		}
3435 		return retval;
3436 	}
3437 
3438 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
3439 		del_timer_sync(&instance->sriov_heartbeat_timer);
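	/*
	 * Mark the reset in progress, enter the polling recovery state and
	 * disable interrupts before waiting for outstanding commands.
	 */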
3440 	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3441 	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
3442 	instance->instancet->disable_intr(instance);
3443 	msleep(1000);
3444 
3445 	/* First try waiting for commands to complete */
3446 	if (megasas_wait_for_outstanding_fusion(instance, reason,
3447 						&convert)) {
3448 		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3449 		dev_warn(&instance->pdev->dev, "resetting fusion "
3450 		       "adapter scsi%d.\n", instance->host->host_no);
3451 		if (convert)
3452 			reason = 0;
3453 
3454 		/* Now return commands back to the OS */
3455 		for (i = 0 ; i < instance->max_scsi_cmds; i++) {
3456 			cmd_fusion = fusion->cmd_list[i];
3457 			scmd_local = cmd_fusion->scmd;
3458 			if (cmd_fusion->scmd) {
3459 				scmd_local->result =
3460 					megasas_check_mpio_paths(instance,
3461 							scmd_local);
3462 				if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3463 					atomic_dec(&instance->ldio_outstanding);
3464 				megasas_return_cmd_fusion(instance, cmd_fusion);
3465 				scsi_dma_unmap(scmd_local);
3466 				scmd_local->scsi_done(scmd_local);
3467 				atomic_dec(&instance->fw_outstanding);
3468 			}
3469 		}
3470 
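		/*
		 * If online controller reset is disabled, or FW is faulted
		 * without requesting an adapter reset, recovery is not
		 * possible and the adapter must be killed.
		 */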
3471 		status_reg = instance->instancet->read_fw_status_reg(
3472 			instance->reg_set);
3473 		abs_state = status_reg & MFI_STATE_MASK;
3474 		reset_adapter = status_reg & MFI_RESET_ADAPTER;
3475 		if (instance->disableOnlineCtrlReset ||
3476 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
3477 			/* Reset not supported, kill adapter */
3478 			dev_warn(&instance->pdev->dev, "Reset not supported"
3479 			       ", killing adapter scsi%d.\n",
3480 				instance->host->host_no);
3481 			megaraid_sas_kill_hba(instance);
3482 			instance->skip_heartbeat_timer_del = 1;
3483 			retval = FAILED;
3484 			goto out;
3485 		}
3486 
3487 		/* Let SR-IOV VF & PF sync up if there was a HB failure */
3488 		if (instance->requestorId && !reason) {
3489 			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
3490 			goto transition_to_ready;
3491 		}
3492 
3493 		/* Now try to reset the chip */
3494 		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
3495 
3496 			if (instance->instancet->adp_reset
3497 				(instance, instance->reg_set))
3498 				continue;
3499 transition_to_ready:
3500 			/* Wait for FW to become ready */
3501 			if (megasas_transition_to_ready(instance, 1)) {
3502 				dev_warn(&instance->pdev->dev,
3503 					"Failed to transition controller to ready for "
3504 					"scsi%d.\n", instance->host->host_no);
3505 				if (instance->requestorId && !reason)
3506 					goto fail_kill_adapter;
3507 				else
3508 					continue;
3509 			}
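			/* Reinitialize reply descriptors and re-evaluate queue depth after reset */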
3510 			megasas_reset_reply_desc(instance);
3511 			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
3512 
3513 			if (megasas_ioc_init_fusion(instance)) {
3514 				dev_warn(&instance->pdev->dev,
3515 				       "megasas_ioc_init_fusion() failed for "
3516 				       "scsi%d\n", instance->host->host_no);
3517 				if (instance->requestorId && !reason)
3518 					goto fail_kill_adapter;
3519 				else
3520 					continue;
3521 			}
3522 
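			/* Re-issue internal MFI/DCMD commands that were pending when the reset started */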
3523 			megasas_refire_mgmt_cmd(instance);
3524 
3525 			if (megasas_get_ctrl_info(instance)) {
3526 				dev_info(&instance->pdev->dev,
3527 					"Failed from %s %d\n",
3528 					__func__, __LINE__);
3529 				megaraid_sas_kill_hba(instance);
3530 				retval = FAILED;
				/* Adapter was killed; do not continue with OCR */
				goto out;
3531 			}
3532 			/* Reset load balance info */
3533 			memset(fusion->load_balance_info, 0,
3534 			       sizeof(struct LD_LOAD_BALANCE_INFO)
3535 			       *MAX_LOGICAL_DRIVES_EXT);
3536 
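			/*
			 * Re-read the RAID map from FW and, if that succeeds,
			 * issue the map sync; then rebuild the JBOD map and
			 * refresh per-device properties.
			 */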
3537 			if (!megasas_get_map_info(instance))
3538 				megasas_sync_map_info(instance);
3539 
3540 			megasas_setup_jbod_map(instance);
3541 
3542 			shost_for_each_device(sdev, shost)
3543 				megasas_update_sdev_properties(sdev);
3544 
3545 			clear_bit(MEGASAS_FUSION_IN_RESET,
3546 				  &instance->reset_flags);
3547 			instance->instancet->enable_intr(instance);
3548 			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3549 
3550 			/* Restart SR-IOV heartbeat */
3551 			if (instance->requestorId) {
3552 				if (!megasas_sriov_start_heartbeat(instance, 0))
3553 					megasas_start_timer(instance,
3554 							    &instance->sriov_heartbeat_timer,
3555 							    megasas_sriov_heartbeat_handler,
3556 							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
3557 				else
3558 					instance->skip_heartbeat_timer_del = 1;
3559 			}
3560 
3561 			/* Adapter reset completed successfully */
3562 			dev_warn(&instance->pdev->dev, "Reset "
3563 			       "successful for scsi%d.\n",
3564 				instance->host->host_no);
3565 
3566 			if (instance->crash_dump_drv_support &&
3567 				instance->crash_dump_app_support)
3568 				megasas_set_crash_dump_params(instance,
3569 					MR_CRASH_BUF_TURN_ON);
3570 			else
3571 				megasas_set_crash_dump_params(instance,
3572 					MR_CRASH_BUF_TURN_OFF);
3573 
3574 			retval = SUCCESS;
3575 			goto out;
3576 		}
3577 fail_kill_adapter:
3578 		/* Reset failed, kill the adapter */
3579 		dev_warn(&instance->pdev->dev, "Reset failed, killing "
3580 		       "adapter scsi%d.\n", instance->host->host_no);
3581 		megaraid_sas_kill_hba(instance);
3582 		instance->skip_heartbeat_timer_del = 1;
3583 		retval = FAILED;
3584 	} else {
3585 		/* For VF: Restart HB timer if we didn't OCR */
3586 		if (instance->requestorId) {
3587 			megasas_start_timer(instance,
3588 					    &instance->sriov_heartbeat_timer,
3589 					    megasas_sriov_heartbeat_handler,
3590 					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
3591 		}
3592 		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3593 		instance->instancet->enable_intr(instance);
3594 		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3595 	}
3596 out:
3597 	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
3598 	mutex_unlock(&instance->reset_mutex);
3599 	return retval;
3600 }
3601 
3602 /* Fusion Crash dump collection work queue */
3603 void megasas_fusion_crash_dump_wq(struct work_struct *work)
3604 {
3605 	struct megasas_instance *instance =
3606 		container_of(work, struct megasas_instance, crash_init);
3607 	u32 status_reg;
3608 	u8 partial_copy = 0;
3609 
3611 	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
3612 
3613 	/*
3614 	 * Allocate host crash buffers so the contents of the 1 MB DMA crash
3615 	 * buffer can be copied out, one chunk per host buffer.
3616 	 */
3617 	if (instance->drv_buf_index == 0) {
3618 		/* Buffers from an earlier crash dump are still allocated.
3619 		 * Do OCR and do not wait for this crash dump to be collected.
3620 		 */
3621 		if (instance->drv_buf_alloc) {
3622 			dev_info(&instance->pdev->dev, "earlier crash dump has "
3623 				"not yet been copied by the application, ignoring "
3624 				"this crash dump and initiating OCR\n");
3625 			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3626 			writel(status_reg,
3627 				&instance->reg_set->outbound_scratch_pad);
3628 			readl(&instance->reg_set->outbound_scratch_pad);
3629 			return;
3630 		}
3631 		megasas_alloc_host_crash_buffer(instance);
3632 		dev_info(&instance->pdev->dev, "Number of host crash buffers "
3633 			"allocated: %d\n", instance->drv_buf_alloc);
3634 	}
3635 
3636 	/*
3637 	 * If the driver has already allocated the maximum number of buffers
3638 	 * it can, and FW has more crash dump data, the remaining data is
3639 	 * ignored.
3640 	 */
3641 	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
3642 		dev_info(&instance->pdev->dev, "Driver is done copying "
3643 			"the buffer: %d\n", instance->drv_buf_alloc);
3644 		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
3645 		partial_copy = 1;
3646 	} else {
3647 		memcpy(instance->crash_buf[instance->drv_buf_index],
3648 			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
3649 		instance->drv_buf_index++;
3650 		status_reg &= ~MFI_STATE_DMADONE;
3651 	}
3652 
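	/*
	 * Acknowledge this chunk to FW through the scratch pad register; once
	 * the dump is complete, mark it available to the application and
	 * trigger OCR unless only a partial copy was captured.
	 */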
3653 	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
3654 		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
3655 			"of copied buffers: %d\n", instance->drv_buf_index);
3656 		instance->fw_crash_buffer_size = instance->drv_buf_index;
3657 		instance->fw_crash_state = AVAILABLE;
3658 		instance->drv_buf_index = 0;
3659 		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3660 		readl(&instance->reg_set->outbound_scratch_pad);
3661 		if (!partial_copy)
3662 			megasas_reset_fusion(instance->host, 0);
3663 	} else {
3664 		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
3665 		readl(&instance->reg_set->outbound_scratch_pad);
3666 	}
3667 }
3668 
3670 /* Fusion OCR work queue */
3671 void megasas_fusion_ocr_wq(struct work_struct *work)
3672 {
3673 	struct megasas_instance *instance =
3674 		container_of(work, struct megasas_instance, work_init);
3675 
3676 	megasas_reset_fusion(instance->host, 0);
3677 }
3678 
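/* Fusion-specific callbacks plugged into the common megaraid_sas instance template */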
3679 struct megasas_instance_template megasas_instance_template_fusion = {
3680 	.enable_intr = megasas_enable_intr_fusion,
3681 	.disable_intr = megasas_disable_intr_fusion,
3682 	.clear_intr = megasas_clear_intr_fusion,
3683 	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
3684 	.adp_reset = megasas_adp_reset_fusion,
3685 	.check_reset = megasas_check_reset_fusion,
3686 	.service_isr = megasas_isr_fusion,
3687 	.tasklet = megasas_complete_cmd_dpc_fusion,
3688 	.init_adapter = megasas_init_adapter_fusion,
3689 	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
3690 	.issue_dcmd = megasas_issue_dcmd_fusion,
3691 };
3692