1 /*
2  * This is the Fusion MPT base driver providing common API layer interface
3  * for access to MPT (Message Passing Technology) firmware.
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6  * Copyright (C) 2012-2014  LSI Corporation
7  * Copyright (C) 2013-2014 Avago Technologies
8  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30 
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43  * USA.
44  */
45 
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/kdev_t.h>
54 #include <linux/blkdev.h>
55 #include <linux/delay.h>
56 #include <linux/interrupt.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/io.h>
59 #include <linux/time.h>
60 #include <linux/ktime.h>
61 #include <linux/kthread.h>
62 #include <asm/page.h>        /* To get host page size per arch */
63 #include <linux/aer.h>
64 
65 
66 #include "mpt3sas_base.h"
67 
68 static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
69 
70 
71 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
72 
73  /* maximum controller queue depth */
74 #define MAX_HBA_QUEUE_DEPTH	30000
75 #define MAX_CHAIN_DEPTH		100000
76 static int max_queue_depth = -1;
77 module_param(max_queue_depth, int, 0);
78 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
79 
80 static int max_sgl_entries = -1;
81 module_param(max_sgl_entries, int, 0);
82 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
83 
84 static int msix_disable = -1;
85 module_param(msix_disable, int, 0);
86 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
87 
88 static int smp_affinity_enable = 1;
89 module_param(smp_affinity_enable, int, S_IRUGO);
90 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
91 
92 static int max_msix_vectors = -1;
93 module_param(max_msix_vectors, int, 0);
94 MODULE_PARM_DESC(max_msix_vectors,
95 	" max msix vectors");
96 
97 static int mpt3sas_fwfault_debug;
98 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
99 	" enable detection of firmware fault and halt firmware - (default=0)");
100 
101 static int
102 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
103 
104 /**
105  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
106  *
107  */
108 static int
109 _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
110 {
111 	int ret = param_set_int(val, kp);
112 	struct MPT3SAS_ADAPTER *ioc;
113 
114 	if (ret)
115 		return ret;
116 
117 	/* global ioc spinlock to protect controller list on list operations */
118 	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
119 	spin_lock(&gioc_lock);
120 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
121 		ioc->fwfault_debug = mpt3sas_fwfault_debug;
122 	spin_unlock(&gioc_lock);
123 	return 0;
124 }
125 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
126 	param_get_int, &mpt3sas_fwfault_debug, 0644);
127 
128 /**
129  * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
130  *				  in BAR0 space.
131  *
132  * @ioc: per adapter object
133  * @reply: reply message frame(lower 32bit addr)
134  * @index: System request message index.
135  *
136  * @Returns - Nothing
137  */
138 static void
139 _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
140 		u32 index)
141 {
142 	/*
143 	 * 256 is offset within sys register.
144 	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
145 	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
146 	 */
147 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
148 	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
149 			MPI_FRAME_START_OFFSET +
150 			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));
151 
152 	writel(reply, reply_free_iomem);
153 }
154 
155 /**
156  * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
157  *				to system/BAR0 region.
158  *
159  * @dst_iomem: Pointer to the destinaltion location in BAR0 space.
160  * @src: Pointer to the Source data.
161  * @size: Size of data to be copied.
162  */
163 static void
164 _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
165 {
166 	int i;
167 	u32 *src_virt_mem = (u32 *)src;
168 
169 	for (i = 0; i < size/4; i++)
170 		writel((u32)src_virt_mem[i],
171 				(void __iomem *)dst_iomem + (i * 4));
172 }
173 
174 /**
175  * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
176  *
177  * @dst_iomem: Pointer to the destination location in BAR0 space.
178  * @src: Pointer to the Source data.
179  * @size: Size of data to be copied.
180  */
181 static void
182 _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
183 {
184 	int i;
185 	u32 *src_virt_mem = (u32 *)(src);
186 
187 	for (i = 0; i < size/4; i++)
188 		writel((u32)src_virt_mem[i],
189 			(void __iomem *)dst_iomem + (i * 4));
190 }
191 
192 /**
193  * _base_get_chain - Calculates and Returns virtual chain address
194  *			 for the provided smid in BAR0 space.
195  *
196  * @ioc: per adapter object
197  * @smid: system request message index
198  * @sge_chain_count: Scatter gather chain count.
199  *
200  * @Return: chain address.
201  */
202 static inline void __iomem*
203 _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
204 		u8 sge_chain_count)
205 {
206 	void __iomem *base_chain, *chain_virt;
207 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
208 
209 	base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
210 		(cmd_credit * ioc->request_sz) +
211 		REPLY_FREE_POOL_SIZE;
212 	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
213 			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
214 	return chain_virt;
215 }
216 
217 /**
218  * _base_get_chain_phys - Calculates and Returns physical address
219  *			in BAR0 for scatter gather chains, for
220  *			the provided smid.
221  *
222  * @ioc: per adapter object
223  * @smid: system request message index
224  * @sge_chain_count: Scatter gather chain count.
225  *
226  * @Return - Physical chain address.
227  */
228 static inline phys_addr_t
229 _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
230 		u8 sge_chain_count)
231 {
232 	phys_addr_t base_chain_phys, chain_phys;
233 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
234 
235 	base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
236 		(cmd_credit * ioc->request_sz) +
237 		REPLY_FREE_POOL_SIZE;
238 	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
239 			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
240 	return chain_phys;
241 }
242 
243 /**
244  * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
245  *			buffer address for the provided smid.
246  *			(Each smid can have 64K starts from 17024)
247  *
248  * @ioc: per adapter object
249  * @smid: system request message index
250  *
251  * @Returns - Pointer to buffer location in BAR0.
252  */
253 
254 static void __iomem *
255 _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
256 {
257 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
258 	// Added extra 1 to reach end of chain.
259 	void __iomem *chain_end = _base_get_chain(ioc,
260 			cmd_credit + 1,
261 			ioc->facts.MaxChainDepth);
262 	return chain_end + (smid * 64 * 1024);
263 }
264 
265 /**
266  * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
267  *		Host buffer Physical address for the provided smid.
268  *		(Each smid can have 64K starts from 17024)
269  *
270  * @ioc: per adapter object
271  * @smid: system request message index
272  *
273  * @Returns - Pointer to buffer location in BAR0.
274  */
275 static phys_addr_t
276 _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
277 {
278 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
279 	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
280 			cmd_credit + 1,
281 			ioc->facts.MaxChainDepth);
282 	return chain_end_phys + (smid * 64 * 1024);
283 }
284 
285 /**
286  * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
287  *			lookup list and Provides chain_buffer
288  *			address for the matching dma address.
289  *			(Each smid can have 64K starts from 17024)
290  *
291  * @ioc: per adapter object
292  * @chain_buffer_dma: Chain buffer dma address.
293  *
294  * @Returns - Pointer to chain buffer. Or Null on Failure.
295  */
296 static void *
297 _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
298 		dma_addr_t chain_buffer_dma)
299 {
300 	u16 index, j;
301 	struct chain_tracker *ct;
302 
303 	for (index = 0; index < ioc->scsiio_depth; index++) {
304 		for (j = 0; j < ioc->chains_needed_per_io; j++) {
305 			ct = &ioc->chain_lookup[index].chains_per_smid[j];
306 			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
307 				return ct->chain_buffer;
308 		}
309 	}
310 	pr_info(MPT3SAS_FMT
311 	    "Provided chain_buffer_dma address is not in the lookup list\n",
312 	    ioc->name);
313 	return NULL;
314 }
315 
316 /**
317  * _clone_sg_entries -	MPI EP's scsiio and config requests
318  *			are handled here. Base function for
319  *			double buffering, before submitting
320  *			the requests.
321  *
322  * @ioc: per adapter object.
323  * @mpi_request: mf request pointer.
324  * @smid: system request message index.
325  *
326  * @Returns: Nothing.
327  */
328 static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
329 		void *mpi_request, u16 smid)
330 {
331 	Mpi2SGESimple32_t *sgel, *sgel_next;
332 	u32  sgl_flags, sge_chain_count = 0;
333 	bool is_write = 0;
334 	u16 i = 0;
335 	void __iomem *buffer_iomem;
336 	phys_addr_t buffer_iomem_phys;
337 	void __iomem *buff_ptr;
338 	phys_addr_t buff_ptr_phys;
339 	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
340 	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
341 	phys_addr_t dst_addr_phys;
342 	MPI2RequestHeader_t *request_hdr;
343 	struct scsi_cmnd *scmd;
344 	struct scatterlist *sg_scmd = NULL;
345 	int is_scsiio_req = 0;
346 
347 	request_hdr = (MPI2RequestHeader_t *) mpi_request;
348 
349 	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
350 		Mpi25SCSIIORequest_t *scsiio_request =
351 			(Mpi25SCSIIORequest_t *)mpi_request;
352 		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
353 		is_scsiio_req = 1;
354 	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
355 		Mpi2ConfigRequest_t  *config_req =
356 			(Mpi2ConfigRequest_t *)mpi_request;
357 		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
358 	} else
359 		return;
360 
361 	/* From smid we can get scsi_cmd, once we have sg_scmd,
362 	 * we just need to get sg_virt and sg_next to get virual
363 	 * address associated with sgel->Address.
364 	 */
365 
366 	if (is_scsiio_req) {
367 		/* Get scsi_cmd using smid */
368 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
369 		if (scmd == NULL) {
370 			pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
371 			return;
372 		}
373 
374 		/* Get sg_scmd from scmd provided */
375 		sg_scmd = scsi_sglist(scmd);
376 	}
377 
378 	/*
379 	 * 0 - 255	System register
380 	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
381 	 * 4352 - 4864	Reply_free pool (512 byte is reserved
382 	 *		considering maxCredit 32. Reply need extra
383 	 *		room, for mCPU case kept four times of
384 	 *		maxCredit).
385 	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
386 	 *		128 byte size = 12288)
387 	 * 17152 - x	Host buffer mapped with smid.
388 	 *		(Each smid can have 64K Max IO.)
389 	 * BAR0+Last 1K MSIX Addr and Data
390 	 * Total size in use 2113664 bytes of 4MB BAR0
391 	 */
392 
393 	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
394 	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
395 
396 	buff_ptr = buffer_iomem;
397 	buff_ptr_phys = buffer_iomem_phys;
398 	WARN_ON(buff_ptr_phys > U32_MAX);
399 
400 	if (le32_to_cpu(sgel->FlagsLength) &
401 			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
402 		is_write = 1;
403 
404 	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
405 
406 		sgl_flags =
407 		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
408 
409 		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
410 		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
411 			/*
412 			 * Helper function which on passing
413 			 * chain_buffer_dma returns chain_buffer. Get
414 			 * the virtual address for sgel->Address
415 			 */
416 			sgel_next =
417 				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
418 						le32_to_cpu(sgel->Address));
419 			if (sgel_next == NULL)
420 				return;
421 			/*
422 			 * This is coping 128 byte chain
423 			 * frame (not a host buffer)
424 			 */
425 			dst_chain_addr[sge_chain_count] =
426 				_base_get_chain(ioc,
427 					smid, sge_chain_count);
428 			src_chain_addr[sge_chain_count] =
429 						(void *) sgel_next;
430 			dst_addr_phys = _base_get_chain_phys(ioc,
431 						smid, sge_chain_count);
432 			WARN_ON(dst_addr_phys > U32_MAX);
433 			sgel->Address =
434 				cpu_to_le32(lower_32_bits(dst_addr_phys));
435 			sgel = sgel_next;
436 			sge_chain_count++;
437 			break;
438 		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
439 			if (is_write) {
440 				if (is_scsiio_req) {
441 					_base_clone_to_sys_mem(buff_ptr,
442 					    sg_virt(sg_scmd),
443 					    (le32_to_cpu(sgel->FlagsLength) &
444 					    0x00ffffff));
445 					/*
446 					 * FIXME: this relies on a a zero
447 					 * PCI mem_offset.
448 					 */
449 					sgel->Address =
450 					    cpu_to_le32((u32)buff_ptr_phys);
451 				} else {
452 					_base_clone_to_sys_mem(buff_ptr,
453 					    ioc->config_vaddr,
454 					    (le32_to_cpu(sgel->FlagsLength) &
455 					    0x00ffffff));
456 					sgel->Address =
457 					    cpu_to_le32((u32)buff_ptr_phys);
458 				}
459 			}
460 			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
461 			    0x00ffffff);
462 			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
463 			    0x00ffffff);
464 			if ((le32_to_cpu(sgel->FlagsLength) &
465 			    (MPI2_SGE_FLAGS_END_OF_BUFFER
466 					<< MPI2_SGE_FLAGS_SHIFT)))
467 				goto eob_clone_chain;
468 			else {
469 				/*
470 				 * Every single element in MPT will have
471 				 * associated sg_next. Better to sanity that
472 				 * sg_next is not NULL, but it will be a bug
473 				 * if it is null.
474 				 */
475 				if (is_scsiio_req) {
476 					sg_scmd = sg_next(sg_scmd);
477 					if (sg_scmd)
478 						sgel++;
479 					else
480 						goto eob_clone_chain;
481 				}
482 			}
483 			break;
484 		}
485 	}
486 
487 eob_clone_chain:
488 	for (i = 0; i < sge_chain_count; i++) {
489 		if (is_scsiio_req)
490 			_base_clone_to_sys_mem(dst_chain_addr[i],
491 				src_chain_addr[i], ioc->request_sz);
492 	}
493 }
494 
495 /**
496  *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
497  * @arg: input argument, used to derive ioc
498  *
499  * Return 0 if controller is removed from pci subsystem.
500  * Return -1 for other case.
501  */
502 static int mpt3sas_remove_dead_ioc_func(void *arg)
503 {
504 	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
505 	struct pci_dev *pdev;
506 
507 	if ((ioc == NULL))
508 		return -1;
509 
510 	pdev = ioc->pdev;
511 	if ((pdev == NULL))
512 		return -1;
513 	pci_stop_and_remove_bus_device_locked(pdev);
514 	return 0;
515 }
516 
517 /**
518  * _base_fault_reset_work - workq handling ioc fault conditions
519  * @work: input argument, used to derive ioc
520  * Context: sleep.
521  *
522  * Return nothing.
523  */
524 static void
525 _base_fault_reset_work(struct work_struct *work)
526 {
527 	struct MPT3SAS_ADAPTER *ioc =
528 	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
529 	unsigned long	 flags;
530 	u32 doorbell;
531 	int rc;
532 	struct task_struct *p;
533 
534 
535 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
536 	if (ioc->shost_recovery || ioc->pci_error_recovery)
537 		goto rearm_timer;
538 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
539 
540 	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
541 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
542 		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
543 		    ioc->name);
544 
545 		/* It may be possible that EEH recovery can resolve some of
546 		 * pci bus failure issues rather removing the dead ioc function
547 		 * by considering controller is in a non-operational state. So
548 		 * here priority is given to the EEH recovery. If it doesn't
549 		 * not resolve this issue, mpt3sas driver will consider this
550 		 * controller to non-operational state and remove the dead ioc
551 		 * function.
552 		 */
553 		if (ioc->non_operational_loop++ < 5) {
554 			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
555 							 flags);
556 			goto rearm_timer;
557 		}
558 
559 		/*
560 		 * Call _scsih_flush_pending_cmds callback so that we flush all
561 		 * pending commands back to OS. This call is required to aovid
562 		 * deadlock at block layer. Dead IOC will fail to do diag reset,
563 		 * and this call is safe since dead ioc will never return any
564 		 * command back from HW.
565 		 */
566 		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
567 		/*
568 		 * Set remove_host flag early since kernel thread will
569 		 * take some time to execute.
570 		 */
571 		ioc->remove_host = 1;
572 		/*Remove the Dead Host */
573 		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
574 		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
575 		if (IS_ERR(p))
576 			pr_err(MPT3SAS_FMT
577 			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
578 			ioc->name, __func__);
579 		else
580 			pr_err(MPT3SAS_FMT
581 			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
582 			ioc->name, __func__);
583 		return; /* don't rearm timer */
584 	}
585 
586 	ioc->non_operational_loop = 0;
587 
588 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
589 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
590 		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
591 		    __func__, (rc == 0) ? "success" : "failed");
592 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
593 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
594 			mpt3sas_base_fault_info(ioc, doorbell &
595 			    MPI2_DOORBELL_DATA_MASK);
596 		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
597 		    MPI2_IOC_STATE_OPERATIONAL)
598 			return; /* don't rearm timer */
599 	}
600 
601 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
602  rearm_timer:
603 	if (ioc->fault_reset_work_q)
604 		queue_delayed_work(ioc->fault_reset_work_q,
605 		    &ioc->fault_reset_work,
606 		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
607 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
608 }
609 
610 /**
611  * mpt3sas_base_start_watchdog - start the fault_reset_work_q
612  * @ioc: per adapter object
613  * Context: sleep.
614  *
615  * Return nothing.
616  */
617 void
618 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
619 {
620 	unsigned long	 flags;
621 
622 	if (ioc->fault_reset_work_q)
623 		return;
624 
625 	/* initialize fault polling */
626 
627 	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
628 	snprintf(ioc->fault_reset_work_q_name,
629 	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
630 	    ioc->driver_name, ioc->id);
631 	ioc->fault_reset_work_q =
632 		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
633 	if (!ioc->fault_reset_work_q) {
634 		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
635 		    ioc->name, __func__, __LINE__);
636 			return;
637 	}
638 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
639 	if (ioc->fault_reset_work_q)
640 		queue_delayed_work(ioc->fault_reset_work_q,
641 		    &ioc->fault_reset_work,
642 		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
643 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
644 }
645 
646 /**
647  * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
648  * @ioc: per adapter object
649  * Context: sleep.
650  *
651  * Return nothing.
652  */
653 void
654 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
655 {
656 	unsigned long flags;
657 	struct workqueue_struct *wq;
658 
659 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
660 	wq = ioc->fault_reset_work_q;
661 	ioc->fault_reset_work_q = NULL;
662 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
663 	if (wq) {
664 		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
665 			flush_workqueue(wq);
666 		destroy_workqueue(wq);
667 	}
668 }
669 
670 /**
671  * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
672  * @ioc: per adapter object
673  * @fault_code: fault code
674  *
675  * Return nothing.
676  */
677 void
678 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
679 {
680 	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
681 	    ioc->name, fault_code);
682 }
683 
684 /**
685  * mpt3sas_halt_firmware - halt's mpt controller firmware
686  * @ioc: per adapter object
687  *
688  * For debugging timeout related issues.  Writing 0xCOFFEE00
689  * to the doorbell register will halt controller firmware. With
690  * the purpose to stop both driver and firmware, the enduser can
691  * obtain a ring buffer from controller UART.
692  */
693 void
694 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
695 {
696 	u32 doorbell;
697 
698 	if (!ioc->fwfault_debug)
699 		return;
700 
701 	dump_stack();
702 
703 	doorbell = readl(&ioc->chip->Doorbell);
704 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
705 		mpt3sas_base_fault_info(ioc , doorbell);
706 	else {
707 		writel(0xC0FFEE00, &ioc->chip->Doorbell);
708 		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
709 			ioc->name);
710 	}
711 
712 	if (ioc->fwfault_debug == 2)
713 		for (;;)
714 			;
715 	else
716 		panic("panic in %s\n", __func__);
717 }
718 
719 /**
720  * _base_sas_ioc_info - verbose translation of the ioc status
721  * @ioc: per adapter object
722  * @mpi_reply: reply mf payload returned from firmware
723  * @request_hdr: request mf
724  *
725  * Return nothing.
726  */
727 static void
728 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
729 	MPI2RequestHeader_t *request_hdr)
730 {
731 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
732 	    MPI2_IOCSTATUS_MASK;
733 	char *desc = NULL;
734 	u16 frame_sz;
735 	char *func_str = NULL;
736 
737 	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
738 	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
739 	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
740 	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
741 		return;
742 
743 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
744 		return;
745 
746 	switch (ioc_status) {
747 
748 /****************************************************************************
749 *  Common IOCStatus values for all replies
750 ****************************************************************************/
751 
752 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
753 		desc = "invalid function";
754 		break;
755 	case MPI2_IOCSTATUS_BUSY:
756 		desc = "busy";
757 		break;
758 	case MPI2_IOCSTATUS_INVALID_SGL:
759 		desc = "invalid sgl";
760 		break;
761 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
762 		desc = "internal error";
763 		break;
764 	case MPI2_IOCSTATUS_INVALID_VPID:
765 		desc = "invalid vpid";
766 		break;
767 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
768 		desc = "insufficient resources";
769 		break;
770 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
771 		desc = "insufficient power";
772 		break;
773 	case MPI2_IOCSTATUS_INVALID_FIELD:
774 		desc = "invalid field";
775 		break;
776 	case MPI2_IOCSTATUS_INVALID_STATE:
777 		desc = "invalid state";
778 		break;
779 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
780 		desc = "op state not supported";
781 		break;
782 
783 /****************************************************************************
784 *  Config IOCStatus values
785 ****************************************************************************/
786 
787 	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
788 		desc = "config invalid action";
789 		break;
790 	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
791 		desc = "config invalid type";
792 		break;
793 	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
794 		desc = "config invalid page";
795 		break;
796 	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
797 		desc = "config invalid data";
798 		break;
799 	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
800 		desc = "config no defaults";
801 		break;
802 	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
803 		desc = "config cant commit";
804 		break;
805 
806 /****************************************************************************
807 *  SCSI IO Reply
808 ****************************************************************************/
809 
810 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
811 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
812 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
813 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
814 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
815 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
816 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
817 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
818 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
819 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
820 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
821 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
822 		break;
823 
824 /****************************************************************************
825 *  For use by SCSI Initiator and SCSI Target end-to-end data protection
826 ****************************************************************************/
827 
828 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
829 		desc = "eedp guard error";
830 		break;
831 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
832 		desc = "eedp ref tag error";
833 		break;
834 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
835 		desc = "eedp app tag error";
836 		break;
837 
838 /****************************************************************************
839 *  SCSI Target values
840 ****************************************************************************/
841 
842 	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
843 		desc = "target invalid io index";
844 		break;
845 	case MPI2_IOCSTATUS_TARGET_ABORTED:
846 		desc = "target aborted";
847 		break;
848 	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
849 		desc = "target no conn retryable";
850 		break;
851 	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
852 		desc = "target no connection";
853 		break;
854 	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
855 		desc = "target xfer count mismatch";
856 		break;
857 	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
858 		desc = "target data offset error";
859 		break;
860 	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
861 		desc = "target too much write data";
862 		break;
863 	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
864 		desc = "target iu too short";
865 		break;
866 	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
867 		desc = "target ack nak timeout";
868 		break;
869 	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
870 		desc = "target nak received";
871 		break;
872 
873 /****************************************************************************
874 *  Serial Attached SCSI values
875 ****************************************************************************/
876 
877 	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
878 		desc = "smp request failed";
879 		break;
880 	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
881 		desc = "smp data overrun";
882 		break;
883 
884 /****************************************************************************
885 *  Diagnostic Buffer Post / Diagnostic Release values
886 ****************************************************************************/
887 
888 	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
889 		desc = "diagnostic released";
890 		break;
891 	default:
892 		break;
893 	}
894 
895 	if (!desc)
896 		return;
897 
898 	switch (request_hdr->Function) {
899 	case MPI2_FUNCTION_CONFIG:
900 		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
901 		func_str = "config_page";
902 		break;
903 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
904 		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
905 		func_str = "task_mgmt";
906 		break;
907 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
908 		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
909 		func_str = "sas_iounit_ctl";
910 		break;
911 	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
912 		frame_sz = sizeof(Mpi2SepRequest_t);
913 		func_str = "enclosure";
914 		break;
915 	case MPI2_FUNCTION_IOC_INIT:
916 		frame_sz = sizeof(Mpi2IOCInitRequest_t);
917 		func_str = "ioc_init";
918 		break;
919 	case MPI2_FUNCTION_PORT_ENABLE:
920 		frame_sz = sizeof(Mpi2PortEnableRequest_t);
921 		func_str = "port_enable";
922 		break;
923 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
924 		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
925 		func_str = "smp_passthru";
926 		break;
927 	case MPI2_FUNCTION_NVME_ENCAPSULATED:
928 		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
929 		    ioc->sge_size;
930 		func_str = "nvme_encapsulated";
931 		break;
932 	default:
933 		frame_sz = 32;
934 		func_str = "unknown";
935 		break;
936 	}
937 
938 	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
939 		ioc->name, desc, ioc_status, request_hdr, func_str);
940 
941 	_debug_dump_mf(request_hdr, frame_sz/4);
942 }
943 
944 /**
945  * _base_display_event_data - verbose translation of firmware asyn events
946  * @ioc: per adapter object
947  * @mpi_reply: reply mf payload returned from firmware
948  *
949  * Return nothing.
950  */
951 static void
952 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
953 	Mpi2EventNotificationReply_t *mpi_reply)
954 {
955 	char *desc = NULL;
956 	u16 event;
957 
958 	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
959 		return;
960 
961 	event = le16_to_cpu(mpi_reply->Event);
962 
963 	switch (event) {
964 	case MPI2_EVENT_LOG_DATA:
965 		desc = "Log Data";
966 		break;
967 	case MPI2_EVENT_STATE_CHANGE:
968 		desc = "Status Change";
969 		break;
970 	case MPI2_EVENT_HARD_RESET_RECEIVED:
971 		desc = "Hard Reset Received";
972 		break;
973 	case MPI2_EVENT_EVENT_CHANGE:
974 		desc = "Event Change";
975 		break;
976 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
977 		desc = "Device Status Change";
978 		break;
979 	case MPI2_EVENT_IR_OPERATION_STATUS:
980 		if (!ioc->hide_ir_msg)
981 			desc = "IR Operation Status";
982 		break;
983 	case MPI2_EVENT_SAS_DISCOVERY:
984 	{
985 		Mpi2EventDataSasDiscovery_t *event_data =
986 		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
987 		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
988 		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
989 		    "start" : "stop");
990 		if (event_data->DiscoveryStatus)
991 			pr_cont(" discovery_status(0x%08x)",
992 			    le32_to_cpu(event_data->DiscoveryStatus));
993 		pr_cont("\n");
994 		return;
995 	}
996 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
997 		desc = "SAS Broadcast Primitive";
998 		break;
999 	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1000 		desc = "SAS Init Device Status Change";
1001 		break;
1002 	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1003 		desc = "SAS Init Table Overflow";
1004 		break;
1005 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1006 		desc = "SAS Topology Change List";
1007 		break;
1008 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1009 		desc = "SAS Enclosure Device Status Change";
1010 		break;
1011 	case MPI2_EVENT_IR_VOLUME:
1012 		if (!ioc->hide_ir_msg)
1013 			desc = "IR Volume";
1014 		break;
1015 	case MPI2_EVENT_IR_PHYSICAL_DISK:
1016 		if (!ioc->hide_ir_msg)
1017 			desc = "IR Physical Disk";
1018 		break;
1019 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1020 		if (!ioc->hide_ir_msg)
1021 			desc = "IR Configuration Change List";
1022 		break;
1023 	case MPI2_EVENT_LOG_ENTRY_ADDED:
1024 		if (!ioc->hide_ir_msg)
1025 			desc = "Log Entry Added";
1026 		break;
1027 	case MPI2_EVENT_TEMP_THRESHOLD:
1028 		desc = "Temperature Threshold";
1029 		break;
1030 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1031 		desc = "Cable Event";
1032 		break;
1033 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1034 		desc = "SAS Device Discovery Error";
1035 		break;
1036 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1037 		desc = "PCIE Device Status Change";
1038 		break;
1039 	case MPI2_EVENT_PCIE_ENUMERATION:
1040 	{
1041 		Mpi26EventDataPCIeEnumeration_t *event_data =
1042 			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1043 		pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
1044 			   (event_data->ReasonCode ==
1045 				MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
1046 				"start" : "stop");
1047 		if (event_data->EnumerationStatus)
1048 			pr_info("enumeration_status(0x%08x)",
1049 				   le32_to_cpu(event_data->EnumerationStatus));
1050 		pr_info("\n");
1051 		return;
1052 	}
1053 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1054 		desc = "PCIE Topology Change List";
1055 		break;
1056 	}
1057 
1058 	if (!desc)
1059 		return;
1060 
1061 	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
1062 }
1063 
1064 /**
1065  * _base_sas_log_info - verbose translation of firmware log info
1066  * @ioc: per adapter object
1067  * @log_info: log info
1068  *
1069  * Return nothing.
1070  */
1071 static void
1072 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
1073 {
1074 	union loginfo_type {
1075 		u32	loginfo;
1076 		struct {
1077 			u32	subcode:16;
1078 			u32	code:8;
1079 			u32	originator:4;
1080 			u32	bus_type:4;
1081 		} dw;
1082 	};
1083 	union loginfo_type sas_loginfo;
1084 	char *originator_str = NULL;
1085 
1086 	sas_loginfo.loginfo = log_info;
1087 	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1088 		return;
1089 
1090 	/* each nexus loss loginfo */
1091 	if (log_info == 0x31170000)
1092 		return;
1093 
1094 	/* eat the loginfos associated with task aborts */
1095 	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1096 	    0x31140000 || log_info == 0x31130000))
1097 		return;
1098 
1099 	switch (sas_loginfo.dw.originator) {
1100 	case 0:
1101 		originator_str = "IOP";
1102 		break;
1103 	case 1:
1104 		originator_str = "PL";
1105 		break;
1106 	case 2:
1107 		if (!ioc->hide_ir_msg)
1108 			originator_str = "IR";
1109 		else
1110 			originator_str = "WarpDrive";
1111 		break;
1112 	}
1113 
1114 	pr_warn(MPT3SAS_FMT
1115 		"log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1116 		ioc->name, log_info,
1117 	     originator_str, sas_loginfo.dw.code,
1118 	     sas_loginfo.dw.subcode);
1119 }
1120 
1121 /**
1122  * _base_display_reply_info -
1123  * @ioc: per adapter object
1124  * @smid: system request message index
1125  * @msix_index: MSIX table index supplied by the OS
1126  * @reply: reply message frame(lower 32bit addr)
1127  *
1128  * Return nothing.
1129  */
1130 static void
1131 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1132 	u32 reply)
1133 {
1134 	MPI2DefaultReply_t *mpi_reply;
1135 	u16 ioc_status;
1136 	u32 loginfo = 0;
1137 
1138 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1139 	if (unlikely(!mpi_reply)) {
1140 		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
1141 		    ioc->name, __FILE__, __LINE__, __func__);
1142 		return;
1143 	}
1144 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1145 
1146 	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1147 	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
1148 		_base_sas_ioc_info(ioc , mpi_reply,
1149 		   mpt3sas_base_get_msg_frame(ioc, smid));
1150 	}
1151 
1152 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1153 		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1154 		_base_sas_log_info(ioc, loginfo);
1155 	}
1156 
1157 	if (ioc_status || loginfo) {
1158 		ioc_status &= MPI2_IOCSTATUS_MASK;
1159 		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1160 	}
1161 }
1162 
1163 /**
1164  * mpt3sas_base_done - base internal command completion routine
1165  * @ioc: per adapter object
1166  * @smid: system request message index
1167  * @msix_index: MSIX table index supplied by the OS
1168  * @reply: reply message frame(lower 32bit addr)
1169  *
1170  * Return 1 meaning mf should be freed from _base_interrupt
1171  *        0 means the mf is freed from this function.
1172  */
1173 u8
1174 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1175 	u32 reply)
1176 {
1177 	MPI2DefaultReply_t *mpi_reply;
1178 
1179 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1180 	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1181 		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1182 
1183 	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1184 		return 1;
1185 
1186 	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1187 	if (mpi_reply) {
1188 		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1189 		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1190 	}
1191 	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1192 
1193 	complete(&ioc->base_cmds.done);
1194 	return 1;
1195 }
1196 
1197 /**
1198  * _base_async_event - main callback handler for firmware asyn events
1199  * @ioc: per adapter object
1200  * @msix_index: MSIX table index supplied by the OS
1201  * @reply: reply message frame(lower 32bit addr)
1202  *
1203  * Return 1 meaning mf should be freed from _base_interrupt
1204  *        0 means the mf is freed from this function.
1205  */
1206 static u8
1207 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1208 {
1209 	Mpi2EventNotificationReply_t *mpi_reply;
1210 	Mpi2EventAckRequest_t *ack_request;
1211 	u16 smid;
1212 	struct _event_ack_list *delayed_event_ack;
1213 
1214 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1215 	if (!mpi_reply)
1216 		return 1;
1217 	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1218 		return 1;
1219 
1220 	_base_display_event_data(ioc, mpi_reply);
1221 
1222 	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1223 		goto out;
1224 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1225 	if (!smid) {
1226 		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1227 					GFP_ATOMIC);
1228 		if (!delayed_event_ack)
1229 			goto out;
1230 		INIT_LIST_HEAD(&delayed_event_ack->list);
1231 		delayed_event_ack->Event = mpi_reply->Event;
1232 		delayed_event_ack->EventContext = mpi_reply->EventContext;
1233 		list_add_tail(&delayed_event_ack->list,
1234 				&ioc->delayed_event_ack_list);
1235 		dewtprintk(ioc, pr_info(MPT3SAS_FMT
1236 				"DELAYED: EVENT ACK: event (0x%04x)\n",
1237 				ioc->name, le16_to_cpu(mpi_reply->Event)));
1238 		goto out;
1239 	}
1240 
1241 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1242 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1243 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1244 	ack_request->Event = mpi_reply->Event;
1245 	ack_request->EventContext = mpi_reply->EventContext;
1246 	ack_request->VF_ID = 0;  /* TODO */
1247 	ack_request->VP_ID = 0;
1248 	mpt3sas_base_put_smid_default(ioc, smid);
1249 
1250  out:
1251 
1252 	/* scsih callback handler */
1253 	mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1254 
1255 	/* ctl callback handler */
1256 	mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1257 
1258 	return 1;
1259 }
1260 
1261 static struct scsiio_tracker *
1262 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1263 {
1264 	struct scsi_cmnd *cmd;
1265 
1266 	if (WARN_ON(!smid) ||
1267 	    WARN_ON(smid >= ioc->hi_priority_smid))
1268 		return NULL;
1269 
1270 	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1271 	if (cmd)
1272 		return scsi_cmd_priv(cmd);
1273 
1274 	return NULL;
1275 }
1276 
1277 /**
1278  * _base_get_cb_idx - obtain the callback index
1279  * @ioc: per adapter object
1280  * @smid: system request message index
1281  *
1282  * Return callback index.
1283  */
1284 static u8
1285 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1286 {
1287 	int i;
1288 	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1289 	u8 cb_idx = 0xFF;
1290 
1291 	if (smid < ioc->hi_priority_smid) {
1292 		struct scsiio_tracker *st;
1293 
1294 		if (smid < ctl_smid) {
1295 			st = _get_st_from_smid(ioc, smid);
1296 			if (st)
1297 				cb_idx = st->cb_idx;
1298 		} else if (smid == ctl_smid)
1299 			cb_idx = ioc->ctl_cb_idx;
1300 	} else if (smid < ioc->internal_smid) {
1301 		i = smid - ioc->hi_priority_smid;
1302 		cb_idx = ioc->hpr_lookup[i].cb_idx;
1303 	} else if (smid <= ioc->hba_queue_depth) {
1304 		i = smid - ioc->internal_smid;
1305 		cb_idx = ioc->internal_lookup[i].cb_idx;
1306 	}
1307 	return cb_idx;
1308 }
1309 
1310 /**
1311  * _base_mask_interrupts - disable interrupts
1312  * @ioc: per adapter object
1313  *
1314  * Disabling ResetIRQ, Reply and Doorbell Interrupts
1315  *
1316  * Return nothing.
1317  */
1318 static void
1319 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1320 {
1321 	u32 him_register;
1322 
1323 	ioc->mask_interrupts = 1;
1324 	him_register = readl(&ioc->chip->HostInterruptMask);
1325 	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1326 	writel(him_register, &ioc->chip->HostInterruptMask);
1327 	readl(&ioc->chip->HostInterruptMask);
1328 }
1329 
1330 /**
1331  * _base_unmask_interrupts - enable interrupts
1332  * @ioc: per adapter object
1333  *
1334  * Enabling only Reply Interrupts
1335  *
1336  * Return nothing.
1337  */
1338 static void
1339 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1340 {
1341 	u32 him_register;
1342 
1343 	him_register = readl(&ioc->chip->HostInterruptMask);
1344 	him_register &= ~MPI2_HIM_RIM;
1345 	writel(him_register, &ioc->chip->HostInterruptMask);
1346 	ioc->mask_interrupts = 0;
1347 }
1348 
1349 union reply_descriptor {
1350 	u64 word;
1351 	struct {
1352 		u32 low;
1353 		u32 high;
1354 	} u;
1355 };
1356 
1357 /**
1358  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1359  * @irq: irq number (not used)
1360  * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
1361  * @r: pt_regs pointer (not used)
1362  *
1363  * Return IRQ_HANDLE if processed, else IRQ_NONE.
1364  */
1365 static irqreturn_t
1366 _base_interrupt(int irq, void *bus_id)
1367 {
1368 	struct adapter_reply_queue *reply_q = bus_id;
1369 	union reply_descriptor rd;
1370 	u32 completed_cmds;
1371 	u8 request_desript_type;
1372 	u16 smid;
1373 	u8 cb_idx;
1374 	u32 reply;
1375 	u8 msix_index = reply_q->msix_index;
1376 	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1377 	Mpi2ReplyDescriptorsUnion_t *rpf;
1378 	u8 rc;
1379 
1380 	if (ioc->mask_interrupts)
1381 		return IRQ_NONE;
1382 
1383 	if (!atomic_add_unless(&reply_q->busy, 1, 1))
1384 		return IRQ_NONE;
1385 
1386 	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1387 	request_desript_type = rpf->Default.ReplyFlags
1388 	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1389 	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1390 		atomic_dec(&reply_q->busy);
1391 		return IRQ_NONE;
1392 	}
1393 
1394 	completed_cmds = 0;
1395 	cb_idx = 0xFF;
1396 	do {
1397 		rd.word = le64_to_cpu(rpf->Words);
1398 		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1399 			goto out;
1400 		reply = 0;
1401 		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1402 		if (request_desript_type ==
1403 		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1404 		    request_desript_type ==
1405 		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1406 		    request_desript_type ==
1407 		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1408 			cb_idx = _base_get_cb_idx(ioc, smid);
1409 			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1410 			    (likely(mpt_callbacks[cb_idx] != NULL))) {
1411 				rc = mpt_callbacks[cb_idx](ioc, smid,
1412 				    msix_index, 0);
1413 				if (rc)
1414 					mpt3sas_base_free_smid(ioc, smid);
1415 			}
1416 		} else if (request_desript_type ==
1417 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1418 			reply = le32_to_cpu(
1419 			    rpf->AddressReply.ReplyFrameAddress);
1420 			if (reply > ioc->reply_dma_max_address ||
1421 			    reply < ioc->reply_dma_min_address)
1422 				reply = 0;
1423 			if (smid) {
1424 				cb_idx = _base_get_cb_idx(ioc, smid);
1425 				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1426 				    (likely(mpt_callbacks[cb_idx] != NULL))) {
1427 					rc = mpt_callbacks[cb_idx](ioc, smid,
1428 					    msix_index, reply);
1429 					if (reply)
1430 						_base_display_reply_info(ioc,
1431 						    smid, msix_index, reply);
1432 					if (rc)
1433 						mpt3sas_base_free_smid(ioc,
1434 						    smid);
1435 				}
1436 			} else {
1437 				_base_async_event(ioc, msix_index, reply);
1438 			}
1439 
1440 			/* reply free queue handling */
1441 			if (reply) {
1442 				ioc->reply_free_host_index =
1443 				    (ioc->reply_free_host_index ==
1444 				    (ioc->reply_free_queue_depth - 1)) ?
1445 				    0 : ioc->reply_free_host_index + 1;
1446 				ioc->reply_free[ioc->reply_free_host_index] =
1447 				    cpu_to_le32(reply);
1448 				if (ioc->is_mcpu_endpoint)
1449 					_base_clone_reply_to_sys_mem(ioc,
1450 						reply,
1451 						ioc->reply_free_host_index);
1452 				writel(ioc->reply_free_host_index,
1453 				    &ioc->chip->ReplyFreeHostIndex);
1454 			}
1455 		}
1456 
1457 		rpf->Words = cpu_to_le64(ULLONG_MAX);
1458 		reply_q->reply_post_host_index =
1459 		    (reply_q->reply_post_host_index ==
1460 		    (ioc->reply_post_queue_depth - 1)) ? 0 :
1461 		    reply_q->reply_post_host_index + 1;
1462 		request_desript_type =
1463 		    reply_q->reply_post_free[reply_q->reply_post_host_index].
1464 		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1465 		completed_cmds++;
1466 		/* Update the reply post host index after continuously
1467 		 * processing the threshold number of Reply Descriptors.
1468 		 * So that FW can find enough entries to post the Reply
1469 		 * Descriptors in the reply descriptor post queue.
1470 		 */
1471 		if (completed_cmds > ioc->hba_queue_depth/3) {
1472 			if (ioc->combined_reply_queue) {
1473 				writel(reply_q->reply_post_host_index |
1474 						((msix_index  & 7) <<
1475 						 MPI2_RPHI_MSIX_INDEX_SHIFT),
1476 				    ioc->replyPostRegisterIndex[msix_index/8]);
1477 			} else {
1478 				writel(reply_q->reply_post_host_index |
1479 						(msix_index <<
1480 						 MPI2_RPHI_MSIX_INDEX_SHIFT),
1481 						&ioc->chip->ReplyPostHostIndex);
1482 			}
1483 			completed_cmds = 1;
1484 		}
1485 		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1486 			goto out;
1487 		if (!reply_q->reply_post_host_index)
1488 			rpf = reply_q->reply_post_free;
1489 		else
1490 			rpf++;
1491 	} while (1);
1492 
1493  out:
1494 
1495 	if (!completed_cmds) {
1496 		atomic_dec(&reply_q->busy);
1497 		return IRQ_NONE;
1498 	}
1499 
1500 	if (ioc->is_warpdrive) {
1501 		writel(reply_q->reply_post_host_index,
1502 		ioc->reply_post_host_index[msix_index]);
1503 		atomic_dec(&reply_q->busy);
1504 		return IRQ_HANDLED;
1505 	}
1506 
1507 	/* Update Reply Post Host Index.
1508 	 * For those HBA's which support combined reply queue feature
1509 	 * 1. Get the correct Supplemental Reply Post Host Index Register.
1510 	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1511 	 *    Index Register address bank i.e replyPostRegisterIndex[],
1512 	 * 2. Then update this register with new reply host index value
1513 	 *    in ReplyPostIndex field and the MSIxIndex field with
1514 	 *    msix_index value reduced to a value between 0 and 7,
1515 	 *    using a modulo 8 operation. Since each Supplemental Reply Post
1516 	 *    Host Index Register supports 8 MSI-X vectors.
1517 	 *
1518 	 * For other HBA's just update the Reply Post Host Index register with
1519 	 * new reply host index value in ReplyPostIndex Field and msix_index
1520 	 * value in MSIxIndex field.
1521 	 */
1522 	if (ioc->combined_reply_queue)
1523 		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1524 			MPI2_RPHI_MSIX_INDEX_SHIFT),
1525 			ioc->replyPostRegisterIndex[msix_index/8]);
1526 	else
1527 		writel(reply_q->reply_post_host_index | (msix_index <<
1528 			MPI2_RPHI_MSIX_INDEX_SHIFT),
1529 			&ioc->chip->ReplyPostHostIndex);
1530 	atomic_dec(&reply_q->busy);
1531 	return IRQ_HANDLED;
1532 }
1533 
1534 /**
1535  * _base_is_controller_msix_enabled - is controller support muli-reply queues
1536  * @ioc: per adapter object
1537  *
1538  */
1539 static inline int
1540 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1541 {
1542 	return (ioc->facts.IOCCapabilities &
1543 	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1544 }
1545 
1546 /**
1547  * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1548  * @ioc: per adapter object
1549  * Context: non ISR conext
1550  *
1551  * Called when a Task Management request has completed.
1552  *
1553  * Return nothing.
1554  */
1555 void
1556 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
1557 {
1558 	struct adapter_reply_queue *reply_q;
1559 
1560 	/* If MSIX capability is turned off
1561 	 * then multi-queues are not enabled
1562 	 */
1563 	if (!_base_is_controller_msix_enabled(ioc))
1564 		return;
1565 
1566 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1567 		if (ioc->shost_recovery || ioc->remove_host ||
1568 				ioc->pci_error_recovery)
1569 			return;
1570 		/* TMs are on msix_index == 0 */
1571 		if (reply_q->msix_index == 0)
1572 			continue;
1573 		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1574 	}
1575 }
1576 
1577 /**
1578  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1579  * @cb_idx: callback index
1580  *
1581  * Return nothing.
1582  */
1583 void
1584 mpt3sas_base_release_callback_handler(u8 cb_idx)
1585 {
1586 	mpt_callbacks[cb_idx] = NULL;
1587 }
1588 
1589 /**
1590  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1591  * @cb_func: callback function
1592  *
1593  * Returns cb_func.
1594  */
1595 u8
1596 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1597 {
1598 	u8 cb_idx;
1599 
1600 	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1601 		if (mpt_callbacks[cb_idx] == NULL)
1602 			break;
1603 
1604 	mpt_callbacks[cb_idx] = cb_func;
1605 	return cb_idx;
1606 }
1607 
1608 /**
1609  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1610  *
1611  * Return nothing.
1612  */
1613 void
1614 mpt3sas_base_initialize_callback_handler(void)
1615 {
1616 	u8 cb_idx;
1617 
1618 	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1619 		mpt3sas_base_release_callback_handler(cb_idx);
1620 }
1621 
1622 
1623 /**
1624  * _base_build_zero_len_sge - build zero length sg entry
1625  * @ioc: per adapter object
1626  * @paddr: virtual address for SGE
1627  *
1628  * Create a zero length scatter gather entry to insure the IOCs hardware has
1629  * something to use if the target device goes brain dead and tries
1630  * to send data even when none is asked for.
1631  *
1632  * Return nothing.
1633  */
1634 static void
1635 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1636 {
1637 	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1638 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1639 	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1640 	    MPI2_SGE_FLAGS_SHIFT);
1641 	ioc->base_add_sg_single(paddr, flags_length, -1);
1642 }
1643 
1644 /**
1645  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1646  * @paddr: virtual address for SGE
1647  * @flags_length: SGE flags and data transfer length
1648  * @dma_addr: Physical address
1649  *
1650  * Return nothing.
1651  */
1652 static void
1653 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1654 {
1655 	Mpi2SGESimple32_t *sgel = paddr;
1656 
1657 	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1658 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1659 	sgel->FlagsLength = cpu_to_le32(flags_length);
1660 	sgel->Address = cpu_to_le32(dma_addr);
1661 }
1662 
1663 
1664 /**
1665  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1666  * @paddr: virtual address for SGE
1667  * @flags_length: SGE flags and data transfer length
1668  * @dma_addr: Physical address
1669  *
1670  * Return nothing.
1671  */
1672 static void
1673 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1674 {
1675 	Mpi2SGESimple64_t *sgel = paddr;
1676 
1677 	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1678 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1679 	sgel->FlagsLength = cpu_to_le32(flags_length);
1680 	sgel->Address = cpu_to_le64(dma_addr);
1681 }
1682 
1683 /**
1684  * _base_get_chain_buffer_tracker - obtain chain tracker
1685  * @ioc: per adapter object
1686  * @scmd: SCSI commands of the IO request
1687  *
1688  * Returns chain tracker from chain_lookup table using key as
1689  * smid and smid's chain_offset.
1690  */
1691 static struct chain_tracker *
1692 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1693 			       struct scsi_cmnd *scmd)
1694 {
1695 	struct chain_tracker *chain_req;
1696 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1697 	u16 smid = st->smid;
1698 	u8 chain_offset =
1699 	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1700 
1701 	if (chain_offset == ioc->chains_needed_per_io)
1702 		return NULL;
1703 
1704 	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1705 	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1706 	return chain_req;
1707 }
1708 
1709 
1710 /**
1711  * _base_build_sg - build generic sg
1712  * @ioc: per adapter object
1713  * @psge: virtual address for SGE
1714  * @data_out_dma: physical address for WRITES
1715  * @data_out_sz: data xfer size for WRITES
1716  * @data_in_dma: physical address for READS
1717  * @data_in_sz: data xfer size for READS
1718  *
1719  * Return nothing.
1720  */
1721 static void
1722 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1723 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1724 	size_t data_in_sz)
1725 {
1726 	u32 sgl_flags;
1727 
1728 	if (!data_out_sz && !data_in_sz) {
1729 		_base_build_zero_len_sge(ioc, psge);
1730 		return;
1731 	}
1732 
1733 	if (data_out_sz && data_in_sz) {
1734 		/* WRITE sgel first */
1735 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1736 		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1737 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1738 		ioc->base_add_sg_single(psge, sgl_flags |
1739 		    data_out_sz, data_out_dma);
1740 
1741 		/* incr sgel */
1742 		psge += ioc->sge_size;
1743 
1744 		/* READ sgel last */
1745 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1746 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1747 		    MPI2_SGE_FLAGS_END_OF_LIST);
1748 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1749 		ioc->base_add_sg_single(psge, sgl_flags |
1750 		    data_in_sz, data_in_dma);
1751 	} else if (data_out_sz) /* WRITE */ {
1752 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1753 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1754 		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1755 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1756 		ioc->base_add_sg_single(psge, sgl_flags |
1757 		    data_out_sz, data_out_dma);
1758 	} else if (data_in_sz) /* READ */ {
1759 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1760 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1761 		    MPI2_SGE_FLAGS_END_OF_LIST);
1762 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1763 		ioc->base_add_sg_single(psge, sgl_flags |
1764 		    data_in_sz, data_in_dma);
1765 	}
1766 }
1767 
1768 /* IEEE format sgls */
1769 
1770 /**
1771  * _base_build_nvme_prp - This function is called for NVMe end devices to build
1772  * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
1773  * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
1774  * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
1775  * used to describe a larger data buffer.  If the data buffer is too large to
1776  * describe using the two PRP entriess inside the NVMe message, then PRP1
1777  * describes the first data memory segment, and PRP2 contains a pointer to a PRP
1778  * list located elsewhere in memory to describe the remaining data memory
1779  * segments.  The PRP list will be contiguous.
1780 
1781  * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
1782  * consists of a list of PRP entries to describe a number of noncontigous
1783  * physical memory segments as a single memory buffer, just as a SGL does.  Note
1784  * however, that this function is only used by the IOCTL call, so the memory
1785  * given will be guaranteed to be contiguous.  There is no need to translate
1786  * non-contiguous SGL into a PRP in this case.  All PRPs will describe
1787  * contiguous space that is one page size each.
1788  *
1789  * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
1790  * a PRP list pointer or a PRP element, depending upon the command.  PRP2
1791  * contains the second PRP element if the memory being described fits within 2
1792  * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
1793  *
1794  * A PRP list pointer contains the address of a PRP list, structured as a linear
1795  * array of PRP entries.  Each PRP entry in this list describes a segment of
1796  * physical memory.
1797  *
1798  * Each 64-bit PRP entry comprises an address and an offset field.  The address
1799  * always points at the beginning of a 4KB physical memory page, and the offset
1800  * describes where within that 4KB page the memory segment begins.  Only the
1801  * first element in a PRP list may contain a non-zero offest, implying that all
1802  * memory segments following the first begin at the start of a 4KB page.
1803  *
1804  * Each PRP element normally describes 4KB of physical memory, with exceptions
1805  * for the first and last elements in the list.  If the memory being described
1806  * by the list begins at a non-zero offset within the first 4KB page, then the
1807  * first PRP element will contain a non-zero offset indicating where the region
1808  * begins within the 4KB page.  The last memory segment may end before the end
1809  * of the 4KB segment, depending upon the overall size of the memory being
1810  * described by the PRP list.
1811  *
1812  * Since PRP entries lack any indication of size, the overall data buffer length
1813  * is used to determine where the end of the data memory buffer is located, and
1814  * how many PRP entries are required to describe it.
1815  *
1816  * @ioc: per adapter object
1817  * @smid: system request message index for getting asscociated SGL
1818  * @nvme_encap_request: the NVMe request msg frame pointer
1819  * @data_out_dma: physical address for WRITES
1820  * @data_out_sz: data xfer size for WRITES
1821  * @data_in_dma: physical address for READS
1822  * @data_in_sz: data xfer size for READS
1823  *
1824  * Returns nothing.
1825  */
1826 static void
1827 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1828 	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
1829 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1830 	size_t data_in_sz)
1831 {
1832 	int		prp_size = NVME_PRP_SIZE;
1833 	__le64		*prp_entry, *prp1_entry, *prp2_entry;
1834 	__le64		*prp_page;
1835 	dma_addr_t	prp_entry_dma, prp_page_dma, dma_addr;
1836 	u32		offset, entry_len;
1837 	u32		page_mask_result, page_mask;
1838 	size_t		length;
1839 
1840 	/*
1841 	 * Not all commands require a data transfer. If no data, just return
1842 	 * without constructing any PRP.
1843 	 */
1844 	if (!data_in_sz && !data_out_sz)
1845 		return;
1846 	/*
1847 	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
1848 	 * PRP1 is located at a 24 byte offset from the start of the NVMe
1849 	 * command.  Then set the current PRP entry pointer to PRP1.
1850 	 */
1851 	prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
1852 	    NVME_CMD_PRP1_OFFSET);
1853 	prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
1854 	    NVME_CMD_PRP2_OFFSET);
1855 	prp_entry = prp1_entry;
1856 	/*
1857 	 * For the PRP entries, use the specially allocated buffer of
1858 	 * contiguous memory.
1859 	 */
1860 	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
1861 	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
1862 
1863 	/*
1864 	 * Check if we are within 1 entry of a page boundary we don't
1865 	 * want our first entry to be a PRP List entry.
1866 	 */
1867 	page_mask = ioc->page_size - 1;
1868 	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
1869 	if (!page_mask_result) {
1870 		/* Bump up to next page boundary. */
1871 		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
1872 		prp_page_dma = prp_page_dma + prp_size;
1873 	}
1874 
1875 	/*
1876 	 * Set PRP physical pointer, which initially points to the current PRP
1877 	 * DMA memory page.
1878 	 */
1879 	prp_entry_dma = prp_page_dma;
1880 
1881 	/* Get physical address and length of the data buffer. */
1882 	if (data_in_sz) {
1883 		dma_addr = data_in_dma;
1884 		length = data_in_sz;
1885 	} else {
1886 		dma_addr = data_out_dma;
1887 		length = data_out_sz;
1888 	}
1889 
1890 	/* Loop while the length is not zero. */
1891 	while (length) {
1892 		/*
1893 		 * Check if we need to put a list pointer here if we are at
1894 		 * page boundary - prp_size (8 bytes).
1895 		 */
1896 		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
1897 		if (!page_mask_result) {
1898 			/*
1899 			 * This is the last entry in a PRP List, so we need to
1900 			 * put a PRP list pointer here.  What this does is:
1901 			 *   - bump the current memory pointer to the next
1902 			 *     address, which will be the next full page.
1903 			 *   - set the PRP Entry to point to that page.  This
1904 			 *     is now the PRP List pointer.
1905 			 *   - bump the PRP Entry pointer the start of the
1906 			 *     next page.  Since all of this PRP memory is
1907 			 *     contiguous, no need to get a new page - it's
1908 			 *     just the next address.
1909 			 */
1910 			prp_entry_dma++;
1911 			*prp_entry = cpu_to_le64(prp_entry_dma);
1912 			prp_entry++;
1913 		}
1914 
1915 		/* Need to handle if entry will be part of a page. */
1916 		offset = dma_addr & page_mask;
1917 		entry_len = ioc->page_size - offset;
1918 
1919 		if (prp_entry == prp1_entry) {
1920 			/*
1921 			 * Must fill in the first PRP pointer (PRP1) before
1922 			 * moving on.
1923 			 */
1924 			*prp1_entry = cpu_to_le64(dma_addr);
1925 
1926 			/*
1927 			 * Now point to the second PRP entry within the
1928 			 * command (PRP2).
1929 			 */
1930 			prp_entry = prp2_entry;
1931 		} else if (prp_entry == prp2_entry) {
1932 			/*
1933 			 * Should the PRP2 entry be a PRP List pointer or just
1934 			 * a regular PRP pointer?  If there is more than one
1935 			 * more page of data, must use a PRP List pointer.
1936 			 */
1937 			if (length > ioc->page_size) {
1938 				/*
1939 				 * PRP2 will contain a PRP List pointer because
1940 				 * more PRP's are needed with this command. The
1941 				 * list will start at the beginning of the
1942 				 * contiguous buffer.
1943 				 */
1944 				*prp2_entry = cpu_to_le64(prp_entry_dma);
1945 
1946 				/*
1947 				 * The next PRP Entry will be the start of the
1948 				 * first PRP List.
1949 				 */
1950 				prp_entry = prp_page;
1951 			} else {
1952 				/*
1953 				 * After this, the PRP Entries are complete.
1954 				 * This command uses 2 PRP's and no PRP list.
1955 				 */
1956 				*prp2_entry = cpu_to_le64(dma_addr);
1957 			}
1958 		} else {
1959 			/*
1960 			 * Put entry in list and bump the addresses.
1961 			 *
1962 			 * After PRP1 and PRP2 are filled in, this will fill in
1963 			 * all remaining PRP entries in a PRP List, one per
1964 			 * each time through the loop.
1965 			 */
1966 			*prp_entry = cpu_to_le64(dma_addr);
1967 			prp_entry++;
1968 			prp_entry_dma++;
1969 		}
1970 
1971 		/*
1972 		 * Bump the phys address of the command's data buffer by the
1973 		 * entry_len.
1974 		 */
1975 		dma_addr += entry_len;
1976 
1977 		/* Decrement length accounting for last partial page. */
1978 		if (entry_len > length)
1979 			length = 0;
1980 		else
1981 			length -= entry_len;
1982 	}
1983 }
1984 
1985 /**
1986  * base_make_prp_nvme -
1987  * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1988  *
1989  * @ioc:		per adapter object
1990  * @scmd:		SCSI command from the mid-layer
1991  * @mpi_request:	mpi request
1992  * @smid:		msg Index
1993  * @sge_count:		scatter gather element count.
1994  *
1995  * Returns:		true: PRPs are built
1996  *			false: IEEE SGLs needs to be built
1997  */
1998 static void
1999 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2000 		struct scsi_cmnd *scmd,
2001 		Mpi25SCSIIORequest_t *mpi_request,
2002 		u16 smid, int sge_count)
2003 {
2004 	int sge_len, num_prp_in_chain = 0;
2005 	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2006 	__le64 *curr_buff;
2007 	dma_addr_t msg_dma, sge_addr, offset;
2008 	u32 page_mask, page_mask_result;
2009 	struct scatterlist *sg_scmd;
2010 	u32 first_prp_len;
2011 	int data_len = scsi_bufflen(scmd);
2012 	u32 nvme_pg_size;
2013 
2014 	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2015 	/*
2016 	 * Nvme has a very convoluted prp format.  One prp is required
2017 	 * for each page or partial page. Driver need to split up OS sg_list
2018 	 * entries if it is longer than one page or cross a page
2019 	 * boundary.  Driver also have to insert a PRP list pointer entry as
2020 	 * the last entry in each physical page of the PRP list.
2021 	 *
2022 	 * NOTE: The first PRP "entry" is actually placed in the first
2023 	 * SGL entry in the main message as IEEE 64 format.  The 2nd
2024 	 * entry in the main message is the chain element, and the rest
2025 	 * of the PRP entries are built in the contiguous pcie buffer.
2026 	 */
2027 	page_mask = nvme_pg_size - 1;
2028 
2029 	/*
2030 	 * Native SGL is needed.
2031 	 * Put a chain element in main message frame that points to the first
2032 	 * chain buffer.
2033 	 *
2034 	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2035 	 *        a native SGL.
2036 	 */
2037 
2038 	/* Set main message chain element pointer */
2039 	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2040 	/*
2041 	 * For NVMe the chain element needs to be the 2nd SG entry in the main
2042 	 * message.
2043 	 */
2044 	main_chain_element = (Mpi25IeeeSgeChain64_t *)
2045 		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2046 
2047 	/*
2048 	 * For the PRP entries, use the specially allocated buffer of
2049 	 * contiguous memory.  Normal chain buffers can't be used
2050 	 * because each chain buffer would need to be the size of an OS
2051 	 * page (4k).
2052 	 */
2053 	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2054 	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2055 
2056 	main_chain_element->Address = cpu_to_le64(msg_dma);
2057 	main_chain_element->NextChainOffset = 0;
2058 	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2059 			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2060 			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2061 
2062 	/* Build first prp, sge need not to be page aligned*/
2063 	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2064 	sg_scmd = scsi_sglist(scmd);
2065 	sge_addr = sg_dma_address(sg_scmd);
2066 	sge_len = sg_dma_len(sg_scmd);
2067 
2068 	offset = sge_addr & page_mask;
2069 	first_prp_len = nvme_pg_size - offset;
2070 
2071 	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2072 	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2073 
2074 	data_len -= first_prp_len;
2075 
2076 	if (sge_len > first_prp_len) {
2077 		sge_addr += first_prp_len;
2078 		sge_len -= first_prp_len;
2079 	} else if (data_len && (sge_len == first_prp_len)) {
2080 		sg_scmd = sg_next(sg_scmd);
2081 		sge_addr = sg_dma_address(sg_scmd);
2082 		sge_len = sg_dma_len(sg_scmd);
2083 	}
2084 
2085 	for (;;) {
2086 		offset = sge_addr & page_mask;
2087 
2088 		/* Put PRP pointer due to page boundary*/
2089 		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2090 		if (unlikely(!page_mask_result)) {
2091 			scmd_printk(KERN_NOTICE,
2092 				scmd, "page boundary curr_buff: 0x%p\n",
2093 				curr_buff);
2094 			msg_dma += 8;
2095 			*curr_buff = cpu_to_le64(msg_dma);
2096 			curr_buff++;
2097 			num_prp_in_chain++;
2098 		}
2099 
2100 		*curr_buff = cpu_to_le64(sge_addr);
2101 		curr_buff++;
2102 		msg_dma += 8;
2103 		num_prp_in_chain++;
2104 
2105 		sge_addr += nvme_pg_size;
2106 		sge_len -= nvme_pg_size;
2107 		data_len -= nvme_pg_size;
2108 
2109 		if (data_len <= 0)
2110 			break;
2111 
2112 		if (sge_len > 0)
2113 			continue;
2114 
2115 		sg_scmd = sg_next(sg_scmd);
2116 		sge_addr = sg_dma_address(sg_scmd);
2117 		sge_len = sg_dma_len(sg_scmd);
2118 	}
2119 
2120 	main_chain_element->Length =
2121 		cpu_to_le32(num_prp_in_chain * sizeof(u64));
2122 	return;
2123 }
2124 
2125 static bool
2126 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2127 	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2128 {
2129 	u32 data_length = 0;
2130 	struct scatterlist *sg_scmd;
2131 	bool build_prp = true;
2132 
2133 	data_length = scsi_bufflen(scmd);
2134 	sg_scmd = scsi_sglist(scmd);
2135 
2136 	/* If Datalenth is <= 16K and number of SGE’s entries are <= 2
2137 	 * we built IEEE SGL
2138 	 */
2139 	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2140 		build_prp = false;
2141 
2142 	return build_prp;
2143 }
2144 
2145 /**
2146  * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2147  * determine if the driver needs to build a native SGL.  If so, that native
2148  * SGL is built in the special contiguous buffers allocated especially for
2149  * PCIe SGL creation.  If the driver will not build a native SGL, return
2150  * TRUE and a normal IEEE SGL will be built.  Currently this routine
2151  * supports NVMe.
2152  * @ioc: per adapter object
2153  * @mpi_request: mf request pointer
2154  * @smid: system request message index
2155  * @scmd: scsi command
2156  * @pcie_device: points to the PCIe device's info
2157  *
2158  * Returns 0 if native SGL was built, 1 if no SGL was built
2159  */
2160 static int
2161 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2162 	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2163 	struct _pcie_device *pcie_device)
2164 {
2165 	struct scatterlist *sg_scmd;
2166 	int sges_left;
2167 
2168 	/* Get the SG list pointer and info. */
2169 	sg_scmd = scsi_sglist(scmd);
2170 	sges_left = scsi_dma_map(scmd);
2171 	if (sges_left < 0) {
2172 		sdev_printk(KERN_ERR, scmd->device,
2173 			"scsi_dma_map failed: request for %d bytes!\n",
2174 			scsi_bufflen(scmd));
2175 		return 1;
2176 	}
2177 
2178 	/* Check if we need to build a native SG list. */
2179 	if (base_is_prp_possible(ioc, pcie_device,
2180 				scmd, sges_left) == 0) {
2181 		/* We built a native SG list, just return. */
2182 		goto out;
2183 	}
2184 
2185 	/*
2186 	 * Build native NVMe PRP.
2187 	 */
2188 	base_make_prp_nvme(ioc, scmd, mpi_request,
2189 			smid, sges_left);
2190 
2191 	return 0;
2192 out:
2193 	scsi_dma_unmap(scmd);
2194 	return 1;
2195 }
2196 
2197 /**
2198  * _base_add_sg_single_ieee - add sg element for IEEE format
2199  * @paddr: virtual address for SGE
2200  * @flags: SGE flags
2201  * @chain_offset: number of 128 byte elements from start of segment
2202  * @length: data transfer length
2203  * @dma_addr: Physical address
2204  *
2205  * Return nothing.
2206  */
2207 static void
2208 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2209 	dma_addr_t dma_addr)
2210 {
2211 	Mpi25IeeeSgeChain64_t *sgel = paddr;
2212 
2213 	sgel->Flags = flags;
2214 	sgel->NextChainOffset = chain_offset;
2215 	sgel->Length = cpu_to_le32(length);
2216 	sgel->Address = cpu_to_le64(dma_addr);
2217 }
2218 
2219 /**
2220  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2221  * @ioc: per adapter object
2222  * @paddr: virtual address for SGE
2223  *
2224  * Create a zero length scatter gather entry to insure the IOCs hardware has
2225  * something to use if the target device goes brain dead and tries
2226  * to send data even when none is asked for.
2227  *
2228  * Return nothing.
2229  */
2230 static void
2231 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2232 {
2233 	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2234 		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2235 		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2236 
2237 	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2238 }
2239 
2240 /**
2241  * _base_build_sg_scmd - main sg creation routine
2242  *		pcie_device is unused here!
2243  * @ioc: per adapter object
2244  * @scmd: scsi command
2245  * @smid: system request message index
2246  * @unused: unused pcie_device pointer
2247  * Context: none.
2248  *
2249  * The main routine that builds scatter gather table from a given
2250  * scsi request sent via the .queuecommand main handler.
2251  *
2252  * Returns 0 success, anything else error
2253  */
2254 static int
2255 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2256 	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2257 {
2258 	Mpi2SCSIIORequest_t *mpi_request;
2259 	dma_addr_t chain_dma;
2260 	struct scatterlist *sg_scmd;
2261 	void *sg_local, *chain;
2262 	u32 chain_offset;
2263 	u32 chain_length;
2264 	u32 chain_flags;
2265 	int sges_left;
2266 	u32 sges_in_segment;
2267 	u32 sgl_flags;
2268 	u32 sgl_flags_last_element;
2269 	u32 sgl_flags_end_buffer;
2270 	struct chain_tracker *chain_req;
2271 
2272 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2273 
2274 	/* init scatter gather flags */
2275 	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2276 	if (scmd->sc_data_direction == DMA_TO_DEVICE)
2277 		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2278 	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2279 	    << MPI2_SGE_FLAGS_SHIFT;
2280 	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2281 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2282 	    << MPI2_SGE_FLAGS_SHIFT;
2283 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2284 
2285 	sg_scmd = scsi_sglist(scmd);
2286 	sges_left = scsi_dma_map(scmd);
2287 	if (sges_left < 0) {
2288 		sdev_printk(KERN_ERR, scmd->device,
2289 		 "pci_map_sg failed: request for %d bytes!\n",
2290 		 scsi_bufflen(scmd));
2291 		return -ENOMEM;
2292 	}
2293 
2294 	sg_local = &mpi_request->SGL;
2295 	sges_in_segment = ioc->max_sges_in_main_message;
2296 	if (sges_left <= sges_in_segment)
2297 		goto fill_in_last_segment;
2298 
2299 	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2300 	    (sges_in_segment * ioc->sge_size))/4;
2301 
2302 	/* fill in main message segment when there is a chain following */
2303 	while (sges_in_segment) {
2304 		if (sges_in_segment == 1)
2305 			ioc->base_add_sg_single(sg_local,
2306 			    sgl_flags_last_element | sg_dma_len(sg_scmd),
2307 			    sg_dma_address(sg_scmd));
2308 		else
2309 			ioc->base_add_sg_single(sg_local, sgl_flags |
2310 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2311 		sg_scmd = sg_next(sg_scmd);
2312 		sg_local += ioc->sge_size;
2313 		sges_left--;
2314 		sges_in_segment--;
2315 	}
2316 
2317 	/* initializing the chain flags and pointers */
2318 	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2319 	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2320 	if (!chain_req)
2321 		return -1;
2322 	chain = chain_req->chain_buffer;
2323 	chain_dma = chain_req->chain_buffer_dma;
2324 	do {
2325 		sges_in_segment = (sges_left <=
2326 		    ioc->max_sges_in_chain_message) ? sges_left :
2327 		    ioc->max_sges_in_chain_message;
2328 		chain_offset = (sges_left == sges_in_segment) ?
2329 		    0 : (sges_in_segment * ioc->sge_size)/4;
2330 		chain_length = sges_in_segment * ioc->sge_size;
2331 		if (chain_offset) {
2332 			chain_offset = chain_offset <<
2333 			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
2334 			chain_length += ioc->sge_size;
2335 		}
2336 		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2337 		    chain_length, chain_dma);
2338 		sg_local = chain;
2339 		if (!chain_offset)
2340 			goto fill_in_last_segment;
2341 
2342 		/* fill in chain segments */
2343 		while (sges_in_segment) {
2344 			if (sges_in_segment == 1)
2345 				ioc->base_add_sg_single(sg_local,
2346 				    sgl_flags_last_element |
2347 				    sg_dma_len(sg_scmd),
2348 				    sg_dma_address(sg_scmd));
2349 			else
2350 				ioc->base_add_sg_single(sg_local, sgl_flags |
2351 				    sg_dma_len(sg_scmd),
2352 				    sg_dma_address(sg_scmd));
2353 			sg_scmd = sg_next(sg_scmd);
2354 			sg_local += ioc->sge_size;
2355 			sges_left--;
2356 			sges_in_segment--;
2357 		}
2358 
2359 		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2360 		if (!chain_req)
2361 			return -1;
2362 		chain = chain_req->chain_buffer;
2363 		chain_dma = chain_req->chain_buffer_dma;
2364 	} while (1);
2365 
2366 
2367  fill_in_last_segment:
2368 
2369 	/* fill the last segment */
2370 	while (sges_left) {
2371 		if (sges_left == 1)
2372 			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2373 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2374 		else
2375 			ioc->base_add_sg_single(sg_local, sgl_flags |
2376 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2377 		sg_scmd = sg_next(sg_scmd);
2378 		sg_local += ioc->sge_size;
2379 		sges_left--;
2380 	}
2381 
2382 	return 0;
2383 }
2384 
2385 /**
2386  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2387  * @ioc: per adapter object
2388  * @scmd: scsi command
2389  * @smid: system request message index
2390  * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2391  * constructed on need.
2392  * Context: none.
2393  *
2394  * The main routine that builds scatter gather table from a given
2395  * scsi request sent via the .queuecommand main handler.
2396  *
2397  * Returns 0 success, anything else error
2398  */
2399 static int
2400 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2401 	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2402 {
2403 	Mpi25SCSIIORequest_t *mpi_request;
2404 	dma_addr_t chain_dma;
2405 	struct scatterlist *sg_scmd;
2406 	void *sg_local, *chain;
2407 	u32 chain_offset;
2408 	u32 chain_length;
2409 	int sges_left;
2410 	u32 sges_in_segment;
2411 	u8 simple_sgl_flags;
2412 	u8 simple_sgl_flags_last;
2413 	u8 chain_sgl_flags;
2414 	struct chain_tracker *chain_req;
2415 
2416 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2417 
2418 	/* init scatter gather flags */
2419 	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2420 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2421 	simple_sgl_flags_last = simple_sgl_flags |
2422 	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2423 	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2424 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2425 
2426 	/* Check if we need to build a native SG list. */
2427 	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2428 			smid, scmd, pcie_device) == 0)) {
2429 		/* We built a native SG list, just return. */
2430 		return 0;
2431 	}
2432 
2433 	sg_scmd = scsi_sglist(scmd);
2434 	sges_left = scsi_dma_map(scmd);
2435 	if (sges_left < 0) {
2436 		sdev_printk(KERN_ERR, scmd->device,
2437 			"pci_map_sg failed: request for %d bytes!\n",
2438 			scsi_bufflen(scmd));
2439 		return -ENOMEM;
2440 	}
2441 
2442 	sg_local = &mpi_request->SGL;
2443 	sges_in_segment = (ioc->request_sz -
2444 		   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2445 	if (sges_left <= sges_in_segment)
2446 		goto fill_in_last_segment;
2447 
2448 	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2449 	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2450 
2451 	/* fill in main message segment when there is a chain following */
2452 	while (sges_in_segment > 1) {
2453 		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2454 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2455 		sg_scmd = sg_next(sg_scmd);
2456 		sg_local += ioc->sge_size_ieee;
2457 		sges_left--;
2458 		sges_in_segment--;
2459 	}
2460 
2461 	/* initializing the pointers */
2462 	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2463 	if (!chain_req)
2464 		return -1;
2465 	chain = chain_req->chain_buffer;
2466 	chain_dma = chain_req->chain_buffer_dma;
2467 	do {
2468 		sges_in_segment = (sges_left <=
2469 		    ioc->max_sges_in_chain_message) ? sges_left :
2470 		    ioc->max_sges_in_chain_message;
2471 		chain_offset = (sges_left == sges_in_segment) ?
2472 		    0 : sges_in_segment;
2473 		chain_length = sges_in_segment * ioc->sge_size_ieee;
2474 		if (chain_offset)
2475 			chain_length += ioc->sge_size_ieee;
2476 		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2477 		    chain_offset, chain_length, chain_dma);
2478 
2479 		sg_local = chain;
2480 		if (!chain_offset)
2481 			goto fill_in_last_segment;
2482 
2483 		/* fill in chain segments */
2484 		while (sges_in_segment) {
2485 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2486 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2487 			sg_scmd = sg_next(sg_scmd);
2488 			sg_local += ioc->sge_size_ieee;
2489 			sges_left--;
2490 			sges_in_segment--;
2491 		}
2492 
2493 		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2494 		if (!chain_req)
2495 			return -1;
2496 		chain = chain_req->chain_buffer;
2497 		chain_dma = chain_req->chain_buffer_dma;
2498 	} while (1);
2499 
2500 
2501  fill_in_last_segment:
2502 
2503 	/* fill the last segment */
2504 	while (sges_left > 0) {
2505 		if (sges_left == 1)
2506 			_base_add_sg_single_ieee(sg_local,
2507 			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2508 			    sg_dma_address(sg_scmd));
2509 		else
2510 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2511 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2512 		sg_scmd = sg_next(sg_scmd);
2513 		sg_local += ioc->sge_size_ieee;
2514 		sges_left--;
2515 	}
2516 
2517 	return 0;
2518 }
2519 
2520 /**
2521  * _base_build_sg_ieee - build generic sg for IEEE format
2522  * @ioc: per adapter object
2523  * @psge: virtual address for SGE
2524  * @data_out_dma: physical address for WRITES
2525  * @data_out_sz: data xfer size for WRITES
2526  * @data_in_dma: physical address for READS
2527  * @data_in_sz: data xfer size for READS
2528  *
2529  * Return nothing.
2530  */
2531 static void
2532 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2533 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2534 	size_t data_in_sz)
2535 {
2536 	u8 sgl_flags;
2537 
2538 	if (!data_out_sz && !data_in_sz) {
2539 		_base_build_zero_len_sge_ieee(ioc, psge);
2540 		return;
2541 	}
2542 
2543 	if (data_out_sz && data_in_sz) {
2544 		/* WRITE sgel first */
2545 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2546 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2547 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2548 		    data_out_dma);
2549 
2550 		/* incr sgel */
2551 		psge += ioc->sge_size_ieee;
2552 
2553 		/* READ sgel last */
2554 		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2555 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2556 		    data_in_dma);
2557 	} else if (data_out_sz) /* WRITE */ {
2558 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2559 		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2560 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2561 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2562 		    data_out_dma);
2563 	} else if (data_in_sz) /* READ */ {
2564 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2565 		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2566 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2567 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2568 		    data_in_dma);
2569 	}
2570 }
2571 
2572 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2573 
2574 /**
2575  * _base_config_dma_addressing - set dma addressing
2576  * @ioc: per adapter object
2577  * @pdev: PCI device struct
2578  *
2579  * Returns 0 for success, non-zero for failure.
2580  */
2581 static int
2582 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2583 {
2584 	struct sysinfo s;
2585 	u64 consistent_dma_mask;
2586 
2587 	if (ioc->is_mcpu_endpoint)
2588 		goto try_32bit;
2589 
2590 	if (ioc->dma_mask)
2591 		consistent_dma_mask = DMA_BIT_MASK(64);
2592 	else
2593 		consistent_dma_mask = DMA_BIT_MASK(32);
2594 
2595 	if (sizeof(dma_addr_t) > 4) {
2596 		const uint64_t required_mask =
2597 		    dma_get_required_mask(&pdev->dev);
2598 		if ((required_mask > DMA_BIT_MASK(32)) &&
2599 		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
2600 		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
2601 			ioc->base_add_sg_single = &_base_add_sg_single_64;
2602 			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2603 			ioc->dma_mask = 64;
2604 			goto out;
2605 		}
2606 	}
2607 
2608  try_32bit:
2609 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2610 	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2611 		ioc->base_add_sg_single = &_base_add_sg_single_32;
2612 		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2613 		ioc->dma_mask = 32;
2614 	} else
2615 		return -ENODEV;
2616 
2617  out:
2618 	si_meminfo(&s);
2619 	pr_info(MPT3SAS_FMT
2620 		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2621 		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
2622 
2623 	return 0;
2624 }
2625 
2626 static int
2627 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2628 				      struct pci_dev *pdev)
2629 {
2630 	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2631 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2632 			return -ENODEV;
2633 	}
2634 	return 0;
2635 }
2636 
2637 /**
2638  * _base_check_enable_msix - checks MSIX capabable.
2639  * @ioc: per adapter object
2640  *
2641  * Check to see if card is capable of MSIX, and set number
2642  * of available msix vectors
2643  */
2644 static int
2645 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2646 {
2647 	int base;
2648 	u16 message_control;
2649 
2650 	/* Check whether controller SAS2008 B0 controller,
2651 	 * if it is SAS2008 B0 controller use IO-APIC instead of MSIX
2652 	 */
2653 	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2654 	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2655 		return -EINVAL;
2656 	}
2657 
2658 	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2659 	if (!base) {
2660 		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
2661 			ioc->name));
2662 		return -EINVAL;
2663 	}
2664 
2665 	/* get msix vector count */
2666 	/* NUMA_IO not supported for older controllers */
2667 	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2668 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2669 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2670 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2671 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2672 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2673 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2674 		ioc->msix_vector_count = 1;
2675 	else {
2676 		pci_read_config_word(ioc->pdev, base + 2, &message_control);
2677 		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2678 	}
2679 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2680 		"msix is supported, vector_count(%d)\n",
2681 		ioc->name, ioc->msix_vector_count));
2682 	return 0;
2683 }
2684 
2685 /**
2686  * _base_free_irq - free irq
2687  * @ioc: per adapter object
2688  *
2689  * Freeing respective reply_queue from the list.
2690  */
2691 static void
2692 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2693 {
2694 	struct adapter_reply_queue *reply_q, *next;
2695 
2696 	if (list_empty(&ioc->reply_queue_list))
2697 		return;
2698 
2699 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2700 		list_del(&reply_q->list);
2701 		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2702 			 reply_q);
2703 		kfree(reply_q);
2704 	}
2705 }
2706 
2707 /**
2708  * _base_request_irq - request irq
2709  * @ioc: per adapter object
2710  * @index: msix index into vector table
2711  *
2712  * Inserting respective reply_queue into the list.
2713  */
2714 static int
2715 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2716 {
2717 	struct pci_dev *pdev = ioc->pdev;
2718 	struct adapter_reply_queue *reply_q;
2719 	int r;
2720 
2721 	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2722 	if (!reply_q) {
2723 		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
2724 		    ioc->name, (int)sizeof(struct adapter_reply_queue));
2725 		return -ENOMEM;
2726 	}
2727 	reply_q->ioc = ioc;
2728 	reply_q->msix_index = index;
2729 
2730 	atomic_set(&reply_q->busy, 0);
2731 	if (ioc->msix_enable)
2732 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2733 		    ioc->driver_name, ioc->id, index);
2734 	else
2735 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2736 		    ioc->driver_name, ioc->id);
2737 	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2738 			IRQF_SHARED, reply_q->name, reply_q);
2739 	if (r) {
2740 		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
2741 		       reply_q->name, pci_irq_vector(pdev, index));
2742 		kfree(reply_q);
2743 		return -EBUSY;
2744 	}
2745 
2746 	INIT_LIST_HEAD(&reply_q->list);
2747 	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2748 	return 0;
2749 }
2750 
2751 /**
2752  * _base_assign_reply_queues - assigning msix index for each cpu
2753  * @ioc: per adapter object
2754  *
2755  * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
2756  *
2757  * It would nice if we could call irq_set_affinity, however it is not
2758  * an exported symbol
2759  */
2760 static void
2761 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2762 {
2763 	unsigned int cpu, nr_cpus, nr_msix, index = 0;
2764 	struct adapter_reply_queue *reply_q;
2765 
2766 	if (!_base_is_controller_msix_enabled(ioc))
2767 		return;
2768 
2769 	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2770 
2771 	nr_cpus = num_online_cpus();
2772 	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2773 					       ioc->facts.MaxMSIxVectors);
2774 	if (!nr_msix)
2775 		return;
2776 
2777 	if (smp_affinity_enable) {
2778 		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2779 			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
2780 							reply_q->msix_index);
2781 			if (!mask) {
2782 				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
2783 					ioc->name, reply_q->msix_index);
2784 				continue;
2785 			}
2786 
2787 			for_each_cpu_and(cpu, mask, cpu_online_mask) {
2788 				if (cpu >= ioc->cpu_msix_table_sz)
2789 					break;
2790 				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2791 			}
2792 		}
2793 		return;
2794 	}
2795 	cpu = cpumask_first(cpu_online_mask);
2796 
2797 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2798 
2799 		unsigned int i, group = nr_cpus / nr_msix;
2800 
2801 		if (cpu >= nr_cpus)
2802 			break;
2803 
2804 		if (index < nr_cpus % nr_msix)
2805 			group++;
2806 
2807 		for (i = 0 ; i < group ; i++) {
2808 			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2809 			cpu = cpumask_next(cpu, cpu_online_mask);
2810 		}
2811 		index++;
2812 	}
2813 }
2814 
2815 /**
2816  * _base_disable_msix - disables msix
2817  * @ioc: per adapter object
2818  *
2819  */
2820 static void
2821 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
2822 {
2823 	if (!ioc->msix_enable)
2824 		return;
2825 	pci_disable_msix(ioc->pdev);
2826 	ioc->msix_enable = 0;
2827 }
2828 
2829 /**
2830  * _base_enable_msix - enables msix, failback to io_apic
2831  * @ioc: per adapter object
2832  *
2833  */
2834 static int
2835 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2836 {
2837 	int r;
2838 	int i, local_max_msix_vectors;
2839 	u8 try_msix = 0;
2840 	unsigned int irq_flags = PCI_IRQ_MSIX;
2841 
2842 	if (msix_disable == -1 || msix_disable == 0)
2843 		try_msix = 1;
2844 
2845 	if (!try_msix)
2846 		goto try_ioapic;
2847 
2848 	if (_base_check_enable_msix(ioc) != 0)
2849 		goto try_ioapic;
2850 
2851 	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
2852 		ioc->msix_vector_count);
2853 
2854 	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
2855 	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
2856 	  ioc->cpu_count, max_msix_vectors);
2857 
2858 	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
2859 		local_max_msix_vectors = (reset_devices) ? 1 : 8;
2860 	else
2861 		local_max_msix_vectors = max_msix_vectors;
2862 
2863 	if (local_max_msix_vectors > 0)
2864 		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
2865 			ioc->reply_queue_count);
2866 	else if (local_max_msix_vectors == 0)
2867 		goto try_ioapic;
2868 
2869 	if (ioc->msix_vector_count < ioc->cpu_count)
2870 		smp_affinity_enable = 0;
2871 
2872 	if (smp_affinity_enable)
2873 		irq_flags |= PCI_IRQ_AFFINITY;
2874 
2875 	r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
2876 				  irq_flags);
2877 	if (r < 0) {
2878 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
2879 			"pci_alloc_irq_vectors failed (r=%d) !!!\n",
2880 			ioc->name, r));
2881 		goto try_ioapic;
2882 	}
2883 
2884 	ioc->msix_enable = 1;
2885 	ioc->reply_queue_count = r;
2886 	for (i = 0; i < ioc->reply_queue_count; i++) {
2887 		r = _base_request_irq(ioc, i);
2888 		if (r) {
2889 			_base_free_irq(ioc);
2890 			_base_disable_msix(ioc);
2891 			goto try_ioapic;
2892 		}
2893 	}
2894 
2895 	return 0;
2896 
2897 /* failback to io_apic interrupt routing */
2898  try_ioapic:
2899 
2900 	ioc->reply_queue_count = 1;
2901 	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
2902 	if (r < 0) {
2903 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
2904 			"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
2905 			ioc->name, r));
2906 	} else
2907 		r = _base_request_irq(ioc, 0);
2908 
2909 	return r;
2910 }
2911 
2912 /**
2913  * mpt3sas_base_unmap_resources - free controller resources
2914  * @ioc: per adapter object
2915  */
2916 static void
2917 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2918 {
2919 	struct pci_dev *pdev = ioc->pdev;
2920 
2921 	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
2922 		ioc->name, __func__));
2923 
2924 	_base_free_irq(ioc);
2925 	_base_disable_msix(ioc);
2926 
2927 	if (ioc->combined_reply_queue) {
2928 		kfree(ioc->replyPostRegisterIndex);
2929 		ioc->replyPostRegisterIndex = NULL;
2930 	}
2931 
2932 	if (ioc->chip_phys) {
2933 		iounmap(ioc->chip);
2934 		ioc->chip_phys = 0;
2935 	}
2936 
2937 	if (pci_is_enabled(pdev)) {
2938 		pci_release_selected_regions(ioc->pdev, ioc->bars);
2939 		pci_disable_pcie_error_reporting(pdev);
2940 		pci_disable_device(pdev);
2941 	}
2942 }
2943 
2944 /**
2945  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2946  * @ioc: per adapter object
2947  *
2948  * Returns 0 for success, non-zero for failure.
2949  */
2950 int
2951 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2952 {
2953 	struct pci_dev *pdev = ioc->pdev;
2954 	u32 memap_sz;
2955 	u32 pio_sz;
2956 	int i, r = 0;
2957 	u64 pio_chip = 0;
2958 	phys_addr_t chip_phys = 0;
2959 	struct adapter_reply_queue *reply_q;
2960 
2961 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
2962 	    ioc->name, __func__));
2963 
2964 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2965 	if (pci_enable_device_mem(pdev)) {
2966 		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
2967 			ioc->name);
2968 		ioc->bars = 0;
2969 		return -ENODEV;
2970 	}
2971 
2972 
2973 	if (pci_request_selected_regions(pdev, ioc->bars,
2974 	    ioc->driver_name)) {
2975 		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
2976 			ioc->name);
2977 		ioc->bars = 0;
2978 		r = -ENODEV;
2979 		goto out_fail;
2980 	}
2981 
2982 /* AER (Advanced Error Reporting) hooks */
2983 	pci_enable_pcie_error_reporting(pdev);
2984 
2985 	pci_set_master(pdev);
2986 
2987 
2988 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
2989 		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
2990 		    ioc->name, pci_name(pdev));
2991 		r = -ENODEV;
2992 		goto out_fail;
2993 	}
2994 
2995 	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2996 	     (!memap_sz || !pio_sz); i++) {
2997 		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2998 			if (pio_sz)
2999 				continue;
3000 			pio_chip = (u64)pci_resource_start(pdev, i);
3001 			pio_sz = pci_resource_len(pdev, i);
3002 		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3003 			if (memap_sz)
3004 				continue;
3005 			ioc->chip_phys = pci_resource_start(pdev, i);
3006 			chip_phys = ioc->chip_phys;
3007 			memap_sz = pci_resource_len(pdev, i);
3008 			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3009 		}
3010 	}
3011 
3012 	if (ioc->chip == NULL) {
3013 		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
3014 			" or resource not found\n", ioc->name);
3015 		r = -EINVAL;
3016 		goto out_fail;
3017 	}
3018 
3019 	_base_mask_interrupts(ioc);
3020 
3021 	r = _base_get_ioc_facts(ioc);
3022 	if (r)
3023 		goto out_fail;
3024 
3025 	if (!ioc->rdpq_array_enable_assigned) {
3026 		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3027 		ioc->rdpq_array_enable_assigned = 1;
3028 	}
3029 
3030 	r = _base_enable_msix(ioc);
3031 	if (r)
3032 		goto out_fail;
3033 
3034 	/* Use the Combined reply queue feature only for SAS3 C0 & higher
3035 	 * revision HBAs and also only when reply queue count is greater than 8
3036 	 */
3037 	if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
3038 		/* Determine the Supplemental Reply Post Host Index Registers
3039 		 * Addresse. Supplemental Reply Post Host Index Registers
3040 		 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
3041 		 * each register is at offset bytes of
3042 		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
3043 		 */
3044 		ioc->replyPostRegisterIndex = kcalloc(
3045 		     ioc->combined_reply_index_count,
3046 		     sizeof(resource_size_t *), GFP_KERNEL);
3047 		if (!ioc->replyPostRegisterIndex) {
3048 			dfailprintk(ioc, printk(MPT3SAS_FMT
3049 			"allocation for reply Post Register Index failed!!!\n",
3050 								   ioc->name));
3051 			r = -ENOMEM;
3052 			goto out_fail;
3053 		}
3054 
3055 		for (i = 0; i < ioc->combined_reply_index_count; i++) {
3056 			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3057 			     ((u8 __force *)&ioc->chip->Doorbell +
3058 			     MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3059 			     (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3060 		}
3061 	} else
3062 		ioc->combined_reply_queue = 0;
3063 
3064 	if (ioc->is_warpdrive) {
3065 		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3066 		    &ioc->chip->ReplyPostHostIndex;
3067 
3068 		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3069 			ioc->reply_post_host_index[i] =
3070 			(resource_size_t __iomem *)
3071 			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3072 			* 4)));
3073 	}
3074 
3075 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3076 		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
3077 		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
3078 		    "IO-APIC enabled"),
3079 		    pci_irq_vector(ioc->pdev, reply_q->msix_index));
3080 
3081 	pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
3082 	    ioc->name, &chip_phys, ioc->chip, memap_sz);
3083 	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
3084 	    ioc->name, (unsigned long long)pio_chip, pio_sz);
3085 
3086 	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
3087 	pci_save_state(pdev);
3088 	return 0;
3089 
3090  out_fail:
3091 	mpt3sas_base_unmap_resources(ioc);
3092 	return r;
3093 }
3094 
3095 /**
3096  * mpt3sas_base_get_msg_frame - obtain request mf pointer
3097  * @ioc: per adapter object
3098  * @smid: system request message index(smid zero is invalid)
3099  *
3100  * Returns virt pointer to message frame.
3101  */
3102 void *
3103 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3104 {
3105 	return (void *)(ioc->request + (smid * ioc->request_sz));
3106 }
3107 
3108 /**
3109  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3110  * @ioc: per adapter object
3111  * @smid: system request message index
3112  *
3113  * Returns virt pointer to sense buffer.
3114  */
3115 void *
3116 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3117 {
3118 	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3119 }
3120 
3121 /**
3122  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3123  * @ioc: per adapter object
3124  * @smid: system request message index
3125  *
3126  * Returns phys pointer to the low 32bit address of the sense buffer.
3127  */
3128 __le32
3129 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3130 {
3131 	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3132 	    SCSI_SENSE_BUFFERSIZE));
3133 }
3134 
3135 /**
3136  * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3137  * @ioc: per adapter object
3138  * @smid: system request message index
3139  *
3140  * Returns virt pointer to a PCIe SGL.
3141  */
3142 void *
3143 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3144 {
3145 	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3146 }
3147 
3148 /**
3149  * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3150  * @ioc: per adapter object
3151  * @smid: system request message index
3152  *
3153  * Returns phys pointer to the address of the PCIe buffer.
3154  */
3155 dma_addr_t
3156 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3157 {
3158 	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3159 }
3160 
3161 /**
3162  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3163  * @ioc: per adapter object
3164  * @phys_addr: lower 32 physical addr of the reply
3165  *
3166  * Converts 32bit lower physical addr into a virt address.
3167  */
3168 void *
3169 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3170 {
3171 	if (!phys_addr)
3172 		return NULL;
3173 	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3174 }
3175 
3176 static inline u8
3177 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
3178 {
3179 	return ioc->cpu_msix_table[raw_smp_processor_id()];
3180 }
3181 
3182 /**
3183  * mpt3sas_base_get_smid - obtain a free smid from internal queue
3184  * @ioc: per adapter object
3185  * @cb_idx: callback index
3186  *
3187  * Returns smid (zero is invalid)
3188  */
3189 u16
3190 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3191 {
3192 	unsigned long flags;
3193 	struct request_tracker *request;
3194 	u16 smid;
3195 
3196 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3197 	if (list_empty(&ioc->internal_free_list)) {
3198 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3199 		pr_err(MPT3SAS_FMT "%s: smid not available\n",
3200 		    ioc->name, __func__);
3201 		return 0;
3202 	}
3203 
3204 	request = list_entry(ioc->internal_free_list.next,
3205 	    struct request_tracker, tracker_list);
3206 	request->cb_idx = cb_idx;
3207 	smid = request->smid;
3208 	list_del(&request->tracker_list);
3209 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3210 	return smid;
3211 }
3212 
3213 /**
3214  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3215  * @ioc: per adapter object
3216  * @cb_idx: callback index
3217  * @scmd: pointer to scsi command object
3218  *
3219  * Returns smid (zero is invalid)
3220  */
3221 u16
3222 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3223 	struct scsi_cmnd *scmd)
3224 {
3225 	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3226 	unsigned int tag = scmd->request->tag;
3227 	u16 smid;
3228 
3229 	smid = tag + 1;
3230 	request->cb_idx = cb_idx;
3231 	request->msix_io = _base_get_msix_index(ioc);
3232 	request->smid = smid;
3233 	INIT_LIST_HEAD(&request->chain_list);
3234 	return smid;
3235 }
3236 
3237 /**
3238  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3239  * @ioc: per adapter object
3240  * @cb_idx: callback index
3241  *
3242  * Returns smid (zero is invalid)
3243  */
3244 u16
3245 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3246 {
3247 	unsigned long flags;
3248 	struct request_tracker *request;
3249 	u16 smid;
3250 
3251 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3252 	if (list_empty(&ioc->hpr_free_list)) {
3253 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3254 		return 0;
3255 	}
3256 
3257 	request = list_entry(ioc->hpr_free_list.next,
3258 	    struct request_tracker, tracker_list);
3259 	request->cb_idx = cb_idx;
3260 	smid = request->smid;
3261 	list_del(&request->tracker_list);
3262 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3263 	return smid;
3264 }
3265 
3266 static void
3267 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3268 {
3269 	/*
3270 	 * See _wait_for_commands_to_complete() call with regards to this code.
3271 	 */
3272 	if (ioc->shost_recovery && ioc->pending_io_count) {
3273 		ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
3274 		if (ioc->pending_io_count == 0)
3275 			wake_up(&ioc->reset_wq);
3276 	}
3277 }
3278 
3279 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3280 			   struct scsiio_tracker *st)
3281 {
3282 	if (WARN_ON(st->smid == 0))
3283 		return;
3284 	st->cb_idx = 0xFF;
3285 	st->direct_io = 0;
3286 	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3287 }
3288 
3289 /**
3290  * mpt3sas_base_free_smid - put smid back on free_list
3291  * @ioc: per adapter object
3292  * @smid: system request message index
3293  *
3294  * Return nothing.
3295  */
3296 void
3297 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3298 {
3299 	unsigned long flags;
3300 	int i;
3301 
3302 	if (smid < ioc->hi_priority_smid) {
3303 		struct scsiio_tracker *st;
3304 
3305 		st = _get_st_from_smid(ioc, smid);
3306 		if (!st) {
3307 			_base_recovery_check(ioc);
3308 			return;
3309 		}
3310 		mpt3sas_base_clear_st(ioc, st);
3311 		_base_recovery_check(ioc);
3312 		return;
3313 	}
3314 
3315 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3316 	if (smid < ioc->internal_smid) {
3317 		/* hi-priority */
3318 		i = smid - ioc->hi_priority_smid;
3319 		ioc->hpr_lookup[i].cb_idx = 0xFF;
3320 		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3321 	} else if (smid <= ioc->hba_queue_depth) {
3322 		/* internal queue */
3323 		i = smid - ioc->internal_smid;
3324 		ioc->internal_lookup[i].cb_idx = 0xFF;
3325 		list_add(&ioc->internal_lookup[i].tracker_list,
3326 		    &ioc->internal_free_list);
3327 	}
3328 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3329 }
3330 
3331 /**
3332  * _base_mpi_ep_writeq - 32 bit write to MMIO
3333  * @b: data payload
3334  * @addr: address in MMIO space
3335  * @writeq_lock: spin lock
3336  *
3337  * This special handling for MPI EP to take care of 32 bit
3338  * environment where its not quarenteed to send the entire word
3339  * in one transfer.
3340  */
3341 static inline void
3342 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3343 					spinlock_t *writeq_lock)
3344 {
3345 	unsigned long flags;
3346 	__u64 data_out = b;
3347 
3348 	spin_lock_irqsave(writeq_lock, flags);
3349 	writel((u32)(data_out), addr);
3350 	writel((u32)(data_out >> 32), (addr + 4));
3351 	mmiowb();
3352 	spin_unlock_irqrestore(writeq_lock, flags);
3353 }
3354 
3355 /**
3356  * _base_writeq - 64 bit write to MMIO
3357  * @ioc: per adapter object
3358  * @b: data payload
3359  * @addr: address in MMIO space
3360  * @writeq_lock: spin lock
3361  *
3362  * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3363  * care of 32 bit environment where its not quarenteed to send the entire word
3364  * in one transfer.
3365  */
3366 #if defined(writeq) && defined(CONFIG_64BIT)
3367 static inline void
3368 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3369 {
3370 	writeq(b, addr);
3371 }
3372 #else
3373 static inline void
3374 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3375 {
3376 	_base_mpi_ep_writeq(b, addr, writeq_lock);
3377 }
3378 #endif
3379 
3380 /**
3381  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3382  * @ioc: per adapter object
3383  * @smid: system request message index
3384  * @handle: device handle
3385  *
3386  * Return nothing.
3387  */
3388 static void
3389 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3390 {
3391 	Mpi2RequestDescriptorUnion_t descriptor;
3392 	u64 *request = (u64 *)&descriptor;
3393 	void *mpi_req_iomem;
3394 	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3395 
3396 	_clone_sg_entries(ioc, (void *) mfp, smid);
3397 	mpi_req_iomem = (void __force *)ioc->chip +
3398 			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3399 	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3400 					ioc->request_sz);
3401 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3402 	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
3403 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3404 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3405 	descriptor.SCSIIO.LMID = 0;
3406 	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3407 	    &ioc->scsi_lookup_lock);
3408 }
3409 
3410 /**
3411  * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3412  * @ioc: per adapter object
3413  * @smid: system request message index
3414  * @handle: device handle
3415  *
3416  * Return nothing.
3417  */
3418 static void
3419 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3420 {
3421 	Mpi2RequestDescriptorUnion_t descriptor;
3422 	u64 *request = (u64 *)&descriptor;
3423 
3424 
3425 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3426 	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
3427 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3428 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3429 	descriptor.SCSIIO.LMID = 0;
3430 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3431 	    &ioc->scsi_lookup_lock);
3432 }
3433 
3434 /**
3435  * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
3436  * @ioc: per adapter object
3437  * @smid: system request message index
3438  * @handle: device handle
3439  *
3440  * Return nothing.
3441  */
3442 void
3443 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3444 	u16 handle)
3445 {
3446 	Mpi2RequestDescriptorUnion_t descriptor;
3447 	u64 *request = (u64 *)&descriptor;
3448 
3449 	descriptor.SCSIIO.RequestFlags =
3450 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3451 	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3452 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3453 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3454 	descriptor.SCSIIO.LMID = 0;
3455 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3456 	    &ioc->scsi_lookup_lock);
3457 }
3458 
3459 /**
3460  * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
3461  * @ioc: per adapter object
3462  * @smid: system request message index
3463  * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
3464  * Return nothing.
3465  */
3466 void
3467 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3468 	u16 msix_task)
3469 {
3470 	Mpi2RequestDescriptorUnion_t descriptor;
3471 	void *mpi_req_iomem;
3472 	u64 *request;
3473 
3474 	if (ioc->is_mcpu_endpoint) {
3475 		MPI2RequestHeader_t *request_hdr;
3476 
3477 		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3478 
3479 		request_hdr = (MPI2RequestHeader_t *)mfp;
3480 		/* TBD 256 is offset within sys register. */
3481 		mpi_req_iomem = (void __force *)ioc->chip
3482 					+ MPI_FRAME_START_OFFSET
3483 					+ (smid * ioc->request_sz);
3484 		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3485 							ioc->request_sz);
3486 	}
3487 
3488 	request = (u64 *)&descriptor;
3489 
3490 	descriptor.HighPriority.RequestFlags =
3491 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3492 	descriptor.HighPriority.MSIxIndex =  msix_task;
3493 	descriptor.HighPriority.SMID = cpu_to_le16(smid);
3494 	descriptor.HighPriority.LMID = 0;
3495 	descriptor.HighPriority.Reserved1 = 0;
3496 	if (ioc->is_mcpu_endpoint)
3497 		_base_mpi_ep_writeq(*request,
3498 				&ioc->chip->RequestDescriptorPostLow,
3499 				&ioc->scsi_lookup_lock);
3500 	else
3501 		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3502 		    &ioc->scsi_lookup_lock);
3503 }
3504 
3505 /**
3506  * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3507  *  firmware
3508  * @ioc: per adapter object
3509  * @smid: system request message index
3510  *
3511  * Return nothing.
3512  */
3513 void
3514 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3515 {
3516 	Mpi2RequestDescriptorUnion_t descriptor;
3517 	u64 *request = (u64 *)&descriptor;
3518 
3519 	descriptor.Default.RequestFlags =
3520 		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3521 	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
3522 	descriptor.Default.SMID = cpu_to_le16(smid);
3523 	descriptor.Default.LMID = 0;
3524 	descriptor.Default.DescriptorTypeDependent = 0;
3525 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3526 	    &ioc->scsi_lookup_lock);
3527 }
3528 
3529 /**
3530  * mpt3sas_base_put_smid_default - Default, primarily used for config pages
3531  * @ioc: per adapter object
3532  * @smid: system request message index
3533  *
3534  * Return nothing.
3535  */
3536 void
3537 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3538 {
3539 	Mpi2RequestDescriptorUnion_t descriptor;
3540 	void *mpi_req_iomem;
3541 	u64 *request;
3542 	MPI2RequestHeader_t *request_hdr;
3543 
3544 	if (ioc->is_mcpu_endpoint) {
3545 		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3546 
3547 		request_hdr = (MPI2RequestHeader_t *)mfp;
3548 
3549 		_clone_sg_entries(ioc, (void *) mfp, smid);
3550 		/* TBD 256 is offset within sys register */
3551 		mpi_req_iomem = (void __force *)ioc->chip +
3552 			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3553 		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3554 							ioc->request_sz);
3555 	}
3556 	request = (u64 *)&descriptor;
3557 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3558 	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
3559 	descriptor.Default.SMID = cpu_to_le16(smid);
3560 	descriptor.Default.LMID = 0;
3561 	descriptor.Default.DescriptorTypeDependent = 0;
3562 	if (ioc->is_mcpu_endpoint)
3563 		_base_mpi_ep_writeq(*request,
3564 				&ioc->chip->RequestDescriptorPostLow,
3565 				&ioc->scsi_lookup_lock);
3566 	else
3567 		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3568 				&ioc->scsi_lookup_lock);
3569 }
3570 
3571 /**
3572  * _base_display_OEMs_branding - Display branding string
3573  * @ioc: per adapter object
3574  *
3575  * Return nothing.
3576  */
3577 static void
3578 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3579 {
3580 	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
3581 		return;
3582 
3583 	switch (ioc->pdev->subsystem_vendor) {
3584 	case PCI_VENDOR_ID_INTEL:
3585 		switch (ioc->pdev->device) {
3586 		case MPI2_MFGPAGE_DEVID_SAS2008:
3587 			switch (ioc->pdev->subsystem_device) {
3588 			case MPT2SAS_INTEL_RMS2LL080_SSDID:
3589 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3590 				    MPT2SAS_INTEL_RMS2LL080_BRANDING);
3591 				break;
3592 			case MPT2SAS_INTEL_RMS2LL040_SSDID:
3593 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3594 				    MPT2SAS_INTEL_RMS2LL040_BRANDING);
3595 				break;
3596 			case MPT2SAS_INTEL_SSD910_SSDID:
3597 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3598 				    MPT2SAS_INTEL_SSD910_BRANDING);
3599 				break;
3600 			default:
3601 				pr_info(MPT3SAS_FMT
3602 				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3603 				 ioc->name, ioc->pdev->subsystem_device);
3604 				break;
3605 			}
3606 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
3607 			switch (ioc->pdev->subsystem_device) {
3608 			case MPT2SAS_INTEL_RS25GB008_SSDID:
3609 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3610 				    MPT2SAS_INTEL_RS25GB008_BRANDING);
3611 				break;
3612 			case MPT2SAS_INTEL_RMS25JB080_SSDID:
3613 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3614 				    MPT2SAS_INTEL_RMS25JB080_BRANDING);
3615 				break;
3616 			case MPT2SAS_INTEL_RMS25JB040_SSDID:
3617 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3618 				    MPT2SAS_INTEL_RMS25JB040_BRANDING);
3619 				break;
3620 			case MPT2SAS_INTEL_RMS25KB080_SSDID:
3621 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3622 				    MPT2SAS_INTEL_RMS25KB080_BRANDING);
3623 				break;
3624 			case MPT2SAS_INTEL_RMS25KB040_SSDID:
3625 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3626 				    MPT2SAS_INTEL_RMS25KB040_BRANDING);
3627 				break;
3628 			case MPT2SAS_INTEL_RMS25LB040_SSDID:
3629 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3630 				    MPT2SAS_INTEL_RMS25LB040_BRANDING);
3631 				break;
3632 			case MPT2SAS_INTEL_RMS25LB080_SSDID:
3633 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3634 				    MPT2SAS_INTEL_RMS25LB080_BRANDING);
3635 				break;
3636 			default:
3637 				pr_info(MPT3SAS_FMT
3638 				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3639 				 ioc->name, ioc->pdev->subsystem_device);
3640 				break;
3641 			}
3642 		case MPI25_MFGPAGE_DEVID_SAS3008:
3643 			switch (ioc->pdev->subsystem_device) {
3644 			case MPT3SAS_INTEL_RMS3JC080_SSDID:
3645 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3646 					MPT3SAS_INTEL_RMS3JC080_BRANDING);
3647 				break;
3648 
3649 			case MPT3SAS_INTEL_RS3GC008_SSDID:
3650 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3651 					MPT3SAS_INTEL_RS3GC008_BRANDING);
3652 				break;
3653 			case MPT3SAS_INTEL_RS3FC044_SSDID:
3654 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3655 					MPT3SAS_INTEL_RS3FC044_BRANDING);
3656 				break;
3657 			case MPT3SAS_INTEL_RS3UC080_SSDID:
3658 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3659 					MPT3SAS_INTEL_RS3UC080_BRANDING);
3660 				break;
3661 			default:
3662 				pr_info(MPT3SAS_FMT
3663 				 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3664 				 ioc->name, ioc->pdev->subsystem_device);
3665 				break;
3666 			}
3667 			break;
3668 		default:
3669 			pr_info(MPT3SAS_FMT
3670 			 "Intel(R) Controller: Subsystem ID: 0x%X\n",
3671 			 ioc->name, ioc->pdev->subsystem_device);
3672 			break;
3673 		}
3674 		break;
3675 	case PCI_VENDOR_ID_DELL:
3676 		switch (ioc->pdev->device) {
3677 		case MPI2_MFGPAGE_DEVID_SAS2008:
3678 			switch (ioc->pdev->subsystem_device) {
3679 			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
3680 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3681 				 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
3682 				break;
3683 			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
3684 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3685 				 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
3686 				break;
3687 			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
3688 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3689 				 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
3690 				break;
3691 			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
3692 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3693 				 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
3694 				break;
3695 			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
3696 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3697 				 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
3698 				break;
3699 			case MPT2SAS_DELL_PERC_H200_SSDID:
3700 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3701 				 MPT2SAS_DELL_PERC_H200_BRANDING);
3702 				break;
3703 			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
3704 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3705 				 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
3706 				break;
3707 			default:
3708 				pr_info(MPT3SAS_FMT
3709 				   "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
3710 				   ioc->name, ioc->pdev->subsystem_device);
3711 				break;
3712 			}
3713 			break;
3714 		case MPI25_MFGPAGE_DEVID_SAS3008:
3715 			switch (ioc->pdev->subsystem_device) {
3716 			case MPT3SAS_DELL_12G_HBA_SSDID:
3717 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3718 					MPT3SAS_DELL_12G_HBA_BRANDING);
3719 				break;
3720 			default:
3721 				pr_info(MPT3SAS_FMT
3722 				   "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
3723 				   ioc->name, ioc->pdev->subsystem_device);
3724 				break;
3725 			}
3726 			break;
3727 		default:
3728 			pr_info(MPT3SAS_FMT
3729 			   "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
3730 			   ioc->pdev->subsystem_device);
3731 			break;
3732 		}
3733 		break;
3734 	case PCI_VENDOR_ID_CISCO:
3735 		switch (ioc->pdev->device) {
3736 		case MPI25_MFGPAGE_DEVID_SAS3008:
3737 			switch (ioc->pdev->subsystem_device) {
3738 			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
3739 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3740 					MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
3741 				break;
3742 			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
3743 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3744 					MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
3745 				break;
3746 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3747 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3748 					MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3749 				break;
3750 			default:
3751 				pr_info(MPT3SAS_FMT
3752 				  "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3753 				  ioc->name, ioc->pdev->subsystem_device);
3754 				break;
3755 			}
3756 			break;
3757 		case MPI25_MFGPAGE_DEVID_SAS3108_1:
3758 			switch (ioc->pdev->subsystem_device) {
3759 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
3760 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3761 				MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
3762 				break;
3763 			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
3764 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3765 				MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
3766 				);
3767 				break;
3768 			default:
3769 				pr_info(MPT3SAS_FMT
3770 				 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3771 				 ioc->name, ioc->pdev->subsystem_device);
3772 				break;
3773 			}
3774 			break;
3775 		default:
3776 			pr_info(MPT3SAS_FMT
3777 			   "Cisco SAS HBA: Subsystem ID: 0x%X\n",
3778 			   ioc->name, ioc->pdev->subsystem_device);
3779 			break;
3780 		}
3781 		break;
3782 	case MPT2SAS_HP_3PAR_SSVID:
3783 		switch (ioc->pdev->device) {
3784 		case MPI2_MFGPAGE_DEVID_SAS2004:
3785 			switch (ioc->pdev->subsystem_device) {
3786 			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
3787 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3788 				    MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
3789 				break;
3790 			default:
3791 				pr_info(MPT3SAS_FMT
3792 				   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3793 				   ioc->name, ioc->pdev->subsystem_device);
3794 				break;
3795 			}
3796 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
3797 			switch (ioc->pdev->subsystem_device) {
3798 			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
3799 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3800 				    MPT2SAS_HP_2_4_INTERNAL_BRANDING);
3801 				break;
3802 			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
3803 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3804 				    MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
3805 				break;
3806 			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
3807 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3808 				 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
3809 				break;
3810 			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
3811 				pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3812 				    MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
3813 				break;
3814 			default:
3815 				pr_info(MPT3SAS_FMT
3816 				   "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3817 				   ioc->name, ioc->pdev->subsystem_device);
3818 				break;
3819 			}
3820 		default:
3821 			pr_info(MPT3SAS_FMT
3822 			   "HP SAS HBA: Subsystem ID: 0x%X\n",
3823 			   ioc->name, ioc->pdev->subsystem_device);
3824 			break;
3825 		}
3826 	default:
3827 		break;
3828 	}
3829 }
3830 
3831 /**
3832  * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
3833  *				version from FW Image Header.
3834  * @ioc: per adapter object
3835  *
3836  * Returns 0 for success, non-zero for failure.
3837  */
3838 	static int
3839 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
3840 {
3841 	Mpi2FWImageHeader_t *FWImgHdr;
3842 	Mpi25FWUploadRequest_t *mpi_request;
3843 	Mpi2FWUploadReply_t mpi_reply;
3844 	int r = 0;
3845 	void *fwpkg_data = NULL;
3846 	dma_addr_t fwpkg_data_dma;
3847 	u16 smid, ioc_status;
3848 	size_t data_length;
3849 
3850 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3851 				__func__));
3852 
3853 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3854 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3855 				ioc->name, __func__);
3856 		return -EAGAIN;
3857 	}
3858 
3859 	data_length = sizeof(Mpi2FWImageHeader_t);
3860 	fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
3861 			&fwpkg_data_dma);
3862 	if (!fwpkg_data) {
3863 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3864 				ioc->name, __FILE__, __LINE__, __func__);
3865 		return -ENOMEM;
3866 	}
3867 
3868 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3869 	if (!smid) {
3870 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3871 				ioc->name, __func__);
3872 		r = -EAGAIN;
3873 		goto out;
3874 	}
3875 
3876 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3877 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3878 	ioc->base_cmds.smid = smid;
3879 	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
3880 	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
3881 	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
3882 	mpi_request->ImageSize = cpu_to_le32(data_length);
3883 	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
3884 			data_length);
3885 	init_completion(&ioc->base_cmds.done);
3886 	mpt3sas_base_put_smid_default(ioc, smid);
3887 	/* Wait for 15 seconds */
3888 	wait_for_completion_timeout(&ioc->base_cmds.done,
3889 			FW_IMG_HDR_READ_TIMEOUT*HZ);
3890 	pr_info(MPT3SAS_FMT "%s: complete\n",
3891 			ioc->name, __func__);
3892 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3893 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3894 				ioc->name, __func__);
3895 		_debug_dump_mf(mpi_request,
3896 				sizeof(Mpi25FWUploadRequest_t)/4);
3897 		r = -ETIME;
3898 	} else {
3899 		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
3900 		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
3901 			memcpy(&mpi_reply, ioc->base_cmds.reply,
3902 					sizeof(Mpi2FWUploadReply_t));
3903 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3904 						MPI2_IOCSTATUS_MASK;
3905 			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3906 				FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
3907 				if (FWImgHdr->PackageVersion.Word) {
3908 					pr_info(MPT3SAS_FMT "FW Package Version"
3909 					"(%02d.%02d.%02d.%02d)\n",
3910 					ioc->name,
3911 					FWImgHdr->PackageVersion.Struct.Major,
3912 					FWImgHdr->PackageVersion.Struct.Minor,
3913 					FWImgHdr->PackageVersion.Struct.Unit,
3914 					FWImgHdr->PackageVersion.Struct.Dev);
3915 				}
3916 			} else {
3917 				_debug_dump_mf(&mpi_reply,
3918 						sizeof(Mpi2FWUploadReply_t)/4);
3919 			}
3920 		}
3921 	}
3922 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3923 out:
3924 	if (fwpkg_data)
3925 		pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
3926 				fwpkg_data_dma);
3927 	return r;
3928 }
3929 
3930 /**
3931  * _base_display_ioc_capabilities - Disply IOC's capabilities.
3932  * @ioc: per adapter object
3933  *
3934  * Return nothing.
3935  */
3936 static void
3937 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
3938 {
3939 	int i = 0;
3940 	char desc[16];
3941 	u32 iounit_pg1_flags;
3942 	u32 bios_version;
3943 
3944 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
3945 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
3946 	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
3947 	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
3948 	    ioc->name, desc,
3949 	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
3950 	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
3951 	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
3952 	   ioc->facts.FWVersion.Word & 0x000000FF,
3953 	   ioc->pdev->revision,
3954 	   (bios_version & 0xFF000000) >> 24,
3955 	   (bios_version & 0x00FF0000) >> 16,
3956 	   (bios_version & 0x0000FF00) >> 8,
3957 	    bios_version & 0x000000FF);
3958 
3959 	_base_display_OEMs_branding(ioc);
3960 
3961 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
3962 		pr_info("%sNVMe", i ? "," : "");
3963 		i++;
3964 	}
3965 
3966 	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
3967 
3968 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
3969 		pr_info("Initiator");
3970 		i++;
3971 	}
3972 
3973 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
3974 		pr_info("%sTarget", i ? "," : "");
3975 		i++;
3976 	}
3977 
3978 	i = 0;
3979 	pr_info("), ");
3980 	pr_info("Capabilities=(");
3981 
3982 	if (!ioc->hide_ir_msg) {
3983 		if (ioc->facts.IOCCapabilities &
3984 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
3985 			pr_info("Raid");
3986 			i++;
3987 		}
3988 	}
3989 
3990 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
3991 		pr_info("%sTLR", i ? "," : "");
3992 		i++;
3993 	}
3994 
3995 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
3996 		pr_info("%sMulticast", i ? "," : "");
3997 		i++;
3998 	}
3999 
4000 	if (ioc->facts.IOCCapabilities &
4001 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4002 		pr_info("%sBIDI Target", i ? "," : "");
4003 		i++;
4004 	}
4005 
4006 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4007 		pr_info("%sEEDP", i ? "," : "");
4008 		i++;
4009 	}
4010 
4011 	if (ioc->facts.IOCCapabilities &
4012 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4013 		pr_info("%sSnapshot Buffer", i ? "," : "");
4014 		i++;
4015 	}
4016 
4017 	if (ioc->facts.IOCCapabilities &
4018 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4019 		pr_info("%sDiag Trace Buffer", i ? "," : "");
4020 		i++;
4021 	}
4022 
4023 	if (ioc->facts.IOCCapabilities &
4024 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4025 		pr_info("%sDiag Extended Buffer", i ? "," : "");
4026 		i++;
4027 	}
4028 
4029 	if (ioc->facts.IOCCapabilities &
4030 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4031 		pr_info("%sTask Set Full", i ? "," : "");
4032 		i++;
4033 	}
4034 
4035 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4036 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4037 		pr_info("%sNCQ", i ? "," : "");
4038 		i++;
4039 	}
4040 
4041 	pr_info(")\n");
4042 }
4043 
4044 /**
4045  * mpt3sas_base_update_missing_delay - change the missing delay timers
4046  * @ioc: per adapter object
4047  * @device_missing_delay: amount of time till device is reported missing
4048  * @io_missing_delay: interval IO is returned when there is a missing device
4049  *
4050  * Return nothing.
4051  *
4052  * Passed on the command line, this function will modify the device missing
4053  * delay, as well as the io missing delay. This should be called at driver
4054  * load time.
4055  */
4056 void
4057 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4058 	u16 device_missing_delay, u8 io_missing_delay)
4059 {
4060 	u16 dmd, dmd_new, dmd_orignal;
4061 	u8 io_missing_delay_original;
4062 	u16 sz;
4063 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4064 	Mpi2ConfigReply_t mpi_reply;
4065 	u8 num_phys = 0;
4066 	u16 ioc_status;
4067 
4068 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4069 	if (!num_phys)
4070 		return;
4071 
4072 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4073 	    sizeof(Mpi2SasIOUnit1PhyData_t));
4074 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4075 	if (!sas_iounit_pg1) {
4076 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
4077 		    ioc->name, __FILE__, __LINE__, __func__);
4078 		goto out;
4079 	}
4080 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4081 	    sas_iounit_pg1, sz))) {
4082 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
4083 		    ioc->name, __FILE__, __LINE__, __func__);
4084 		goto out;
4085 	}
4086 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4087 	    MPI2_IOCSTATUS_MASK;
4088 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4089 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
4090 		    ioc->name, __FILE__, __LINE__, __func__);
4091 		goto out;
4092 	}
4093 
4094 	/* device missing delay */
4095 	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4096 	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4097 		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4098 	else
4099 		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4100 	dmd_orignal = dmd;
4101 	if (device_missing_delay > 0x7F) {
4102 		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4103 		    device_missing_delay;
4104 		dmd = dmd / 16;
4105 		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4106 	} else
4107 		dmd = device_missing_delay;
4108 	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4109 
4110 	/* io missing delay */
4111 	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4112 	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4113 
4114 	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4115 	    sz)) {
4116 		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4117 			dmd_new = (dmd &
4118 			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4119 		else
4120 			dmd_new =
4121 		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4122 		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
4123 			ioc->name, dmd_orignal, dmd_new);
4124 		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
4125 			ioc->name, io_missing_delay_original,
4126 		    io_missing_delay);
4127 		ioc->device_missing_delay = dmd_new;
4128 		ioc->io_missing_delay = io_missing_delay;
4129 	}
4130 
4131 out:
4132 	kfree(sas_iounit_pg1);
4133 }
4134 /**
4135  * _base_static_config_pages - static start of day config pages
4136  * @ioc: per adapter object
4137  *
4138  * Return nothing.
4139  */
4140 static void
4141 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4142 {
4143 	Mpi2ConfigReply_t mpi_reply;
4144 	u32 iounit_pg1_flags;
4145 
4146 	ioc->nvme_abort_timeout = 30;
4147 	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4148 	if (ioc->ir_firmware)
4149 		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4150 		    &ioc->manu_pg10);
4151 
4152 	/*
4153 	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
4154 	 * flag unset in NVDATA.
4155 	 */
4156 	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4157 	if (ioc->manu_pg11.EEDPTagMode == 0) {
4158 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4159 		    ioc->name);
4160 		ioc->manu_pg11.EEDPTagMode &= ~0x3;
4161 		ioc->manu_pg11.EEDPTagMode |= 0x1;
4162 		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4163 		    &ioc->manu_pg11);
4164 	}
4165 	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4166 		ioc->tm_custom_handling = 1;
4167 	else {
4168 		ioc->tm_custom_handling = 0;
4169 		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4170 			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4171 		else if (ioc->manu_pg11.NVMeAbortTO >
4172 					NVME_TASK_ABORT_MAX_TIMEOUT)
4173 			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4174 		else
4175 			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4176 	}
4177 
4178 	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4179 	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4180 	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4181 	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4182 	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4183 	mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4184 	_base_display_ioc_capabilities(ioc);
4185 
4186 	/*
4187 	 * Enable task_set_full handling in iounit_pg1 when the
4188 	 * facts capabilities indicate that its supported.
4189 	 */
4190 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4191 	if ((ioc->facts.IOCCapabilities &
4192 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4193 		iounit_pg1_flags &=
4194 		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4195 	else
4196 		iounit_pg1_flags |=
4197 		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4198 	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4199 	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4200 
4201 	if (ioc->iounit_pg8.NumSensors)
4202 		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4203 }
4204 
4205 /**
4206  * mpt3sas_free_enclosure_list - release memory
4207  * @ioc: per adapter object
4208  *
4209  * Free memory allocated during encloure add.
4210  *
4211  * Return nothing.
4212  */
4213 void
4214 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4215 {
4216 	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4217 
4218 	/* Free enclosure list */
4219 	list_for_each_entry_safe(enclosure_dev,
4220 			enclosure_dev_next, &ioc->enclosure_list, list) {
4221 		list_del(&enclosure_dev->list);
4222 		kfree(enclosure_dev);
4223 	}
4224 }
4225 
4226 /**
4227  * _base_release_memory_pools - release memory
4228  * @ioc: per adapter object
4229  *
4230  * Free memory allocated from _base_allocate_memory_pools.
4231  *
4232  * Return nothing.
4233  */
4234 static void
4235 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4236 {
4237 	int i = 0;
4238 	int j = 0;
4239 	struct chain_tracker *ct;
4240 	struct reply_post_struct *rps;
4241 
4242 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4243 	    __func__));
4244 
4245 	if (ioc->request) {
4246 		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
4247 		    ioc->request,  ioc->request_dma);
4248 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
4249 			"request_pool(0x%p): free\n",
4250 			ioc->name, ioc->request));
4251 		ioc->request = NULL;
4252 	}
4253 
4254 	if (ioc->sense) {
4255 		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4256 		dma_pool_destroy(ioc->sense_dma_pool);
4257 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
4258 			"sense_pool(0x%p): free\n",
4259 			ioc->name, ioc->sense));
4260 		ioc->sense = NULL;
4261 	}
4262 
4263 	if (ioc->reply) {
4264 		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4265 		dma_pool_destroy(ioc->reply_dma_pool);
4266 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
4267 			"reply_pool(0x%p): free\n",
4268 			ioc->name, ioc->reply));
4269 		ioc->reply = NULL;
4270 	}
4271 
4272 	if (ioc->reply_free) {
4273 		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4274 		    ioc->reply_free_dma);
4275 		dma_pool_destroy(ioc->reply_free_dma_pool);
4276 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
4277 			"reply_free_pool(0x%p): free\n",
4278 			ioc->name, ioc->reply_free));
4279 		ioc->reply_free = NULL;
4280 	}
4281 
4282 	if (ioc->reply_post) {
4283 		do {
4284 			rps = &ioc->reply_post[i];
4285 			if (rps->reply_post_free) {
4286 				dma_pool_free(
4287 				    ioc->reply_post_free_dma_pool,
4288 				    rps->reply_post_free,
4289 				    rps->reply_post_free_dma);
4290 				dexitprintk(ioc, pr_info(MPT3SAS_FMT
4291 				    "reply_post_free_pool(0x%p): free\n",
4292 				    ioc->name, rps->reply_post_free));
4293 				rps->reply_post_free = NULL;
4294 			}
4295 		} while (ioc->rdpq_array_enable &&
4296 			   (++i < ioc->reply_queue_count));
4297 		if (ioc->reply_post_free_array &&
4298 			ioc->rdpq_array_enable) {
4299 			dma_pool_free(ioc->reply_post_free_array_dma_pool,
4300 				ioc->reply_post_free_array,
4301 				ioc->reply_post_free_array_dma);
4302 			ioc->reply_post_free_array = NULL;
4303 		}
4304 		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4305 		dma_pool_destroy(ioc->reply_post_free_dma_pool);
4306 		kfree(ioc->reply_post);
4307 	}
4308 
4309 	if (ioc->pcie_sgl_dma_pool) {
4310 		for (i = 0; i < ioc->scsiio_depth; i++) {
4311 			dma_pool_free(ioc->pcie_sgl_dma_pool,
4312 					ioc->pcie_sg_lookup[i].pcie_sgl,
4313 					ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4314 		}
4315 		if (ioc->pcie_sgl_dma_pool)
4316 			dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4317 	}
4318 
4319 	if (ioc->config_page) {
4320 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
4321 		    "config_page(0x%p): free\n", ioc->name,
4322 		    ioc->config_page));
4323 		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
4324 		    ioc->config_page, ioc->config_page_dma);
4325 	}
4326 
4327 	kfree(ioc->hpr_lookup);
4328 	kfree(ioc->internal_lookup);
4329 	if (ioc->chain_lookup) {
4330 		for (i = 0; i < ioc->scsiio_depth; i++) {
4331 			for (j = ioc->chains_per_prp_buffer;
4332 			    j < ioc->chains_needed_per_io; j++) {
4333 				ct = &ioc->chain_lookup[i].chains_per_smid[j];
4334 				if (ct && ct->chain_buffer)
4335 					dma_pool_free(ioc->chain_dma_pool,
4336 						ct->chain_buffer,
4337 						ct->chain_buffer_dma);
4338 			}
4339 			kfree(ioc->chain_lookup[i].chains_per_smid);
4340 		}
4341 		dma_pool_destroy(ioc->chain_dma_pool);
4342 		kfree(ioc->chain_lookup);
4343 		ioc->chain_lookup = NULL;
4344 	}
4345 }
4346 
4347 /**
4348  * is_MSB_are_same - checks whether all reply queues in a set are
4349  *	having same upper 32bits in their base memory address.
4350  * @reply_pool_start_address: Base address of a reply queue set
4351  * @pool_sz: Size of single Reply Descriptor Post Queues pool size
4352  *
4353  * Returns 1 if reply queues in a set have a same upper 32bits
4354  * in their base memory address,
4355  * else 0
4356  */
4357 
4358 static int
4359 is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4360 {
4361 	long reply_pool_end_address;
4362 
4363 	reply_pool_end_address = reply_pool_start_address + pool_sz;
4364 
4365 	if (upper_32_bits(reply_pool_start_address) ==
4366 		upper_32_bits(reply_pool_end_address))
4367 		return 1;
4368 	else
4369 		return 0;
4370 }
4371 
4372 /**
4373  * _base_allocate_memory_pools - allocate start of day memory pools
4374  * @ioc: per adapter object
4375  *
4376  * Returns 0 success, anything else error
4377  */
4378 static int
4379 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4380 {
4381 	struct mpt3sas_facts *facts;
4382 	u16 max_sge_elements;
4383 	u16 chains_needed_per_io;
4384 	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
4385 	u32 retry_sz;
4386 	u16 max_request_credit, nvme_blocks_needed;
4387 	unsigned short sg_tablesize;
4388 	u16 sge_size;
4389 	int i, j;
4390 	struct chain_tracker *ct;
4391 
4392 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4393 	    __func__));
4394 
4395 
4396 	retry_sz = 0;
4397 	facts = &ioc->facts;
4398 
4399 	/* command line tunables for max sgl entries */
4400 	if (max_sgl_entries != -1)
4401 		sg_tablesize = max_sgl_entries;
4402 	else {
4403 		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
4404 			sg_tablesize = MPT2SAS_SG_DEPTH;
4405 		else
4406 			sg_tablesize = MPT3SAS_SG_DEPTH;
4407 	}
4408 
4409 	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
4410 	if (reset_devices)
4411 		sg_tablesize = min_t(unsigned short, sg_tablesize,
4412 		   MPT_KDUMP_MIN_PHYS_SEGMENTS);
4413 
4414 	if (ioc->is_mcpu_endpoint)
4415 		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4416 	else {
4417 		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
4418 			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4419 		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
4420 			sg_tablesize = min_t(unsigned short, sg_tablesize,
4421 					SG_MAX_SEGMENTS);
4422 			pr_warn(MPT3SAS_FMT
4423 				"sg_tablesize(%u) is bigger than kernel "
4424 				"defined SG_CHUNK_SIZE(%u)\n", ioc->name,
4425 				sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
4426 		}
4427 		ioc->shost->sg_tablesize = sg_tablesize;
4428 	}
4429 
4430 	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
4431 		(facts->RequestCredit / 4));
4432 	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
4433 		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
4434 				INTERNAL_SCSIIO_CMDS_COUNT)) {
4435 			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
4436 			    Credits, it has just %d number of credits\n",
4437 			    ioc->name, facts->RequestCredit);
4438 			return -ENOMEM;
4439 		}
4440 		ioc->internal_depth = 10;
4441 	}
4442 
4443 	ioc->hi_priority_depth = ioc->internal_depth - (5);
4444 	/* command line tunables  for max controller queue depth */
4445 	if (max_queue_depth != -1 && max_queue_depth != 0) {
4446 		max_request_credit = min_t(u16, max_queue_depth +
4447 			ioc->internal_depth, facts->RequestCredit);
4448 		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
4449 			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
4450 	} else if (reset_devices)
4451 		max_request_credit = min_t(u16, facts->RequestCredit,
4452 		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
4453 	else
4454 		max_request_credit = min_t(u16, facts->RequestCredit,
4455 		    MAX_HBA_QUEUE_DEPTH);
4456 
4457 	/* Firmware maintains additional facts->HighPriorityCredit number of
4458 	 * credits for HiPriprity Request messages, so hba queue depth will be
4459 	 * sum of max_request_credit and high priority queue depth.
4460 	 */
4461 	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
4462 
4463 	/* request frame size */
4464 	ioc->request_sz = facts->IOCRequestFrameSize * 4;
4465 
4466 	/* reply frame size */
4467 	ioc->reply_sz = facts->ReplyFrameSize * 4;
4468 
4469 	/* chain segment size */
4470 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4471 		if (facts->IOCMaxChainSegmentSize)
4472 			ioc->chain_segment_sz =
4473 					facts->IOCMaxChainSegmentSize *
4474 					MAX_CHAIN_ELEMT_SZ;
4475 		else
4476 		/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
4477 			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
4478 						    MAX_CHAIN_ELEMT_SZ;
4479 	} else
4480 		ioc->chain_segment_sz = ioc->request_sz;
4481 
4482 	/* calculate the max scatter element size */
4483 	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
4484 
4485  retry_allocation:
4486 	total_sz = 0;
4487 	/* calculate number of sg elements left over in the 1st frame */
4488 	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
4489 	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
4490 	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
4491 
4492 	/* now do the same for a chain buffer */
4493 	max_sge_elements = ioc->chain_segment_sz - sge_size;
4494 	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
4495 
4496 	/*
4497 	 *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
4498 	 */
4499 	chains_needed_per_io = ((ioc->shost->sg_tablesize -
4500 	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
4501 	    + 1;
4502 	if (chains_needed_per_io > facts->MaxChainDepth) {
4503 		chains_needed_per_io = facts->MaxChainDepth;
4504 		ioc->shost->sg_tablesize = min_t(u16,
4505 		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
4506 		* chains_needed_per_io), ioc->shost->sg_tablesize);
4507 	}
4508 	ioc->chains_needed_per_io = chains_needed_per_io;
4509 
4510 	/* reply free queue sizing - taking into account for 64 FW events */
4511 	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4512 
4513 	/* mCPU manage single counters for simplicity */
4514 	if (ioc->is_mcpu_endpoint)
4515 		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
4516 	else {
4517 		/* calculate reply descriptor post queue depth */
4518 		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
4519 			ioc->reply_free_queue_depth +  1;
4520 		/* align the reply post queue on the next 16 count boundary */
4521 		if (ioc->reply_post_queue_depth % 16)
4522 			ioc->reply_post_queue_depth += 16 -
4523 				(ioc->reply_post_queue_depth % 16);
4524 	}
4525 
4526 	if (ioc->reply_post_queue_depth >
4527 	    facts->MaxReplyDescriptorPostQueueDepth) {
4528 		ioc->reply_post_queue_depth =
4529 				facts->MaxReplyDescriptorPostQueueDepth -
4530 		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
4531 		ioc->hba_queue_depth =
4532 				((ioc->reply_post_queue_depth - 64) / 2) - 1;
4533 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4534 	}
4535 
4536 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
4537 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
4538 	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
4539 	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
4540 	    ioc->chains_needed_per_io));
4541 
4542 	/* reply post queue, 16 byte align */
4543 	reply_post_free_sz = ioc->reply_post_queue_depth *
4544 	    sizeof(Mpi2DefaultReplyDescriptor_t);
4545 
4546 	sz = reply_post_free_sz;
4547 	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
4548 		sz *= ioc->reply_queue_count;
4549 
4550 	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
4551 	    (ioc->reply_queue_count):1,
4552 	    sizeof(struct reply_post_struct), GFP_KERNEL);
4553 
4554 	if (!ioc->reply_post) {
4555 		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
4556 			ioc->name);
4557 		goto out;
4558 	}
4559 	ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
4560 	    &ioc->pdev->dev, sz, 16, 0);
4561 	if (!ioc->reply_post_free_dma_pool) {
4562 		pr_err(MPT3SAS_FMT
4563 		 "reply_post_free pool: dma_pool_create failed\n",
4564 		 ioc->name);
4565 		goto out;
4566 	}
4567 	i = 0;
4568 	do {
4569 		ioc->reply_post[i].reply_post_free =
4570 		    dma_pool_alloc(ioc->reply_post_free_dma_pool,
4571 		    GFP_KERNEL,
4572 		    &ioc->reply_post[i].reply_post_free_dma);
4573 		if (!ioc->reply_post[i].reply_post_free) {
4574 			pr_err(MPT3SAS_FMT
4575 			"reply_post_free pool: dma_pool_alloc failed\n",
4576 			ioc->name);
4577 			goto out;
4578 		}
4579 		memset(ioc->reply_post[i].reply_post_free, 0, sz);
4580 		dinitprintk(ioc, pr_info(MPT3SAS_FMT
4581 		    "reply post free pool (0x%p): depth(%d),"
4582 		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
4583 		    ioc->reply_post[i].reply_post_free,
4584 		    ioc->reply_post_queue_depth, 8, sz/1024));
4585 		dinitprintk(ioc, pr_info(MPT3SAS_FMT
4586 		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
4587 		    (unsigned long long)
4588 		    ioc->reply_post[i].reply_post_free_dma));
4589 		total_sz += sz;
4590 	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
4591 
4592 	if (ioc->dma_mask == 64) {
4593 		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
4594 			pr_warn(MPT3SAS_FMT
4595 			    "no suitable consistent DMA mask for %s\n",
4596 			    ioc->name, pci_name(ioc->pdev));
4597 			goto out;
4598 		}
4599 	}
4600 
4601 	ioc->scsiio_depth = ioc->hba_queue_depth -
4602 	    ioc->hi_priority_depth - ioc->internal_depth;
4603 
4604 	/* set the scsi host can_queue depth
4605 	 * with some internal commands that could be outstanding
4606 	 */
4607 	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
4608 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4609 		"scsi host: can_queue depth (%d)\n",
4610 		ioc->name, ioc->shost->can_queue));
4611 
4612 
4613 	/* contiguous pool for request and chains, 16 byte align, one extra "
4614 	 * "frame for smid=0
4615 	 */
4616 	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
4617 	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
4618 
4619 	/* hi-priority queue */
4620 	sz += (ioc->hi_priority_depth * ioc->request_sz);
4621 
4622 	/* internal queue */
4623 	sz += (ioc->internal_depth * ioc->request_sz);
4624 
4625 	ioc->request_dma_sz = sz;
4626 	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
4627 	if (!ioc->request) {
4628 		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
4629 		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
4630 		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
4631 		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
4632 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
4633 			goto out;
4634 		retry_sz = 64;
4635 		ioc->hba_queue_depth -= retry_sz;
4636 		_base_release_memory_pools(ioc);
4637 		goto retry_allocation;
4638 	}
4639 
4640 	if (retry_sz)
4641 		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
4642 		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
4643 		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
4644 		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
4645 
4646 	/* hi-priority queue */
4647 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
4648 	    ioc->request_sz);
4649 	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
4650 	    ioc->request_sz);
4651 
4652 	/* internal queue */
4653 	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
4654 	    ioc->request_sz);
4655 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
4656 	    ioc->request_sz);
4657 
4658 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4659 		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4660 		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
4661 	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
4662 
4663 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
4664 	    ioc->name, (unsigned long long) ioc->request_dma));
4665 	total_sz += sz;
4666 
4667 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
4668 		ioc->name, ioc->request, ioc->scsiio_depth));
4669 
4670 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
4671 	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
4672 	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
4673 	if (!ioc->chain_lookup) {
4674 		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
4675 				"failed\n", ioc->name);
4676 		goto out;
4677 	}
4678 
4679 	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
4680 	for (i = 0; i < ioc->scsiio_depth; i++) {
4681 		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
4682 		if (!ioc->chain_lookup[i].chains_per_smid) {
4683 			pr_err(MPT3SAS_FMT "chain_lookup: "
4684 					" kzalloc failed\n", ioc->name);
4685 			goto out;
4686 		}
4687 	}
4688 
4689 	/* initialize hi-priority queue smid's */
4690 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
4691 	    sizeof(struct request_tracker), GFP_KERNEL);
4692 	if (!ioc->hpr_lookup) {
4693 		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
4694 		    ioc->name);
4695 		goto out;
4696 	}
4697 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
4698 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4699 		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
4700 		ioc->name, ioc->hi_priority,
4701 	    ioc->hi_priority_depth, ioc->hi_priority_smid));
4702 
4703 	/* initialize internal queue smid's */
4704 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
4705 	    sizeof(struct request_tracker), GFP_KERNEL);
4706 	if (!ioc->internal_lookup) {
4707 		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
4708 		    ioc->name);
4709 		goto out;
4710 	}
4711 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
4712 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4713 		"internal(0x%p): depth(%d), start smid(%d)\n",
4714 		ioc->name, ioc->internal,
4715 	    ioc->internal_depth, ioc->internal_smid));
4716 	/*
4717 	 * The number of NVMe page sized blocks needed is:
4718 	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
4719 	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
4720 	 * that is placed in the main message frame.  8 is the size of each PRP
4721 	 * entry or PRP list pointer entry.  8 is subtracted from page_size
4722 	 * because of the PRP list pointer entry at the end of a page, so this
4723 	 * is not counted as a PRP entry.  The 1 added page is a round up.
4724 	 *
4725 	 * To avoid allocation failures due to the amount of memory that could
4726 	 * be required for NVMe PRP's, only each set of NVMe blocks will be
4727 	 * contiguous, so a new set is allocated for each possible I/O.
4728 	 */
4729 	ioc->chains_per_prp_buffer = 0;
4730 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4731 		nvme_blocks_needed =
4732 			(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
4733 		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4734 		nvme_blocks_needed++;
4735 
4736 		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
4737 		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
4738 		if (!ioc->pcie_sg_lookup) {
4739 			pr_info(MPT3SAS_FMT
4740 			    "PCIe SGL lookup: kzalloc failed\n", ioc->name);
4741 			goto out;
4742 		}
4743 		sz = nvme_blocks_needed * ioc->page_size;
4744 		ioc->pcie_sgl_dma_pool =
4745 			dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
4746 		if (!ioc->pcie_sgl_dma_pool) {
4747 			pr_info(MPT3SAS_FMT
4748 			    "PCIe SGL pool: dma_pool_create failed\n",
4749 			    ioc->name);
4750 			goto out;
4751 		}
4752 
4753 		ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
4754 		ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
4755 						ioc->chains_needed_per_io);
4756 
4757 		for (i = 0; i < ioc->scsiio_depth; i++) {
4758 			ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4759 				ioc->pcie_sgl_dma_pool, GFP_KERNEL,
4760 				&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4761 			if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
4762 				pr_info(MPT3SAS_FMT
4763 				    "PCIe SGL pool: dma_pool_alloc failed\n",
4764 				    ioc->name);
4765 				goto out;
4766 			}
4767 			for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
4768 				ct = &ioc->chain_lookup[i].chains_per_smid[j];
4769 				ct->chain_buffer =
4770 				    ioc->pcie_sg_lookup[i].pcie_sgl +
4771 				    (j * ioc->chain_segment_sz);
4772 				ct->chain_buffer_dma =
4773 				    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
4774 				    (j * ioc->chain_segment_sz);
4775 			}
4776 		}
4777 
4778 		dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
4779 			"element_size(%d), pool_size(%d kB)\n", ioc->name,
4780 			ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
4781 		dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
4782 		    "fit in a PRP page(%d)\n", ioc->name,
4783 		    ioc->chains_per_prp_buffer));
4784 		total_sz += sz * ioc->scsiio_depth;
4785 	}
4786 
4787 	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
4788 	    ioc->chain_segment_sz, 16, 0);
4789 	if (!ioc->chain_dma_pool) {
4790 		pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
4791 			ioc->name);
4792 		goto out;
4793 	}
4794 	for (i = 0; i < ioc->scsiio_depth; i++) {
4795 		for (j = ioc->chains_per_prp_buffer;
4796 				j < ioc->chains_needed_per_io; j++) {
4797 			ct = &ioc->chain_lookup[i].chains_per_smid[j];
4798 			ct->chain_buffer = dma_pool_alloc(
4799 					ioc->chain_dma_pool, GFP_KERNEL,
4800 					&ct->chain_buffer_dma);
4801 			if (!ct->chain_buffer) {
4802 				pr_err(MPT3SAS_FMT "chain_lookup: "
4803 				" pci_pool_alloc failed\n", ioc->name);
4804 				_base_release_memory_pools(ioc);
4805 				goto out;
4806 			}
4807 		}
4808 		total_sz += ioc->chain_segment_sz;
4809 	}
4810 
4811 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4812 		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4813 		ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
4814 		((ioc->chain_depth *  ioc->chain_segment_sz))/1024));
4815 
4816 	/* sense buffers, 4 byte align */
4817 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
4818 	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4819 					      4, 0);
4820 	if (!ioc->sense_dma_pool) {
4821 		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
4822 		    ioc->name);
4823 		goto out;
4824 	}
4825 	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4826 	    &ioc->sense_dma);
4827 	if (!ioc->sense) {
4828 		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
4829 		    ioc->name);
4830 		goto out;
4831 	}
4832 	/* sense buffer requires to be in same 4 gb region.
4833 	 * Below function will check the same.
4834 	 * In case of failure, new pci pool will be created with updated
4835 	 * alignment. Older allocation and pool will be destroyed.
4836 	 * Alignment will be used such a way that next allocation if
4837 	 * success, will always meet same 4gb region requirement.
4838 	 * Actual requirement is not alignment, but we need start and end of
4839 	 * DMA address must have same upper 32 bit address.
4840 	 */
4841 	if (!is_MSB_are_same((long)ioc->sense, sz)) {
4842 		//Release Sense pool & Reallocate
4843 		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4844 		dma_pool_destroy(ioc->sense_dma_pool);
4845 		ioc->sense = NULL;
4846 
4847 		ioc->sense_dma_pool =
4848 			dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4849 						roundup_pow_of_two(sz), 0);
4850 		if (!ioc->sense_dma_pool) {
4851 			pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
4852 					ioc->name);
4853 			goto out;
4854 		}
4855 		ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4856 				&ioc->sense_dma);
4857 		if (!ioc->sense) {
4858 			pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
4859 					ioc->name);
4860 			goto out;
4861 		}
4862 	}
4863 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4864 	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
4865 	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
4866 	    SCSI_SENSE_BUFFERSIZE, sz/1024));
4867 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
4868 	    ioc->name, (unsigned long long)ioc->sense_dma));
4869 	total_sz += sz;
4870 
4871 	/* reply pool, 4 byte align */
4872 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
4873 	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4874 					      4, 0);
4875 	if (!ioc->reply_dma_pool) {
4876 		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
4877 		    ioc->name);
4878 		goto out;
4879 	}
4880 	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
4881 	    &ioc->reply_dma);
4882 	if (!ioc->reply) {
4883 		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
4884 		    ioc->name);
4885 		goto out;
4886 	}
4887 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
4888 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
4889 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4890 		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4891 		ioc->name, ioc->reply,
4892 	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
4893 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
4894 	    ioc->name, (unsigned long long)ioc->reply_dma));
4895 	total_sz += sz;
4896 
4897 	/* reply free queue, 16 byte align */
4898 	sz = ioc->reply_free_queue_depth * 4;
4899 	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
4900 	    &ioc->pdev->dev, sz, 16, 0);
4901 	if (!ioc->reply_free_dma_pool) {
4902 		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
4903 			ioc->name);
4904 		goto out;
4905 	}
4906 	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
4907 	    &ioc->reply_free_dma);
4908 	if (!ioc->reply_free) {
4909 		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
4910 			ioc->name);
4911 		goto out;
4912 	}
4913 	memset(ioc->reply_free, 0, sz);
4914 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
4915 	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
4916 	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
4917 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4918 		"reply_free_dma (0x%llx)\n",
4919 		ioc->name, (unsigned long long)ioc->reply_free_dma));
4920 	total_sz += sz;
4921 
4922 	if (ioc->rdpq_array_enable) {
4923 		reply_post_free_array_sz = ioc->reply_queue_count *
4924 		    sizeof(Mpi2IOCInitRDPQArrayEntry);
4925 		ioc->reply_post_free_array_dma_pool =
4926 		    dma_pool_create("reply_post_free_array pool",
4927 		    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
4928 		if (!ioc->reply_post_free_array_dma_pool) {
4929 			dinitprintk(ioc,
4930 			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
4931 			    "dma_pool_create failed\n", ioc->name));
4932 			goto out;
4933 		}
4934 		ioc->reply_post_free_array =
4935 		    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
4936 		    GFP_KERNEL, &ioc->reply_post_free_array_dma);
4937 		if (!ioc->reply_post_free_array) {
4938 			dinitprintk(ioc,
4939 			    pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
4940 			    "dma_pool_alloc failed\n", ioc->name));
4941 			goto out;
4942 		}
4943 	}
4944 	ioc->config_page_sz = 512;
4945 	ioc->config_page = pci_alloc_consistent(ioc->pdev,
4946 	    ioc->config_page_sz, &ioc->config_page_dma);
4947 	if (!ioc->config_page) {
4948 		pr_err(MPT3SAS_FMT
4949 			"config page: dma_pool_alloc failed\n",
4950 			ioc->name);
4951 		goto out;
4952 	}
4953 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
4954 		"config page(0x%p): size(%d)\n",
4955 		ioc->name, ioc->config_page, ioc->config_page_sz));
4956 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
4957 		ioc->name, (unsigned long long)ioc->config_page_dma));
4958 	total_sz += ioc->config_page_sz;
4959 
4960 	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
4961 	    ioc->name, total_sz/1024);
4962 	pr_info(MPT3SAS_FMT
4963 		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
4964 	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
4965 	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
4966 	    ioc->name, ioc->shost->sg_tablesize);
4967 	return 0;
4968 
4969  out:
4970 	return -ENOMEM;
4971 }
4972 
4973 /**
4974  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
4975  * @ioc: Pointer to MPT_ADAPTER structure
4976  * @cooked: Request raw or cooked IOC state
4977  *
4978  * Returns all IOC Doorbell register bits if cooked==0, else just the
4979  * Doorbell bits in MPI_IOC_STATE_MASK.
4980  */
4981 u32
4982 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
4983 {
4984 	u32 s, sc;
4985 
4986 	s = readl(&ioc->chip->Doorbell);
4987 	sc = s & MPI2_IOC_STATE_MASK;
4988 	return cooked ? sc : s;
4989 }
4990 
4991 /**
4992  * _base_wait_on_iocstate - waiting on a particular ioc state
4993  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
4994  * @timeout: timeout in second
4995  *
4996  * Returns 0 for success, non-zero for failure.
4997  */
4998 static int
4999 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5000 {
5001 	u32 count, cntdn;
5002 	u32 current_state;
5003 
5004 	count = 0;
5005 	cntdn = 1000 * timeout;
5006 	do {
5007 		current_state = mpt3sas_base_get_iocstate(ioc, 1);
5008 		if (current_state == ioc_state)
5009 			return 0;
5010 		if (count && current_state == MPI2_IOC_STATE_FAULT)
5011 			break;
5012 
5013 		usleep_range(1000, 1500);
5014 		count++;
5015 	} while (--cntdn);
5016 
5017 	return current_state;
5018 }
5019 
5020 /**
5021  * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
5022  * a write to the doorbell)
5023  * @ioc: per adapter object
5024  * @timeout: timeout in second
5025  *
5026  * Returns 0 for success, non-zero for failure.
5027  *
5028  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5029  */
5030 static int
5031 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
5032 
5033 static int
5034 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5035 {
5036 	u32 cntdn, count;
5037 	u32 int_status;
5038 
5039 	count = 0;
5040 	cntdn = 1000 * timeout;
5041 	do {
5042 		int_status = readl(&ioc->chip->HostInterruptStatus);
5043 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5044 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
5045 				"%s: successful count(%d), timeout(%d)\n",
5046 				ioc->name, __func__, count, timeout));
5047 			return 0;
5048 		}
5049 
5050 		usleep_range(1000, 1500);
5051 		count++;
5052 	} while (--cntdn);
5053 
5054 	pr_err(MPT3SAS_FMT
5055 		"%s: failed due to timeout count(%d), int_status(%x)!\n",
5056 		ioc->name, __func__, count, int_status);
5057 	return -EFAULT;
5058 }
5059 
5060 static int
5061 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5062 {
5063 	u32 cntdn, count;
5064 	u32 int_status;
5065 
5066 	count = 0;
5067 	cntdn = 2000 * timeout;
5068 	do {
5069 		int_status = readl(&ioc->chip->HostInterruptStatus);
5070 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5071 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
5072 				"%s: successful count(%d), timeout(%d)\n",
5073 				ioc->name, __func__, count, timeout));
5074 			return 0;
5075 		}
5076 
5077 		udelay(500);
5078 		count++;
5079 	} while (--cntdn);
5080 
5081 	pr_err(MPT3SAS_FMT
5082 		"%s: failed due to timeout count(%d), int_status(%x)!\n",
5083 		ioc->name, __func__, count, int_status);
5084 	return -EFAULT;
5085 
5086 }
5087 
5088 /**
5089  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5090  * @ioc: per adapter object
5091  * @timeout: timeout in second
5092  *
5093  * Returns 0 for success, non-zero for failure.
5094  *
5095  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5096  * doorbell.
5097  */
5098 static int
5099 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5100 {
5101 	u32 cntdn, count;
5102 	u32 int_status;
5103 	u32 doorbell;
5104 
5105 	count = 0;
5106 	cntdn = 1000 * timeout;
5107 	do {
5108 		int_status = readl(&ioc->chip->HostInterruptStatus);
5109 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5110 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
5111 				"%s: successful count(%d), timeout(%d)\n",
5112 				ioc->name, __func__, count, timeout));
5113 			return 0;
5114 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5115 			doorbell = readl(&ioc->chip->Doorbell);
5116 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
5117 			    MPI2_IOC_STATE_FAULT) {
5118 				mpt3sas_base_fault_info(ioc , doorbell);
5119 				return -EFAULT;
5120 			}
5121 		} else if (int_status == 0xFFFFFFFF)
5122 			goto out;
5123 
5124 		usleep_range(1000, 1500);
5125 		count++;
5126 	} while (--cntdn);
5127 
5128  out:
5129 	pr_err(MPT3SAS_FMT
5130 	 "%s: failed due to timeout count(%d), int_status(%x)!\n",
5131 	 ioc->name, __func__, count, int_status);
5132 	return -EFAULT;
5133 }
5134 
5135 /**
5136  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5137  * @ioc: per adapter object
5138  * @timeout: timeout in second
5139  *
5140  * Returns 0 for success, non-zero for failure.
5141  *
5142  */
5143 static int
5144 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5145 {
5146 	u32 cntdn, count;
5147 	u32 doorbell_reg;
5148 
5149 	count = 0;
5150 	cntdn = 1000 * timeout;
5151 	do {
5152 		doorbell_reg = readl(&ioc->chip->Doorbell);
5153 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5154 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
5155 				"%s: successful count(%d), timeout(%d)\n",
5156 				ioc->name, __func__, count, timeout));
5157 			return 0;
5158 		}
5159 
5160 		usleep_range(1000, 1500);
5161 		count++;
5162 	} while (--cntdn);
5163 
5164 	pr_err(MPT3SAS_FMT
5165 		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5166 		ioc->name, __func__, count, doorbell_reg);
5167 	return -EFAULT;
5168 }
5169 
5170 /**
5171  * _base_send_ioc_reset - send doorbell reset
5172  * @ioc: per adapter object
5173  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5174  * @timeout: timeout in second
5175  *
5176  * Returns 0 for success, non-zero for failure.
5177  */
5178 static int
5179 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5180 {
5181 	u32 ioc_state;
5182 	int r = 0;
5183 
5184 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5185 		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
5186 		    ioc->name, __func__);
5187 		return -EFAULT;
5188 	}
5189 
5190 	if (!(ioc->facts.IOCCapabilities &
5191 	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5192 		return -EFAULT;
5193 
5194 	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
5195 
5196 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5197 	    &ioc->chip->Doorbell);
5198 	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5199 		r = -EFAULT;
5200 		goto out;
5201 	}
5202 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5203 	if (ioc_state) {
5204 		pr_err(MPT3SAS_FMT
5205 			"%s: failed going to ready state (ioc_state=0x%x)\n",
5206 			ioc->name, __func__, ioc_state);
5207 		r = -EFAULT;
5208 		goto out;
5209 	}
5210  out:
5211 	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
5212 	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
5213 	return r;
5214 }
5215 
5216 /**
5217  * _base_handshake_req_reply_wait - send request thru doorbell interface
5218  * @ioc: per adapter object
5219  * @request_bytes: request length
5220  * @request: pointer having request payload
5221  * @reply_bytes: reply length
5222  * @reply: pointer to reply payload
5223  * @timeout: timeout in second
5224  *
5225  * Returns 0 for success, non-zero for failure.
5226  */
5227 static int
5228 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5229 	u32 *request, int reply_bytes, u16 *reply, int timeout)
5230 {
5231 	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5232 	int i;
5233 	u8 failed;
5234 	__le32 *mfp;
5235 
5236 	/* make sure doorbell is not in use */
5237 	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5238 		pr_err(MPT3SAS_FMT
5239 			"doorbell is in use (line=%d)\n",
5240 			ioc->name, __LINE__);
5241 		return -EFAULT;
5242 	}
5243 
5244 	/* clear pending doorbell interrupts from previous state changes */
5245 	if (readl(&ioc->chip->HostInterruptStatus) &
5246 	    MPI2_HIS_IOC2SYS_DB_STATUS)
5247 		writel(0, &ioc->chip->HostInterruptStatus);
5248 
5249 	/* send message to ioc */
5250 	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5251 	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5252 	    &ioc->chip->Doorbell);
5253 
5254 	if ((_base_spin_on_doorbell_int(ioc, 5))) {
5255 		pr_err(MPT3SAS_FMT
5256 			"doorbell handshake int failed (line=%d)\n",
5257 			ioc->name, __LINE__);
5258 		return -EFAULT;
5259 	}
5260 	writel(0, &ioc->chip->HostInterruptStatus);
5261 
5262 	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5263 		pr_err(MPT3SAS_FMT
5264 			"doorbell handshake ack failed (line=%d)\n",
5265 			ioc->name, __LINE__);
5266 		return -EFAULT;
5267 	}
5268 
5269 	/* send message 32-bits at a time */
5270 	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5271 		writel((u32)(request[i]), &ioc->chip->Doorbell);
5272 		if ((_base_wait_for_doorbell_ack(ioc, 5)))
5273 			failed = 1;
5274 	}
5275 
5276 	if (failed) {
5277 		pr_err(MPT3SAS_FMT
5278 			"doorbell handshake sending request failed (line=%d)\n",
5279 			ioc->name, __LINE__);
5280 		return -EFAULT;
5281 	}
5282 
5283 	/* now wait for the reply */
5284 	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5285 		pr_err(MPT3SAS_FMT
5286 			"doorbell handshake int failed (line=%d)\n",
5287 			ioc->name, __LINE__);
5288 		return -EFAULT;
5289 	}
5290 
5291 	/* read the first two 16-bits, it gives the total length of the reply */
5292 	reply[0] = (u16)(readl(&ioc->chip->Doorbell)
5293 	    & MPI2_DOORBELL_DATA_MASK);
5294 	writel(0, &ioc->chip->HostInterruptStatus);
5295 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
5296 		pr_err(MPT3SAS_FMT
5297 			"doorbell handshake int failed (line=%d)\n",
5298 			ioc->name, __LINE__);
5299 		return -EFAULT;
5300 	}
5301 	reply[1] = (u16)(readl(&ioc->chip->Doorbell)
5302 	    & MPI2_DOORBELL_DATA_MASK);
5303 	writel(0, &ioc->chip->HostInterruptStatus);
5304 
5305 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
5306 		if ((_base_wait_for_doorbell_int(ioc, 5))) {
5307 			pr_err(MPT3SAS_FMT
5308 				"doorbell handshake int failed (line=%d)\n",
5309 				ioc->name, __LINE__);
5310 			return -EFAULT;
5311 		}
5312 		if (i >=  reply_bytes/2) /* overflow case */
5313 			readl(&ioc->chip->Doorbell);
5314 		else
5315 			reply[i] = (u16)(readl(&ioc->chip->Doorbell)
5316 			    & MPI2_DOORBELL_DATA_MASK);
5317 		writel(0, &ioc->chip->HostInterruptStatus);
5318 	}
5319 
5320 	_base_wait_for_doorbell_int(ioc, 5);
5321 	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5322 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
5323 			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
5324 	}
5325 	writel(0, &ioc->chip->HostInterruptStatus);
5326 
5327 	if (ioc->logging_level & MPT_DEBUG_INIT) {
5328 		mfp = (__le32 *)reply;
5329 		pr_info("\toffset:data\n");
5330 		for (i = 0; i < reply_bytes/4; i++)
5331 			pr_info("\t[0x%02x]:%08x\n", i*4,
5332 			    le32_to_cpu(mfp[i]));
5333 	}
5334 	return 0;
5335 }
5336 
5337 /**
5338  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
5339  * @ioc: per adapter object
5340  * @mpi_reply: the reply payload from FW
5341  * @mpi_request: the request payload sent to FW
5342  *
5343  * The SAS IO Unit Control Request message allows the host to perform low-level
5344  * operations, such as resets on the PHYs of the IO Unit, also allows the host
5345  * to obtain the IOC assigned device handles for a device if it has other
5346  * identifying information about the device, in addition allows the host to
5347  * remove IOC resources associated with the device.
5348  *
5349  * Returns 0 for success, non-zero for failure.
5350  */
5351 int
5352 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5353 	Mpi2SasIoUnitControlReply_t *mpi_reply,
5354 	Mpi2SasIoUnitControlRequest_t *mpi_request)
5355 {
5356 	u16 smid;
5357 	u32 ioc_state;
5358 	bool issue_reset = false;
5359 	int rc;
5360 	void *request;
5361 	u16 wait_state_count;
5362 
5363 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5364 	    __func__));
5365 
5366 	mutex_lock(&ioc->base_cmds.mutex);
5367 
5368 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5369 		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
5370 		    ioc->name, __func__);
5371 		rc = -EAGAIN;
5372 		goto out;
5373 	}
5374 
5375 	wait_state_count = 0;
5376 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5377 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
5378 		if (wait_state_count++ == 10) {
5379 			pr_err(MPT3SAS_FMT
5380 			    "%s: failed due to ioc not operational\n",
5381 			    ioc->name, __func__);
5382 			rc = -EFAULT;
5383 			goto out;
5384 		}
5385 		ssleep(1);
5386 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5387 		pr_info(MPT3SAS_FMT
5388 			"%s: waiting for operational state(count=%d)\n",
5389 			ioc->name, __func__, wait_state_count);
5390 	}
5391 
5392 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5393 	if (!smid) {
5394 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
5395 		    ioc->name, __func__);
5396 		rc = -EAGAIN;
5397 		goto out;
5398 	}
5399 
5400 	rc = 0;
5401 	ioc->base_cmds.status = MPT3_CMD_PENDING;
5402 	request = mpt3sas_base_get_msg_frame(ioc, smid);
5403 	ioc->base_cmds.smid = smid;
5404 	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
5405 	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5406 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
5407 		ioc->ioc_link_reset_in_progress = 1;
5408 	init_completion(&ioc->base_cmds.done);
5409 	mpt3sas_base_put_smid_default(ioc, smid);
5410 	wait_for_completion_timeout(&ioc->base_cmds.done,
5411 	    msecs_to_jiffies(10000));
5412 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5413 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
5414 	    ioc->ioc_link_reset_in_progress)
5415 		ioc->ioc_link_reset_in_progress = 0;
5416 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5417 		pr_err(MPT3SAS_FMT "%s: timeout\n",
5418 		    ioc->name, __func__);
5419 		_debug_dump_mf(mpi_request,
5420 		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
5421 		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
5422 			issue_reset = true;
5423 		goto issue_host_reset;
5424 	}
5425 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5426 		memcpy(mpi_reply, ioc->base_cmds.reply,
5427 		    sizeof(Mpi2SasIoUnitControlReply_t));
5428 	else
5429 		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
5430 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5431 	goto out;
5432 
5433  issue_host_reset:
5434 	if (issue_reset)
5435 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5436 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5437 	rc = -EFAULT;
5438  out:
5439 	mutex_unlock(&ioc->base_cmds.mutex);
5440 	return rc;
5441 }
5442 
5443 /**
5444  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
5445  * @ioc: per adapter object
5446  * @mpi_reply: the reply payload from FW
5447  * @mpi_request: the request payload sent to FW
5448  *
5449  * The SCSI Enclosure Processor request message causes the IOC to
5450  * communicate with SES devices to control LED status signals.
5451  *
5452  * Returns 0 for success, non-zero for failure.
5453  */
5454 int
5455 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5456 	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
5457 {
5458 	u16 smid;
5459 	u32 ioc_state;
5460 	bool issue_reset = false;
5461 	int rc;
5462 	void *request;
5463 	u16 wait_state_count;
5464 
5465 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5466 	    __func__));
5467 
5468 	mutex_lock(&ioc->base_cmds.mutex);
5469 
5470 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5471 		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
5472 		    ioc->name, __func__);
5473 		rc = -EAGAIN;
5474 		goto out;
5475 	}
5476 
5477 	wait_state_count = 0;
5478 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5479 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
5480 		if (wait_state_count++ == 10) {
5481 			pr_err(MPT3SAS_FMT
5482 			    "%s: failed due to ioc not operational\n",
5483 			    ioc->name, __func__);
5484 			rc = -EFAULT;
5485 			goto out;
5486 		}
5487 		ssleep(1);
5488 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5489 		pr_info(MPT3SAS_FMT
5490 			"%s: waiting for operational state(count=%d)\n",
5491 			ioc->name,
5492 		    __func__, wait_state_count);
5493 	}
5494 
5495 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5496 	if (!smid) {
5497 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
5498 		    ioc->name, __func__);
5499 		rc = -EAGAIN;
5500 		goto out;
5501 	}
5502 
5503 	rc = 0;
5504 	ioc->base_cmds.status = MPT3_CMD_PENDING;
5505 	request = mpt3sas_base_get_msg_frame(ioc, smid);
5506 	ioc->base_cmds.smid = smid;
5507 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
5508 	init_completion(&ioc->base_cmds.done);
5509 	mpt3sas_base_put_smid_default(ioc, smid);
5510 	wait_for_completion_timeout(&ioc->base_cmds.done,
5511 	    msecs_to_jiffies(10000));
5512 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5513 		pr_err(MPT3SAS_FMT "%s: timeout\n",
5514 		    ioc->name, __func__);
5515 		_debug_dump_mf(mpi_request,
5516 		    sizeof(Mpi2SepRequest_t)/4);
5517 		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
5518 			issue_reset = false;
5519 		goto issue_host_reset;
5520 	}
5521 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5522 		memcpy(mpi_reply, ioc->base_cmds.reply,
5523 		    sizeof(Mpi2SepReply_t));
5524 	else
5525 		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
5526 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5527 	goto out;
5528 
5529  issue_host_reset:
5530 	if (issue_reset)
5531 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5532 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5533 	rc = -EFAULT;
5534  out:
5535 	mutex_unlock(&ioc->base_cmds.mutex);
5536 	return rc;
5537 }
5538 
5539 /**
5540  * _base_get_port_facts - obtain port facts reply and save in ioc
5541  * @ioc: per adapter object
5542  *
5543  * Returns 0 for success, non-zero for failure.
5544  */
5545 static int
5546 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
5547 {
5548 	Mpi2PortFactsRequest_t mpi_request;
5549 	Mpi2PortFactsReply_t mpi_reply;
5550 	struct mpt3sas_port_facts *pfacts;
5551 	int mpi_reply_sz, mpi_request_sz, r;
5552 
5553 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5554 	    __func__));
5555 
5556 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
5557 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
5558 	memset(&mpi_request, 0, mpi_request_sz);
5559 	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
5560 	mpi_request.PortNumber = port;
5561 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5562 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5563 
5564 	if (r != 0) {
5565 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
5566 		    ioc->name, __func__, r);
5567 		return r;
5568 	}
5569 
5570 	pfacts = &ioc->pfacts[port];
5571 	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
5572 	pfacts->PortNumber = mpi_reply.PortNumber;
5573 	pfacts->VP_ID = mpi_reply.VP_ID;
5574 	pfacts->VF_ID = mpi_reply.VF_ID;
5575 	pfacts->MaxPostedCmdBuffers =
5576 	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
5577 
5578 	return 0;
5579 }
5580 
5581 /**
5582  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
5583  * @ioc: per adapter object
5584  * @timeout:
5585  *
5586  * Returns 0 for success, non-zero for failure.
5587  */
5588 static int
5589 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
5590 {
5591 	u32 ioc_state;
5592 	int rc;
5593 
5594 	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
5595 	    __func__));
5596 
5597 	if (ioc->pci_error_recovery) {
5598 		dfailprintk(ioc, printk(MPT3SAS_FMT
5599 		    "%s: host in pci error recovery\n", ioc->name, __func__));
5600 		return -EFAULT;
5601 	}
5602 
5603 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5604 	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
5605 	    ioc->name, __func__, ioc_state));
5606 
5607 	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
5608 	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5609 		return 0;
5610 
5611 	if (ioc_state & MPI2_DOORBELL_USED) {
5612 		dhsprintk(ioc, printk(MPT3SAS_FMT
5613 		    "unexpected doorbell active!\n", ioc->name));
5614 		goto issue_diag_reset;
5615 	}
5616 
5617 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
5618 		mpt3sas_base_fault_info(ioc, ioc_state &
5619 		    MPI2_DOORBELL_DATA_MASK);
5620 		goto issue_diag_reset;
5621 	}
5622 
5623 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5624 	if (ioc_state) {
5625 		dfailprintk(ioc, printk(MPT3SAS_FMT
5626 		    "%s: failed going to ready state (ioc_state=0x%x)\n",
5627 		    ioc->name, __func__, ioc_state));
5628 		return -EFAULT;
5629 	}
5630 
5631  issue_diag_reset:
5632 	rc = _base_diag_reset(ioc);
5633 	return rc;
5634 }
5635 
5636 /**
5637  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
5638  * @ioc: per adapter object
5639  *
5640  * Returns 0 for success, non-zero for failure.
5641  */
5642 static int
5643 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
5644 {
5645 	Mpi2IOCFactsRequest_t mpi_request;
5646 	Mpi2IOCFactsReply_t mpi_reply;
5647 	struct mpt3sas_facts *facts;
5648 	int mpi_reply_sz, mpi_request_sz, r;
5649 
5650 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5651 	    __func__));
5652 
5653 	r = _base_wait_for_iocstate(ioc, 10);
5654 	if (r) {
5655 		dfailprintk(ioc, printk(MPT3SAS_FMT
5656 		    "%s: failed getting to correct state\n",
5657 		    ioc->name, __func__));
5658 		return r;
5659 	}
5660 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
5661 	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
5662 	memset(&mpi_request, 0, mpi_request_sz);
5663 	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
5664 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5665 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5666 
5667 	if (r != 0) {
5668 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
5669 		    ioc->name, __func__, r);
5670 		return r;
5671 	}
5672 
5673 	facts = &ioc->facts;
5674 	memset(facts, 0, sizeof(struct mpt3sas_facts));
5675 	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
5676 	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
5677 	facts->VP_ID = mpi_reply.VP_ID;
5678 	facts->VF_ID = mpi_reply.VF_ID;
5679 	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
5680 	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
5681 	facts->WhoInit = mpi_reply.WhoInit;
5682 	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
5683 	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
5684 	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
5685 	facts->MaxReplyDescriptorPostQueueDepth =
5686 	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
5687 	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
5688 	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
5689 	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
5690 		ioc->ir_firmware = 1;
5691 	if ((facts->IOCCapabilities &
5692 	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
5693 		ioc->rdpq_array_capable = 1;
5694 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
5695 	facts->IOCRequestFrameSize =
5696 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
5697 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5698 		facts->IOCMaxChainSegmentSize =
5699 			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
5700 	}
5701 	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
5702 	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
5703 	ioc->shost->max_id = -1;
5704 	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
5705 	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
5706 	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
5707 	facts->HighPriorityCredit =
5708 	    le16_to_cpu(mpi_reply.HighPriorityCredit);
5709 	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
5710 	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
5711 	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
5712 
5713 	/*
5714 	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
5715 	 */
5716 	ioc->page_size = 1 << facts->CurrentHostPageSize;
5717 	if (ioc->page_size == 1) {
5718 		pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
5719 			"default host page size to 4k\n", ioc->name);
5720 		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
5721 	}
5722 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
5723 		ioc->name, facts->CurrentHostPageSize));
5724 
5725 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
5726 		"hba queue depth(%d), max chains per io(%d)\n",
5727 		ioc->name, facts->RequestCredit,
5728 	    facts->MaxChainDepth));
5729 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
5730 		"request frame size(%d), reply frame size(%d)\n", ioc->name,
5731 	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
5732 	return 0;
5733 }
5734 
5735 /**
5736  * _base_send_ioc_init - send ioc_init to firmware
5737  * @ioc: per adapter object
5738  *
5739  * Returns 0 for success, non-zero for failure.
5740  */
5741 static int
5742 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5743 {
5744 	Mpi2IOCInitRequest_t mpi_request;
5745 	Mpi2IOCInitReply_t mpi_reply;
5746 	int i, r = 0;
5747 	ktime_t current_time;
5748 	u16 ioc_status;
5749 	u32 reply_post_free_array_sz = 0;
5750 
5751 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5752 	    __func__));
5753 
5754 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
5755 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
5756 	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
5757 	mpi_request.VF_ID = 0; /* TODO */
5758 	mpi_request.VP_ID = 0;
5759 	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
5760 	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
5761 	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
5762 
5763 	if (_base_is_controller_msix_enabled(ioc))
5764 		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
5765 	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
5766 	mpi_request.ReplyDescriptorPostQueueDepth =
5767 	    cpu_to_le16(ioc->reply_post_queue_depth);
5768 	mpi_request.ReplyFreeQueueDepth =
5769 	    cpu_to_le16(ioc->reply_free_queue_depth);
5770 
5771 	mpi_request.SenseBufferAddressHigh =
5772 	    cpu_to_le32((u64)ioc->sense_dma >> 32);
5773 	mpi_request.SystemReplyAddressHigh =
5774 	    cpu_to_le32((u64)ioc->reply_dma >> 32);
5775 	mpi_request.SystemRequestFrameBaseAddress =
5776 	    cpu_to_le64((u64)ioc->request_dma);
5777 	mpi_request.ReplyFreeQueueAddress =
5778 	    cpu_to_le64((u64)ioc->reply_free_dma);
5779 
5780 	if (ioc->rdpq_array_enable) {
5781 		reply_post_free_array_sz = ioc->reply_queue_count *
5782 		    sizeof(Mpi2IOCInitRDPQArrayEntry);
5783 		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
5784 		for (i = 0; i < ioc->reply_queue_count; i++)
5785 			ioc->reply_post_free_array[i].RDPQBaseAddress =
5786 			    cpu_to_le64(
5787 				(u64)ioc->reply_post[i].reply_post_free_dma);
5788 		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
5789 		mpi_request.ReplyDescriptorPostQueueAddress =
5790 		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
5791 	} else {
5792 		mpi_request.ReplyDescriptorPostQueueAddress =
5793 		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
5794 	}
5795 
5796 	/* This time stamp specifies number of milliseconds
5797 	 * since epoch ~ midnight January 1, 1970.
5798 	 */
5799 	current_time = ktime_get_real();
5800 	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
5801 
5802 	if (ioc->logging_level & MPT_DEBUG_INIT) {
5803 		__le32 *mfp;
5804 		int i;
5805 
5806 		mfp = (__le32 *)&mpi_request;
5807 		pr_info("\toffset:data\n");
5808 		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
5809 			pr_info("\t[0x%02x]:%08x\n", i*4,
5810 			    le32_to_cpu(mfp[i]));
5811 	}
5812 
5813 	r = _base_handshake_req_reply_wait(ioc,
5814 	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
5815 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
5816 
5817 	if (r != 0) {
5818 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
5819 		    ioc->name, __func__, r);
5820 		return r;
5821 	}
5822 
5823 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5824 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
5825 	    mpi_reply.IOCLogInfo) {
5826 		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
5827 		r = -EIO;
5828 	}
5829 
5830 	return r;
5831 }
5832 
5833 /**
5834  * mpt3sas_port_enable_done - command completion routine for port enable
5835  * @ioc: per adapter object
5836  * @smid: system request message index
5837  * @msix_index: MSIX table index supplied by the OS
5838  * @reply: reply message frame(lower 32bit addr)
5839  *
5840  * Return 1 meaning mf should be freed from _base_interrupt
5841  *        0 means the mf is freed from this function.
5842  */
5843 u8
5844 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
5845 	u32 reply)
5846 {
5847 	MPI2DefaultReply_t *mpi_reply;
5848 	u16 ioc_status;
5849 
5850 	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
5851 		return 1;
5852 
5853 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5854 	if (!mpi_reply)
5855 		return 1;
5856 
5857 	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
5858 		return 1;
5859 
5860 	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
5861 	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
5862 	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
5863 	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
5864 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5865 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5866 		ioc->port_enable_failed = 1;
5867 
5868 	if (ioc->is_driver_loading) {
5869 		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
5870 			mpt3sas_port_enable_complete(ioc);
5871 			return 1;
5872 		} else {
5873 			ioc->start_scan_failed = ioc_status;
5874 			ioc->start_scan = 0;
5875 			return 1;
5876 		}
5877 	}
5878 	complete(&ioc->port_enable_cmds.done);
5879 	return 1;
5880 }
5881 
5882 /**
5883  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
5884  * @ioc: per adapter object
5885  *
5886  * Returns 0 for success, non-zero for failure.
5887  */
5888 static int
5889 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
5890 {
5891 	Mpi2PortEnableRequest_t *mpi_request;
5892 	Mpi2PortEnableReply_t *mpi_reply;
5893 	int r = 0;
5894 	u16 smid;
5895 	u16 ioc_status;
5896 
5897 	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
5898 
5899 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5900 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
5901 		    ioc->name, __func__);
5902 		return -EAGAIN;
5903 	}
5904 
5905 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5906 	if (!smid) {
5907 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
5908 		    ioc->name, __func__);
5909 		return -EAGAIN;
5910 	}
5911 
5912 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5913 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5914 	ioc->port_enable_cmds.smid = smid;
5915 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5916 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5917 
5918 	init_completion(&ioc->port_enable_cmds.done);
5919 	mpt3sas_base_put_smid_default(ioc, smid);
5920 	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
5921 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
5922 		pr_err(MPT3SAS_FMT "%s: timeout\n",
5923 		    ioc->name, __func__);
5924 		_debug_dump_mf(mpi_request,
5925 		    sizeof(Mpi2PortEnableRequest_t)/4);
5926 		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
5927 			r = -EFAULT;
5928 		else
5929 			r = -ETIME;
5930 		goto out;
5931 	}
5932 
5933 	mpi_reply = ioc->port_enable_cmds.reply;
5934 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5935 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5936 		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
5937 		    ioc->name, __func__, ioc_status);
5938 		r = -EFAULT;
5939 		goto out;
5940 	}
5941 
5942  out:
5943 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5944 	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
5945 	    "SUCCESS" : "FAILED"));
5946 	return r;
5947 }
5948 
5949 /**
5950  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
5951  * @ioc: per adapter object
5952  *
5953  * Returns 0 for success, non-zero for failure.
5954  */
5955 int
5956 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
5957 {
5958 	Mpi2PortEnableRequest_t *mpi_request;
5959 	u16 smid;
5960 
5961 	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
5962 
5963 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
5964 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
5965 		    ioc->name, __func__);
5966 		return -EAGAIN;
5967 	}
5968 
5969 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5970 	if (!smid) {
5971 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
5972 		    ioc->name, __func__);
5973 		return -EAGAIN;
5974 	}
5975 
5976 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5977 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5978 	ioc->port_enable_cmds.smid = smid;
5979 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5980 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5981 
5982 	mpt3sas_base_put_smid_default(ioc, smid);
5983 	return 0;
5984 }
5985 
5986 /**
5987  * _base_determine_wait_on_discovery - desposition
5988  * @ioc: per adapter object
5989  *
5990  * Decide whether to wait on discovery to complete. Used to either
5991  * locate boot device, or report volumes ahead of physical devices.
5992  *
5993  * Returns 1 for wait, 0 for don't wait
5994  */
5995 static int
5996 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
5997 {
5998 	/* We wait for discovery to complete if IR firmware is loaded.
5999 	 * The sas topology events arrive before PD events, so we need time to
6000 	 * turn on the bit in ioc->pd_handles to indicate PD
6001 	 * Also, it maybe required to report Volumes ahead of physical
6002 	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
6003 	 */
6004 	if (ioc->ir_firmware)
6005 		return 1;
6006 
6007 	/* if no Bios, then we don't need to wait */
6008 	if (!ioc->bios_pg3.BiosVersion)
6009 		return 0;
6010 
6011 	/* Bios is present, then we drop down here.
6012 	 *
6013 	 * If there any entries in the Bios Page 2, then we wait
6014 	 * for discovery to complete.
6015 	 */
6016 
6017 	/* Current Boot Device */
6018 	if ((ioc->bios_pg2.CurrentBootDeviceForm &
6019 	    MPI2_BIOSPAGE2_FORM_MASK) ==
6020 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6021 	/* Request Boot Device */
6022 	   (ioc->bios_pg2.ReqBootDeviceForm &
6023 	    MPI2_BIOSPAGE2_FORM_MASK) ==
6024 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6025 	/* Alternate Request Boot Device */
6026 	   (ioc->bios_pg2.ReqAltBootDeviceForm &
6027 	    MPI2_BIOSPAGE2_FORM_MASK) ==
6028 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
6029 		return 0;
6030 
6031 	return 1;
6032 }
6033 
6034 /**
6035  * _base_unmask_events - turn on notification for this event
6036  * @ioc: per adapter object
6037  * @event: firmware event
6038  *
6039  * The mask is stored in ioc->event_masks.
6040  */
6041 static void
6042 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
6043 {
6044 	u32 desired_event;
6045 
6046 	if (event >= 128)
6047 		return;
6048 
6049 	desired_event = (1 << (event % 32));
6050 
6051 	if (event < 32)
6052 		ioc->event_masks[0] &= ~desired_event;
6053 	else if (event < 64)
6054 		ioc->event_masks[1] &= ~desired_event;
6055 	else if (event < 96)
6056 		ioc->event_masks[2] &= ~desired_event;
6057 	else if (event < 128)
6058 		ioc->event_masks[3] &= ~desired_event;
6059 }
6060 
6061 /**
6062  * _base_event_notification - send event notification
6063  * @ioc: per adapter object
6064  *
6065  * Returns 0 for success, non-zero for failure.
6066  */
6067 static int
6068 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
6069 {
6070 	Mpi2EventNotificationRequest_t *mpi_request;
6071 	u16 smid;
6072 	int r = 0;
6073 	int i;
6074 
6075 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6076 	    __func__));
6077 
6078 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6079 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
6080 		    ioc->name, __func__);
6081 		return -EAGAIN;
6082 	}
6083 
6084 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6085 	if (!smid) {
6086 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
6087 		    ioc->name, __func__);
6088 		return -EAGAIN;
6089 	}
6090 	ioc->base_cmds.status = MPT3_CMD_PENDING;
6091 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6092 	ioc->base_cmds.smid = smid;
6093 	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
6094 	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
6095 	mpi_request->VF_ID = 0; /* TODO */
6096 	mpi_request->VP_ID = 0;
6097 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6098 		mpi_request->EventMasks[i] =
6099 		    cpu_to_le32(ioc->event_masks[i]);
6100 	init_completion(&ioc->base_cmds.done);
6101 	mpt3sas_base_put_smid_default(ioc, smid);
6102 	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
6103 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6104 		pr_err(MPT3SAS_FMT "%s: timeout\n",
6105 		    ioc->name, __func__);
6106 		_debug_dump_mf(mpi_request,
6107 		    sizeof(Mpi2EventNotificationRequest_t)/4);
6108 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
6109 			r = -EFAULT;
6110 		else
6111 			r = -ETIME;
6112 	} else
6113 		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
6114 		    ioc->name, __func__));
6115 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6116 	return r;
6117 }
6118 
6119 /**
6120  * mpt3sas_base_validate_event_type - validating event types
6121  * @ioc: per adapter object
6122  * @event: firmware event
6123  *
6124  * This will turn on firmware event notification when application
6125  * ask for that event. We don't mask events that are already enabled.
6126  */
6127 void
6128 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6129 {
6130 	int i, j;
6131 	u32 event_mask, desired_event;
6132 	u8 send_update_to_fw;
6133 
6134 	for (i = 0, send_update_to_fw = 0; i <
6135 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6136 		event_mask = ~event_type[i];
6137 		desired_event = 1;
6138 		for (j = 0; j < 32; j++) {
6139 			if (!(event_mask & desired_event) &&
6140 			    (ioc->event_masks[i] & desired_event)) {
6141 				ioc->event_masks[i] &= ~desired_event;
6142 				send_update_to_fw = 1;
6143 			}
6144 			desired_event = (desired_event << 1);
6145 		}
6146 	}
6147 
6148 	if (!send_update_to_fw)
6149 		return;
6150 
6151 	mutex_lock(&ioc->base_cmds.mutex);
6152 	_base_event_notification(ioc);
6153 	mutex_unlock(&ioc->base_cmds.mutex);
6154 }
6155 
6156 /**
6157  * _base_diag_reset - the "big hammer" start of day reset
6158  * @ioc: per adapter object
6159  *
6160  * Returns 0 for success, non-zero for failure.
6161  */
6162 static int
6163 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6164 {
6165 	u32 host_diagnostic;
6166 	u32 ioc_state;
6167 	u32 count;
6168 	u32 hcb_size;
6169 
6170 	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
6171 
6172 	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
6173 	    ioc->name));
6174 
6175 	count = 0;
6176 	do {
6177 		/* Write magic sequence to WriteSequence register
6178 		 * Loop until in diagnostic mode
6179 		 */
6180 		drsprintk(ioc, pr_info(MPT3SAS_FMT
6181 			"write magic sequence\n", ioc->name));
6182 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6183 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6184 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6185 		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6186 		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6187 		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6188 		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6189 
6190 		/* wait 100 msec */
6191 		msleep(100);
6192 
6193 		if (count++ > 20)
6194 			goto out;
6195 
6196 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
6197 		drsprintk(ioc, pr_info(MPT3SAS_FMT
6198 			"wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6199 		    ioc->name, count, host_diagnostic));
6200 
6201 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6202 
6203 	hcb_size = readl(&ioc->chip->HCBSize);
6204 
6205 	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
6206 	    ioc->name));
6207 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6208 	     &ioc->chip->HostDiagnostic);
6209 
6210 	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
6211 	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6212 
6213 	/* Approximately 300 second max wait */
6214 	for (count = 0; count < (300000000 /
6215 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6216 
6217 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
6218 
6219 		if (host_diagnostic == 0xFFFFFFFF)
6220 			goto out;
6221 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6222 			break;
6223 
6224 		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6225 	}
6226 
6227 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6228 
6229 		drsprintk(ioc, pr_info(MPT3SAS_FMT
6230 		"restart the adapter assuming the HCB Address points to good F/W\n",
6231 		    ioc->name));
6232 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6233 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6234 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6235 
6236 		drsprintk(ioc, pr_info(MPT3SAS_FMT
6237 		    "re-enable the HCDW\n", ioc->name));
6238 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6239 		    &ioc->chip->HCBSize);
6240 	}
6241 
6242 	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
6243 	    ioc->name));
6244 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6245 	    &ioc->chip->HostDiagnostic);
6246 
6247 	drsprintk(ioc, pr_info(MPT3SAS_FMT
6248 		"disable writes to the diagnostic register\n", ioc->name));
6249 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6250 
6251 	drsprintk(ioc, pr_info(MPT3SAS_FMT
6252 		"Wait for FW to go to the READY state\n", ioc->name));
6253 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6254 	if (ioc_state) {
6255 		pr_err(MPT3SAS_FMT
6256 			"%s: failed going to ready state (ioc_state=0x%x)\n",
6257 			ioc->name, __func__, ioc_state);
6258 		goto out;
6259 	}
6260 
6261 	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
6262 	return 0;
6263 
6264  out:
6265 	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
6266 	return -EFAULT;
6267 }
6268 
6269 /**
6270  * _base_make_ioc_ready - put controller in READY state
6271  * @ioc: per adapter object
6272  * @type: FORCE_BIG_HAMMER or SOFT_RESET
6273  *
6274  * Returns 0 for success, non-zero for failure.
6275  */
6276 static int
6277 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
6278 {
6279 	u32 ioc_state;
6280 	int rc;
6281 	int count;
6282 
6283 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6284 	    __func__));
6285 
6286 	if (ioc->pci_error_recovery)
6287 		return 0;
6288 
6289 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6290 	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
6291 	    ioc->name, __func__, ioc_state));
6292 
6293 	/* if in RESET state, it should move to READY state shortly */
6294 	count = 0;
6295 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6296 		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6297 		    MPI2_IOC_STATE_READY) {
6298 			if (count++ == 10) {
6299 				pr_err(MPT3SAS_FMT
6300 					"%s: failed going to ready state (ioc_state=0x%x)\n",
6301 				    ioc->name, __func__, ioc_state);
6302 				return -EFAULT;
6303 			}
6304 			ssleep(1);
6305 			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6306 		}
6307 	}
6308 
6309 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6310 		return 0;
6311 
6312 	if (ioc_state & MPI2_DOORBELL_USED) {
6313 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
6314 			"unexpected doorbell active!\n",
6315 			ioc->name));
6316 		goto issue_diag_reset;
6317 	}
6318 
6319 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6320 		mpt3sas_base_fault_info(ioc, ioc_state &
6321 		    MPI2_DOORBELL_DATA_MASK);
6322 		goto issue_diag_reset;
6323 	}
6324 
6325 	if (type == FORCE_BIG_HAMMER)
6326 		goto issue_diag_reset;
6327 
6328 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6329 		if (!(_base_send_ioc_reset(ioc,
6330 		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
6331 			return 0;
6332 	}
6333 
6334  issue_diag_reset:
6335 	rc = _base_diag_reset(ioc);
6336 	return rc;
6337 }
6338 
6339 /**
6340  * _base_make_ioc_operational - put controller in OPERATIONAL state
6341  * @ioc: per adapter object
6342  *
6343  * Returns 0 for success, non-zero for failure.
6344  */
6345 static int
6346 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
6347 {
6348 	int r, i, index;
6349 	unsigned long	flags;
6350 	u32 reply_address;
6351 	u16 smid;
6352 	struct _tr_list *delayed_tr, *delayed_tr_next;
6353 	struct _sc_list *delayed_sc, *delayed_sc_next;
6354 	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
6355 	u8 hide_flag;
6356 	struct adapter_reply_queue *reply_q;
6357 	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
6358 
6359 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6360 	    __func__));
6361 
6362 	/* clean the delayed target reset list */
6363 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6364 	    &ioc->delayed_tr_list, list) {
6365 		list_del(&delayed_tr->list);
6366 		kfree(delayed_tr);
6367 	}
6368 
6369 
6370 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6371 	    &ioc->delayed_tr_volume_list, list) {
6372 		list_del(&delayed_tr->list);
6373 		kfree(delayed_tr);
6374 	}
6375 
6376 	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
6377 	    &ioc->delayed_sc_list, list) {
6378 		list_del(&delayed_sc->list);
6379 		kfree(delayed_sc);
6380 	}
6381 
6382 	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
6383 	    &ioc->delayed_event_ack_list, list) {
6384 		list_del(&delayed_event_ack->list);
6385 		kfree(delayed_event_ack);
6386 	}
6387 
6388 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
6389 
6390 	/* hi-priority queue */
6391 	INIT_LIST_HEAD(&ioc->hpr_free_list);
6392 	smid = ioc->hi_priority_smid;
6393 	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
6394 		ioc->hpr_lookup[i].cb_idx = 0xFF;
6395 		ioc->hpr_lookup[i].smid = smid;
6396 		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
6397 		    &ioc->hpr_free_list);
6398 	}
6399 
6400 	/* internal queue */
6401 	INIT_LIST_HEAD(&ioc->internal_free_list);
6402 	smid = ioc->internal_smid;
6403 	for (i = 0; i < ioc->internal_depth; i++, smid++) {
6404 		ioc->internal_lookup[i].cb_idx = 0xFF;
6405 		ioc->internal_lookup[i].smid = smid;
6406 		list_add_tail(&ioc->internal_lookup[i].tracker_list,
6407 		    &ioc->internal_free_list);
6408 	}
6409 
6410 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6411 
6412 	/* initialize Reply Free Queue */
6413 	for (i = 0, reply_address = (u32)ioc->reply_dma ;
6414 	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
6415 	    ioc->reply_sz) {
6416 		ioc->reply_free[i] = cpu_to_le32(reply_address);
6417 		if (ioc->is_mcpu_endpoint)
6418 			_base_clone_reply_to_sys_mem(ioc,
6419 					reply_address, i);
6420 	}
6421 
6422 	/* initialize reply queues */
6423 	if (ioc->is_driver_loading)
6424 		_base_assign_reply_queues(ioc);
6425 
6426 	/* initialize Reply Post Free Queue */
6427 	index = 0;
6428 	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
6429 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
6430 		/*
6431 		 * If RDPQ is enabled, switch to the next allocation.
6432 		 * Otherwise advance within the contiguous region.
6433 		 */
6434 		if (ioc->rdpq_array_enable) {
6435 			reply_q->reply_post_free =
6436 				ioc->reply_post[index++].reply_post_free;
6437 		} else {
6438 			reply_q->reply_post_free = reply_post_free_contig;
6439 			reply_post_free_contig += ioc->reply_post_queue_depth;
6440 		}
6441 
6442 		reply_q->reply_post_host_index = 0;
6443 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
6444 			reply_q->reply_post_free[i].Words =
6445 			    cpu_to_le64(ULLONG_MAX);
6446 		if (!_base_is_controller_msix_enabled(ioc))
6447 			goto skip_init_reply_post_free_queue;
6448 	}
6449  skip_init_reply_post_free_queue:
6450 
6451 	r = _base_send_ioc_init(ioc);
6452 	if (r)
6453 		return r;
6454 
6455 	/* initialize reply free host index */
6456 	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
6457 	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
6458 
6459 	/* initialize reply post host index */
6460 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
6461 		if (ioc->combined_reply_queue)
6462 			writel((reply_q->msix_index & 7)<<
6463 			   MPI2_RPHI_MSIX_INDEX_SHIFT,
6464 			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
6465 		else
6466 			writel(reply_q->msix_index <<
6467 				MPI2_RPHI_MSIX_INDEX_SHIFT,
6468 				&ioc->chip->ReplyPostHostIndex);
6469 
6470 		if (!_base_is_controller_msix_enabled(ioc))
6471 			goto skip_init_reply_post_host_index;
6472 	}
6473 
6474  skip_init_reply_post_host_index:
6475 
6476 	_base_unmask_interrupts(ioc);
6477 
6478 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6479 		r = _base_display_fwpkg_version(ioc);
6480 		if (r)
6481 			return r;
6482 	}
6483 
6484 	_base_static_config_pages(ioc);
6485 	r = _base_event_notification(ioc);
6486 	if (r)
6487 		return r;
6488 
6489 	if (ioc->is_driver_loading) {
6490 
6491 		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
6492 		    == 0x80) {
6493 			hide_flag = (u8) (
6494 			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
6495 			    MFG_PAGE10_HIDE_SSDS_MASK);
6496 			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
6497 				ioc->mfg_pg10_hide_flag = hide_flag;
6498 		}
6499 
6500 		ioc->wait_for_discovery_to_complete =
6501 		    _base_determine_wait_on_discovery(ioc);
6502 
6503 		return r; /* scan_start and scan_finished support */
6504 	}
6505 
6506 	r = _base_send_port_enable(ioc);
6507 	if (r)
6508 		return r;
6509 
6510 	return r;
6511 }
6512 
6513 /**
6514  * mpt3sas_base_free_resources - free resources controller resources
6515  * @ioc: per adapter object
6516  *
6517  * Return nothing.
6518  */
6519 void
6520 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
6521 {
6522 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6523 	    __func__));
6524 
6525 	/* synchronizing freeing resource with pci_access_mutex lock */
6526 	mutex_lock(&ioc->pci_access_mutex);
6527 	if (ioc->chip_phys && ioc->chip) {
6528 		_base_mask_interrupts(ioc);
6529 		ioc->shost_recovery = 1;
6530 		_base_make_ioc_ready(ioc, SOFT_RESET);
6531 		ioc->shost_recovery = 0;
6532 	}
6533 
6534 	mpt3sas_base_unmap_resources(ioc);
6535 	mutex_unlock(&ioc->pci_access_mutex);
6536 	return;
6537 }
6538 
6539 /**
6540  * mpt3sas_base_attach - attach controller instance
6541  * @ioc: per adapter object
6542  *
6543  * Returns 0 for success, non-zero for failure.
6544  */
6545 int
6546 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6547 {
6548 	int r, i;
6549 	int cpu_id, last_cpu_id = 0;
6550 
6551 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6552 	    __func__));
6553 
6554 	/* setup cpu_msix_table */
6555 	ioc->cpu_count = num_online_cpus();
6556 	for_each_online_cpu(cpu_id)
6557 		last_cpu_id = cpu_id;
6558 	ioc->cpu_msix_table_sz = last_cpu_id + 1;
6559 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
6560 	ioc->reply_queue_count = 1;
6561 	if (!ioc->cpu_msix_table) {
6562 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
6563 			"allocation for cpu_msix_table failed!!!\n",
6564 			ioc->name));
6565 		r = -ENOMEM;
6566 		goto out_free_resources;
6567 	}
6568 
6569 	if (ioc->is_warpdrive) {
6570 		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
6571 		    sizeof(resource_size_t *), GFP_KERNEL);
6572 		if (!ioc->reply_post_host_index) {
6573 			dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
6574 				"for reply_post_host_index failed!!!\n",
6575 				ioc->name));
6576 			r = -ENOMEM;
6577 			goto out_free_resources;
6578 		}
6579 	}
6580 
6581 	ioc->rdpq_array_enable_assigned = 0;
6582 	ioc->dma_mask = 0;
6583 	r = mpt3sas_base_map_resources(ioc);
6584 	if (r)
6585 		goto out_free_resources;
6586 
6587 	pci_set_drvdata(ioc->pdev, ioc->shost);
6588 	r = _base_get_ioc_facts(ioc);
6589 	if (r)
6590 		goto out_free_resources;
6591 
6592 	switch (ioc->hba_mpi_version_belonged) {
6593 	case MPI2_VERSION:
6594 		ioc->build_sg_scmd = &_base_build_sg_scmd;
6595 		ioc->build_sg = &_base_build_sg;
6596 		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
6597 		break;
6598 	case MPI25_VERSION:
6599 	case MPI26_VERSION:
6600 		/*
6601 		 * In SAS3.0,
6602 		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
6603 		 * Target Status - all require the IEEE formated scatter gather
6604 		 * elements.
6605 		 */
6606 		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
6607 		ioc->build_sg = &_base_build_sg_ieee;
6608 		ioc->build_nvme_prp = &_base_build_nvme_prp;
6609 		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
6610 		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
6611 
6612 		break;
6613 	}
6614 
6615 	if (ioc->is_mcpu_endpoint)
6616 		ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
6617 	else
6618 		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
6619 
6620 	/*
6621 	 * These function pointers for other requests that don't
6622 	 * the require IEEE scatter gather elements.
6623 	 *
6624 	 * For example Configuration Pages and SAS IOUNIT Control don't.
6625 	 */
6626 	ioc->build_sg_mpi = &_base_build_sg;
6627 	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
6628 
6629 	r = _base_make_ioc_ready(ioc, SOFT_RESET);
6630 	if (r)
6631 		goto out_free_resources;
6632 
6633 	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
6634 	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
6635 	if (!ioc->pfacts) {
6636 		r = -ENOMEM;
6637 		goto out_free_resources;
6638 	}
6639 
6640 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
6641 		r = _base_get_port_facts(ioc, i);
6642 		if (r)
6643 			goto out_free_resources;
6644 	}
6645 
6646 	r = _base_allocate_memory_pools(ioc);
6647 	if (r)
6648 		goto out_free_resources;
6649 
6650 	init_waitqueue_head(&ioc->reset_wq);
6651 
6652 	/* allocate memory pd handle bitmask list */
6653 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
6654 	if (ioc->facts.MaxDevHandle % 8)
6655 		ioc->pd_handles_sz++;
6656 	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
6657 	    GFP_KERNEL);
6658 	if (!ioc->pd_handles) {
6659 		r = -ENOMEM;
6660 		goto out_free_resources;
6661 	}
6662 	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
6663 	    GFP_KERNEL);
6664 	if (!ioc->blocking_handles) {
6665 		r = -ENOMEM;
6666 		goto out_free_resources;
6667 	}
6668 
6669 	/* allocate memory for pending OS device add list */
6670 	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
6671 	if (ioc->facts.MaxDevHandle % 8)
6672 		ioc->pend_os_device_add_sz++;
6673 	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
6674 	    GFP_KERNEL);
6675 	if (!ioc->pend_os_device_add)
6676 		goto out_free_resources;
6677 
6678 	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
6679 	ioc->device_remove_in_progress =
6680 		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
6681 	if (!ioc->device_remove_in_progress)
6682 		goto out_free_resources;
6683 
6684 	ioc->fwfault_debug = mpt3sas_fwfault_debug;
6685 
6686 	/* base internal command bits */
6687 	mutex_init(&ioc->base_cmds.mutex);
6688 	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6689 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6690 
6691 	/* port_enable command bits */
6692 	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6693 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6694 
6695 	/* transport internal command bits */
6696 	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6697 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
6698 	mutex_init(&ioc->transport_cmds.mutex);
6699 
6700 	/* scsih internal command bits */
6701 	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6702 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
6703 	mutex_init(&ioc->scsih_cmds.mutex);
6704 
6705 	/* task management internal command bits */
6706 	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6707 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
6708 	mutex_init(&ioc->tm_cmds.mutex);
6709 
6710 	/* config page internal command bits */
6711 	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6712 	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
6713 	mutex_init(&ioc->config_cmds.mutex);
6714 
6715 	/* ctl module internal command bits */
6716 	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6717 	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
6718 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
6719 	mutex_init(&ioc->ctl_cmds.mutex);
6720 
6721 	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
6722 	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
6723 	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
6724 	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
6725 		r = -ENOMEM;
6726 		goto out_free_resources;
6727 	}
6728 
6729 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6730 		ioc->event_masks[i] = -1;
6731 
6732 	/* here we enable the events we care about */
6733 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
6734 	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
6735 	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
6736 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
6737 	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
6738 	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
6739 	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
6740 	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
6741 	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
6742 	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
6743 	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
6744 	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
6745 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
6746 	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
6747 		if (ioc->is_gen35_ioc) {
6748 			_base_unmask_events(ioc,
6749 				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
6750 			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
6751 			_base_unmask_events(ioc,
6752 				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
6753 		}
6754 	}
6755 	r = _base_make_ioc_operational(ioc);
6756 	if (r)
6757 		goto out_free_resources;
6758 
6759 	ioc->non_operational_loop = 0;
6760 	ioc->got_task_abort_from_ioctl = 0;
6761 	return 0;
6762 
6763  out_free_resources:
6764 
6765 	ioc->remove_host = 1;
6766 
6767 	mpt3sas_base_free_resources(ioc);
6768 	_base_release_memory_pools(ioc);
6769 	pci_set_drvdata(ioc->pdev, NULL);
6770 	kfree(ioc->cpu_msix_table);
6771 	if (ioc->is_warpdrive)
6772 		kfree(ioc->reply_post_host_index);
6773 	kfree(ioc->pd_handles);
6774 	kfree(ioc->blocking_handles);
6775 	kfree(ioc->device_remove_in_progress);
6776 	kfree(ioc->pend_os_device_add);
6777 	kfree(ioc->tm_cmds.reply);
6778 	kfree(ioc->transport_cmds.reply);
6779 	kfree(ioc->scsih_cmds.reply);
6780 	kfree(ioc->config_cmds.reply);
6781 	kfree(ioc->base_cmds.reply);
6782 	kfree(ioc->port_enable_cmds.reply);
6783 	kfree(ioc->ctl_cmds.reply);
6784 	kfree(ioc->ctl_cmds.sense);
6785 	kfree(ioc->pfacts);
6786 	ioc->ctl_cmds.reply = NULL;
6787 	ioc->base_cmds.reply = NULL;
6788 	ioc->tm_cmds.reply = NULL;
6789 	ioc->scsih_cmds.reply = NULL;
6790 	ioc->transport_cmds.reply = NULL;
6791 	ioc->config_cmds.reply = NULL;
6792 	ioc->pfacts = NULL;
6793 	return r;
6794 }
6795 
6796 
6797 /**
6798  * mpt3sas_base_detach - remove controller instance
6799  * @ioc: per adapter object
6800  *
6801  * Return nothing.
6802  */
6803 void
6804 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
6805 {
6806 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
6807 	    __func__));
6808 
6809 	mpt3sas_base_stop_watchdog(ioc);
6810 	mpt3sas_base_free_resources(ioc);
6811 	_base_release_memory_pools(ioc);
6812 	mpt3sas_free_enclosure_list(ioc);
6813 	pci_set_drvdata(ioc->pdev, NULL);
6814 	kfree(ioc->cpu_msix_table);
6815 	if (ioc->is_warpdrive)
6816 		kfree(ioc->reply_post_host_index);
6817 	kfree(ioc->pd_handles);
6818 	kfree(ioc->blocking_handles);
6819 	kfree(ioc->device_remove_in_progress);
6820 	kfree(ioc->pend_os_device_add);
6821 	kfree(ioc->pfacts);
6822 	kfree(ioc->ctl_cmds.reply);
6823 	kfree(ioc->ctl_cmds.sense);
6824 	kfree(ioc->base_cmds.reply);
6825 	kfree(ioc->port_enable_cmds.reply);
6826 	kfree(ioc->tm_cmds.reply);
6827 	kfree(ioc->transport_cmds.reply);
6828 	kfree(ioc->scsih_cmds.reply);
6829 	kfree(ioc->config_cmds.reply);
6830 }
6831 
6832 /**
6833  * _base_reset_handler - reset callback handler (for base)
6834  * @ioc: per adapter object
6835  * @reset_phase: phase
6836  *
6837  * The handler for doing any required cleanup or initialization.
6838  *
6839  * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
6840  * MPT3_IOC_DONE_RESET
6841  *
6842  * Return nothing.
6843  */
6844 static void
6845 _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
6846 {
6847 	mpt3sas_scsih_reset_handler(ioc, reset_phase);
6848 	mpt3sas_ctl_reset_handler(ioc, reset_phase);
6849 	switch (reset_phase) {
6850 	case MPT3_IOC_PRE_RESET:
6851 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
6852 		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
6853 		break;
6854 	case MPT3_IOC_AFTER_RESET:
6855 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
6856 		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
6857 		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
6858 			ioc->transport_cmds.status |= MPT3_CMD_RESET;
6859 			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
6860 			complete(&ioc->transport_cmds.done);
6861 		}
6862 		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6863 			ioc->base_cmds.status |= MPT3_CMD_RESET;
6864 			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
6865 			complete(&ioc->base_cmds.done);
6866 		}
6867 		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6868 			ioc->port_enable_failed = 1;
6869 			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
6870 			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
6871 			if (ioc->is_driver_loading) {
6872 				ioc->start_scan_failed =
6873 				    MPI2_IOCSTATUS_INTERNAL_ERROR;
6874 				ioc->start_scan = 0;
6875 				ioc->port_enable_cmds.status =
6876 				    MPT3_CMD_NOT_USED;
6877 			} else
6878 				complete(&ioc->port_enable_cmds.done);
6879 		}
6880 		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
6881 			ioc->config_cmds.status |= MPT3_CMD_RESET;
6882 			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
6883 			ioc->config_cmds.smid = USHRT_MAX;
6884 			complete(&ioc->config_cmds.done);
6885 		}
6886 		break;
6887 	case MPT3_IOC_DONE_RESET:
6888 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
6889 			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
6890 		break;
6891 	}
6892 }
6893 
6894 /**
6895  * mpt3sas_wait_for_commands_to_complete - reset controller
6896  * @ioc: Pointer to MPT_ADAPTER structure
6897  *
6898  * This function is waiting 10s for all pending commands to complete
6899  * prior to putting controller in reset.
6900  */
6901 void
6902 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
6903 {
6904 	u32 ioc_state;
6905 
6906 	ioc->pending_io_count = 0;
6907 
6908 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6909 	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
6910 		return;
6911 
6912 	/* pending command count */
6913 	ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
6914 
6915 	if (!ioc->pending_io_count)
6916 		return;
6917 
6918 	/* wait for pending commands to complete */
6919 	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
6920 }
6921 
6922 /**
6923  * mpt3sas_base_hard_reset_handler - reset controller
6924  * @ioc: Pointer to MPT_ADAPTER structure
6925  * @type: FORCE_BIG_HAMMER or SOFT_RESET
6926  *
6927  * Returns 0 for success, non-zero for failure.
6928  */
6929 int
6930 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
6931 	enum reset_type type)
6932 {
6933 	int r;
6934 	unsigned long flags;
6935 	u32 ioc_state;
6936 	u8 is_fault = 0, is_trigger = 0;
6937 
6938 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
6939 	    __func__));
6940 
6941 	if (ioc->pci_error_recovery) {
6942 		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
6943 		    ioc->name, __func__);
6944 		r = 0;
6945 		goto out_unlocked;
6946 	}
6947 
6948 	if (mpt3sas_fwfault_debug)
6949 		mpt3sas_halt_firmware(ioc);
6950 
6951 	/* wait for an active reset in progress to complete */
6952 	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
6953 		do {
6954 			ssleep(1);
6955 		} while (ioc->shost_recovery == 1);
6956 		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
6957 		    __func__));
6958 		return ioc->ioc_reset_in_progress_status;
6959 	}
6960 
6961 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6962 	ioc->shost_recovery = 1;
6963 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6964 
6965 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6966 	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
6967 	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6968 	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
6969 		is_trigger = 1;
6970 		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6971 		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
6972 			is_fault = 1;
6973 	}
6974 	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
6975 	mpt3sas_wait_for_commands_to_complete(ioc);
6976 	_base_mask_interrupts(ioc);
6977 	r = _base_make_ioc_ready(ioc, type);
6978 	if (r)
6979 		goto out;
6980 	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
6981 
6982 	/* If this hard reset is called while port enable is active, then
6983 	 * there is no reason to call make_ioc_operational
6984 	 */
6985 	if (ioc->is_driver_loading && ioc->port_enable_failed) {
6986 		ioc->remove_host = 1;
6987 		r = -EFAULT;
6988 		goto out;
6989 	}
6990 	r = _base_get_ioc_facts(ioc);
6991 	if (r)
6992 		goto out;
6993 
6994 	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
6995 		panic("%s: Issue occurred with flashing controller firmware."
6996 		      "Please reboot the system and ensure that the correct"
6997 		      " firmware version is running\n", ioc->name);
6998 
6999 	r = _base_make_ioc_operational(ioc);
7000 	if (!r)
7001 		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
7002 
7003  out:
7004 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
7005 	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
7006 
7007 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
7008 	ioc->ioc_reset_in_progress_status = r;
7009 	ioc->shost_recovery = 0;
7010 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
7011 	ioc->ioc_reset_count++;
7012 	mutex_unlock(&ioc->reset_in_progress_mutex);
7013 
7014  out_unlocked:
7015 	if ((r == 0) && is_trigger) {
7016 		if (is_fault)
7017 			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
7018 		else
7019 			mpt3sas_trigger_master(ioc,
7020 			    MASTER_TRIGGER_ADAPTER_RESET);
7021 	}
7022 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
7023 	    __func__));
7024 	return r;
7025 }
7026