1 /*
2  * This is the Fusion MPT base driver providing common API layer interface
3  * for access to MPT (Message Passing Technology) firmware.
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6  * Copyright (C) 2012-2014  LSI Corporation
7  * Copyright (C) 2013-2014 Avago Technologies
8  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * as published by the Free Software Foundation; either version 2
13  * of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * NO WARRANTY
21  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25  * solely responsible for determining the appropriateness of using and
26  * distributing the Program and assumes all risks associated with its
27  * exercise of rights under this Agreement, including but not limited to
28  * the risks and costs of program errors, damage to or loss of data,
29  * programs or equipment, and unavailability or interruption of operations.
30 
31  * DISCLAIMER OF LIABILITY
32  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 
40  * You should have received a copy of the GNU General Public License
41  * along with this program; if not, write to the Free Software
42  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43  * USA.
44  */
45 
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/kdev_t.h>
54 #include <linux/blkdev.h>
55 #include <linux/delay.h>
56 #include <linux/interrupt.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/io.h>
59 #include <linux/time.h>
60 #include <linux/ktime.h>
61 #include <linux/kthread.h>
62 #include <asm/page.h>        /* To get host page size per arch */
63 #include <linux/aer.h>
64 
65 
66 #include "mpt3sas_base.h"
67 
/* Table of registered callback handlers, indexed by callback index. */
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


/* Period of the fault-polling watchdog work (_base_fault_reset_work). */
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000

/* Module parameters; 0444 parameters are read-only once loaded. */
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

/*
 * Writable at runtime (0644) through _scsih_set_fwfault_debug(); the
 * module_param_call() wiring appears below that function in this file.
 */
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

/* Legal values of the perf_mode module parameter above. */
enum mpt3sas_perf_mode {
	MPT_PERF_MODE_DEFAULT	= -1,
	MPT_PERF_MODE_BALANCED	= 0,
	MPT_PERF_MODE_IOPS	= 1,
	MPT_PERF_MODE_LATENCY	= 2,
};

/* Forward declarations; these static helpers are defined later in the file. */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
		u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
133 
134 /**
135  * mpt3sas_base_check_cmd_timeout - Function
136  *		to check timeout and command termination due
137  *		to Host reset.
138  *
139  * @ioc:	per adapter object.
140  * @status:	Status of issued command.
141  * @mpi_request:mf request pointer.
142  * @sz:		size of buffer.
143  *
144  * @Returns - 1/0 Reset to be done or Not
145  */
146 u8
147 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
148 		u8 status, void *mpi_request, int sz)
149 {
150 	u8 issue_reset = 0;
151 
152 	if (!(status & MPT3_CMD_RESET))
153 		issue_reset = 1;
154 
155 	ioc_err(ioc, "Command %s\n",
156 		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
157 	_debug_dump_mf(mpi_request, sz);
158 
159 	return issue_reset;
160 }
161 
162 /**
163  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
164  * @val: ?
165  * @kp: ?
166  *
167  * Return: ?
168  */
169 static int
170 _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
171 {
172 	int ret = param_set_int(val, kp);
173 	struct MPT3SAS_ADAPTER *ioc;
174 
175 	if (ret)
176 		return ret;
177 
178 	/* global ioc spinlock to protect controller list on list operations */
179 	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
180 	spin_lock(&gioc_lock);
181 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
182 		ioc->fwfault_debug = mpt3sas_fwfault_debug;
183 	spin_unlock(&gioc_lock);
184 	return 0;
185 }
186 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
187 	param_get_int, &mpt3sas_fwfault_debug, 0644);
188 
189 /**
190  * _base_readl_aero - retry readl for max three times.
191  * @addr: MPT Fusion system interface register address
192  *
193  * Retry the readl() for max three times if it gets zero value
194  * while reading the system interface register.
195  */
196 static inline u32
197 _base_readl_aero(const volatile void __iomem *addr)
198 {
199 	u32 i = 0, ret_val;
200 
201 	do {
202 		ret_val = readl(addr);
203 		i++;
204 	} while (ret_val == 0 && i < 3);
205 
206 	return ret_val;
207 }
208 
/**
 * _base_readl - plain readl(), same signature as _base_readl_aero()
 * @addr: MPT Fusion system interface register address
 */
static inline u32
_base_readl(const volatile void __iomem *addr)
{
	return readl(addr);
}
214 
215 /**
216  * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
217  *				  in BAR0 space.
218  *
219  * @ioc: per adapter object
220  * @reply: reply message frame(lower 32bit addr)
221  * @index: System request message index.
222  */
223 static void
224 _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
225 		u32 index)
226 {
227 	/*
228 	 * 256 is offset within sys register.
229 	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
230 	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
231 	 */
232 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
233 	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
234 			MPI_FRAME_START_OFFSET +
235 			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));
236 
237 	writel(reply, reply_free_iomem);
238 }
239 
240 /**
241  * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
242  *				to system/BAR0 region.
243  *
244  * @dst_iomem: Pointer to the destination location in BAR0 space.
245  * @src: Pointer to the Source data.
246  * @size: Size of data to be copied.
247  */
248 static void
249 _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
250 {
251 	int i;
252 	u32 *src_virt_mem = (u32 *)src;
253 
254 	for (i = 0; i < size/4; i++)
255 		writel((u32)src_virt_mem[i],
256 				(void __iomem *)dst_iomem + (i * 4));
257 }
258 
259 /**
260  * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
261  *
262  * @dst_iomem: Pointer to the destination location in BAR0 space.
263  * @src: Pointer to the Source data.
264  * @size: Size of data to be copied.
265  */
266 static void
267 _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
268 {
269 	int i;
270 	u32 *src_virt_mem = (u32 *)(src);
271 
272 	for (i = 0; i < size/4; i++)
273 		writel((u32)src_virt_mem[i],
274 			(void __iomem *)dst_iomem + (i * 4));
275 }
276 
277 /**
278  * _base_get_chain - Calculates and Returns virtual chain address
279  *			 for the provided smid in BAR0 space.
280  *
281  * @ioc: per adapter object
282  * @smid: system request message index
283  * @sge_chain_count: Scatter gather chain count.
284  *
285  * Return: the chain address.
286  */
287 static inline void __iomem*
288 _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
289 		u8 sge_chain_count)
290 {
291 	void __iomem *base_chain, *chain_virt;
292 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
293 
294 	base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
295 		(cmd_credit * ioc->request_sz) +
296 		REPLY_FREE_POOL_SIZE;
297 	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
298 			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
299 	return chain_virt;
300 }
301 
302 /**
303  * _base_get_chain_phys - Calculates and Returns physical address
304  *			in BAR0 for scatter gather chains, for
305  *			the provided smid.
306  *
307  * @ioc: per adapter object
308  * @smid: system request message index
309  * @sge_chain_count: Scatter gather chain count.
310  *
311  * Return: Physical chain address.
312  */
313 static inline phys_addr_t
314 _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
315 		u8 sge_chain_count)
316 {
317 	phys_addr_t base_chain_phys, chain_phys;
318 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
319 
320 	base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
321 		(cmd_credit * ioc->request_sz) +
322 		REPLY_FREE_POOL_SIZE;
323 	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
324 			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
325 	return chain_phys;
326 }
327 
328 /**
329  * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
330  *			buffer address for the provided smid.
331  *			(Each smid can have 64K starts from 17024)
332  *
333  * @ioc: per adapter object
334  * @smid: system request message index
335  *
336  * Return: Pointer to buffer location in BAR0.
337  */
338 
339 static void __iomem *
340 _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
341 {
342 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
343 	// Added extra 1 to reach end of chain.
344 	void __iomem *chain_end = _base_get_chain(ioc,
345 			cmd_credit + 1,
346 			ioc->facts.MaxChainDepth);
347 	return chain_end + (smid * 64 * 1024);
348 }
349 
350 /**
351  * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
352  *		Host buffer Physical address for the provided smid.
353  *		(Each smid can have 64K starts from 17024)
354  *
355  * @ioc: per adapter object
356  * @smid: system request message index
357  *
358  * Return: Pointer to buffer location in BAR0.
359  */
360 static phys_addr_t
361 _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
362 {
363 	u16 cmd_credit = ioc->facts.RequestCredit + 1;
364 	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
365 			cmd_credit + 1,
366 			ioc->facts.MaxChainDepth);
367 	return chain_end_phys + (smid * 64 * 1024);
368 }
369 
370 /**
371  * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
372  *			lookup list and Provides chain_buffer
373  *			address for the matching dma address.
374  *			(Each smid can have 64K starts from 17024)
375  *
376  * @ioc: per adapter object
377  * @chain_buffer_dma: Chain buffer dma address.
378  *
379  * Return: Pointer to chain buffer. Or Null on Failure.
380  */
381 static void *
382 _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
383 		dma_addr_t chain_buffer_dma)
384 {
385 	u16 index, j;
386 	struct chain_tracker *ct;
387 
388 	for (index = 0; index < ioc->scsiio_depth; index++) {
389 		for (j = 0; j < ioc->chains_needed_per_io; j++) {
390 			ct = &ioc->chain_lookup[index].chains_per_smid[j];
391 			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
392 				return ct->chain_buffer;
393 		}
394 	}
395 	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
396 	return NULL;
397 }
398 
399 /**
400  * _clone_sg_entries -	MPI EP's scsiio and config requests
401  *			are handled here. Base function for
402  *			double buffering, before submitting
403  *			the requests.
404  *
405  * @ioc: per adapter object.
406  * @mpi_request: mf request pointer.
407  * @smid: system request message index.
408  */
409 static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
410 		void *mpi_request, u16 smid)
411 {
412 	Mpi2SGESimple32_t *sgel, *sgel_next;
413 	u32  sgl_flags, sge_chain_count = 0;
414 	bool is_write = false;
415 	u16 i = 0;
416 	void __iomem *buffer_iomem;
417 	phys_addr_t buffer_iomem_phys;
418 	void __iomem *buff_ptr;
419 	phys_addr_t buff_ptr_phys;
420 	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
421 	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
422 	phys_addr_t dst_addr_phys;
423 	MPI2RequestHeader_t *request_hdr;
424 	struct scsi_cmnd *scmd;
425 	struct scatterlist *sg_scmd = NULL;
426 	int is_scsiio_req = 0;
427 
428 	request_hdr = (MPI2RequestHeader_t *) mpi_request;
429 
430 	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
431 		Mpi25SCSIIORequest_t *scsiio_request =
432 			(Mpi25SCSIIORequest_t *)mpi_request;
433 		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
434 		is_scsiio_req = 1;
435 	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
436 		Mpi2ConfigRequest_t  *config_req =
437 			(Mpi2ConfigRequest_t *)mpi_request;
438 		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
439 	} else
440 		return;
441 
442 	/* From smid we can get scsi_cmd, once we have sg_scmd,
443 	 * we just need to get sg_virt and sg_next to get virual
444 	 * address associated with sgel->Address.
445 	 */
446 
447 	if (is_scsiio_req) {
448 		/* Get scsi_cmd using smid */
449 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
450 		if (scmd == NULL) {
451 			ioc_err(ioc, "scmd is NULL\n");
452 			return;
453 		}
454 
455 		/* Get sg_scmd from scmd provided */
456 		sg_scmd = scsi_sglist(scmd);
457 	}
458 
459 	/*
460 	 * 0 - 255	System register
461 	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
462 	 * 4352 - 4864	Reply_free pool (512 byte is reserved
463 	 *		considering maxCredit 32. Reply need extra
464 	 *		room, for mCPU case kept four times of
465 	 *		maxCredit).
466 	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
467 	 *		128 byte size = 12288)
468 	 * 17152 - x	Host buffer mapped with smid.
469 	 *		(Each smid can have 64K Max IO.)
470 	 * BAR0+Last 1K MSIX Addr and Data
471 	 * Total size in use 2113664 bytes of 4MB BAR0
472 	 */
473 
474 	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
475 	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
476 
477 	buff_ptr = buffer_iomem;
478 	buff_ptr_phys = buffer_iomem_phys;
479 	WARN_ON(buff_ptr_phys > U32_MAX);
480 
481 	if (le32_to_cpu(sgel->FlagsLength) &
482 			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
483 		is_write = true;
484 
485 	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
486 
487 		sgl_flags =
488 		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
489 
490 		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
491 		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
492 			/*
493 			 * Helper function which on passing
494 			 * chain_buffer_dma returns chain_buffer. Get
495 			 * the virtual address for sgel->Address
496 			 */
497 			sgel_next =
498 				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
499 						le32_to_cpu(sgel->Address));
500 			if (sgel_next == NULL)
501 				return;
502 			/*
503 			 * This is coping 128 byte chain
504 			 * frame (not a host buffer)
505 			 */
506 			dst_chain_addr[sge_chain_count] =
507 				_base_get_chain(ioc,
508 					smid, sge_chain_count);
509 			src_chain_addr[sge_chain_count] =
510 						(void *) sgel_next;
511 			dst_addr_phys = _base_get_chain_phys(ioc,
512 						smid, sge_chain_count);
513 			WARN_ON(dst_addr_phys > U32_MAX);
514 			sgel->Address =
515 				cpu_to_le32(lower_32_bits(dst_addr_phys));
516 			sgel = sgel_next;
517 			sge_chain_count++;
518 			break;
519 		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
520 			if (is_write) {
521 				if (is_scsiio_req) {
522 					_base_clone_to_sys_mem(buff_ptr,
523 					    sg_virt(sg_scmd),
524 					    (le32_to_cpu(sgel->FlagsLength) &
525 					    0x00ffffff));
526 					/*
527 					 * FIXME: this relies on a a zero
528 					 * PCI mem_offset.
529 					 */
530 					sgel->Address =
531 					    cpu_to_le32((u32)buff_ptr_phys);
532 				} else {
533 					_base_clone_to_sys_mem(buff_ptr,
534 					    ioc->config_vaddr,
535 					    (le32_to_cpu(sgel->FlagsLength) &
536 					    0x00ffffff));
537 					sgel->Address =
538 					    cpu_to_le32((u32)buff_ptr_phys);
539 				}
540 			}
541 			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
542 			    0x00ffffff);
543 			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
544 			    0x00ffffff);
545 			if ((le32_to_cpu(sgel->FlagsLength) &
546 			    (MPI2_SGE_FLAGS_END_OF_BUFFER
547 					<< MPI2_SGE_FLAGS_SHIFT)))
548 				goto eob_clone_chain;
549 			else {
550 				/*
551 				 * Every single element in MPT will have
552 				 * associated sg_next. Better to sanity that
553 				 * sg_next is not NULL, but it will be a bug
554 				 * if it is null.
555 				 */
556 				if (is_scsiio_req) {
557 					sg_scmd = sg_next(sg_scmd);
558 					if (sg_scmd)
559 						sgel++;
560 					else
561 						goto eob_clone_chain;
562 				}
563 			}
564 			break;
565 		}
566 	}
567 
568 eob_clone_chain:
569 	for (i = 0; i < sge_chain_count; i++) {
570 		if (is_scsiio_req)
571 			_base_clone_to_sys_mem(dst_chain_addr[i],
572 				src_chain_addr[i], ioc->request_sz);
573 	}
574 }
575 
576 /**
577  *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
578  * @arg: input argument, used to derive ioc
579  *
580  * Return:
581  * 0 if controller is removed from pci subsystem.
582  * -1 for other case.
583  */
584 static int mpt3sas_remove_dead_ioc_func(void *arg)
585 {
586 	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
587 	struct pci_dev *pdev;
588 
589 	if (!ioc)
590 		return -1;
591 
592 	pdev = ioc->pdev;
593 	if (!pdev)
594 		return -1;
595 	pci_stop_and_remove_bus_device_locked(pdev);
596 	return 0;
597 }
598 
599 /**
600  * _base_sync_drv_fw_timestamp - Sync Drive-Fw TimeStamp.
601  * @ioc: Per Adapter Object
602  *
603  * Return nothing.
604  */
605 static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
606 {
607 	Mpi26IoUnitControlRequest_t *mpi_request;
608 	Mpi26IoUnitControlReply_t *mpi_reply;
609 	u16 smid;
610 	ktime_t current_time;
611 	u64 TimeStamp = 0;
612 	u8 issue_reset = 0;
613 
614 	mutex_lock(&ioc->scsih_cmds.mutex);
615 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
616 		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
617 		goto out;
618 	}
619 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
620 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
621 	if (!smid) {
622 		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
623 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
624 		goto out;
625 	}
626 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
627 	ioc->scsih_cmds.smid = smid;
628 	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
629 	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
630 	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
631 	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
632 	current_time = ktime_get_real();
633 	TimeStamp = ktime_to_ms(current_time);
634 	mpi_request->Reserved7 = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
635 	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp >> 32);
636 	init_completion(&ioc->scsih_cmds.done);
637 	ioc->put_smid_default(ioc, smid);
638 	dinitprintk(ioc, ioc_info(ioc,
639 	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
640 	    TimeStamp));
641 	wait_for_completion_timeout(&ioc->scsih_cmds.done,
642 		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
643 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
644 		mpt3sas_check_cmd_timeout(ioc,
645 		    ioc->scsih_cmds.status, mpi_request,
646 		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
647 		goto issue_host_reset;
648 	}
649 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
650 		mpi_reply = ioc->scsih_cmds.reply;
651 		dinitprintk(ioc, ioc_info(ioc,
652 		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
653 		    le16_to_cpu(mpi_reply->IOCStatus),
654 		    le32_to_cpu(mpi_reply->IOCLogInfo)));
655 	}
656 issue_host_reset:
657 	if (issue_reset)
658 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
659 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
660 out:
661 	mutex_unlock(&ioc->scsih_cmds.mutex);
662 }
663 
664 /**
665  * _base_fault_reset_work - workq handling ioc fault conditions
666  * @work: input argument, used to derive ioc
667  *
668  * Context: sleep.
669  */
670 static void
671 _base_fault_reset_work(struct work_struct *work)
672 {
673 	struct MPT3SAS_ADAPTER *ioc =
674 	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
675 	unsigned long	 flags;
676 	u32 doorbell;
677 	int rc;
678 	struct task_struct *p;
679 
680 
681 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
682 	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
683 			ioc->pci_error_recovery)
684 		goto rearm_timer;
685 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
686 
687 	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
688 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
689 		ioc_err(ioc, "SAS host is non-operational !!!!\n");
690 
691 		/* It may be possible that EEH recovery can resolve some of
692 		 * pci bus failure issues rather removing the dead ioc function
693 		 * by considering controller is in a non-operational state. So
694 		 * here priority is given to the EEH recovery. If it doesn't
695 		 * not resolve this issue, mpt3sas driver will consider this
696 		 * controller to non-operational state and remove the dead ioc
697 		 * function.
698 		 */
699 		if (ioc->non_operational_loop++ < 5) {
700 			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
701 							 flags);
702 			goto rearm_timer;
703 		}
704 
705 		/*
706 		 * Call _scsih_flush_pending_cmds callback so that we flush all
707 		 * pending commands back to OS. This call is required to aovid
708 		 * deadlock at block layer. Dead IOC will fail to do diag reset,
709 		 * and this call is safe since dead ioc will never return any
710 		 * command back from HW.
711 		 */
712 		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
713 		/*
714 		 * Set remove_host flag early since kernel thread will
715 		 * take some time to execute.
716 		 */
717 		ioc->remove_host = 1;
718 		/*Remove the Dead Host */
719 		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
720 		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
721 		if (IS_ERR(p))
722 			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
723 				__func__);
724 		else
725 			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
726 				__func__);
727 		return; /* don't rearm timer */
728 	}
729 
730 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
731 		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
732 		    ioc->manu_pg11.CoreDumpTOSec :
733 		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
734 
735 		timeout /= (FAULT_POLLING_INTERVAL/1000);
736 
737 		if (ioc->ioc_coredump_loop == 0) {
738 			mpt3sas_print_coredump_info(ioc,
739 			    doorbell & MPI2_DOORBELL_DATA_MASK);
740 			/* do not accept any IOs and disable the interrupts */
741 			spin_lock_irqsave(
742 			    &ioc->ioc_reset_in_progress_lock, flags);
743 			ioc->shost_recovery = 1;
744 			spin_unlock_irqrestore(
745 			    &ioc->ioc_reset_in_progress_lock, flags);
746 			mpt3sas_base_mask_interrupts(ioc);
747 			_base_clear_outstanding_commands(ioc);
748 		}
749 
750 		ioc_info(ioc, "%s: CoreDump loop %d.",
751 		    __func__, ioc->ioc_coredump_loop);
752 
753 		/* Wait until CoreDump completes or times out */
754 		if (ioc->ioc_coredump_loop++ < timeout) {
755 			spin_lock_irqsave(
756 			    &ioc->ioc_reset_in_progress_lock, flags);
757 			goto rearm_timer;
758 		}
759 	}
760 
761 	if (ioc->ioc_coredump_loop) {
762 		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
763 			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
764 			    __func__, ioc->ioc_coredump_loop);
765 		else
766 			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
767 			    __func__, ioc->ioc_coredump_loop);
768 		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
769 	}
770 	ioc->non_operational_loop = 0;
771 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
772 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
773 		ioc_warn(ioc, "%s: hard reset: %s\n",
774 			 __func__, rc == 0 ? "success" : "failed");
775 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
776 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
777 			mpt3sas_print_fault_code(ioc, doorbell &
778 			    MPI2_DOORBELL_DATA_MASK);
779 		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
780 		    MPI2_IOC_STATE_COREDUMP)
781 			mpt3sas_print_coredump_info(ioc, doorbell &
782 			    MPI2_DOORBELL_DATA_MASK);
783 		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
784 		    MPI2_IOC_STATE_OPERATIONAL)
785 			return; /* don't rearm timer */
786 	}
787 	ioc->ioc_coredump_loop = 0;
788 	if (ioc->time_sync_interval &&
789 	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
790 		ioc->timestamp_update_count = 0;
791 		_base_sync_drv_fw_timestamp(ioc);
792 	}
793 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
794  rearm_timer:
795 	if (ioc->fault_reset_work_q)
796 		queue_delayed_work(ioc->fault_reset_work_q,
797 		    &ioc->fault_reset_work,
798 		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
799 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
800 }
801 
802 /**
803  * mpt3sas_base_start_watchdog - start the fault_reset_work_q
804  * @ioc: per adapter object
805  *
806  * Context: sleep.
807  */
808 void
809 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
810 {
811 	unsigned long	 flags;
812 
813 	if (ioc->fault_reset_work_q)
814 		return;
815 
816 	ioc->timestamp_update_count = 0;
817 	/* initialize fault polling */
818 
819 	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
820 	snprintf(ioc->fault_reset_work_q_name,
821 	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
822 	    ioc->driver_name, ioc->id);
823 	ioc->fault_reset_work_q =
824 		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
825 	if (!ioc->fault_reset_work_q) {
826 		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
827 		return;
828 	}
829 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
830 	if (ioc->fault_reset_work_q)
831 		queue_delayed_work(ioc->fault_reset_work_q,
832 		    &ioc->fault_reset_work,
833 		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
834 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
835 }
836 
837 /**
838  * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
839  * @ioc: per adapter object
840  *
841  * Context: sleep.
842  */
843 void
844 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
845 {
846 	unsigned long flags;
847 	struct workqueue_struct *wq;
848 
849 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
850 	wq = ioc->fault_reset_work_q;
851 	ioc->fault_reset_work_q = NULL;
852 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
853 	if (wq) {
854 		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
855 			flush_workqueue(wq);
856 		destroy_workqueue(wq);
857 	}
858 }
859 
860 /**
861  * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
862  * @ioc: per adapter object
863  * @fault_code: fault code
864  */
865 void
866 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
867 {
868 	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
869 }
870 
871 /**
872  * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
873  * @ioc: per adapter object
874  * @fault_code: fault code
875  *
876  * Return nothing.
877  */
878 void
879 mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
880 {
881 	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
882 }
883 
884 /**
885  * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
886  * completes or times out
887  * @ioc: per adapter object
888  * @caller: caller function name
889  *
890  * Returns 0 for success, non-zero for failure.
891  */
892 int
893 mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
894 		const char *caller)
895 {
896 	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
897 			ioc->manu_pg11.CoreDumpTOSec :
898 			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
899 
900 	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
901 					timeout);
902 
903 	if (ioc_state)
904 		ioc_err(ioc,
905 		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
906 		    caller, ioc_state);
907 	else
908 		ioc_info(ioc,
909 		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
910 		    caller, ioc_state);
911 
912 	return ioc_state;
913 }
914 
915 /**
916  * mpt3sas_halt_firmware - halt's mpt controller firmware
917  * @ioc: per adapter object
918  *
919  * For debugging timeout related issues.  Writing 0xCOFFEE00
920  * to the doorbell register will halt controller firmware. With
921  * the purpose to stop both driver and firmware, the enduser can
922  * obtain a ring buffer from controller UART.
923  */
924 void
925 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
926 {
927 	u32 doorbell;
928 
929 	if (!ioc->fwfault_debug)
930 		return;
931 
932 	dump_stack();
933 
934 	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
935 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
936 		mpt3sas_print_fault_code(ioc, doorbell &
937 		    MPI2_DOORBELL_DATA_MASK);
938 	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
939 	    MPI2_IOC_STATE_COREDUMP) {
940 		mpt3sas_print_coredump_info(ioc, doorbell &
941 		    MPI2_DOORBELL_DATA_MASK);
942 	} else {
943 		writel(0xC0FFEE00, &ioc->chip->Doorbell);
944 		ioc_err(ioc, "Firmware is halted due to command timeout\n");
945 	}
946 
947 	if (ioc->fwfault_debug == 2)
948 		for (;;)
949 			;
950 	else
951 		panic("panic in %s\n", __func__);
952 }
953 
954 /**
955  * _base_sas_ioc_info - verbose translation of the ioc status
956  * @ioc: per adapter object
957  * @mpi_reply: reply mf payload returned from firmware
958  * @request_hdr: request mf
959  */
960 static void
961 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
962 	MPI2RequestHeader_t *request_hdr)
963 {
964 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
965 	    MPI2_IOCSTATUS_MASK;
966 	char *desc = NULL;
967 	u16 frame_sz;
968 	char *func_str = NULL;
969 
970 	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
971 	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
972 	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
973 	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
974 		return;
975 
976 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
977 		return;
978 	/*
979 	 * Older Firmware version doesn't support driver trigger pages.
980 	 * So, skip displaying 'config invalid type' type
981 	 * of error message.
982 	 */
983 	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
984 		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
985 
986 		if ((rqst->ExtPageType ==
987 		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
988 		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
989 			return;
990 		}
991 	}
992 
993 	switch (ioc_status) {
994 
995 /****************************************************************************
996 *  Common IOCStatus values for all replies
997 ****************************************************************************/
998 
999 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1000 		desc = "invalid function";
1001 		break;
1002 	case MPI2_IOCSTATUS_BUSY:
1003 		desc = "busy";
1004 		break;
1005 	case MPI2_IOCSTATUS_INVALID_SGL:
1006 		desc = "invalid sgl";
1007 		break;
1008 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
1009 		desc = "internal error";
1010 		break;
1011 	case MPI2_IOCSTATUS_INVALID_VPID:
1012 		desc = "invalid vpid";
1013 		break;
1014 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
1015 		desc = "insufficient resources";
1016 		break;
1017 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
1018 		desc = "insufficient power";
1019 		break;
1020 	case MPI2_IOCSTATUS_INVALID_FIELD:
1021 		desc = "invalid field";
1022 		break;
1023 	case MPI2_IOCSTATUS_INVALID_STATE:
1024 		desc = "invalid state";
1025 		break;
1026 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
1027 		desc = "op state not supported";
1028 		break;
1029 
1030 /****************************************************************************
1031 *  Config IOCStatus values
1032 ****************************************************************************/
1033 
1034 	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
1035 		desc = "config invalid action";
1036 		break;
1037 	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
1038 		desc = "config invalid type";
1039 		break;
1040 	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
1041 		desc = "config invalid page";
1042 		break;
1043 	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
1044 		desc = "config invalid data";
1045 		break;
1046 	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
1047 		desc = "config no defaults";
1048 		break;
1049 	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
1050 		desc = "config cant commit";
1051 		break;
1052 
1053 /****************************************************************************
1054 *  SCSI IO Reply
1055 ****************************************************************************/
1056 
1057 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1058 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1059 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1060 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1061 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1062 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1063 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1064 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1065 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1066 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1067 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1068 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1069 		break;
1070 
1071 /****************************************************************************
1072 *  For use by SCSI Initiator and SCSI Target end-to-end data protection
1073 ****************************************************************************/
1074 
1075 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1076 		desc = "eedp guard error";
1077 		break;
1078 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1079 		desc = "eedp ref tag error";
1080 		break;
1081 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1082 		desc = "eedp app tag error";
1083 		break;
1084 
1085 /****************************************************************************
1086 *  SCSI Target values
1087 ****************************************************************************/
1088 
1089 	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
1090 		desc = "target invalid io index";
1091 		break;
1092 	case MPI2_IOCSTATUS_TARGET_ABORTED:
1093 		desc = "target aborted";
1094 		break;
1095 	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
1096 		desc = "target no conn retryable";
1097 		break;
1098 	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
1099 		desc = "target no connection";
1100 		break;
1101 	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
1102 		desc = "target xfer count mismatch";
1103 		break;
1104 	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
1105 		desc = "target data offset error";
1106 		break;
1107 	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
1108 		desc = "target too much write data";
1109 		break;
1110 	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
1111 		desc = "target iu too short";
1112 		break;
1113 	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
1114 		desc = "target ack nak timeout";
1115 		break;
1116 	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
1117 		desc = "target nak received";
1118 		break;
1119 
1120 /****************************************************************************
1121 *  Serial Attached SCSI values
1122 ****************************************************************************/
1123 
1124 	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
1125 		desc = "smp request failed";
1126 		break;
1127 	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
1128 		desc = "smp data overrun";
1129 		break;
1130 
1131 /****************************************************************************
1132 *  Diagnostic Buffer Post / Diagnostic Release values
1133 ****************************************************************************/
1134 
1135 	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
1136 		desc = "diagnostic released";
1137 		break;
1138 	default:
1139 		break;
1140 	}
1141 
1142 	if (!desc)
1143 		return;
1144 
1145 	switch (request_hdr->Function) {
1146 	case MPI2_FUNCTION_CONFIG:
1147 		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
1148 		func_str = "config_page";
1149 		break;
1150 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
1151 		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
1152 		func_str = "task_mgmt";
1153 		break;
1154 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
1155 		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
1156 		func_str = "sas_iounit_ctl";
1157 		break;
1158 	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1159 		frame_sz = sizeof(Mpi2SepRequest_t);
1160 		func_str = "enclosure";
1161 		break;
1162 	case MPI2_FUNCTION_IOC_INIT:
1163 		frame_sz = sizeof(Mpi2IOCInitRequest_t);
1164 		func_str = "ioc_init";
1165 		break;
1166 	case MPI2_FUNCTION_PORT_ENABLE:
1167 		frame_sz = sizeof(Mpi2PortEnableRequest_t);
1168 		func_str = "port_enable";
1169 		break;
1170 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
1171 		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
1172 		func_str = "smp_passthru";
1173 		break;
1174 	case MPI2_FUNCTION_NVME_ENCAPSULATED:
1175 		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
1176 		    ioc->sge_size;
1177 		func_str = "nvme_encapsulated";
1178 		break;
1179 	default:
1180 		frame_sz = 32;
1181 		func_str = "unknown";
1182 		break;
1183 	}
1184 
1185 	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1186 		 desc, ioc_status, request_hdr, func_str);
1187 
1188 	_debug_dump_mf(request_hdr, frame_sz/4);
1189 }
1190 
1191 /**
1192  * _base_display_event_data - verbose translation of firmware asyn events
1193  * @ioc: per adapter object
1194  * @mpi_reply: reply mf payload returned from firmware
1195  */
1196 static void
1197 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1198 	Mpi2EventNotificationReply_t *mpi_reply)
1199 {
1200 	char *desc = NULL;
1201 	u16 event;
1202 
1203 	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
1204 		return;
1205 
1206 	event = le16_to_cpu(mpi_reply->Event);
1207 
1208 	switch (event) {
1209 	case MPI2_EVENT_LOG_DATA:
1210 		desc = "Log Data";
1211 		break;
1212 	case MPI2_EVENT_STATE_CHANGE:
1213 		desc = "Status Change";
1214 		break;
1215 	case MPI2_EVENT_HARD_RESET_RECEIVED:
1216 		desc = "Hard Reset Received";
1217 		break;
1218 	case MPI2_EVENT_EVENT_CHANGE:
1219 		desc = "Event Change";
1220 		break;
1221 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
1222 		desc = "Device Status Change";
1223 		break;
1224 	case MPI2_EVENT_IR_OPERATION_STATUS:
1225 		if (!ioc->hide_ir_msg)
1226 			desc = "IR Operation Status";
1227 		break;
1228 	case MPI2_EVENT_SAS_DISCOVERY:
1229 	{
1230 		Mpi2EventDataSasDiscovery_t *event_data =
1231 		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1232 		ioc_info(ioc, "Discovery: (%s)",
1233 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
1234 			 "start" : "stop");
1235 		if (event_data->DiscoveryStatus)
1236 			pr_cont(" discovery_status(0x%08x)",
1237 			    le32_to_cpu(event_data->DiscoveryStatus));
1238 		pr_cont("\n");
1239 		return;
1240 	}
1241 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
1242 		desc = "SAS Broadcast Primitive";
1243 		break;
1244 	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1245 		desc = "SAS Init Device Status Change";
1246 		break;
1247 	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1248 		desc = "SAS Init Table Overflow";
1249 		break;
1250 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1251 		desc = "SAS Topology Change List";
1252 		break;
1253 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1254 		desc = "SAS Enclosure Device Status Change";
1255 		break;
1256 	case MPI2_EVENT_IR_VOLUME:
1257 		if (!ioc->hide_ir_msg)
1258 			desc = "IR Volume";
1259 		break;
1260 	case MPI2_EVENT_IR_PHYSICAL_DISK:
1261 		if (!ioc->hide_ir_msg)
1262 			desc = "IR Physical Disk";
1263 		break;
1264 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1265 		if (!ioc->hide_ir_msg)
1266 			desc = "IR Configuration Change List";
1267 		break;
1268 	case MPI2_EVENT_LOG_ENTRY_ADDED:
1269 		if (!ioc->hide_ir_msg)
1270 			desc = "Log Entry Added";
1271 		break;
1272 	case MPI2_EVENT_TEMP_THRESHOLD:
1273 		desc = "Temperature Threshold";
1274 		break;
1275 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1276 		desc = "Cable Event";
1277 		break;
1278 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1279 		desc = "SAS Device Discovery Error";
1280 		break;
1281 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1282 		desc = "PCIE Device Status Change";
1283 		break;
1284 	case MPI2_EVENT_PCIE_ENUMERATION:
1285 	{
1286 		Mpi26EventDataPCIeEnumeration_t *event_data =
1287 			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1288 		ioc_info(ioc, "PCIE Enumeration: (%s)",
1289 			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
1290 			 "start" : "stop");
1291 		if (event_data->EnumerationStatus)
1292 			pr_cont("enumeration_status(0x%08x)",
1293 				le32_to_cpu(event_data->EnumerationStatus));
1294 		pr_cont("\n");
1295 		return;
1296 	}
1297 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1298 		desc = "PCIE Topology Change List";
1299 		break;
1300 	}
1301 
1302 	if (!desc)
1303 		return;
1304 
1305 	ioc_info(ioc, "%s\n", desc);
1306 }
1307 
1308 /**
1309  * _base_sas_log_info - verbose translation of firmware log info
1310  * @ioc: per adapter object
1311  * @log_info: log info
1312  */
1313 static void
1314 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
1315 {
1316 	union loginfo_type {
1317 		u32	loginfo;
1318 		struct {
1319 			u32	subcode:16;
1320 			u32	code:8;
1321 			u32	originator:4;
1322 			u32	bus_type:4;
1323 		} dw;
1324 	};
1325 	union loginfo_type sas_loginfo;
1326 	char *originator_str = NULL;
1327 
1328 	sas_loginfo.loginfo = log_info;
1329 	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1330 		return;
1331 
1332 	/* each nexus loss loginfo */
1333 	if (log_info == 0x31170000)
1334 		return;
1335 
1336 	/* eat the loginfos associated with task aborts */
1337 	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1338 	    0x31140000 || log_info == 0x31130000))
1339 		return;
1340 
1341 	switch (sas_loginfo.dw.originator) {
1342 	case 0:
1343 		originator_str = "IOP";
1344 		break;
1345 	case 1:
1346 		originator_str = "PL";
1347 		break;
1348 	case 2:
1349 		if (!ioc->hide_ir_msg)
1350 			originator_str = "IR";
1351 		else
1352 			originator_str = "WarpDrive";
1353 		break;
1354 	}
1355 
1356 	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1357 		 log_info,
1358 		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1359 }
1360 
1361 /**
1362  * _base_display_reply_info -
1363  * @ioc: per adapter object
1364  * @smid: system request message index
1365  * @msix_index: MSIX table index supplied by the OS
1366  * @reply: reply message frame(lower 32bit addr)
1367  */
1368 static void
1369 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1370 	u32 reply)
1371 {
1372 	MPI2DefaultReply_t *mpi_reply;
1373 	u16 ioc_status;
1374 	u32 loginfo = 0;
1375 
1376 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1377 	if (unlikely(!mpi_reply)) {
1378 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1379 			__FILE__, __LINE__, __func__);
1380 		return;
1381 	}
1382 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1383 
1384 	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1385 	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
1386 		_base_sas_ioc_info(ioc , mpi_reply,
1387 		   mpt3sas_base_get_msg_frame(ioc, smid));
1388 	}
1389 
1390 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1391 		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1392 		_base_sas_log_info(ioc, loginfo);
1393 	}
1394 
1395 	if (ioc_status || loginfo) {
1396 		ioc_status &= MPI2_IOCSTATUS_MASK;
1397 		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1398 	}
1399 }
1400 
1401 /**
1402  * mpt3sas_base_done - base internal command completion routine
1403  * @ioc: per adapter object
1404  * @smid: system request message index
1405  * @msix_index: MSIX table index supplied by the OS
1406  * @reply: reply message frame(lower 32bit addr)
1407  *
1408  * Return:
1409  * 1 meaning mf should be freed from _base_interrupt
1410  * 0 means the mf is freed from this function.
1411  */
1412 u8
1413 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1414 	u32 reply)
1415 {
1416 	MPI2DefaultReply_t *mpi_reply;
1417 
1418 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1419 	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1420 		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1421 
1422 	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1423 		return 1;
1424 
1425 	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1426 	if (mpi_reply) {
1427 		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1428 		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1429 	}
1430 	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1431 
1432 	complete(&ioc->base_cmds.done);
1433 	return 1;
1434 }
1435 
1436 /**
1437  * _base_async_event - main callback handler for firmware asyn events
1438  * @ioc: per adapter object
1439  * @msix_index: MSIX table index supplied by the OS
1440  * @reply: reply message frame(lower 32bit addr)
1441  *
1442  * Return:
1443  * 1 meaning mf should be freed from _base_interrupt
1444  * 0 means the mf is freed from this function.
1445  */
1446 static u8
1447 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1448 {
1449 	Mpi2EventNotificationReply_t *mpi_reply;
1450 	Mpi2EventAckRequest_t *ack_request;
1451 	u16 smid;
1452 	struct _event_ack_list *delayed_event_ack;
1453 
1454 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1455 	if (!mpi_reply)
1456 		return 1;
1457 	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1458 		return 1;
1459 
1460 	_base_display_event_data(ioc, mpi_reply);
1461 
1462 	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1463 		goto out;
1464 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1465 	if (!smid) {
1466 		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1467 					GFP_ATOMIC);
1468 		if (!delayed_event_ack)
1469 			goto out;
1470 		INIT_LIST_HEAD(&delayed_event_ack->list);
1471 		delayed_event_ack->Event = mpi_reply->Event;
1472 		delayed_event_ack->EventContext = mpi_reply->EventContext;
1473 		list_add_tail(&delayed_event_ack->list,
1474 				&ioc->delayed_event_ack_list);
1475 		dewtprintk(ioc,
1476 			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1477 				    le16_to_cpu(mpi_reply->Event)));
1478 		goto out;
1479 	}
1480 
1481 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1482 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1483 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1484 	ack_request->Event = mpi_reply->Event;
1485 	ack_request->EventContext = mpi_reply->EventContext;
1486 	ack_request->VF_ID = 0;  /* TODO */
1487 	ack_request->VP_ID = 0;
1488 	ioc->put_smid_default(ioc, smid);
1489 
1490  out:
1491 
1492 	/* scsih callback handler */
1493 	mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1494 
1495 	/* ctl callback handler */
1496 	mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1497 
1498 	return 1;
1499 }
1500 
1501 static struct scsiio_tracker *
1502 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1503 {
1504 	struct scsi_cmnd *cmd;
1505 
1506 	if (WARN_ON(!smid) ||
1507 	    WARN_ON(smid >= ioc->hi_priority_smid))
1508 		return NULL;
1509 
1510 	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1511 	if (cmd)
1512 		return scsi_cmd_priv(cmd);
1513 
1514 	return NULL;
1515 }
1516 
1517 /**
1518  * _base_get_cb_idx - obtain the callback index
1519  * @ioc: per adapter object
1520  * @smid: system request message index
1521  *
1522  * Return: callback index.
1523  */
1524 static u8
1525 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1526 {
1527 	int i;
1528 	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1529 	u8 cb_idx = 0xFF;
1530 
1531 	if (smid < ioc->hi_priority_smid) {
1532 		struct scsiio_tracker *st;
1533 
1534 		if (smid < ctl_smid) {
1535 			st = _get_st_from_smid(ioc, smid);
1536 			if (st)
1537 				cb_idx = st->cb_idx;
1538 		} else if (smid == ctl_smid)
1539 			cb_idx = ioc->ctl_cb_idx;
1540 	} else if (smid < ioc->internal_smid) {
1541 		i = smid - ioc->hi_priority_smid;
1542 		cb_idx = ioc->hpr_lookup[i].cb_idx;
1543 	} else if (smid <= ioc->hba_queue_depth) {
1544 		i = smid - ioc->internal_smid;
1545 		cb_idx = ioc->internal_lookup[i].cb_idx;
1546 	}
1547 	return cb_idx;
1548 }
1549 
1550 /**
1551  * mpt3sas_base_mask_interrupts - disable interrupts
1552  * @ioc: per adapter object
1553  *
1554  * Disabling ResetIRQ, Reply and Doorbell Interrupts
1555  */
1556 void
1557 mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1558 {
1559 	u32 him_register;
1560 
1561 	ioc->mask_interrupts = 1;
1562 	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1563 	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1564 	writel(him_register, &ioc->chip->HostInterruptMask);
1565 	ioc->base_readl(&ioc->chip->HostInterruptMask);
1566 }
1567 
1568 /**
1569  * mpt3sas_base_unmask_interrupts - enable interrupts
1570  * @ioc: per adapter object
1571  *
1572  * Enabling only Reply Interrupts
1573  */
1574 void
1575 mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1576 {
1577 	u32 him_register;
1578 
1579 	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1580 	him_register &= ~MPI2_HIM_RIM;
1581 	writel(him_register, &ioc->chip->HostInterruptMask);
1582 	ioc->mask_interrupts = 0;
1583 }
1584 
/* 64-bit reply descriptor posted by the IOC, accessible either as a
 * single word or as its low/high 32-bit halves.
 */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
1592 
1593 static u32 base_mod64(u64 dividend, u32 divisor)
1594 {
1595 	u32 remainder;
1596 
1597 	if (!divisor)
1598 		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1599 	remainder = do_div(dividend, divisor);
1600 	return remainder;
1601 }
1602 
1603 /**
1604  * _base_process_reply_queue - Process reply descriptors from reply
1605  *		descriptor post queue.
1606  * @reply_q: per IRQ's reply queue object.
1607  *
1608  * Return: number of reply descriptors processed from reply
1609  *		descriptor queue.
1610  */
1611 static int
1612 _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1613 {
1614 	union reply_descriptor rd;
1615 	u64 completed_cmds;
1616 	u8 request_descript_type;
1617 	u16 smid;
1618 	u8 cb_idx;
1619 	u32 reply;
1620 	u8 msix_index = reply_q->msix_index;
1621 	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1622 	Mpi2ReplyDescriptorsUnion_t *rpf;
1623 	u8 rc;
1624 
1625 	completed_cmds = 0;
1626 	if (!atomic_add_unless(&reply_q->busy, 1, 1))
1627 		return completed_cmds;
1628 
1629 	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1630 	request_descript_type = rpf->Default.ReplyFlags
1631 	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1632 	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1633 		atomic_dec(&reply_q->busy);
1634 		return completed_cmds;
1635 	}
1636 
1637 	cb_idx = 0xFF;
1638 	do {
1639 		rd.word = le64_to_cpu(rpf->Words);
1640 		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1641 			goto out;
1642 		reply = 0;
1643 		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1644 		if (request_descript_type ==
1645 		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1646 		    request_descript_type ==
1647 		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1648 		    request_descript_type ==
1649 		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1650 			cb_idx = _base_get_cb_idx(ioc, smid);
1651 			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1652 			    (likely(mpt_callbacks[cb_idx] != NULL))) {
1653 				rc = mpt_callbacks[cb_idx](ioc, smid,
1654 				    msix_index, 0);
1655 				if (rc)
1656 					mpt3sas_base_free_smid(ioc, smid);
1657 			}
1658 		} else if (request_descript_type ==
1659 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1660 			reply = le32_to_cpu(
1661 			    rpf->AddressReply.ReplyFrameAddress);
1662 			if (reply > ioc->reply_dma_max_address ||
1663 			    reply < ioc->reply_dma_min_address)
1664 				reply = 0;
1665 			if (smid) {
1666 				cb_idx = _base_get_cb_idx(ioc, smid);
1667 				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1668 				    (likely(mpt_callbacks[cb_idx] != NULL))) {
1669 					rc = mpt_callbacks[cb_idx](ioc, smid,
1670 					    msix_index, reply);
1671 					if (reply)
1672 						_base_display_reply_info(ioc,
1673 						    smid, msix_index, reply);
1674 					if (rc)
1675 						mpt3sas_base_free_smid(ioc,
1676 						    smid);
1677 				}
1678 			} else {
1679 				_base_async_event(ioc, msix_index, reply);
1680 			}
1681 
1682 			/* reply free queue handling */
1683 			if (reply) {
1684 				ioc->reply_free_host_index =
1685 				    (ioc->reply_free_host_index ==
1686 				    (ioc->reply_free_queue_depth - 1)) ?
1687 				    0 : ioc->reply_free_host_index + 1;
1688 				ioc->reply_free[ioc->reply_free_host_index] =
1689 				    cpu_to_le32(reply);
1690 				if (ioc->is_mcpu_endpoint)
1691 					_base_clone_reply_to_sys_mem(ioc,
1692 						reply,
1693 						ioc->reply_free_host_index);
1694 				writel(ioc->reply_free_host_index,
1695 				    &ioc->chip->ReplyFreeHostIndex);
1696 			}
1697 		}
1698 
1699 		rpf->Words = cpu_to_le64(ULLONG_MAX);
1700 		reply_q->reply_post_host_index =
1701 		    (reply_q->reply_post_host_index ==
1702 		    (ioc->reply_post_queue_depth - 1)) ? 0 :
1703 		    reply_q->reply_post_host_index + 1;
1704 		request_descript_type =
1705 		    reply_q->reply_post_free[reply_q->reply_post_host_index].
1706 		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1707 		completed_cmds++;
1708 		/* Update the reply post host index after continuously
1709 		 * processing the threshold number of Reply Descriptors.
1710 		 * So that FW can find enough entries to post the Reply
1711 		 * Descriptors in the reply descriptor post queue.
1712 		 */
1713 		if (completed_cmds >= ioc->thresh_hold) {
1714 			if (ioc->combined_reply_queue) {
1715 				writel(reply_q->reply_post_host_index |
1716 						((msix_index  & 7) <<
1717 						 MPI2_RPHI_MSIX_INDEX_SHIFT),
1718 				    ioc->replyPostRegisterIndex[msix_index/8]);
1719 			} else {
1720 				writel(reply_q->reply_post_host_index |
1721 						(msix_index <<
1722 						 MPI2_RPHI_MSIX_INDEX_SHIFT),
1723 						&ioc->chip->ReplyPostHostIndex);
1724 			}
1725 			if (!reply_q->irq_poll_scheduled) {
1726 				reply_q->irq_poll_scheduled = true;
1727 				irq_poll_sched(&reply_q->irqpoll);
1728 			}
1729 			atomic_dec(&reply_q->busy);
1730 			return completed_cmds;
1731 		}
1732 		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1733 			goto out;
1734 		if (!reply_q->reply_post_host_index)
1735 			rpf = reply_q->reply_post_free;
1736 		else
1737 			rpf++;
1738 	} while (1);
1739 
1740  out:
1741 
1742 	if (!completed_cmds) {
1743 		atomic_dec(&reply_q->busy);
1744 		return completed_cmds;
1745 	}
1746 
1747 	if (ioc->is_warpdrive) {
1748 		writel(reply_q->reply_post_host_index,
1749 		ioc->reply_post_host_index[msix_index]);
1750 		atomic_dec(&reply_q->busy);
1751 		return completed_cmds;
1752 	}
1753 
1754 	/* Update Reply Post Host Index.
1755 	 * For those HBA's which support combined reply queue feature
1756 	 * 1. Get the correct Supplemental Reply Post Host Index Register.
1757 	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1758 	 *    Index Register address bank i.e replyPostRegisterIndex[],
1759 	 * 2. Then update this register with new reply host index value
1760 	 *    in ReplyPostIndex field and the MSIxIndex field with
1761 	 *    msix_index value reduced to a value between 0 and 7,
1762 	 *    using a modulo 8 operation. Since each Supplemental Reply Post
1763 	 *    Host Index Register supports 8 MSI-X vectors.
1764 	 *
1765 	 * For other HBA's just update the Reply Post Host Index register with
1766 	 * new reply host index value in ReplyPostIndex Field and msix_index
1767 	 * value in MSIxIndex field.
1768 	 */
1769 	if (ioc->combined_reply_queue)
1770 		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1771 			MPI2_RPHI_MSIX_INDEX_SHIFT),
1772 			ioc->replyPostRegisterIndex[msix_index/8]);
1773 	else
1774 		writel(reply_q->reply_post_host_index | (msix_index <<
1775 			MPI2_RPHI_MSIX_INDEX_SHIFT),
1776 			&ioc->chip->ReplyPostHostIndex);
1777 	atomic_dec(&reply_q->busy);
1778 	return completed_cmds;
1779 }
1780 
1781 /**
1782  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1783  * @irq: irq number (not used)
1784  * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
1785  *
1786  * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1787  */
1788 static irqreturn_t
1789 _base_interrupt(int irq, void *bus_id)
1790 {
1791 	struct adapter_reply_queue *reply_q = bus_id;
1792 	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1793 
1794 	if (ioc->mask_interrupts)
1795 		return IRQ_NONE;
1796 	if (reply_q->irq_poll_scheduled)
1797 		return IRQ_HANDLED;
1798 	return ((_base_process_reply_queue(reply_q) > 0) ?
1799 			IRQ_HANDLED : IRQ_NONE);
1800 }
1801 
1802 /**
1803  * _base_irqpoll - IRQ poll callback handler
1804  * @irqpoll: irq_poll object
1805  * @budget: irq poll weight
1806  *
1807  * returns number of reply descriptors processed
1808  */
1809 static int
1810 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1811 {
1812 	struct adapter_reply_queue *reply_q;
1813 	int num_entries = 0;
1814 
1815 	reply_q = container_of(irqpoll, struct adapter_reply_queue,
1816 			irqpoll);
1817 	if (reply_q->irq_line_enable) {
1818 		disable_irq_nosync(reply_q->os_irq);
1819 		reply_q->irq_line_enable = false;
1820 	}
1821 	num_entries = _base_process_reply_queue(reply_q);
1822 	if (num_entries < budget) {
1823 		irq_poll_complete(irqpoll);
1824 		reply_q->irq_poll_scheduled = false;
1825 		reply_q->irq_line_enable = true;
1826 		enable_irq(reply_q->os_irq);
1827 		/*
1828 		 * Go for one more round of processing the
1829 		 * reply descriptor post queue incase if HBA
1830 		 * Firmware has posted some reply descriptors
1831 		 * while reenabling the IRQ.
1832 		 */
1833 		_base_process_reply_queue(reply_q);
1834 	}
1835 
1836 	return num_entries;
1837 }
1838 
1839 /**
1840  * _base_init_irqpolls - initliaze IRQ polls
1841  * @ioc: per adapter object
1842  *
1843  * returns nothing
1844  */
1845 static void
1846 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1847 {
1848 	struct adapter_reply_queue *reply_q, *next;
1849 
1850 	if (list_empty(&ioc->reply_queue_list))
1851 		return;
1852 
1853 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1854 		irq_poll_init(&reply_q->irqpoll,
1855 			ioc->hba_queue_depth/4, _base_irqpoll);
1856 		reply_q->irq_poll_scheduled = false;
1857 		reply_q->irq_line_enable = true;
1858 		reply_q->os_irq = pci_irq_vector(ioc->pdev,
1859 		    reply_q->msix_index);
1860 	}
1861 }
1862 
1863 /**
1864  * _base_is_controller_msix_enabled - is controller support muli-reply queues
1865  * @ioc: per adapter object
1866  *
1867  * Return: Whether or not MSI/X is enabled.
1868  */
1869 static inline int
1870 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1871 {
1872 	return (ioc->facts.IOCCapabilities &
1873 	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1874 }
1875 
1876 /**
1877  * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1878  * @ioc: per adapter object
1879  * @poll: poll over reply descriptor pools incase interrupt for
1880  *		timed-out SCSI command got delayed
1881  * Context: non ISR conext
1882  *
1883  * Called when a Task Management request has completed.
1884  */
1885 void
1886 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1887 {
1888 	struct adapter_reply_queue *reply_q;
1889 
1890 	/* If MSIX capability is turned off
1891 	 * then multi-queues are not enabled
1892 	 */
1893 	if (!_base_is_controller_msix_enabled(ioc))
1894 		return;
1895 
1896 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1897 		if (ioc->shost_recovery || ioc->remove_host ||
1898 				ioc->pci_error_recovery)
1899 			return;
1900 		/* TMs are on msix_index == 0 */
1901 		if (reply_q->msix_index == 0)
1902 			continue;
1903 		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1904 		if (reply_q->irq_poll_scheduled) {
1905 			/* Calling irq_poll_disable will wait for any pending
1906 			 * callbacks to have completed.
1907 			 */
1908 			irq_poll_disable(&reply_q->irqpoll);
1909 			irq_poll_enable(&reply_q->irqpoll);
1910 			/* check how the scheduled poll has ended,
1911 			 * clean up only if necessary
1912 			 */
1913 			if (reply_q->irq_poll_scheduled) {
1914 				reply_q->irq_poll_scheduled = false;
1915 				reply_q->irq_line_enable = true;
1916 				enable_irq(reply_q->os_irq);
1917 			}
1918 		}
1919 	}
1920 	if (poll)
1921 		_base_process_reply_queue(reply_q);
1922 }
1923 
1924 /**
1925  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1926  * @cb_idx: callback index
1927  */
1928 void
1929 mpt3sas_base_release_callback_handler(u8 cb_idx)
1930 {
1931 	mpt_callbacks[cb_idx] = NULL;
1932 }
1933 
1934 /**
1935  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1936  * @cb_func: callback function
1937  *
1938  * Return: Index of @cb_func.
1939  */
1940 u8
1941 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1942 {
1943 	u8 cb_idx;
1944 
1945 	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1946 		if (mpt_callbacks[cb_idx] == NULL)
1947 			break;
1948 
1949 	mpt_callbacks[cb_idx] = cb_func;
1950 	return cb_idx;
1951 }
1952 
1953 /**
1954  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1955  */
1956 void
1957 mpt3sas_base_initialize_callback_handler(void)
1958 {
1959 	u8 cb_idx;
1960 
1961 	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1962 		mpt3sas_base_release_callback_handler(cb_idx);
1963 }
1964 
1965 
1966 /**
1967  * _base_build_zero_len_sge - build zero length sg entry
1968  * @ioc: per adapter object
1969  * @paddr: virtual address for SGE
1970  *
1971  * Create a zero length scatter gather entry to insure the IOCs hardware has
1972  * something to use if the target device goes brain dead and tries
1973  * to send data even when none is asked for.
1974  */
1975 static void
1976 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1977 {
1978 	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1979 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1980 	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1981 	    MPI2_SGE_FLAGS_SHIFT);
1982 	ioc->base_add_sg_single(paddr, flags_length, -1);
1983 }
1984 
1985 /**
1986  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1987  * @paddr: virtual address for SGE
1988  * @flags_length: SGE flags and data transfer length
1989  * @dma_addr: Physical address
1990  */
1991 static void
1992 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1993 {
1994 	Mpi2SGESimple32_t *sgel = paddr;
1995 
1996 	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1997 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1998 	sgel->FlagsLength = cpu_to_le32(flags_length);
1999 	sgel->Address = cpu_to_le32(dma_addr);
2000 }
2001 
2002 
2003 /**
2004  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
2005  * @paddr: virtual address for SGE
2006  * @flags_length: SGE flags and data transfer length
2007  * @dma_addr: Physical address
2008  */
2009 static void
2010 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2011 {
2012 	Mpi2SGESimple64_t *sgel = paddr;
2013 
2014 	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2015 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2016 	sgel->FlagsLength = cpu_to_le32(flags_length);
2017 	sgel->Address = cpu_to_le64(dma_addr);
2018 }
2019 
2020 /**
2021  * _base_get_chain_buffer_tracker - obtain chain tracker
2022  * @ioc: per adapter object
2023  * @scmd: SCSI commands of the IO request
2024  *
2025  * Return: chain tracker from chain_lookup table using key as
2026  * smid and smid's chain_offset.
2027  */
2028 static struct chain_tracker *
2029 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
2030 			       struct scsi_cmnd *scmd)
2031 {
2032 	struct chain_tracker *chain_req;
2033 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2034 	u16 smid = st->smid;
2035 	u8 chain_offset =
2036 	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
2037 
2038 	if (chain_offset == ioc->chains_needed_per_io)
2039 		return NULL;
2040 
2041 	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
2042 	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
2043 	return chain_req;
2044 }
2045 
2046 
2047 /**
2048  * _base_build_sg - build generic sg
2049  * @ioc: per adapter object
2050  * @psge: virtual address for SGE
2051  * @data_out_dma: physical address for WRITES
2052  * @data_out_sz: data xfer size for WRITES
2053  * @data_in_dma: physical address for READS
2054  * @data_in_sz: data xfer size for READS
2055  */
2056 static void
2057 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
2058 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2059 	size_t data_in_sz)
2060 {
2061 	u32 sgl_flags;
2062 
2063 	if (!data_out_sz && !data_in_sz) {
2064 		_base_build_zero_len_sge(ioc, psge);
2065 		return;
2066 	}
2067 
2068 	if (data_out_sz && data_in_sz) {
2069 		/* WRITE sgel first */
2070 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2071 		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2072 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2073 		ioc->base_add_sg_single(psge, sgl_flags |
2074 		    data_out_sz, data_out_dma);
2075 
2076 		/* incr sgel */
2077 		psge += ioc->sge_size;
2078 
2079 		/* READ sgel last */
2080 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2081 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2082 		    MPI2_SGE_FLAGS_END_OF_LIST);
2083 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2084 		ioc->base_add_sg_single(psge, sgl_flags |
2085 		    data_in_sz, data_in_dma);
2086 	} else if (data_out_sz) /* WRITE */ {
2087 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2088 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2089 		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2090 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2091 		ioc->base_add_sg_single(psge, sgl_flags |
2092 		    data_out_sz, data_out_dma);
2093 	} else if (data_in_sz) /* READ */ {
2094 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2095 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2096 		    MPI2_SGE_FLAGS_END_OF_LIST);
2097 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2098 		ioc->base_add_sg_single(psge, sgl_flags |
2099 		    data_in_sz, data_in_dma);
2100 	}
2101 }
2102 
2103 /* IEEE format sgls */
2104 
2105 /**
2106  * _base_build_nvme_prp - This function is called for NVMe end devices to build
2107  * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
2108  * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
2109  * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
2110  * used to describe a larger data buffer.  If the data buffer is too large to
2111  * describe using the two PRP entriess inside the NVMe message, then PRP1
2112  * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2113  * list located elsewhere in memory to describe the remaining data memory
2114  * segments.  The PRP list will be contiguous.
2115  *
2116  * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
2117  * consists of a list of PRP entries to describe a number of noncontigous
2118  * physical memory segments as a single memory buffer, just as a SGL does.  Note
2119  * however, that this function is only used by the IOCTL call, so the memory
2120  * given will be guaranteed to be contiguous.  There is no need to translate
2121  * non-contiguous SGL into a PRP in this case.  All PRPs will describe
2122  * contiguous space that is one page size each.
2123  *
2124  * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
2125  * a PRP list pointer or a PRP element, depending upon the command.  PRP2
2126  * contains the second PRP element if the memory being described fits within 2
2127  * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2128  *
2129  * A PRP list pointer contains the address of a PRP list, structured as a linear
2130  * array of PRP entries.  Each PRP entry in this list describes a segment of
2131  * physical memory.
2132  *
2133  * Each 64-bit PRP entry comprises an address and an offset field.  The address
2134  * always points at the beginning of a 4KB physical memory page, and the offset
2135  * describes where within that 4KB page the memory segment begins.  Only the
2136  * first element in a PRP list may contain a non-zero offest, implying that all
2137  * memory segments following the first begin at the start of a 4KB page.
2138  *
2139  * Each PRP element normally describes 4KB of physical memory, with exceptions
2140  * for the first and last elements in the list.  If the memory being described
2141  * by the list begins at a non-zero offset within the first 4KB page, then the
2142  * first PRP element will contain a non-zero offset indicating where the region
2143  * begins within the 4KB page.  The last memory segment may end before the end
2144  * of the 4KB segment, depending upon the overall size of the memory being
2145  * described by the PRP list.
2146  *
2147  * Since PRP entries lack any indication of size, the overall data buffer length
2148  * is used to determine where the end of the data memory buffer is located, and
2149  * how many PRP entries are required to describe it.
2150  *
2151  * @ioc: per adapter object
2152  * @smid: system request message index for getting asscociated SGL
2153  * @nvme_encap_request: the NVMe request msg frame pointer
2154  * @data_out_dma: physical address for WRITES
2155  * @data_out_sz: data xfer size for WRITES
2156  * @data_in_dma: physical address for READS
2157  * @data_in_sz: data xfer size for READS
2158  */
2159 static void
2160 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2161 	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2162 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2163 	size_t data_in_sz)
2164 {
2165 	int		prp_size = NVME_PRP_SIZE;
2166 	__le64		*prp_entry, *prp1_entry, *prp2_entry;
2167 	__le64		*prp_page;
2168 	dma_addr_t	prp_entry_dma, prp_page_dma, dma_addr;
2169 	u32		offset, entry_len;
2170 	u32		page_mask_result, page_mask;
2171 	size_t		length;
2172 	struct mpt3sas_nvme_cmd *nvme_cmd =
2173 		(void *)nvme_encap_request->NVMe_Command;
2174 
2175 	/*
2176 	 * Not all commands require a data transfer. If no data, just return
2177 	 * without constructing any PRP.
2178 	 */
2179 	if (!data_in_sz && !data_out_sz)
2180 		return;
2181 	prp1_entry = &nvme_cmd->prp1;
2182 	prp2_entry = &nvme_cmd->prp2;
2183 	prp_entry = prp1_entry;
2184 	/*
2185 	 * For the PRP entries, use the specially allocated buffer of
2186 	 * contiguous memory.
2187 	 */
2188 	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2189 	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2190 
2191 	/*
2192 	 * Check if we are within 1 entry of a page boundary we don't
2193 	 * want our first entry to be a PRP List entry.
2194 	 */
2195 	page_mask = ioc->page_size - 1;
2196 	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2197 	if (!page_mask_result) {
2198 		/* Bump up to next page boundary. */
2199 		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2200 		prp_page_dma = prp_page_dma + prp_size;
2201 	}
2202 
2203 	/*
2204 	 * Set PRP physical pointer, which initially points to the current PRP
2205 	 * DMA memory page.
2206 	 */
2207 	prp_entry_dma = prp_page_dma;
2208 
2209 	/* Get physical address and length of the data buffer. */
2210 	if (data_in_sz) {
2211 		dma_addr = data_in_dma;
2212 		length = data_in_sz;
2213 	} else {
2214 		dma_addr = data_out_dma;
2215 		length = data_out_sz;
2216 	}
2217 
2218 	/* Loop while the length is not zero. */
2219 	while (length) {
2220 		/*
2221 		 * Check if we need to put a list pointer here if we are at
2222 		 * page boundary - prp_size (8 bytes).
2223 		 */
2224 		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2225 		if (!page_mask_result) {
2226 			/*
2227 			 * This is the last entry in a PRP List, so we need to
2228 			 * put a PRP list pointer here.  What this does is:
2229 			 *   - bump the current memory pointer to the next
2230 			 *     address, which will be the next full page.
2231 			 *   - set the PRP Entry to point to that page.  This
2232 			 *     is now the PRP List pointer.
2233 			 *   - bump the PRP Entry pointer the start of the
2234 			 *     next page.  Since all of this PRP memory is
2235 			 *     contiguous, no need to get a new page - it's
2236 			 *     just the next address.
2237 			 */
2238 			prp_entry_dma++;
2239 			*prp_entry = cpu_to_le64(prp_entry_dma);
2240 			prp_entry++;
2241 		}
2242 
2243 		/* Need to handle if entry will be part of a page. */
2244 		offset = dma_addr & page_mask;
2245 		entry_len = ioc->page_size - offset;
2246 
2247 		if (prp_entry == prp1_entry) {
2248 			/*
2249 			 * Must fill in the first PRP pointer (PRP1) before
2250 			 * moving on.
2251 			 */
2252 			*prp1_entry = cpu_to_le64(dma_addr);
2253 
2254 			/*
2255 			 * Now point to the second PRP entry within the
2256 			 * command (PRP2).
2257 			 */
2258 			prp_entry = prp2_entry;
2259 		} else if (prp_entry == prp2_entry) {
2260 			/*
2261 			 * Should the PRP2 entry be a PRP List pointer or just
2262 			 * a regular PRP pointer?  If there is more than one
2263 			 * more page of data, must use a PRP List pointer.
2264 			 */
2265 			if (length > ioc->page_size) {
2266 				/*
2267 				 * PRP2 will contain a PRP List pointer because
2268 				 * more PRP's are needed with this command. The
2269 				 * list will start at the beginning of the
2270 				 * contiguous buffer.
2271 				 */
2272 				*prp2_entry = cpu_to_le64(prp_entry_dma);
2273 
2274 				/*
2275 				 * The next PRP Entry will be the start of the
2276 				 * first PRP List.
2277 				 */
2278 				prp_entry = prp_page;
2279 			} else {
2280 				/*
2281 				 * After this, the PRP Entries are complete.
2282 				 * This command uses 2 PRP's and no PRP list.
2283 				 */
2284 				*prp2_entry = cpu_to_le64(dma_addr);
2285 			}
2286 		} else {
2287 			/*
2288 			 * Put entry in list and bump the addresses.
2289 			 *
2290 			 * After PRP1 and PRP2 are filled in, this will fill in
2291 			 * all remaining PRP entries in a PRP List, one per
2292 			 * each time through the loop.
2293 			 */
2294 			*prp_entry = cpu_to_le64(dma_addr);
2295 			prp_entry++;
2296 			prp_entry_dma++;
2297 		}
2298 
2299 		/*
2300 		 * Bump the phys address of the command's data buffer by the
2301 		 * entry_len.
2302 		 */
2303 		dma_addr += entry_len;
2304 
2305 		/* Decrement length accounting for last partial page. */
2306 		if (entry_len > length)
2307 			length = 0;
2308 		else
2309 			length -= entry_len;
2310 	}
2311 }
2312 
2313 /**
2314  * base_make_prp_nvme -
2315  * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
2316  *
2317  * @ioc:		per adapter object
2318  * @scmd:		SCSI command from the mid-layer
2319  * @mpi_request:	mpi request
2320  * @smid:		msg Index
2321  * @sge_count:		scatter gather element count.
2322  *
2323  * Return:		true: PRPs are built
2324  *			false: IEEE SGLs needs to be built
2325  */
2326 static void
2327 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2328 		struct scsi_cmnd *scmd,
2329 		Mpi25SCSIIORequest_t *mpi_request,
2330 		u16 smid, int sge_count)
2331 {
2332 	int sge_len, num_prp_in_chain = 0;
2333 	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2334 	__le64 *curr_buff;
2335 	dma_addr_t msg_dma, sge_addr, offset;
2336 	u32 page_mask, page_mask_result;
2337 	struct scatterlist *sg_scmd;
2338 	u32 first_prp_len;
2339 	int data_len = scsi_bufflen(scmd);
2340 	u32 nvme_pg_size;
2341 
2342 	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2343 	/*
2344 	 * Nvme has a very convoluted prp format.  One prp is required
2345 	 * for each page or partial page. Driver need to split up OS sg_list
2346 	 * entries if it is longer than one page or cross a page
2347 	 * boundary.  Driver also have to insert a PRP list pointer entry as
2348 	 * the last entry in each physical page of the PRP list.
2349 	 *
2350 	 * NOTE: The first PRP "entry" is actually placed in the first
2351 	 * SGL entry in the main message as IEEE 64 format.  The 2nd
2352 	 * entry in the main message is the chain element, and the rest
2353 	 * of the PRP entries are built in the contiguous pcie buffer.
2354 	 */
2355 	page_mask = nvme_pg_size - 1;
2356 
2357 	/*
2358 	 * Native SGL is needed.
2359 	 * Put a chain element in main message frame that points to the first
2360 	 * chain buffer.
2361 	 *
2362 	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2363 	 *        a native SGL.
2364 	 */
2365 
2366 	/* Set main message chain element pointer */
2367 	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2368 	/*
2369 	 * For NVMe the chain element needs to be the 2nd SG entry in the main
2370 	 * message.
2371 	 */
2372 	main_chain_element = (Mpi25IeeeSgeChain64_t *)
2373 		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2374 
2375 	/*
2376 	 * For the PRP entries, use the specially allocated buffer of
2377 	 * contiguous memory.  Normal chain buffers can't be used
2378 	 * because each chain buffer would need to be the size of an OS
2379 	 * page (4k).
2380 	 */
2381 	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2382 	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2383 
2384 	main_chain_element->Address = cpu_to_le64(msg_dma);
2385 	main_chain_element->NextChainOffset = 0;
2386 	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2387 			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2388 			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2389 
2390 	/* Build first prp, sge need not to be page aligned*/
2391 	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2392 	sg_scmd = scsi_sglist(scmd);
2393 	sge_addr = sg_dma_address(sg_scmd);
2394 	sge_len = sg_dma_len(sg_scmd);
2395 
2396 	offset = sge_addr & page_mask;
2397 	first_prp_len = nvme_pg_size - offset;
2398 
2399 	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2400 	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2401 
2402 	data_len -= first_prp_len;
2403 
2404 	if (sge_len > first_prp_len) {
2405 		sge_addr += first_prp_len;
2406 		sge_len -= first_prp_len;
2407 	} else if (data_len && (sge_len == first_prp_len)) {
2408 		sg_scmd = sg_next(sg_scmd);
2409 		sge_addr = sg_dma_address(sg_scmd);
2410 		sge_len = sg_dma_len(sg_scmd);
2411 	}
2412 
2413 	for (;;) {
2414 		offset = sge_addr & page_mask;
2415 
2416 		/* Put PRP pointer due to page boundary*/
2417 		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2418 		if (unlikely(!page_mask_result)) {
2419 			scmd_printk(KERN_NOTICE,
2420 				scmd, "page boundary curr_buff: 0x%p\n",
2421 				curr_buff);
2422 			msg_dma += 8;
2423 			*curr_buff = cpu_to_le64(msg_dma);
2424 			curr_buff++;
2425 			num_prp_in_chain++;
2426 		}
2427 
2428 		*curr_buff = cpu_to_le64(sge_addr);
2429 		curr_buff++;
2430 		msg_dma += 8;
2431 		num_prp_in_chain++;
2432 
2433 		sge_addr += nvme_pg_size;
2434 		sge_len -= nvme_pg_size;
2435 		data_len -= nvme_pg_size;
2436 
2437 		if (data_len <= 0)
2438 			break;
2439 
2440 		if (sge_len > 0)
2441 			continue;
2442 
2443 		sg_scmd = sg_next(sg_scmd);
2444 		sge_addr = sg_dma_address(sg_scmd);
2445 		sge_len = sg_dma_len(sg_scmd);
2446 	}
2447 
2448 	main_chain_element->Length =
2449 		cpu_to_le32(num_prp_in_chain * sizeof(u64));
2450 	return;
2451 }
2452 
2453 static bool
2454 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2455 	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2456 {
2457 	u32 data_length = 0;
2458 	bool build_prp = true;
2459 
2460 	data_length = scsi_bufflen(scmd);
2461 	if (pcie_device &&
2462 	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2463 		build_prp = false;
2464 		return build_prp;
2465 	}
2466 
2467 	/* If Datalenth is <= 16K and number of SGE’s entries are <= 2
2468 	 * we built IEEE SGL
2469 	 */
2470 	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2471 		build_prp = false;
2472 
2473 	return build_prp;
2474 }
2475 
2476 /**
2477  * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2478  * determine if the driver needs to build a native SGL.  If so, that native
2479  * SGL is built in the special contiguous buffers allocated especially for
2480  * PCIe SGL creation.  If the driver will not build a native SGL, return
2481  * TRUE and a normal IEEE SGL will be built.  Currently this routine
2482  * supports NVMe.
2483  * @ioc: per adapter object
2484  * @mpi_request: mf request pointer
2485  * @smid: system request message index
2486  * @scmd: scsi command
2487  * @pcie_device: points to the PCIe device's info
2488  *
2489  * Return: 0 if native SGL was built, 1 if no SGL was built
2490  */
2491 static int
2492 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2493 	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2494 	struct _pcie_device *pcie_device)
2495 {
2496 	int sges_left;
2497 
2498 	/* Get the SG list pointer and info. */
2499 	sges_left = scsi_dma_map(scmd);
2500 	if (sges_left < 0) {
2501 		sdev_printk(KERN_ERR, scmd->device,
2502 			"scsi_dma_map failed: request for %d bytes!\n",
2503 			scsi_bufflen(scmd));
2504 		return 1;
2505 	}
2506 
2507 	/* Check if we need to build a native SG list. */
2508 	if (!base_is_prp_possible(ioc, pcie_device,
2509 				scmd, sges_left)) {
2510 		/* We built a native SG list, just return. */
2511 		goto out;
2512 	}
2513 
2514 	/*
2515 	 * Build native NVMe PRP.
2516 	 */
2517 	base_make_prp_nvme(ioc, scmd, mpi_request,
2518 			smid, sges_left);
2519 
2520 	return 0;
2521 out:
2522 	scsi_dma_unmap(scmd);
2523 	return 1;
2524 }
2525 
2526 /**
2527  * _base_add_sg_single_ieee - add sg element for IEEE format
2528  * @paddr: virtual address for SGE
2529  * @flags: SGE flags
2530  * @chain_offset: number of 128 byte elements from start of segment
2531  * @length: data transfer length
2532  * @dma_addr: Physical address
2533  */
2534 static void
2535 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2536 	dma_addr_t dma_addr)
2537 {
2538 	Mpi25IeeeSgeChain64_t *sgel = paddr;
2539 
2540 	sgel->Flags = flags;
2541 	sgel->NextChainOffset = chain_offset;
2542 	sgel->Length = cpu_to_le32(length);
2543 	sgel->Address = cpu_to_le64(dma_addr);
2544 }
2545 
2546 /**
2547  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2548  * @ioc: per adapter object
2549  * @paddr: virtual address for SGE
2550  *
2551  * Create a zero length scatter gather entry to insure the IOCs hardware has
2552  * something to use if the target device goes brain dead and tries
2553  * to send data even when none is asked for.
2554  */
2555 static void
2556 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2557 {
2558 	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2559 		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2560 		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2561 
2562 	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2563 }
2564 
2565 /**
2566  * _base_build_sg_scmd - main sg creation routine
2567  *		pcie_device is unused here!
2568  * @ioc: per adapter object
2569  * @scmd: scsi command
2570  * @smid: system request message index
2571  * @unused: unused pcie_device pointer
2572  * Context: none.
2573  *
2574  * The main routine that builds scatter gather table from a given
2575  * scsi request sent via the .queuecommand main handler.
2576  *
2577  * Return: 0 success, anything else error
2578  */
2579 static int
2580 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2581 	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2582 {
2583 	Mpi2SCSIIORequest_t *mpi_request;
2584 	dma_addr_t chain_dma;
2585 	struct scatterlist *sg_scmd;
2586 	void *sg_local, *chain;
2587 	u32 chain_offset;
2588 	u32 chain_length;
2589 	u32 chain_flags;
2590 	int sges_left;
2591 	u32 sges_in_segment;
2592 	u32 sgl_flags;
2593 	u32 sgl_flags_last_element;
2594 	u32 sgl_flags_end_buffer;
2595 	struct chain_tracker *chain_req;
2596 
2597 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2598 
2599 	/* init scatter gather flags */
2600 	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2601 	if (scmd->sc_data_direction == DMA_TO_DEVICE)
2602 		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2603 	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2604 	    << MPI2_SGE_FLAGS_SHIFT;
2605 	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2606 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2607 	    << MPI2_SGE_FLAGS_SHIFT;
2608 	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2609 
2610 	sg_scmd = scsi_sglist(scmd);
2611 	sges_left = scsi_dma_map(scmd);
2612 	if (sges_left < 0) {
2613 		sdev_printk(KERN_ERR, scmd->device,
2614 		 "scsi_dma_map failed: request for %d bytes!\n",
2615 		 scsi_bufflen(scmd));
2616 		return -ENOMEM;
2617 	}
2618 
2619 	sg_local = &mpi_request->SGL;
2620 	sges_in_segment = ioc->max_sges_in_main_message;
2621 	if (sges_left <= sges_in_segment)
2622 		goto fill_in_last_segment;
2623 
2624 	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2625 	    (sges_in_segment * ioc->sge_size))/4;
2626 
2627 	/* fill in main message segment when there is a chain following */
2628 	while (sges_in_segment) {
2629 		if (sges_in_segment == 1)
2630 			ioc->base_add_sg_single(sg_local,
2631 			    sgl_flags_last_element | sg_dma_len(sg_scmd),
2632 			    sg_dma_address(sg_scmd));
2633 		else
2634 			ioc->base_add_sg_single(sg_local, sgl_flags |
2635 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2636 		sg_scmd = sg_next(sg_scmd);
2637 		sg_local += ioc->sge_size;
2638 		sges_left--;
2639 		sges_in_segment--;
2640 	}
2641 
2642 	/* initializing the chain flags and pointers */
2643 	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2644 	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2645 	if (!chain_req)
2646 		return -1;
2647 	chain = chain_req->chain_buffer;
2648 	chain_dma = chain_req->chain_buffer_dma;
2649 	do {
2650 		sges_in_segment = (sges_left <=
2651 		    ioc->max_sges_in_chain_message) ? sges_left :
2652 		    ioc->max_sges_in_chain_message;
2653 		chain_offset = (sges_left == sges_in_segment) ?
2654 		    0 : (sges_in_segment * ioc->sge_size)/4;
2655 		chain_length = sges_in_segment * ioc->sge_size;
2656 		if (chain_offset) {
2657 			chain_offset = chain_offset <<
2658 			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
2659 			chain_length += ioc->sge_size;
2660 		}
2661 		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2662 		    chain_length, chain_dma);
2663 		sg_local = chain;
2664 		if (!chain_offset)
2665 			goto fill_in_last_segment;
2666 
2667 		/* fill in chain segments */
2668 		while (sges_in_segment) {
2669 			if (sges_in_segment == 1)
2670 				ioc->base_add_sg_single(sg_local,
2671 				    sgl_flags_last_element |
2672 				    sg_dma_len(sg_scmd),
2673 				    sg_dma_address(sg_scmd));
2674 			else
2675 				ioc->base_add_sg_single(sg_local, sgl_flags |
2676 				    sg_dma_len(sg_scmd),
2677 				    sg_dma_address(sg_scmd));
2678 			sg_scmd = sg_next(sg_scmd);
2679 			sg_local += ioc->sge_size;
2680 			sges_left--;
2681 			sges_in_segment--;
2682 		}
2683 
2684 		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2685 		if (!chain_req)
2686 			return -1;
2687 		chain = chain_req->chain_buffer;
2688 		chain_dma = chain_req->chain_buffer_dma;
2689 	} while (1);
2690 
2691 
2692  fill_in_last_segment:
2693 
2694 	/* fill the last segment */
2695 	while (sges_left) {
2696 		if (sges_left == 1)
2697 			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2698 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2699 		else
2700 			ioc->base_add_sg_single(sg_local, sgl_flags |
2701 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2702 		sg_scmd = sg_next(sg_scmd);
2703 		sg_local += ioc->sge_size;
2704 		sges_left--;
2705 	}
2706 
2707 	return 0;
2708 }
2709 
2710 /**
2711  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2712  * @ioc: per adapter object
2713  * @scmd: scsi command
2714  * @smid: system request message index
2715  * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2716  * constructed on need.
2717  * Context: none.
2718  *
2719  * The main routine that builds scatter gather table from a given
2720  * scsi request sent via the .queuecommand main handler.
2721  *
2722  * Return: 0 success, anything else error
2723  */
2724 static int
2725 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2726 	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2727 {
2728 	Mpi25SCSIIORequest_t *mpi_request;
2729 	dma_addr_t chain_dma;
2730 	struct scatterlist *sg_scmd;
2731 	void *sg_local, *chain;
2732 	u32 chain_offset;
2733 	u32 chain_length;
2734 	int sges_left;
2735 	u32 sges_in_segment;
2736 	u8 simple_sgl_flags;
2737 	u8 simple_sgl_flags_last;
2738 	u8 chain_sgl_flags;
2739 	struct chain_tracker *chain_req;
2740 
2741 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2742 
2743 	/* init scatter gather flags */
2744 	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2745 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2746 	simple_sgl_flags_last = simple_sgl_flags |
2747 	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2748 	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2749 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2750 
2751 	/* Check if we need to build a native SG list. */
2752 	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2753 			smid, scmd, pcie_device) == 0)) {
2754 		/* We built a native SG list, just return. */
2755 		return 0;
2756 	}
2757 
2758 	sg_scmd = scsi_sglist(scmd);
2759 	sges_left = scsi_dma_map(scmd);
2760 	if (sges_left < 0) {
2761 		sdev_printk(KERN_ERR, scmd->device,
2762 			"scsi_dma_map failed: request for %d bytes!\n",
2763 			scsi_bufflen(scmd));
2764 		return -ENOMEM;
2765 	}
2766 
2767 	sg_local = &mpi_request->SGL;
2768 	sges_in_segment = (ioc->request_sz -
2769 		   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2770 	if (sges_left <= sges_in_segment)
2771 		goto fill_in_last_segment;
2772 
2773 	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2774 	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2775 
2776 	/* fill in main message segment when there is a chain following */
2777 	while (sges_in_segment > 1) {
2778 		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2779 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2780 		sg_scmd = sg_next(sg_scmd);
2781 		sg_local += ioc->sge_size_ieee;
2782 		sges_left--;
2783 		sges_in_segment--;
2784 	}
2785 
2786 	/* initializing the pointers */
2787 	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2788 	if (!chain_req)
2789 		return -1;
2790 	chain = chain_req->chain_buffer;
2791 	chain_dma = chain_req->chain_buffer_dma;
2792 	do {
2793 		sges_in_segment = (sges_left <=
2794 		    ioc->max_sges_in_chain_message) ? sges_left :
2795 		    ioc->max_sges_in_chain_message;
2796 		chain_offset = (sges_left == sges_in_segment) ?
2797 		    0 : sges_in_segment;
2798 		chain_length = sges_in_segment * ioc->sge_size_ieee;
2799 		if (chain_offset)
2800 			chain_length += ioc->sge_size_ieee;
2801 		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2802 		    chain_offset, chain_length, chain_dma);
2803 
2804 		sg_local = chain;
2805 		if (!chain_offset)
2806 			goto fill_in_last_segment;
2807 
2808 		/* fill in chain segments */
2809 		while (sges_in_segment) {
2810 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2811 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2812 			sg_scmd = sg_next(sg_scmd);
2813 			sg_local += ioc->sge_size_ieee;
2814 			sges_left--;
2815 			sges_in_segment--;
2816 		}
2817 
2818 		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2819 		if (!chain_req)
2820 			return -1;
2821 		chain = chain_req->chain_buffer;
2822 		chain_dma = chain_req->chain_buffer_dma;
2823 	} while (1);
2824 
2825 
2826  fill_in_last_segment:
2827 
2828 	/* fill the last segment */
2829 	while (sges_left > 0) {
2830 		if (sges_left == 1)
2831 			_base_add_sg_single_ieee(sg_local,
2832 			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2833 			    sg_dma_address(sg_scmd));
2834 		else
2835 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2836 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2837 		sg_scmd = sg_next(sg_scmd);
2838 		sg_local += ioc->sge_size_ieee;
2839 		sges_left--;
2840 	}
2841 
2842 	return 0;
2843 }
2844 
2845 /**
2846  * _base_build_sg_ieee - build generic sg for IEEE format
2847  * @ioc: per adapter object
2848  * @psge: virtual address for SGE
2849  * @data_out_dma: physical address for WRITES
2850  * @data_out_sz: data xfer size for WRITES
2851  * @data_in_dma: physical address for READS
2852  * @data_in_sz: data xfer size for READS
2853  */
2854 static void
2855 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2856 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2857 	size_t data_in_sz)
2858 {
2859 	u8 sgl_flags;
2860 
2861 	if (!data_out_sz && !data_in_sz) {
2862 		_base_build_zero_len_sge_ieee(ioc, psge);
2863 		return;
2864 	}
2865 
2866 	if (data_out_sz && data_in_sz) {
2867 		/* WRITE sgel first */
2868 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2869 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2870 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2871 		    data_out_dma);
2872 
2873 		/* incr sgel */
2874 		psge += ioc->sge_size_ieee;
2875 
2876 		/* READ sgel last */
2877 		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2878 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2879 		    data_in_dma);
2880 	} else if (data_out_sz) /* WRITE */ {
2881 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2882 		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2883 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2884 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2885 		    data_out_dma);
2886 	} else if (data_in_sz) /* READ */ {
2887 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2888 		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2889 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2890 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2891 		    data_in_dma);
2892 	}
2893 }
2894 
/* Convert a count of pages into kilobytes (valid while PAGE_SHIFT >= 10) */
#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2896 
2897 /**
2898  * _base_config_dma_addressing - set dma addressing
2899  * @ioc: per adapter object
2900  * @pdev: PCI device struct
2901  *
2902  * Return: 0 for success, non-zero for failure.
2903  */
2904 static int
2905 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2906 {
2907 	struct sysinfo s;
2908 
2909 	if (ioc->is_mcpu_endpoint ||
2910 	    sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
2911 	    dma_get_required_mask(&pdev->dev) <= 32)
2912 		ioc->dma_mask = 32;
2913 	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2914 	else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
2915 		ioc->dma_mask = 63;
2916 	else
2917 		ioc->dma_mask = 64;
2918 
2919 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
2920 	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
2921 		return -ENODEV;
2922 
2923 	if (ioc->dma_mask > 32) {
2924 		ioc->base_add_sg_single = &_base_add_sg_single_64;
2925 		ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2926 	} else {
2927 		ioc->base_add_sg_single = &_base_add_sg_single_32;
2928 		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2929 	}
2930 
2931 	si_meminfo(&s);
2932 	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2933 		ioc->dma_mask, convert_to_kb(s.totalram));
2934 
2935 	return 0;
2936 }
2937 
2938 /**
2939  * _base_check_enable_msix - checks MSIX capabable.
2940  * @ioc: per adapter object
2941  *
2942  * Check to see if card is capable of MSIX, and set number
2943  * of available msix vectors
2944  */
2945 static int
2946 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2947 {
2948 	int base;
2949 	u16 message_control;
2950 
2951 	/* Check whether controller SAS2008 B0 controller,
2952 	 * if it is SAS2008 B0 controller use IO-APIC instead of MSIX
2953 	 */
2954 	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2955 	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2956 		return -EINVAL;
2957 	}
2958 
2959 	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2960 	if (!base) {
2961 		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2962 		return -EINVAL;
2963 	}
2964 
2965 	/* get msix vector count */
2966 	/* NUMA_IO not supported for older controllers */
2967 	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2968 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2969 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2970 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2971 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2972 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2973 	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2974 		ioc->msix_vector_count = 1;
2975 	else {
2976 		pci_read_config_word(ioc->pdev, base + 2, &message_control);
2977 		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2978 	}
2979 	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2980 				  ioc->msix_vector_count));
2981 	return 0;
2982 }
2983 
2984 /**
2985  * _base_free_irq - free irq
2986  * @ioc: per adapter object
2987  *
2988  * Freeing respective reply_queue from the list.
2989  */
2990 static void
2991 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2992 {
2993 	struct adapter_reply_queue *reply_q, *next;
2994 
2995 	if (list_empty(&ioc->reply_queue_list))
2996 		return;
2997 
2998 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2999 		list_del(&reply_q->list);
3000 		if (ioc->smp_affinity_enable)
3001 			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3002 			    reply_q->msix_index), NULL);
3003 		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
3004 			 reply_q);
3005 		kfree(reply_q);
3006 	}
3007 }
3008 
3009 /**
3010  * _base_request_irq - request irq
3011  * @ioc: per adapter object
3012  * @index: msix index into vector table
3013  *
3014  * Inserting respective reply_queue into the list.
3015  */
3016 static int
3017 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
3018 {
3019 	struct pci_dev *pdev = ioc->pdev;
3020 	struct adapter_reply_queue *reply_q;
3021 	int r;
3022 
3023 	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
3024 	if (!reply_q) {
3025 		ioc_err(ioc, "unable to allocate memory %zu!\n",
3026 			sizeof(struct adapter_reply_queue));
3027 		return -ENOMEM;
3028 	}
3029 	reply_q->ioc = ioc;
3030 	reply_q->msix_index = index;
3031 
3032 	atomic_set(&reply_q->busy, 0);
3033 	if (ioc->msix_enable)
3034 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
3035 		    ioc->driver_name, ioc->id, index);
3036 	else
3037 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
3038 		    ioc->driver_name, ioc->id);
3039 	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
3040 			IRQF_SHARED, reply_q->name, reply_q);
3041 	if (r) {
3042 		pr_err("%s: unable to allocate interrupt %d!\n",
3043 		       reply_q->name, pci_irq_vector(pdev, index));
3044 		kfree(reply_q);
3045 		return -EBUSY;
3046 	}
3047 
3048 	INIT_LIST_HEAD(&reply_q->list);
3049 	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
3050 	return 0;
3051 }
3052 
3053 /**
3054  * _base_assign_reply_queues - assigning msix index for each cpu
3055  * @ioc: per adapter object
3056  *
3057  * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
3058  *
3059  * It would nice if we could call irq_set_affinity, however it is not
3060  * an exported symbol
3061  */
3062 static void
3063 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
3064 {
3065 	unsigned int cpu, nr_cpus, nr_msix, index = 0;
3066 	struct adapter_reply_queue *reply_q;
3067 	int local_numa_node;
3068 
3069 	if (!_base_is_controller_msix_enabled(ioc))
3070 		return;
3071 
3072 	if (ioc->msix_load_balance)
3073 		return;
3074 
3075 	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
3076 
3077 	nr_cpus = num_online_cpus();
3078 	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
3079 					       ioc->facts.MaxMSIxVectors);
3080 	if (!nr_msix)
3081 		return;
3082 
3083 	if (ioc->smp_affinity_enable) {
3084 
3085 		/*
3086 		 * set irq affinity to local numa node for those irqs
3087 		 * corresponding to high iops queues.
3088 		 */
3089 		if (ioc->high_iops_queues) {
3090 			local_numa_node = dev_to_node(&ioc->pdev->dev);
3091 			for (index = 0; index < ioc->high_iops_queues;
3092 			    index++) {
3093 				irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
3094 				    index), cpumask_of_node(local_numa_node));
3095 			}
3096 		}
3097 
3098 		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3099 			const cpumask_t *mask;
3100 
3101 			if (reply_q->msix_index < ioc->high_iops_queues)
3102 				continue;
3103 
3104 			mask = pci_irq_get_affinity(ioc->pdev,
3105 			    reply_q->msix_index);
3106 			if (!mask) {
3107 				ioc_warn(ioc, "no affinity for msi %x\n",
3108 					 reply_q->msix_index);
3109 				goto fall_back;
3110 			}
3111 
3112 			for_each_cpu_and(cpu, mask, cpu_online_mask) {
3113 				if (cpu >= ioc->cpu_msix_table_sz)
3114 					break;
3115 				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3116 			}
3117 		}
3118 		return;
3119 	}
3120 
3121 fall_back:
3122 	cpu = cpumask_first(cpu_online_mask);
3123 	nr_msix -= ioc->high_iops_queues;
3124 	index = 0;
3125 
3126 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3127 		unsigned int i, group = nr_cpus / nr_msix;
3128 
3129 		if (reply_q->msix_index < ioc->high_iops_queues)
3130 			continue;
3131 
3132 		if (cpu >= nr_cpus)
3133 			break;
3134 
3135 		if (index < nr_cpus % nr_msix)
3136 			group++;
3137 
3138 		for (i = 0 ; i < group ; i++) {
3139 			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3140 			cpu = cpumask_next(cpu, cpu_online_mask);
3141 		}
3142 		index++;
3143 	}
3144 }
3145 
3146 /**
3147  * _base_check_and_enable_high_iops_queues - enable high iops mode
3148  * @ioc: per adapter object
3149  * @hba_msix_vector_count: msix vectors supported by HBA
3150  *
3151  * Enable high iops queues only if
3152  *  - HBA is a SEA/AERO controller and
3153  *  - MSI-Xs vector supported by the HBA is 128 and
3154  *  - total CPU count in the system >=16 and
3155  *  - loaded driver with default max_msix_vectors module parameter and
3156  *  - system booted in non kdump mode
3157  *
3158  * returns nothing.
3159  */
3160 static void
3161 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3162 		int hba_msix_vector_count)
3163 {
3164 	u16 lnksta, speed;
3165 
3166 	if (perf_mode == MPT_PERF_MODE_IOPS ||
3167 	    perf_mode == MPT_PERF_MODE_LATENCY) {
3168 		ioc->high_iops_queues = 0;
3169 		return;
3170 	}
3171 
3172 	if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3173 
3174 		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3175 		speed = lnksta & PCI_EXP_LNKSTA_CLS;
3176 
3177 		if (speed < 0x4) {
3178 			ioc->high_iops_queues = 0;
3179 			return;
3180 		}
3181 	}
3182 
3183 	if (!reset_devices && ioc->is_aero_ioc &&
3184 	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3185 	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3186 	    max_msix_vectors == -1)
3187 		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3188 	else
3189 		ioc->high_iops_queues = 0;
3190 }
3191 
3192 /**
3193  * _base_disable_msix - disables msix
3194  * @ioc: per adapter object
3195  *
3196  */
3197 static void
3198 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3199 {
3200 	if (!ioc->msix_enable)
3201 		return;
3202 	pci_free_irq_vectors(ioc->pdev);
3203 	ioc->msix_enable = 0;
3204 }
3205 
3206 /**
3207  * _base_alloc_irq_vectors - allocate msix vectors
3208  * @ioc: per adapter object
3209  *
3210  */
3211 static int
3212 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3213 {
3214 	int i, irq_flags = PCI_IRQ_MSIX;
3215 	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3216 	struct irq_affinity *descp = &desc;
3217 
3218 	if (ioc->smp_affinity_enable)
3219 		irq_flags |= PCI_IRQ_AFFINITY;
3220 	else
3221 		descp = NULL;
3222 
3223 	ioc_info(ioc, " %d %d\n", ioc->high_iops_queues,
3224 	    ioc->reply_queue_count);
3225 
3226 	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3227 	    ioc->high_iops_queues,
3228 	    ioc->reply_queue_count, irq_flags, descp);
3229 
3230 	return i;
3231 }
3232 
3233 /**
3234  * _base_enable_msix - enables msix, failback to io_apic
3235  * @ioc: per adapter object
3236  *
3237  */
3238 static int
3239 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3240 {
3241 	int r;
3242 	int i, local_max_msix_vectors;
3243 	u8 try_msix = 0;
3244 
3245 	ioc->msix_load_balance = false;
3246 
3247 	if (msix_disable == -1 || msix_disable == 0)
3248 		try_msix = 1;
3249 
3250 	if (!try_msix)
3251 		goto try_ioapic;
3252 
3253 	if (_base_check_enable_msix(ioc) != 0)
3254 		goto try_ioapic;
3255 
3256 	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3257 	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3258 		ioc->cpu_count, max_msix_vectors);
3259 	if (ioc->is_aero_ioc)
3260 		_base_check_and_enable_high_iops_queues(ioc,
3261 			ioc->msix_vector_count);
3262 	ioc->reply_queue_count =
3263 		min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3264 		ioc->msix_vector_count);
3265 
3266 	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3267 		local_max_msix_vectors = (reset_devices) ? 1 : 8;
3268 	else
3269 		local_max_msix_vectors = max_msix_vectors;
3270 
3271 	if (local_max_msix_vectors > 0)
3272 		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3273 			ioc->reply_queue_count);
3274 	else if (local_max_msix_vectors == 0)
3275 		goto try_ioapic;
3276 
3277 	/*
3278 	 * Enable msix_load_balance only if combined reply queue mode is
3279 	 * disabled on SAS3 & above generation HBA devices.
3280 	 */
3281 	if (!ioc->combined_reply_queue &&
3282 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3283 		ioc_info(ioc,
3284 		    "combined ReplyQueue is off, Enabling msix load balance\n");
3285 		ioc->msix_load_balance = true;
3286 	}
3287 
3288 	/*
3289 	 * smp affinity setting is not need when msix load balance
3290 	 * is enabled.
3291 	 */
3292 	if (ioc->msix_load_balance)
3293 		ioc->smp_affinity_enable = 0;
3294 
3295 	r = _base_alloc_irq_vectors(ioc);
3296 	if (r < 0) {
3297 		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3298 		goto try_ioapic;
3299 	}
3300 
3301 	ioc->msix_enable = 1;
3302 	ioc->reply_queue_count = r;
3303 	for (i = 0; i < ioc->reply_queue_count; i++) {
3304 		r = _base_request_irq(ioc, i);
3305 		if (r) {
3306 			_base_free_irq(ioc);
3307 			_base_disable_msix(ioc);
3308 			goto try_ioapic;
3309 		}
3310 	}
3311 
3312 	ioc_info(ioc, "High IOPs queues : %s\n",
3313 			ioc->high_iops_queues ? "enabled" : "disabled");
3314 
3315 	return 0;
3316 
3317 /* failback to io_apic interrupt routing */
3318  try_ioapic:
3319 	ioc->high_iops_queues = 0;
3320 	ioc_info(ioc, "High IOPs queues : disabled\n");
3321 	ioc->reply_queue_count = 1;
3322 	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3323 	if (r < 0) {
3324 		dfailprintk(ioc,
3325 			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3326 				     r));
3327 	} else
3328 		r = _base_request_irq(ioc, 0);
3329 
3330 	return r;
3331 }
3332 
3333 /**
3334  * mpt3sas_base_unmap_resources - free controller resources
3335  * @ioc: per adapter object
3336  */
3337 static void
3338 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3339 {
3340 	struct pci_dev *pdev = ioc->pdev;
3341 
3342 	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3343 
3344 	_base_free_irq(ioc);
3345 	_base_disable_msix(ioc);
3346 
3347 	kfree(ioc->replyPostRegisterIndex);
3348 	ioc->replyPostRegisterIndex = NULL;
3349 
3350 
3351 	if (ioc->chip_phys) {
3352 		iounmap(ioc->chip);
3353 		ioc->chip_phys = 0;
3354 	}
3355 
3356 	if (pci_is_enabled(pdev)) {
3357 		pci_release_selected_regions(ioc->pdev, ioc->bars);
3358 		pci_disable_pcie_error_reporting(pdev);
3359 		pci_disable_device(pdev);
3360 	}
3361 }
3362 
3363 static int
3364 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3365 
3366 /**
3367  * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
3368  *     and if it is in fault state then issue diag reset.
3369  * @ioc: per adapter object
3370  *
3371  * Returns: 0 for success, non-zero for failure.
3372  */
3373 static int
3374 _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3375 {
3376 	u32 ioc_state;
3377 	int rc = -EFAULT;
3378 
3379 	dinitprintk(ioc, pr_info("%s\n", __func__));
3380 	if (ioc->pci_error_recovery)
3381 		return 0;
3382 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3383 	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3384 
3385 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3386 		mpt3sas_print_fault_code(ioc, ioc_state &
3387 		    MPI2_DOORBELL_DATA_MASK);
3388 		rc = _base_diag_reset(ioc);
3389 	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3390 	    MPI2_IOC_STATE_COREDUMP) {
3391 		mpt3sas_print_coredump_info(ioc, ioc_state &
3392 		     MPI2_DOORBELL_DATA_MASK);
3393 		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3394 		rc = _base_diag_reset(ioc);
3395 	}
3396 
3397 	return rc;
3398 }
3399 
3400 /**
3401  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3402  * @ioc: per adapter object
3403  *
3404  * Return: 0 for success, non-zero for failure.
3405  */
3406 int
3407 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3408 {
3409 	struct pci_dev *pdev = ioc->pdev;
3410 	u32 memap_sz;
3411 	u32 pio_sz;
3412 	int i, r = 0, rc;
3413 	u64 pio_chip = 0;
3414 	phys_addr_t chip_phys = 0;
3415 	struct adapter_reply_queue *reply_q;
3416 
3417 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3418 
3419 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3420 	if (pci_enable_device_mem(pdev)) {
3421 		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3422 		ioc->bars = 0;
3423 		return -ENODEV;
3424 	}
3425 
3426 
3427 	if (pci_request_selected_regions(pdev, ioc->bars,
3428 	    ioc->driver_name)) {
3429 		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3430 		ioc->bars = 0;
3431 		r = -ENODEV;
3432 		goto out_fail;
3433 	}
3434 
3435 /* AER (Advanced Error Reporting) hooks */
3436 	pci_enable_pcie_error_reporting(pdev);
3437 
3438 	pci_set_master(pdev);
3439 
3440 
3441 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
3442 		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3443 		r = -ENODEV;
3444 		goto out_fail;
3445 	}
3446 
3447 	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3448 	     (!memap_sz || !pio_sz); i++) {
3449 		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3450 			if (pio_sz)
3451 				continue;
3452 			pio_chip = (u64)pci_resource_start(pdev, i);
3453 			pio_sz = pci_resource_len(pdev, i);
3454 		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3455 			if (memap_sz)
3456 				continue;
3457 			ioc->chip_phys = pci_resource_start(pdev, i);
3458 			chip_phys = ioc->chip_phys;
3459 			memap_sz = pci_resource_len(pdev, i);
3460 			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3461 		}
3462 	}
3463 
3464 	if (ioc->chip == NULL) {
3465 		ioc_err(ioc,
3466 		    "unable to map adapter memory! or resource not found\n");
3467 		r = -EINVAL;
3468 		goto out_fail;
3469 	}
3470 
3471 	mpt3sas_base_mask_interrupts(ioc);
3472 
3473 	r = _base_get_ioc_facts(ioc);
3474 	if (r) {
3475 		rc = _base_check_for_fault_and_issue_reset(ioc);
3476 		if (rc || (_base_get_ioc_facts(ioc)))
3477 			goto out_fail;
3478 	}
3479 
3480 	if (!ioc->rdpq_array_enable_assigned) {
3481 		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3482 		ioc->rdpq_array_enable_assigned = 1;
3483 	}
3484 
3485 	r = _base_enable_msix(ioc);
3486 	if (r)
3487 		goto out_fail;
3488 
3489 	if (!ioc->is_driver_loading)
3490 		_base_init_irqpolls(ioc);
3491 	/* Use the Combined reply queue feature only for SAS3 C0 & higher
3492 	 * revision HBAs and also only when reply queue count is greater than 8
3493 	 */
3494 	if (ioc->combined_reply_queue) {
3495 		/* Determine the Supplemental Reply Post Host Index Registers
3496 		 * Addresse. Supplemental Reply Post Host Index Registers
3497 		 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
3498 		 * each register is at offset bytes of
3499 		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
3500 		 */
3501 		ioc->replyPostRegisterIndex = kcalloc(
3502 		     ioc->combined_reply_index_count,
3503 		     sizeof(resource_size_t *), GFP_KERNEL);
3504 		if (!ioc->replyPostRegisterIndex) {
3505 			ioc_err(ioc,
3506 			    "allocation for replyPostRegisterIndex failed!\n");
3507 			r = -ENOMEM;
3508 			goto out_fail;
3509 		}
3510 
3511 		for (i = 0; i < ioc->combined_reply_index_count; i++) {
3512 			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3513 			     ((u8 __force *)&ioc->chip->Doorbell +
3514 			     MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3515 			     (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3516 		}
3517 	}
3518 
3519 	if (ioc->is_warpdrive) {
3520 		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3521 		    &ioc->chip->ReplyPostHostIndex;
3522 
3523 		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3524 			ioc->reply_post_host_index[i] =
3525 			(resource_size_t __iomem *)
3526 			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3527 			* 4)));
3528 	}
3529 
3530 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3531 		pr_info("%s: %s enabled: IRQ %d\n",
3532 			reply_q->name,
3533 			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3534 			pci_irq_vector(ioc->pdev, reply_q->msix_index));
3535 
3536 	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3537 		 &chip_phys, ioc->chip, memap_sz);
3538 	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3539 		 (unsigned long long)pio_chip, pio_sz);
3540 
3541 	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
3542 	pci_save_state(pdev);
3543 	return 0;
3544 
3545  out_fail:
3546 	mpt3sas_base_unmap_resources(ioc);
3547 	return r;
3548 }
3549 
3550 /**
3551  * mpt3sas_base_get_msg_frame - obtain request mf pointer
3552  * @ioc: per adapter object
3553  * @smid: system request message index(smid zero is invalid)
3554  *
3555  * Return: virt pointer to message frame.
3556  */
3557 void *
3558 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3559 {
3560 	return (void *)(ioc->request + (smid * ioc->request_sz));
3561 }
3562 
3563 /**
3564  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3565  * @ioc: per adapter object
3566  * @smid: system request message index
3567  *
3568  * Return: virt pointer to sense buffer.
3569  */
3570 void *
3571 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3572 {
3573 	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3574 }
3575 
3576 /**
3577  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3578  * @ioc: per adapter object
3579  * @smid: system request message index
3580  *
3581  * Return: phys pointer to the low 32bit address of the sense buffer.
3582  */
3583 __le32
3584 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3585 {
3586 	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3587 	    SCSI_SENSE_BUFFERSIZE));
3588 }
3589 
3590 /**
3591  * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3592  * @ioc: per adapter object
3593  * @smid: system request message index
3594  *
3595  * Return: virt pointer to a PCIe SGL.
3596  */
3597 void *
3598 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3599 {
3600 	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3601 }
3602 
3603 /**
3604  * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3605  * @ioc: per adapter object
3606  * @smid: system request message index
3607  *
3608  * Return: phys pointer to the address of the PCIe buffer.
3609  */
3610 dma_addr_t
3611 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3612 {
3613 	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3614 }
3615 
3616 /**
3617  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3618  * @ioc: per adapter object
3619  * @phys_addr: lower 32 physical addr of the reply
3620  *
3621  * Converts 32bit lower physical addr into a virt address.
3622  */
3623 void *
3624 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3625 {
3626 	if (!phys_addr)
3627 		return NULL;
3628 	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3629 }
3630 
3631 /**
3632  * _base_get_msix_index - get the msix index
3633  * @ioc: per adapter object
3634  * @scmd: scsi_cmnd object
3635  *
3636  * returns msix index of general reply queues,
3637  * i.e. reply queue on which IO request's reply
3638  * should be posted by the HBA firmware.
3639  */
3640 static inline u8
3641 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3642 	struct scsi_cmnd *scmd)
3643 {
3644 	/* Enables reply_queue load balancing */
3645 	if (ioc->msix_load_balance)
3646 		return ioc->reply_queue_count ?
3647 		    base_mod64(atomic64_add_return(1,
3648 		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3649 
3650 	if (scmd && ioc->shost->nr_hw_queues > 1) {
3651 		u32 tag = blk_mq_unique_tag(scmd->request);
3652 
3653 		return blk_mq_unique_tag_to_hwq(tag) +
3654 			ioc->high_iops_queues;
3655 	}
3656 
3657 	return ioc->cpu_msix_table[raw_smp_processor_id()];
3658 }
3659 
3660 /**
3661  * _base_get_high_iops_msix_index - get the msix index of
3662  *				high iops queues
3663  * @ioc: per adapter object
3664  * @scmd: scsi_cmnd object
3665  *
3666  * Returns: msix index of high iops reply queues.
3667  * i.e. high iops reply queue on which IO request's
3668  * reply should be posted by the HBA firmware.
3669  */
3670 static inline u8
3671 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3672 	struct scsi_cmnd *scmd)
3673 {
3674 	/**
3675 	 * Round robin the IO interrupts among the high iops
3676 	 * reply queues in terms of batch count 16 when outstanding
3677 	 * IOs on the target device is >=8.
3678 	 */
3679 
3680 	if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3681 		return base_mod64((
3682 		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
3683 		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3684 		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3685 
3686 	return _base_get_msix_index(ioc, scmd);
3687 }
3688 
3689 /**
3690  * mpt3sas_base_get_smid - obtain a free smid from internal queue
3691  * @ioc: per adapter object
3692  * @cb_idx: callback index
3693  *
3694  * Return: smid (zero is invalid)
3695  */
3696 u16
3697 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3698 {
3699 	unsigned long flags;
3700 	struct request_tracker *request;
3701 	u16 smid;
3702 
3703 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3704 	if (list_empty(&ioc->internal_free_list)) {
3705 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3706 		ioc_err(ioc, "%s: smid not available\n", __func__);
3707 		return 0;
3708 	}
3709 
3710 	request = list_entry(ioc->internal_free_list.next,
3711 	    struct request_tracker, tracker_list);
3712 	request->cb_idx = cb_idx;
3713 	smid = request->smid;
3714 	list_del(&request->tracker_list);
3715 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3716 	return smid;
3717 }
3718 
3719 /**
3720  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3721  * @ioc: per adapter object
3722  * @cb_idx: callback index
3723  * @scmd: pointer to scsi command object
3724  *
3725  * Return: smid (zero is invalid)
3726  */
3727 u16
3728 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3729 	struct scsi_cmnd *scmd)
3730 {
3731 	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3732 	u16 smid;
3733 	u32 tag, unique_tag;
3734 
3735 	unique_tag = blk_mq_unique_tag(scmd->request);
3736 	tag = blk_mq_unique_tag_to_tag(unique_tag);
3737 
3738 	/*
3739 	 * Store hw queue number corresponding to the tag.
3740 	 * This hw queue number is used later to determine
3741 	 * the unique_tag using the logic below. This unique_tag
3742 	 * is used to retrieve the scmd pointer corresponding
3743 	 * to tag using scsi_host_find_tag() API.
3744 	 *
3745 	 * tag = smid - 1;
3746 	 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
3747 	 */
3748 	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
3749 
3750 	smid = tag + 1;
3751 	request->cb_idx = cb_idx;
3752 	request->smid = smid;
3753 	request->scmd = scmd;
3754 	INIT_LIST_HEAD(&request->chain_list);
3755 	return smid;
3756 }
3757 
3758 /**
3759  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3760  * @ioc: per adapter object
3761  * @cb_idx: callback index
3762  *
3763  * Return: smid (zero is invalid)
3764  */
3765 u16
3766 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3767 {
3768 	unsigned long flags;
3769 	struct request_tracker *request;
3770 	u16 smid;
3771 
3772 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3773 	if (list_empty(&ioc->hpr_free_list)) {
3774 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3775 		return 0;
3776 	}
3777 
3778 	request = list_entry(ioc->hpr_free_list.next,
3779 	    struct request_tracker, tracker_list);
3780 	request->cb_idx = cb_idx;
3781 	smid = request->smid;
3782 	list_del(&request->tracker_list);
3783 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3784 	return smid;
3785 }
3786 
3787 static void
3788 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3789 {
3790 	/*
3791 	 * See _wait_for_commands_to_complete() call with regards to this code.
3792 	 */
3793 	if (ioc->shost_recovery && ioc->pending_io_count) {
3794 		ioc->pending_io_count = scsi_host_busy(ioc->shost);
3795 		if (ioc->pending_io_count == 0)
3796 			wake_up(&ioc->reset_wq);
3797 	}
3798 }
3799 
3800 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3801 			   struct scsiio_tracker *st)
3802 {
3803 	if (WARN_ON(st->smid == 0))
3804 		return;
3805 	st->cb_idx = 0xFF;
3806 	st->direct_io = 0;
3807 	st->scmd = NULL;
3808 	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3809 	st->smid = 0;
3810 }
3811 
3812 /**
3813  * mpt3sas_base_free_smid - put smid back on free_list
3814  * @ioc: per adapter object
3815  * @smid: system request message index
3816  */
3817 void
3818 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3819 {
3820 	unsigned long flags;
3821 	int i;
3822 
3823 	if (smid < ioc->hi_priority_smid) {
3824 		struct scsiio_tracker *st;
3825 		void *request;
3826 
3827 		st = _get_st_from_smid(ioc, smid);
3828 		if (!st) {
3829 			_base_recovery_check(ioc);
3830 			return;
3831 		}
3832 
3833 		/* Clear MPI request frame */
3834 		request = mpt3sas_base_get_msg_frame(ioc, smid);
3835 		memset(request, 0, ioc->request_sz);
3836 
3837 		mpt3sas_base_clear_st(ioc, st);
3838 		_base_recovery_check(ioc);
3839 		ioc->io_queue_num[smid - 1] = 0;
3840 		return;
3841 	}
3842 
3843 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3844 	if (smid < ioc->internal_smid) {
3845 		/* hi-priority */
3846 		i = smid - ioc->hi_priority_smid;
3847 		ioc->hpr_lookup[i].cb_idx = 0xFF;
3848 		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3849 	} else if (smid <= ioc->hba_queue_depth) {
3850 		/* internal queue */
3851 		i = smid - ioc->internal_smid;
3852 		ioc->internal_lookup[i].cb_idx = 0xFF;
3853 		list_add(&ioc->internal_lookup[i].tracker_list,
3854 		    &ioc->internal_free_list);
3855 	}
3856 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3857 }
3858 
3859 /**
3860  * _base_mpi_ep_writeq - 32 bit write to MMIO
3861  * @b: data payload
3862  * @addr: address in MMIO space
3863  * @writeq_lock: spin lock
3864  *
3865  * This special handling for MPI EP to take care of 32 bit
3866  * environment where its not quarenteed to send the entire word
3867  * in one transfer.
3868  */
3869 static inline void
3870 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3871 					spinlock_t *writeq_lock)
3872 {
3873 	unsigned long flags;
3874 
3875 	spin_lock_irqsave(writeq_lock, flags);
3876 	__raw_writel((u32)(b), addr);
3877 	__raw_writel((u32)(b >> 32), (addr + 4));
3878 	spin_unlock_irqrestore(writeq_lock, flags);
3879 }
3880 
3881 /**
3882  * _base_writeq - 64 bit write to MMIO
3883  * @b: data payload
3884  * @addr: address in MMIO space
3885  * @writeq_lock: spin lock
3886  *
3887  * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
3888  * care of 32 bit environment where its not quarenteed to send the entire word
3889  * in one transfer.
3890  */
3891 #if defined(writeq) && defined(CONFIG_64BIT)
3892 static inline void
3893 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3894 {
3895 	wmb();
3896 	__raw_writeq(b, addr);
3897 	barrier();
3898 }
3899 #else
3900 static inline void
3901 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3902 {
3903 	_base_mpi_ep_writeq(b, addr, writeq_lock);
3904 }
3905 #endif
3906 
3907 /**
3908  * _base_set_and_get_msix_index - get the msix index and assign to msix_io
3909  *                                variable of scsi tracker
3910  * @ioc: per adapter object
3911  * @smid: system request message index
3912  *
3913  * returns msix index.
3914  */
3915 static u8
3916 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3917 {
3918 	struct scsiio_tracker *st = NULL;
3919 
3920 	if (smid < ioc->hi_priority_smid)
3921 		st = _get_st_from_smid(ioc, smid);
3922 
3923 	if (st == NULL)
3924 		return  _base_get_msix_index(ioc, NULL);
3925 
3926 	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3927 	return st->msix_io;
3928 }
3929 
3930 /**
3931  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3932  * @ioc: per adapter object
3933  * @smid: system request message index
3934  * @handle: device handle
3935  */
3936 static void
3937 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3938 	u16 smid, u16 handle)
3939 {
3940 	Mpi2RequestDescriptorUnion_t descriptor;
3941 	u64 *request = (u64 *)&descriptor;
3942 	void *mpi_req_iomem;
3943 	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3944 
3945 	_clone_sg_entries(ioc, (void *) mfp, smid);
3946 	mpi_req_iomem = (void __force *)ioc->chip +
3947 			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3948 	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3949 					ioc->request_sz);
3950 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3951 	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3952 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3953 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3954 	descriptor.SCSIIO.LMID = 0;
3955 	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3956 	    &ioc->scsi_lookup_lock);
3957 }
3958 
3959 /**
3960  * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3961  * @ioc: per adapter object
3962  * @smid: system request message index
3963  * @handle: device handle
3964  */
3965 static void
3966 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3967 {
3968 	Mpi2RequestDescriptorUnion_t descriptor;
3969 	u64 *request = (u64 *)&descriptor;
3970 
3971 
3972 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3973 	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3974 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3975 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3976 	descriptor.SCSIIO.LMID = 0;
3977 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3978 	    &ioc->scsi_lookup_lock);
3979 }
3980 
3981 /**
3982  * _base_put_smid_fast_path - send fast path request to firmware
3983  * @ioc: per adapter object
3984  * @smid: system request message index
3985  * @handle: device handle
3986  */
3987 static void
3988 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3989 	u16 handle)
3990 {
3991 	Mpi2RequestDescriptorUnion_t descriptor;
3992 	u64 *request = (u64 *)&descriptor;
3993 
3994 	descriptor.SCSIIO.RequestFlags =
3995 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3996 	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3997 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3998 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3999 	descriptor.SCSIIO.LMID = 0;
4000 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4001 	    &ioc->scsi_lookup_lock);
4002 }
4003 
4004 /**
4005  * _base_put_smid_hi_priority - send Task Management request to firmware
4006  * @ioc: per adapter object
4007  * @smid: system request message index
4008  * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
4009  */
4010 static void
4011 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4012 	u16 msix_task)
4013 {
4014 	Mpi2RequestDescriptorUnion_t descriptor;
4015 	void *mpi_req_iomem;
4016 	u64 *request;
4017 
4018 	if (ioc->is_mcpu_endpoint) {
4019 		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4020 
4021 		/* TBD 256 is offset within sys register. */
4022 		mpi_req_iomem = (void __force *)ioc->chip
4023 					+ MPI_FRAME_START_OFFSET
4024 					+ (smid * ioc->request_sz);
4025 		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4026 							ioc->request_sz);
4027 	}
4028 
4029 	request = (u64 *)&descriptor;
4030 
4031 	descriptor.HighPriority.RequestFlags =
4032 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4033 	descriptor.HighPriority.MSIxIndex =  msix_task;
4034 	descriptor.HighPriority.SMID = cpu_to_le16(smid);
4035 	descriptor.HighPriority.LMID = 0;
4036 	descriptor.HighPriority.Reserved1 = 0;
4037 	if (ioc->is_mcpu_endpoint)
4038 		_base_mpi_ep_writeq(*request,
4039 				&ioc->chip->RequestDescriptorPostLow,
4040 				&ioc->scsi_lookup_lock);
4041 	else
4042 		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4043 		    &ioc->scsi_lookup_lock);
4044 }
4045 
4046 /**
4047  * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
4048  *  firmware
4049  * @ioc: per adapter object
4050  * @smid: system request message index
4051  */
4052 void
4053 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4054 {
4055 	Mpi2RequestDescriptorUnion_t descriptor;
4056 	u64 *request = (u64 *)&descriptor;
4057 
4058 	descriptor.Default.RequestFlags =
4059 		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
4060 	descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
4061 	descriptor.Default.SMID = cpu_to_le16(smid);
4062 	descriptor.Default.LMID = 0;
4063 	descriptor.Default.DescriptorTypeDependent = 0;
4064 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4065 	    &ioc->scsi_lookup_lock);
4066 }
4067 
4068 /**
4069  * _base_put_smid_default - Default, primarily used for config pages
4070  * @ioc: per adapter object
4071  * @smid: system request message index
4072  */
4073 static void
4074 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4075 {
4076 	Mpi2RequestDescriptorUnion_t descriptor;
4077 	void *mpi_req_iomem;
4078 	u64 *request;
4079 
4080 	if (ioc->is_mcpu_endpoint) {
4081 		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4082 
4083 		_clone_sg_entries(ioc, (void *) mfp, smid);
4084 		/* TBD 256 is offset within sys register */
4085 		mpi_req_iomem = (void __force *)ioc->chip +
4086 			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4087 		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4088 							ioc->request_sz);
4089 	}
4090 	request = (u64 *)&descriptor;
4091 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4092 	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4093 	descriptor.Default.SMID = cpu_to_le16(smid);
4094 	descriptor.Default.LMID = 0;
4095 	descriptor.Default.DescriptorTypeDependent = 0;
4096 	if (ioc->is_mcpu_endpoint)
4097 		_base_mpi_ep_writeq(*request,
4098 				&ioc->chip->RequestDescriptorPostLow,
4099 				&ioc->scsi_lookup_lock);
4100 	else
4101 		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4102 				&ioc->scsi_lookup_lock);
4103 }
4104 
4105 /**
4106  * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4107  *   Atomic Request Descriptor
4108  * @ioc: per adapter object
4109  * @smid: system request message index
4110  * @handle: device handle, unused in this function, for function type match
4111  *
4112  * Return nothing.
4113  */
4114 static void
4115 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4116 	u16 handle)
4117 {
4118 	Mpi26AtomicRequestDescriptor_t descriptor;
4119 	u32 *request = (u32 *)&descriptor;
4120 
4121 	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4122 	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4123 	descriptor.SMID = cpu_to_le16(smid);
4124 
4125 	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4126 }
4127 
4128 /**
4129  * _base_put_smid_fast_path_atomic - send fast path request to firmware
4130  * using Atomic Request Descriptor
4131  * @ioc: per adapter object
4132  * @smid: system request message index
4133  * @handle: device handle, unused in this function, for function type match
4134  * Return nothing
4135  */
4136 static void
4137 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4138 	u16 handle)
4139 {
4140 	Mpi26AtomicRequestDescriptor_t descriptor;
4141 	u32 *request = (u32 *)&descriptor;
4142 
4143 	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4144 	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4145 	descriptor.SMID = cpu_to_le16(smid);
4146 
4147 	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4148 }
4149 
4150 /**
4151  * _base_put_smid_hi_priority_atomic - send Task Management request to
4152  * firmware using Atomic Request Descriptor
4153  * @ioc: per adapter object
4154  * @smid: system request message index
4155  * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
4156  *
4157  * Return nothing.
4158  */
4159 static void
4160 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4161 	u16 msix_task)
4162 {
4163 	Mpi26AtomicRequestDescriptor_t descriptor;
4164 	u32 *request = (u32 *)&descriptor;
4165 
4166 	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4167 	descriptor.MSIxIndex = msix_task;
4168 	descriptor.SMID = cpu_to_le16(smid);
4169 
4170 	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4171 }
4172 
4173 /**
4174  * _base_put_smid_default_atomic - Default, primarily used for config pages
4175  * use Atomic Request Descriptor
4176  * @ioc: per adapter object
4177  * @smid: system request message index
4178  *
4179  * Return nothing.
4180  */
4181 static void
4182 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4183 {
4184 	Mpi26AtomicRequestDescriptor_t descriptor;
4185 	u32 *request = (u32 *)&descriptor;
4186 
4187 	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4188 	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4189 	descriptor.SMID = cpu_to_le16(smid);
4190 
4191 	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4192 }
4193 
4194 /**
4195  * _base_display_OEMs_branding - Display branding string
4196  * @ioc: per adapter object
4197  */
4198 static void
4199 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4200 {
4201 	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
4202 		return;
4203 
4204 	switch (ioc->pdev->subsystem_vendor) {
4205 	case PCI_VENDOR_ID_INTEL:
4206 		switch (ioc->pdev->device) {
4207 		case MPI2_MFGPAGE_DEVID_SAS2008:
4208 			switch (ioc->pdev->subsystem_device) {
4209 			case MPT2SAS_INTEL_RMS2LL080_SSDID:
4210 				ioc_info(ioc, "%s\n",
4211 					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4212 				break;
4213 			case MPT2SAS_INTEL_RMS2LL040_SSDID:
4214 				ioc_info(ioc, "%s\n",
4215 					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4216 				break;
4217 			case MPT2SAS_INTEL_SSD910_SSDID:
4218 				ioc_info(ioc, "%s\n",
4219 					 MPT2SAS_INTEL_SSD910_BRANDING);
4220 				break;
4221 			default:
4222 				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4223 					 ioc->pdev->subsystem_device);
4224 				break;
4225 			}
4226 			break;
4227 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
4228 			switch (ioc->pdev->subsystem_device) {
4229 			case MPT2SAS_INTEL_RS25GB008_SSDID:
4230 				ioc_info(ioc, "%s\n",
4231 					 MPT2SAS_INTEL_RS25GB008_BRANDING);
4232 				break;
4233 			case MPT2SAS_INTEL_RMS25JB080_SSDID:
4234 				ioc_info(ioc, "%s\n",
4235 					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4236 				break;
4237 			case MPT2SAS_INTEL_RMS25JB040_SSDID:
4238 				ioc_info(ioc, "%s\n",
4239 					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4240 				break;
4241 			case MPT2SAS_INTEL_RMS25KB080_SSDID:
4242 				ioc_info(ioc, "%s\n",
4243 					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4244 				break;
4245 			case MPT2SAS_INTEL_RMS25KB040_SSDID:
4246 				ioc_info(ioc, "%s\n",
4247 					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4248 				break;
4249 			case MPT2SAS_INTEL_RMS25LB040_SSDID:
4250 				ioc_info(ioc, "%s\n",
4251 					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4252 				break;
4253 			case MPT2SAS_INTEL_RMS25LB080_SSDID:
4254 				ioc_info(ioc, "%s\n",
4255 					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4256 				break;
4257 			default:
4258 				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4259 					 ioc->pdev->subsystem_device);
4260 				break;
4261 			}
4262 			break;
4263 		case MPI25_MFGPAGE_DEVID_SAS3008:
4264 			switch (ioc->pdev->subsystem_device) {
4265 			case MPT3SAS_INTEL_RMS3JC080_SSDID:
4266 				ioc_info(ioc, "%s\n",
4267 					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4268 				break;
4269 
4270 			case MPT3SAS_INTEL_RS3GC008_SSDID:
4271 				ioc_info(ioc, "%s\n",
4272 					 MPT3SAS_INTEL_RS3GC008_BRANDING);
4273 				break;
4274 			case MPT3SAS_INTEL_RS3FC044_SSDID:
4275 				ioc_info(ioc, "%s\n",
4276 					 MPT3SAS_INTEL_RS3FC044_BRANDING);
4277 				break;
4278 			case MPT3SAS_INTEL_RS3UC080_SSDID:
4279 				ioc_info(ioc, "%s\n",
4280 					 MPT3SAS_INTEL_RS3UC080_BRANDING);
4281 				break;
4282 			default:
4283 				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4284 					 ioc->pdev->subsystem_device);
4285 				break;
4286 			}
4287 			break;
4288 		default:
4289 			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4290 				 ioc->pdev->subsystem_device);
4291 			break;
4292 		}
4293 		break;
4294 	case PCI_VENDOR_ID_DELL:
4295 		switch (ioc->pdev->device) {
4296 		case MPI2_MFGPAGE_DEVID_SAS2008:
4297 			switch (ioc->pdev->subsystem_device) {
4298 			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4299 				ioc_info(ioc, "%s\n",
4300 					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4301 				break;
4302 			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4303 				ioc_info(ioc, "%s\n",
4304 					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4305 				break;
4306 			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4307 				ioc_info(ioc, "%s\n",
4308 					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4309 				break;
4310 			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4311 				ioc_info(ioc, "%s\n",
4312 					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4313 				break;
4314 			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4315 				ioc_info(ioc, "%s\n",
4316 					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4317 				break;
4318 			case MPT2SAS_DELL_PERC_H200_SSDID:
4319 				ioc_info(ioc, "%s\n",
4320 					 MPT2SAS_DELL_PERC_H200_BRANDING);
4321 				break;
4322 			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4323 				ioc_info(ioc, "%s\n",
4324 					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4325 				break;
4326 			default:
4327 				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4328 					 ioc->pdev->subsystem_device);
4329 				break;
4330 			}
4331 			break;
4332 		case MPI25_MFGPAGE_DEVID_SAS3008:
4333 			switch (ioc->pdev->subsystem_device) {
4334 			case MPT3SAS_DELL_12G_HBA_SSDID:
4335 				ioc_info(ioc, "%s\n",
4336 					 MPT3SAS_DELL_12G_HBA_BRANDING);
4337 				break;
4338 			default:
4339 				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4340 					 ioc->pdev->subsystem_device);
4341 				break;
4342 			}
4343 			break;
4344 		default:
4345 			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4346 				 ioc->pdev->subsystem_device);
4347 			break;
4348 		}
4349 		break;
4350 	case PCI_VENDOR_ID_CISCO:
4351 		switch (ioc->pdev->device) {
4352 		case MPI25_MFGPAGE_DEVID_SAS3008:
4353 			switch (ioc->pdev->subsystem_device) {
4354 			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4355 				ioc_info(ioc, "%s\n",
4356 					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4357 				break;
4358 			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4359 				ioc_info(ioc, "%s\n",
4360 					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4361 				break;
4362 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4363 				ioc_info(ioc, "%s\n",
4364 					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4365 				break;
4366 			default:
4367 				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4368 					 ioc->pdev->subsystem_device);
4369 				break;
4370 			}
4371 			break;
4372 		case MPI25_MFGPAGE_DEVID_SAS3108_1:
4373 			switch (ioc->pdev->subsystem_device) {
4374 			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4375 				ioc_info(ioc, "%s\n",
4376 					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4377 				break;
4378 			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4379 				ioc_info(ioc, "%s\n",
4380 					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4381 				break;
4382 			default:
4383 				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4384 					 ioc->pdev->subsystem_device);
4385 				break;
4386 			}
4387 			break;
4388 		default:
4389 			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4390 				 ioc->pdev->subsystem_device);
4391 			break;
4392 		}
4393 		break;
4394 	case MPT2SAS_HP_3PAR_SSVID:
4395 		switch (ioc->pdev->device) {
4396 		case MPI2_MFGPAGE_DEVID_SAS2004:
4397 			switch (ioc->pdev->subsystem_device) {
4398 			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4399 				ioc_info(ioc, "%s\n",
4400 					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4401 				break;
4402 			default:
4403 				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4404 					 ioc->pdev->subsystem_device);
4405 				break;
4406 			}
4407 			break;
4408 		case MPI2_MFGPAGE_DEVID_SAS2308_2:
4409 			switch (ioc->pdev->subsystem_device) {
4410 			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4411 				ioc_info(ioc, "%s\n",
4412 					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4413 				break;
4414 			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4415 				ioc_info(ioc, "%s\n",
4416 					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4417 				break;
4418 			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4419 				ioc_info(ioc, "%s\n",
4420 					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4421 				break;
4422 			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4423 				ioc_info(ioc, "%s\n",
4424 					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4425 				break;
4426 			default:
4427 				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4428 					 ioc->pdev->subsystem_device);
4429 				break;
4430 			}
4431 			break;
4432 		default:
4433 			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4434 				 ioc->pdev->subsystem_device);
4435 			break;
4436 		}
4437 	default:
4438 		break;
4439 	}
4440 }
4441 
4442 /**
4443  * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4444  *				version from FW Image Header.
4445  * @ioc: per adapter object
4446  *
4447  * Return: 0 for success, non-zero for failure.
4448  */
4449 	static int
4450 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4451 {
4452 	Mpi2FWImageHeader_t *fw_img_hdr;
4453 	Mpi26ComponentImageHeader_t *cmp_img_hdr;
4454 	Mpi25FWUploadRequest_t *mpi_request;
4455 	Mpi2FWUploadReply_t mpi_reply;
4456 	int r = 0;
4457 	u32  package_version = 0;
4458 	void *fwpkg_data = NULL;
4459 	dma_addr_t fwpkg_data_dma;
4460 	u16 smid, ioc_status;
4461 	size_t data_length;
4462 
4463 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4464 
4465 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4466 		ioc_err(ioc, "%s: internal command already in use\n", __func__);
4467 		return -EAGAIN;
4468 	}
4469 
4470 	data_length = sizeof(Mpi2FWImageHeader_t);
4471 	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4472 			&fwpkg_data_dma, GFP_KERNEL);
4473 	if (!fwpkg_data) {
4474 		ioc_err(ioc,
4475 		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4476 			__FILE__, __LINE__, __func__);
4477 		return -ENOMEM;
4478 	}
4479 
4480 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4481 	if (!smid) {
4482 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4483 		r = -EAGAIN;
4484 		goto out;
4485 	}
4486 
4487 	ioc->base_cmds.status = MPT3_CMD_PENDING;
4488 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4489 	ioc->base_cmds.smid = smid;
4490 	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4491 	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4492 	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4493 	mpi_request->ImageSize = cpu_to_le32(data_length);
4494 	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4495 			data_length);
4496 	init_completion(&ioc->base_cmds.done);
4497 	ioc->put_smid_default(ioc, smid);
4498 	/* Wait for 15 seconds */
4499 	wait_for_completion_timeout(&ioc->base_cmds.done,
4500 			FW_IMG_HDR_READ_TIMEOUT*HZ);
4501 	ioc_info(ioc, "%s: complete\n", __func__);
4502 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4503 		ioc_err(ioc, "%s: timeout\n", __func__);
4504 		_debug_dump_mf(mpi_request,
4505 				sizeof(Mpi25FWUploadRequest_t)/4);
4506 		r = -ETIME;
4507 	} else {
4508 		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4509 		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4510 			memcpy(&mpi_reply, ioc->base_cmds.reply,
4511 					sizeof(Mpi2FWUploadReply_t));
4512 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4513 						MPI2_IOCSTATUS_MASK;
4514 			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4515 				fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4516 				if (le32_to_cpu(fw_img_hdr->Signature) ==
4517 				    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4518 					cmp_img_hdr =
4519 					    (Mpi26ComponentImageHeader_t *)
4520 					    (fwpkg_data);
4521 					package_version =
4522 					    le32_to_cpu(
4523 					    cmp_img_hdr->ApplicationSpecific);
4524 				} else
4525 					package_version =
4526 					    le32_to_cpu(
4527 					    fw_img_hdr->PackageVersion.Word);
4528 				if (package_version)
4529 					ioc_info(ioc,
4530 					"FW Package Ver(%02d.%02d.%02d.%02d)\n",
4531 					((package_version) & 0xFF000000) >> 24,
4532 					((package_version) & 0x00FF0000) >> 16,
4533 					((package_version) & 0x0000FF00) >> 8,
4534 					(package_version) & 0x000000FF);
4535 			} else {
4536 				_debug_dump_mf(&mpi_reply,
4537 						sizeof(Mpi2FWUploadReply_t)/4);
4538 			}
4539 		}
4540 	}
4541 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4542 out:
4543 	if (fwpkg_data)
4544 		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4545 				fwpkg_data_dma);
4546 	return r;
4547 }
4548 
4549 /**
4550  * _base_display_ioc_capabilities - Disply IOC's capabilities.
4551  * @ioc: per adapter object
4552  */
4553 static void
4554 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4555 {
4556 	int i = 0;
4557 	char desc[16];
4558 	u32 iounit_pg1_flags;
4559 	u32 bios_version;
4560 
4561 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4562 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
4563 	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4564 		 desc,
4565 		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4566 		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4567 		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4568 		 ioc->facts.FWVersion.Word & 0x000000FF,
4569 		 ioc->pdev->revision,
4570 		 (bios_version & 0xFF000000) >> 24,
4571 		 (bios_version & 0x00FF0000) >> 16,
4572 		 (bios_version & 0x0000FF00) >> 8,
4573 		 bios_version & 0x000000FF);
4574 
4575 	_base_display_OEMs_branding(ioc);
4576 
4577 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4578 		pr_info("%sNVMe", i ? "," : "");
4579 		i++;
4580 	}
4581 
4582 	ioc_info(ioc, "Protocol=(");
4583 
4584 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4585 		pr_cont("Initiator");
4586 		i++;
4587 	}
4588 
4589 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4590 		pr_cont("%sTarget", i ? "," : "");
4591 		i++;
4592 	}
4593 
4594 	i = 0;
4595 	pr_cont("), Capabilities=(");
4596 
4597 	if (!ioc->hide_ir_msg) {
4598 		if (ioc->facts.IOCCapabilities &
4599 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4600 			pr_cont("Raid");
4601 			i++;
4602 		}
4603 	}
4604 
4605 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4606 		pr_cont("%sTLR", i ? "," : "");
4607 		i++;
4608 	}
4609 
4610 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4611 		pr_cont("%sMulticast", i ? "," : "");
4612 		i++;
4613 	}
4614 
4615 	if (ioc->facts.IOCCapabilities &
4616 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4617 		pr_cont("%sBIDI Target", i ? "," : "");
4618 		i++;
4619 	}
4620 
4621 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4622 		pr_cont("%sEEDP", i ? "," : "");
4623 		i++;
4624 	}
4625 
4626 	if (ioc->facts.IOCCapabilities &
4627 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4628 		pr_cont("%sSnapshot Buffer", i ? "," : "");
4629 		i++;
4630 	}
4631 
4632 	if (ioc->facts.IOCCapabilities &
4633 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4634 		pr_cont("%sDiag Trace Buffer", i ? "," : "");
4635 		i++;
4636 	}
4637 
4638 	if (ioc->facts.IOCCapabilities &
4639 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4640 		pr_cont("%sDiag Extended Buffer", i ? "," : "");
4641 		i++;
4642 	}
4643 
4644 	if (ioc->facts.IOCCapabilities &
4645 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4646 		pr_cont("%sTask Set Full", i ? "," : "");
4647 		i++;
4648 	}
4649 
4650 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4651 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4652 		pr_cont("%sNCQ", i ? "," : "");
4653 		i++;
4654 	}
4655 
4656 	pr_cont(")\n");
4657 }
4658 
4659 /**
4660  * mpt3sas_base_update_missing_delay - change the missing delay timers
4661  * @ioc: per adapter object
4662  * @device_missing_delay: amount of time till device is reported missing
4663  * @io_missing_delay: interval IO is returned when there is a missing device
4664  *
4665  * Passed on the command line, this function will modify the device missing
4666  * delay, as well as the io missing delay. This should be called at driver
4667  * load time.
4668  */
4669 void
4670 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4671 	u16 device_missing_delay, u8 io_missing_delay)
4672 {
4673 	u16 dmd, dmd_new, dmd_orignal;
4674 	u8 io_missing_delay_original;
4675 	u16 sz;
4676 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4677 	Mpi2ConfigReply_t mpi_reply;
4678 	u8 num_phys = 0;
4679 	u16 ioc_status;
4680 
4681 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4682 	if (!num_phys)
4683 		return;
4684 
4685 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4686 	    sizeof(Mpi2SasIOUnit1PhyData_t));
4687 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4688 	if (!sas_iounit_pg1) {
4689 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4690 			__FILE__, __LINE__, __func__);
4691 		goto out;
4692 	}
4693 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4694 	    sas_iounit_pg1, sz))) {
4695 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4696 			__FILE__, __LINE__, __func__);
4697 		goto out;
4698 	}
4699 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4700 	    MPI2_IOCSTATUS_MASK;
4701 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4702 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4703 			__FILE__, __LINE__, __func__);
4704 		goto out;
4705 	}
4706 
4707 	/* device missing delay */
4708 	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4709 	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4710 		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4711 	else
4712 		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4713 	dmd_orignal = dmd;
4714 	if (device_missing_delay > 0x7F) {
4715 		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4716 		    device_missing_delay;
4717 		dmd = dmd / 16;
4718 		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4719 	} else
4720 		dmd = device_missing_delay;
4721 	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4722 
4723 	/* io missing delay */
4724 	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4725 	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4726 
4727 	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4728 	    sz)) {
4729 		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4730 			dmd_new = (dmd &
4731 			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4732 		else
4733 			dmd_new =
4734 		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4735 		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4736 			 dmd_orignal, dmd_new);
4737 		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4738 			 io_missing_delay_original,
4739 			 io_missing_delay);
4740 		ioc->device_missing_delay = dmd_new;
4741 		ioc->io_missing_delay = io_missing_delay;
4742 	}
4743 
4744 out:
4745 	kfree(sas_iounit_pg1);
4746 }
4747 
4748 /**
4749  * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4750  *    according to performance mode.
4751  * @ioc : per adapter object
4752  *
4753  * Return nothing.
4754  */
4755 static void
4756 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4757 {
4758 	Mpi2IOCPage1_t ioc_pg1;
4759 	Mpi2ConfigReply_t mpi_reply;
4760 
4761 	mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4762 	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4763 
4764 	switch (perf_mode) {
4765 	case MPT_PERF_MODE_DEFAULT:
4766 	case MPT_PERF_MODE_BALANCED:
4767 		if (ioc->high_iops_queues) {
4768 			ioc_info(ioc,
4769 				"Enable interrupt coalescing only for first\t"
4770 				"%d reply queues\n",
4771 				MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4772 			/*
4773 			 * If 31st bit is zero then interrupt coalescing is
4774 			 * enabled for all reply descriptor post queues.
4775 			 * If 31st bit is set to one then user can
4776 			 * enable/disable interrupt coalescing on per reply
4777 			 * descriptor post queue group(8) basis. So to enable
4778 			 * interrupt coalescing only on first reply descriptor
4779 			 * post queue group 31st bit and zero th bit is enabled.
4780 			 */
4781 			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4782 			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4783 			mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4784 			ioc_info(ioc, "performance mode: balanced\n");
4785 			return;
4786 		}
4787 		fallthrough;
4788 	case MPT_PERF_MODE_LATENCY:
4789 		/*
4790 		 * Enable interrupt coalescing on all reply queues
4791 		 * with timeout value 0xA
4792 		 */
4793 		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4794 		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4795 		ioc_pg1.ProductSpecific = 0;
4796 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4797 		ioc_info(ioc, "performance mode: latency\n");
4798 		break;
4799 	case MPT_PERF_MODE_IOPS:
4800 		/*
4801 		 * Enable interrupt coalescing on all reply queues.
4802 		 */
4803 		ioc_info(ioc,
4804 		    "performance mode: iops with coalescing timeout: 0x%x\n",
4805 		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
4806 		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4807 		ioc_pg1.ProductSpecific = 0;
4808 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4809 		break;
4810 	}
4811 }
4812 
4813 /**
4814  * _base_get_event_diag_triggers - get event diag trigger values from
4815  *				persistent pages
4816  * @ioc : per adapter object
4817  *
4818  * Return nothing.
4819  */
4820 static void
4821 _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4822 {
4823 	Mpi26DriverTriggerPage2_t trigger_pg2;
4824 	struct SL_WH_EVENT_TRIGGER_T *event_tg;
4825 	MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
4826 	Mpi2ConfigReply_t mpi_reply;
4827 	int r = 0, i = 0;
4828 	u16 count = 0;
4829 	u16 ioc_status;
4830 
4831 	r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
4832 	    &trigger_pg2);
4833 	if (r)
4834 		return;
4835 
4836 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4837 	    MPI2_IOCSTATUS_MASK;
4838 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4839 		dinitprintk(ioc,
4840 		    ioc_err(ioc,
4841 		    "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
4842 		   __func__, ioc_status));
4843 		return;
4844 	}
4845 
4846 	if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
4847 		count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
4848 		count = min_t(u16, NUM_VALID_ENTRIES, count);
4849 		ioc->diag_trigger_event.ValidEntries = count;
4850 
4851 		event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
4852 		mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
4853 		for (i = 0; i < count; i++) {
4854 			event_tg->EventValue = le16_to_cpu(
4855 			    mpi_event_tg->MPIEventCode);
4856 			event_tg->LogEntryQualifier = le16_to_cpu(
4857 			    mpi_event_tg->MPIEventCodeSpecific);
4858 			event_tg++;
4859 			mpi_event_tg++;
4860 		}
4861 	}
4862 }
4863 
4864 /**
4865  * _base_get_scsi_diag_triggers - get scsi diag trigger values from
4866  *				persistent pages
4867  * @ioc : per adapter object
4868  *
4869  * Return nothing.
4870  */
4871 static void
4872 _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4873 {
4874 	Mpi26DriverTriggerPage3_t trigger_pg3;
4875 	struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
4876 	MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
4877 	Mpi2ConfigReply_t mpi_reply;
4878 	int r = 0, i = 0;
4879 	u16 count = 0;
4880 	u16 ioc_status;
4881 
4882 	r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
4883 	    &trigger_pg3);
4884 	if (r)
4885 		return;
4886 
4887 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4888 	    MPI2_IOCSTATUS_MASK;
4889 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4890 		dinitprintk(ioc,
4891 		    ioc_err(ioc,
4892 		    "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
4893 		    __func__, ioc_status));
4894 		return;
4895 	}
4896 
4897 	if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
4898 		count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
4899 		count = min_t(u16, NUM_VALID_ENTRIES, count);
4900 		ioc->diag_trigger_scsi.ValidEntries = count;
4901 
4902 		scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
4903 		mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
4904 		for (i = 0; i < count; i++) {
4905 			scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
4906 			scsi_tg->ASC = mpi_scsi_tg->ASC;
4907 			scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
4908 
4909 			scsi_tg++;
4910 			mpi_scsi_tg++;
4911 		}
4912 	}
4913 }
4914 
4915 /**
4916  * _base_get_mpi_diag_triggers - get mpi diag trigger values from
4917  *				persistent pages
4918  * @ioc : per adapter object
4919  *
4920  * Return nothing.
4921  */
4922 static void
4923 _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4924 {
4925 	Mpi26DriverTriggerPage4_t trigger_pg4;
4926 	struct SL_WH_MPI_TRIGGER_T *status_tg;
4927 	MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
4928 	Mpi2ConfigReply_t mpi_reply;
4929 	int r = 0, i = 0;
4930 	u16 count = 0;
4931 	u16 ioc_status;
4932 
4933 	r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
4934 	    &trigger_pg4);
4935 	if (r)
4936 		return;
4937 
4938 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4939 	    MPI2_IOCSTATUS_MASK;
4940 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4941 		dinitprintk(ioc,
4942 		    ioc_err(ioc,
4943 		    "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
4944 		    __func__, ioc_status));
4945 		return;
4946 	}
4947 
4948 	if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
4949 		count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
4950 		count = min_t(u16, NUM_VALID_ENTRIES, count);
4951 		ioc->diag_trigger_mpi.ValidEntries = count;
4952 
4953 		status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
4954 		mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
4955 
4956 		for (i = 0; i < count; i++) {
4957 			status_tg->IOCStatus = le16_to_cpu(
4958 			    mpi_status_tg->IOCStatus);
4959 			status_tg->IocLogInfo = le32_to_cpu(
4960 			    mpi_status_tg->LogInfo);
4961 
4962 			status_tg++;
4963 			mpi_status_tg++;
4964 		}
4965 	}
4966 }
4967 
4968 /**
4969  * _base_get_master_diag_triggers - get master diag trigger values from
4970  *				persistent pages
4971  * @ioc : per adapter object
4972  *
4973  * Return nothing.
4974  */
4975 static void
4976 _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
4977 {
4978 	Mpi26DriverTriggerPage1_t trigger_pg1;
4979 	Mpi2ConfigReply_t mpi_reply;
4980 	int r;
4981 	u16 ioc_status;
4982 
4983 	r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
4984 	    &trigger_pg1);
4985 	if (r)
4986 		return;
4987 
4988 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4989 	    MPI2_IOCSTATUS_MASK;
4990 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4991 		dinitprintk(ioc,
4992 		    ioc_err(ioc,
4993 		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
4994 		   __func__, ioc_status));
4995 		return;
4996 	}
4997 
4998 	if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
4999 		ioc->diag_trigger_master.MasterData |=
5000 		    le32_to_cpu(
5001 		    trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
5002 }
5003 
5004 /**
5005  * _base_check_for_trigger_pages_support - checks whether HBA FW supports
5006  *					driver trigger pages or not
5007  * @ioc : per adapter object
5008  *
5009  * Returns trigger flags mask if HBA FW supports driver trigger pages,
 * otherwise returns -EFAULT.
5011  */
5012 static int
5013 _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
5014 {
5015 	Mpi26DriverTriggerPage0_t trigger_pg0;
5016 	int r = 0;
5017 	Mpi2ConfigReply_t mpi_reply;
5018 	u16 ioc_status;
5019 
5020 	r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5021 	    &trigger_pg0);
5022 	if (r)
5023 		return -EFAULT;
5024 
5025 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5026 	    MPI2_IOCSTATUS_MASK;
5027 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5028 		return -EFAULT;
5029 
5030 	return le16_to_cpu(trigger_pg0.TriggerFlags);
5031 }
5032 
5033 /**
5034  * _base_get_diag_triggers - Retrieve diag trigger values from
5035  *				persistent pages.
5036  * @ioc : per adapter object
5037  *
5038  * Return nothing.
5039  */
5040 static void
5041 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5042 {
5043 	int trigger_flags;
5044 
5045 	/*
5046 	 * Default setting of master trigger.
5047 	 */
5048 	ioc->diag_trigger_master.MasterData =
5049 	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
5050 
5051 	trigger_flags = _base_check_for_trigger_pages_support(ioc);
5052 	if (trigger_flags < 0)
5053 		return;
5054 
5055 	ioc->supports_trigger_pages = 1;
5056 
5057 	/*
5058 	 * Retrieve master diag trigger values from driver trigger pg1
5059 	 * if master trigger bit enabled in TriggerFlags.
5060 	 */
5061 	if ((u16)trigger_flags &
5062 	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID)
5063 		_base_get_master_diag_triggers(ioc);
5064 
5065 	/*
5066 	 * Retrieve event diag trigger values from driver trigger pg2
5067 	 * if event trigger bit enabled in TriggerFlags.
5068 	 */
5069 	if ((u16)trigger_flags &
5070 	    MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID)
5071 		_base_get_event_diag_triggers(ioc);
5072 
5073 	/*
5074 	 * Retrieve scsi diag trigger values from driver trigger pg3
5075 	 * if scsi trigger bit enabled in TriggerFlags.
5076 	 */
5077 	if ((u16)trigger_flags &
5078 	    MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID)
5079 		_base_get_scsi_diag_triggers(ioc);
5080 	/*
5081 	 * Retrieve mpi error diag trigger values from driver trigger pg4
5082 	 * if loginfo trigger bit enabled in TriggerFlags.
5083 	 */
5084 	if ((u16)trigger_flags &
5085 	    MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID)
5086 		_base_get_mpi_diag_triggers(ioc);
5087 }
5088 
5089 /**
5090  * _base_update_diag_trigger_pages - Update the driver trigger pages after
5091  *			online FW update, incase updated FW supports driver
5092  *			trigger pages.
5093  * @ioc : per adapter object
5094  *
5095  * Return nothing.
5096  */
5097 static void
5098 _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
5099 {
5100 
5101 	if (ioc->diag_trigger_master.MasterData)
5102 		mpt3sas_config_update_driver_trigger_pg1(ioc,
5103 		    &ioc->diag_trigger_master, 1);
5104 
5105 	if (ioc->diag_trigger_event.ValidEntries)
5106 		mpt3sas_config_update_driver_trigger_pg2(ioc,
5107 		    &ioc->diag_trigger_event, 1);
5108 
5109 	if (ioc->diag_trigger_scsi.ValidEntries)
5110 		mpt3sas_config_update_driver_trigger_pg3(ioc,
5111 		    &ioc->diag_trigger_scsi, 1);
5112 
5113 	if (ioc->diag_trigger_mpi.ValidEntries)
5114 		mpt3sas_config_update_driver_trigger_pg4(ioc,
5115 		    &ioc->diag_trigger_mpi, 1);
5116 }
5117 
5118 /**
5119  * _base_static_config_pages - static start of day config pages
5120  * @ioc: per adapter object
5121  */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;
	int tg_flags = 0;
	/* Default NVMe abort timeout; may be replaced from Manu Page 11. */
	ioc->nvme_abort_timeout = 30;
	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	/* Manufacturing page 10 is only meaningful for IR (RAID) firmware. */
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		/* Force EEDPTagMode field to 0x1. */
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}
	/*
	 * When custom NVMe task-management handling is not advertised,
	 * clamp the abort timeout from page 11 into [MIN, MAX].
	 */
	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
		ioc->tm_custom_handling = 1;
	else {
		ioc->tm_custom_handling = 0;
		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
		else if (ioc->manu_pg11.NVMeAbortTO >
					NVME_TASK_ABORT_MAX_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
		else
			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
	}
	/*
	 * Driver-FW time sync: page 11 encodes an interval count plus a
	 * unit bit (hours vs minutes); convert to seconds here.
	 */
	ioc->time_sync_interval =
	    ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
	if (ioc->time_sync_interval) {
		if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_HOUR;
		else
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_MIN;
		dinitprintk(ioc, ioc_info(ioc,
		    "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
		    ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
		    MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
	} else {
		if (ioc->is_gen35_ioc)
			ioc_warn(ioc,
			    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
	}
	/* Cache the remaining static pages. */
	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that its supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);

	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
	/* Aero controllers: program interrupt coalescing per perf_mode. */
	if (ioc->is_aero_ioc)
		_base_update_ioc_page1_inlinewith_perf_mode(ioc);
	if (ioc->is_gen35_ioc) {
		if (ioc->is_driver_loading)
			_base_get_diag_triggers(ioc);
		else {
			/*
			 * In case of online HBA FW update operation,
			 * check whether updated FW supports the driver trigger
			 * pages or not.
			 * - If previous FW has not supported driver trigger
			 *   pages and newer FW supports them then update these
			 *   pages with current diag trigger values.
			 * - If previous FW has supported driver trigger pages
			 *   and new FW doesn't support them then disable
			 *   support_trigger_pages flag.
			 */
			tg_flags = _base_check_for_trigger_pages_support(ioc);
			if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
				_base_update_diag_trigger_pages(ioc);
			else if (ioc->supports_trigger_pages &&
			    tg_flags == -EFAULT)
				ioc->supports_trigger_pages = 0;
		}
	}
}
5228 
5229 /**
5230  * mpt3sas_free_enclosure_list - release memory
5231  * @ioc: per adapter object
5232  *
5233  * Free memory allocated during enclosure add.
5234  */
5235 void
5236 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5237 {
5238 	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
5239 
5240 	/* Free enclosure list */
5241 	list_for_each_entry_safe(enclosure_dev,
5242 			enclosure_dev_next, &ioc->enclosure_list, list) {
5243 		list_del(&enclosure_dev->list);
5244 		kfree(enclosure_dev);
5245 	}
5246 }
5247 
5248 /**
5249  * _base_release_memory_pools - release memory
5250  * @ioc: per adapter object
5251  *
5252  * Free memory allocated from _base_allocate_memory_pools.
5253  */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	int j = 0;
	int dma_alloc_count = 0;
	struct chain_tracker *ct;
	/* One RDPQ chunk set per reply queue when RDPQ array is enabled. */
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->request) {
		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
		    ioc->request,  ioc->request_dma);
		dexitprintk(ioc,
			    ioc_info(ioc, "request_pool(0x%p): free\n",
				     ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "sense_pool(0x%p): free\n",
				     ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		dma_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "reply_pool(0x%p): free\n",
				     ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		dma_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
				     ioc->reply_free));
		ioc->reply_free = NULL;
	}

	if (ioc->reply_post) {
		/*
		 * RDPQs were allocated in chunks of RDPQ_MAX_INDEX_IN_ONE_CHUNK
		 * queues (see base_alloc_rdpq_dma_pool); only the first entry
		 * of each chunk owns a dma_pool allocation, so free only at
		 * those indices.
		 */
		dma_alloc_count = DIV_ROUND_UP(count,
				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
		for (i = 0; i < count; i++) {
			if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
			    && dma_alloc_count) {
				if (ioc->reply_post[i].reply_post_free) {
					dma_pool_free(
					    ioc->reply_post_free_dma_pool,
					    ioc->reply_post[i].reply_post_free,
					ioc->reply_post[i].reply_post_free_dma);
					dexitprintk(ioc, ioc_info(ioc,
					   "reply_post_free_pool(0x%p): free\n",
					   ioc->reply_post[i].reply_post_free));
					ioc->reply_post[i].reply_post_free =
									NULL;
				}
				--dma_alloc_count;
			}
		}
		dma_pool_destroy(ioc->reply_post_free_dma_pool);
		if (ioc->reply_post_free_array &&
			ioc->rdpq_array_enable) {
			dma_pool_free(ioc->reply_post_free_array_dma_pool,
			    ioc->reply_post_free_array,
			    ioc->reply_post_free_array_dma);
			ioc->reply_post_free_array = NULL;
		}
		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
		kfree(ioc->reply_post);
	}

	if (ioc->pcie_sgl_dma_pool) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			dma_pool_free(ioc->pcie_sgl_dma_pool,
					ioc->pcie_sg_lookup[i].pcie_sgl,
					ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
		}
		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
	}
	if (ioc->config_page) {
		dexitprintk(ioc,
			    ioc_info(ioc, "config_page(0x%p): free\n",
				     ioc->config_page));
		/* NOTE(review): config_page is not NULLed after free —
		 * confirm callers never re-enter with a stale pointer. */
		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	kfree(ioc->hpr_lookup);
	ioc->hpr_lookup = NULL;
	kfree(ioc->internal_lookup);
	ioc->internal_lookup = NULL;
	if (ioc->chain_lookup) {
		/*
		 * Chains below chains_per_prp_buffer live inside the PCIe SGL
		 * buffers (freed above), so only free the dma_pool-backed
		 * entries at j >= chains_per_prp_buffer.
		 */
		for (i = 0; i < ioc->scsiio_depth; i++) {
			for (j = ioc->chains_per_prp_buffer;
			    j < ioc->chains_needed_per_io; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				if (ct && ct->chain_buffer)
					dma_pool_free(ioc->chain_dma_pool,
						ct->chain_buffer,
						ct->chain_buffer_dma);
			}
			kfree(ioc->chain_lookup[i].chains_per_smid);
		}
		dma_pool_destroy(ioc->chain_dma_pool);
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}

	kfree(ioc->io_queue_num);
	ioc->io_queue_num = NULL;
}
5375 
5376 /**
5377  * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
5378  *	having same upper 32bits in their base memory address.
5379  * @reply_pool_start_address: Base address of a reply queue set
5380  * @pool_sz: Size of single Reply Descriptor Post Queues pool size
5381  *
 * Return: 1 if all reply queues in the set share the same upper 32 bits of
 * their base memory address, else 0.
5384  */
5385 
5386 static int
5387 mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
5388 {
5389 	long reply_pool_end_address;
5390 
5391 	reply_pool_end_address = reply_pool_start_address + pool_sz;
5392 
5393 	if (upper_32_bits(reply_pool_start_address) ==
5394 		upper_32_bits(reply_pool_end_address))
5395 		return 1;
5396 	else
5397 		return 0;
5398 }
5399 
5400 /**
5401  * _base_reduce_hba_queue_depth- Retry with reduced queue depth
5402  * @ioc: Adapter object
5403  *
5404  * Return: 0 for success, non-zero for failure.
5405  **/
5406 static inline int
5407 _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5408 {
5409 	int reduce_sz = 64;
5410 
5411 	if ((ioc->hba_queue_depth - reduce_sz) >
5412 	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5413 		ioc->hba_queue_depth -= reduce_sz;
5414 		return 0;
5415 	} else
5416 		return -ENOMEM;
5417 }
5418 
5419 /**
5420  * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
5421  *			for pcie sgl pools.
5422  * @ioc: Adapter object
5423  * @sz: DMA Pool size
5424  *
5425  * Return: 0 for success, non-zero for failure.
5426  */
5427 
static int
_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ct;

	/* One SGL buffer of sz bytes per outstanding SCSI IO, aligned to
	 * the controller page size. */
	ioc->pcie_sgl_dma_pool =
	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
	    ioc->page_size, 0);
	if (!ioc->pcie_sgl_dma_pool) {
		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
		return -ENOMEM;
	}

	/* How many chain segments fit inside one SGL buffer, capped at the
	 * number of chains an IO can ever need. */
	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
	ioc->chains_per_prp_buffer =
	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->pcie_sg_lookup[i].pcie_sgl =
		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
			return -EAGAIN;
		}

		/* Buffer must not straddle a 4GB boundary; on failure the
		 * caller retries with a 32-bit DMA mask (-EAGAIN). */
		if (!mpt3sas_check_same_4gb_region(
		    (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    (unsigned long long)
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->use_32bit_dma = true;
			return -EAGAIN;
		}

		/* Carve the first chains_per_prp_buffer chain segments for
		 * this smid out of the SGL buffer itself; the remainder are
		 * allocated later from the chain dma pool. */
		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
			ct = &ioc->chain_lookup[i].chains_per_smid[j];
			ct->chain_buffer =
			    ioc->pcie_sg_lookup[i].pcie_sgl +
			    (j * ioc->chain_segment_sz);
			ct->chain_buffer_dma =
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
			    (j * ioc->chain_segment_sz);
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "Number of chains can fit in a PRP page(%d)\n",
	    ioc->chains_per_prp_buffer));
	return 0;
}
5482 
5483 /**
5484  * _base_allocate_chain_dma_pool - Allocating DMA'able memory
5485  *			for chain dma pool.
5486  * @ioc: Adapter object
5487  * @sz: DMA Pool size
5488  *
5489  * Return: 0 for success, non-zero for failure.
5490  */
static int
_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ctr;

	/* NOTE: sz parameter is unused here; segments are sized by
	 * ioc->chain_segment_sz with 16-byte alignment. */
	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
	    ioc->chain_segment_sz, 16, 0);
	if (!ioc->chain_dma_pool)
		return -ENOMEM;

	for (i = 0; i < ioc->scsiio_depth; i++) {
		/* Indices below chains_per_prp_buffer were already carved
		 * out of the PCIe SGL buffers; allocate only the rest. */
		for (j = ioc->chains_per_prp_buffer;
		    j < ioc->chains_needed_per_io; j++) {
			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
			    GFP_KERNEL, &ctr->chain_buffer_dma);
			if (!ctr->chain_buffer)
				return -EAGAIN;
			/* Each segment must stay within one 4GB window;
			 * otherwise fall back to a 32-bit DMA mask. */
			if (!mpt3sas_check_same_4gb_region((long)
			    ctr->chain_buffer, ioc->chain_segment_sz)) {
				ioc_err(ioc,
				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
				    ctr->chain_buffer,
				    (unsigned long long)ctr->chain_buffer_dma);
				ioc->use_32bit_dma = true;
				return -EAGAIN;
			}
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
	    ioc->chain_segment_sz))/1024));
	return 0;
}
5528 
5529 /**
5530  * _base_allocate_sense_dma_pool - Allocating DMA'able memory
5531  *			for sense dma pool.
5532  * @ioc: Adapter object
5533  * @sz: DMA Pool size
5534  * Return: 0 for success, non-zero for failure.
5535  */
5536 static int
5537 _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5538 {
5539 	ioc->sense_dma_pool =
5540 	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
5541 	if (!ioc->sense_dma_pool)
5542 		return -ENOMEM;
5543 	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
5544 	    GFP_KERNEL, &ioc->sense_dma);
5545 	if (!ioc->sense)
5546 		return -EAGAIN;
5547 	if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
5548 		dinitprintk(ioc, pr_err(
5549 		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
5550 		    ioc->sense, (unsigned long long) ioc->sense_dma));
5551 		ioc->use_32bit_dma = true;
5552 		return -EAGAIN;
5553 	}
5554 	ioc_info(ioc,
5555 	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
5556 	    ioc->sense, (unsigned long long)ioc->sense_dma,
5557 	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
5558 	return 0;
5559 }
5560 
5561 /**
5562  * _base_allocate_reply_pool - Allocating DMA'able memory
5563  *			for reply pool.
5564  * @ioc: Adapter object
5565  * @sz: DMA Pool size
5566  * Return: 0 for success, non-zero for failure.
5567  */
5568 static int
5569 _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5570 {
5571 	/* reply pool, 4 byte align */
5572 	ioc->reply_dma_pool = dma_pool_create("reply pool",
5573 	    &ioc->pdev->dev, sz, 4, 0);
5574 	if (!ioc->reply_dma_pool)
5575 		return -ENOMEM;
5576 	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5577 	    &ioc->reply_dma);
5578 	if (!ioc->reply)
5579 		return -EAGAIN;
5580 	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
5581 		dinitprintk(ioc, pr_err(
5582 		    "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
5583 		    ioc->reply, (unsigned long long) ioc->reply_dma));
5584 		ioc->use_32bit_dma = true;
5585 		return -EAGAIN;
5586 	}
5587 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5588 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5589 	ioc_info(ioc,
5590 	    "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5591 	    ioc->reply, (unsigned long long)ioc->reply_dma,
5592 	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
5593 	return 0;
5594 }
5595 
5596 /**
5597  * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
5598  *			for reply free dma pool.
5599  * @ioc: Adapter object
5600  * @sz: DMA Pool size
5601  * Return: 0 for success, non-zero for failure.
5602  */
static int
_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* reply free queue, 16 byte align */
	ioc->reply_free_dma_pool = dma_pool_create(
	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool)
		return -ENOMEM;
	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
	    GFP_KERNEL, &ioc->reply_free_dma);
	if (!ioc->reply_free)
		return -EAGAIN;
	/* The queue must lie within one 4GB window; -EAGAIN makes the
	 * caller retry with a 32-bit DMA mask. */
	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
		dinitprintk(ioc,
		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	/* Start with an all-zero reply free queue. */
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free_dma (0x%llx)\n",
	    (unsigned long long)ioc->reply_free_dma));
	return 0;
}
5631 
5632 /**
5633  * _base_allocate_reply_post_free_array - Allocating DMA'able memory
5634  *			for reply post free array.
5635  * @ioc: Adapter object
5636  * @reply_post_free_array_sz: DMA Pool size
5637  * Return: 0 for success, non-zero for failure.
5638  */
5639 
5640 static int
5641 _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
5642 	u32 reply_post_free_array_sz)
5643 {
5644 	ioc->reply_post_free_array_dma_pool =
5645 	    dma_pool_create("reply_post_free_array pool",
5646 	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5647 	if (!ioc->reply_post_free_array_dma_pool)
5648 		return -ENOMEM;
5649 	ioc->reply_post_free_array =
5650 	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5651 	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
5652 	if (!ioc->reply_post_free_array)
5653 		return -EAGAIN;
5654 	if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
5655 	    reply_post_free_array_sz)) {
5656 		dinitprintk(ioc, pr_err(
5657 		    "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
5658 		    ioc->reply_free,
5659 		    (unsigned long long) ioc->reply_free_dma));
5660 		ioc->use_32bit_dma = true;
5661 		return -EAGAIN;
5662 	}
5663 	return 0;
5664 }
5665 /**
5666  * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
5667  *                     for reply queues.
5668  * @ioc: per adapter object
5669  * @sz: DMA Pool size
5670  * Return: 0 for success, non-zero for failure.
5671  */
static int
base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
{
	int i = 0;
	u32 dma_alloc_count = 0;
	/* Bytes occupied by one reply descriptor post queue. */
	int reply_post_free_sz = ioc->reply_post_queue_depth *
		sizeof(Mpi2DefaultReplyDescriptor_t);
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
			GFP_KERNEL);
	if (!ioc->reply_post)
		return -ENOMEM;
	/*
	 *  For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
	 *  VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should
	 *  be within 4GB boundary i.e reply queues in a set must have same
	 *  upper 32-bits in their memory address. so here driver is allocating
	 *  the DMA'able memory for reply queues according.
	 *  Driver uses limitation of
	 *  VENTURA_SERIES to manage INVADER_SERIES as well.
	 */
	dma_alloc_count = DIV_ROUND_UP(count,
				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
	ioc->reply_post_free_dma_pool =
		dma_pool_create("reply_post_free pool",
		    &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		/* Only the first queue of each chunk gets a real dma_pool
		 * allocation; the rest of the chunk is carved from it. */
		if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
			ioc->reply_post[i].reply_post_free =
			    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
				GFP_KERNEL,
				&ioc->reply_post[i].reply_post_free_dma);
			if (!ioc->reply_post[i].reply_post_free)
				return -ENOMEM;
			/*
			 * Each set of RDPQ pool must satisfy 4gb boundary
			 * restriction.
			 * 1) Check if allocated resources for RDPQ pool are in
			 *	the same 4GB range.
			 * 2) If #1 is true, continue with 64 bit DMA.
			 * 3) If #1 is false, return 1. which means free all the
			 * resources and set DMA mask to 32 and allocate.
			 */
			if (!mpt3sas_check_same_4gb_region(
				(long)ioc->reply_post[i].reply_post_free, sz)) {
				dinitprintk(ioc,
				    ioc_err(ioc, "bad Replypost free pool(0x%p)"
				    "reply_post_free_dma = (0x%llx)\n",
				    ioc->reply_post[i].reply_post_free,
				    (unsigned long long)
				    ioc->reply_post[i].reply_post_free_dma));
				return -EAGAIN;
			}
			dma_alloc_count--;

		} else {
			/* Non-chunk-head queue: point into the chunk head's
			 * buffer at the next reply_post_free_sz offset. */
			ioc->reply_post[i].reply_post_free =
			    (Mpi2ReplyDescriptorsUnion_t *)
			    ((long)ioc->reply_post[i-1].reply_post_free
			    + reply_post_free_sz);
			ioc->reply_post[i].reply_post_free_dma =
			    (dma_addr_t)
			    (ioc->reply_post[i-1].reply_post_free_dma +
			    reply_post_free_sz);
		}
	}
	return 0;
}
5743 
5744 /**
5745  * _base_allocate_memory_pools - allocate start of day memory pools
5746  * @ioc: per adapter object
5747  *
5748  * Return: 0 success, anything else error.
5749  */
5750 static int
5751 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5752 {
5753 	struct mpt3sas_facts *facts;
5754 	u16 max_sge_elements;
5755 	u16 chains_needed_per_io;
5756 	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
5757 	u32 retry_sz;
5758 	u32 rdpq_sz = 0, sense_sz = 0;
5759 	u16 max_request_credit, nvme_blocks_needed;
5760 	unsigned short sg_tablesize;
5761 	u16 sge_size;
5762 	int i;
5763 	int ret = 0, rc = 0;
5764 
5765 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5766 
5767 
5768 	retry_sz = 0;
5769 	facts = &ioc->facts;
5770 
5771 	/* command line tunables for max sgl entries */
5772 	if (max_sgl_entries != -1)
5773 		sg_tablesize = max_sgl_entries;
5774 	else {
5775 		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
5776 			sg_tablesize = MPT2SAS_SG_DEPTH;
5777 		else
5778 			sg_tablesize = MPT3SAS_SG_DEPTH;
5779 	}
5780 
5781 	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
5782 	if (reset_devices)
5783 		sg_tablesize = min_t(unsigned short, sg_tablesize,
5784 		   MPT_KDUMP_MIN_PHYS_SEGMENTS);
5785 
5786 	if (ioc->is_mcpu_endpoint)
5787 		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5788 	else {
5789 		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
5790 			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
5791 		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
5792 			sg_tablesize = min_t(unsigned short, sg_tablesize,
5793 					SG_MAX_SEGMENTS);
5794 			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
5795 				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
5796 		}
5797 		ioc->shost->sg_tablesize = sg_tablesize;
5798 	}
5799 
5800 	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
5801 		(facts->RequestCredit / 4));
5802 	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
5803 		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
5804 				INTERNAL_SCSIIO_CMDS_COUNT)) {
5805 			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
5806 				facts->RequestCredit);
5807 			return -ENOMEM;
5808 		}
5809 		ioc->internal_depth = 10;
5810 	}
5811 
5812 	ioc->hi_priority_depth = ioc->internal_depth - (5);
5813 	/* command line tunables  for max controller queue depth */
5814 	if (max_queue_depth != -1 && max_queue_depth != 0) {
5815 		max_request_credit = min_t(u16, max_queue_depth +
5816 			ioc->internal_depth, facts->RequestCredit);
5817 		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
5818 			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
5819 	} else if (reset_devices)
5820 		max_request_credit = min_t(u16, facts->RequestCredit,
5821 		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
5822 	else
5823 		max_request_credit = min_t(u16, facts->RequestCredit,
5824 		    MAX_HBA_QUEUE_DEPTH);
5825 
5826 	/* Firmware maintains additional facts->HighPriorityCredit number of
5827 	 * credits for HiPriprity Request messages, so hba queue depth will be
5828 	 * sum of max_request_credit and high priority queue depth.
5829 	 */
5830 	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
5831 
5832 	/* request frame size */
5833 	ioc->request_sz = facts->IOCRequestFrameSize * 4;
5834 
5835 	/* reply frame size */
5836 	ioc->reply_sz = facts->ReplyFrameSize * 4;
5837 
5838 	/* chain segment size */
5839 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5840 		if (facts->IOCMaxChainSegmentSize)
5841 			ioc->chain_segment_sz =
5842 					facts->IOCMaxChainSegmentSize *
5843 					MAX_CHAIN_ELEMT_SZ;
5844 		else
5845 		/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
5846 			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
5847 						    MAX_CHAIN_ELEMT_SZ;
5848 	} else
5849 		ioc->chain_segment_sz = ioc->request_sz;
5850 
5851 	/* calculate the max scatter element size */
5852 	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
5853 
5854  retry_allocation:
5855 	total_sz = 0;
5856 	/* calculate number of sg elements left over in the 1st frame */
5857 	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
5858 	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
5859 	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
5860 
5861 	/* now do the same for a chain buffer */
5862 	max_sge_elements = ioc->chain_segment_sz - sge_size;
5863 	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
5864 
5865 	/*
5866 	 *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
5867 	 */
5868 	chains_needed_per_io = ((ioc->shost->sg_tablesize -
5869 	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
5870 	    + 1;
5871 	if (chains_needed_per_io > facts->MaxChainDepth) {
5872 		chains_needed_per_io = facts->MaxChainDepth;
5873 		ioc->shost->sg_tablesize = min_t(u16,
5874 		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
5875 		* chains_needed_per_io), ioc->shost->sg_tablesize);
5876 	}
5877 	ioc->chains_needed_per_io = chains_needed_per_io;
5878 
5879 	/* reply free queue sizing - taking into account for 64 FW events */
5880 	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5881 
5882 	/* mCPU manage single counters for simplicity */
5883 	if (ioc->is_mcpu_endpoint)
5884 		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
5885 	else {
5886 		/* calculate reply descriptor post queue depth */
5887 		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
5888 			ioc->reply_free_queue_depth +  1;
5889 		/* align the reply post queue on the next 16 count boundary */
5890 		if (ioc->reply_post_queue_depth % 16)
5891 			ioc->reply_post_queue_depth += 16 -
5892 				(ioc->reply_post_queue_depth % 16);
5893 	}
5894 
5895 	if (ioc->reply_post_queue_depth >
5896 	    facts->MaxReplyDescriptorPostQueueDepth) {
5897 		ioc->reply_post_queue_depth =
5898 				facts->MaxReplyDescriptorPostQueueDepth -
5899 		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
5900 		ioc->hba_queue_depth =
5901 				((ioc->reply_post_queue_depth - 64) / 2) - 1;
5902 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
5903 	}
5904 
5905 	ioc_info(ioc,
5906 	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
5907 	    "sge_per_io(%d), chains_per_io(%d)\n",
5908 	    ioc->max_sges_in_main_message,
5909 	    ioc->max_sges_in_chain_message,
5910 	    ioc->shost->sg_tablesize,
5911 	    ioc->chains_needed_per_io);
5912 
5913 	/* reply post queue, 16 byte align */
5914 	reply_post_free_sz = ioc->reply_post_queue_depth *
5915 	    sizeof(Mpi2DefaultReplyDescriptor_t);
5916 	rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
5917 	if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
5918 	    || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
5919 		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
5920 	ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
5921 	if (ret == -EAGAIN) {
5922 		/*
5923 		 * Free allocated bad RDPQ memory pools.
5924 		 * Change dma coherent mask to 32 bit and reallocate RDPQ
5925 		 */
5926 		_base_release_memory_pools(ioc);
5927 		ioc->use_32bit_dma = true;
5928 		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
5929 			ioc_err(ioc,
5930 			    "32 DMA mask failed %s\n", pci_name(ioc->pdev));
5931 			return -ENODEV;
5932 		}
5933 		if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
5934 			return -ENOMEM;
5935 	} else if (ret == -ENOMEM)
5936 		return -ENOMEM;
5937 	total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
5938 	    DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
5939 	ioc->scsiio_depth = ioc->hba_queue_depth -
5940 	    ioc->hi_priority_depth - ioc->internal_depth;
5941 
5942 	/* set the scsi host can_queue depth
5943 	 * with some internal commands that could be outstanding
5944 	 */
5945 	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5946 	dinitprintk(ioc,
5947 		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5948 			     ioc->shost->can_queue));
5949 
5950 	/* contiguous pool for request and chains, 16 byte align, one extra "
5951 	 * "frame for smid=0
5952 	 */
5953 	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5954 	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5955 
5956 	/* hi-priority queue */
5957 	sz += (ioc->hi_priority_depth * ioc->request_sz);
5958 
5959 	/* internal queue */
5960 	sz += (ioc->internal_depth * ioc->request_sz);
5961 
5962 	ioc->request_dma_sz = sz;
5963 	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5964 			&ioc->request_dma, GFP_KERNEL);
5965 	if (!ioc->request) {
5966 		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5967 			ioc->hba_queue_depth, ioc->chains_needed_per_io,
5968 			ioc->request_sz, sz / 1024);
5969 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5970 			goto out;
5971 		retry_sz = 64;
5972 		ioc->hba_queue_depth -= retry_sz;
5973 		_base_release_memory_pools(ioc);
5974 		goto retry_allocation;
5975 	}
5976 
5977 	if (retry_sz)
5978 		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
5979 			ioc->hba_queue_depth, ioc->chains_needed_per_io,
5980 			ioc->request_sz, sz / 1024);
5981 
5982 	/* hi-priority queue */
5983 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5984 	    ioc->request_sz);
5985 	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5986 	    ioc->request_sz);
5987 
5988 	/* internal queue */
5989 	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5990 	    ioc->request_sz);
5991 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5992 	    ioc->request_sz);
5993 
5994 	ioc_info(ioc,
5995 	    "request pool(0x%p) - dma(0x%llx): "
5996 	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
5997 	    ioc->request, (unsigned long long) ioc->request_dma,
5998 	    ioc->hba_queue_depth, ioc->request_sz,
5999 	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);
6000 
6001 	total_sz += sz;
6002 
6003 	dinitprintk(ioc,
6004 		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
6005 			     ioc->request, ioc->scsiio_depth));
6006 
6007 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
6008 	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
6009 	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
6010 	if (!ioc->chain_lookup) {
6011 		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
6012 		goto out;
6013 	}
6014 
6015 	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
6016 	for (i = 0; i < ioc->scsiio_depth; i++) {
6017 		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6018 		if (!ioc->chain_lookup[i].chains_per_smid) {
6019 			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6020 			goto out;
6021 		}
6022 	}
6023 
6024 	/* initialize hi-priority queue smid's */
6025 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
6026 	    sizeof(struct request_tracker), GFP_KERNEL);
6027 	if (!ioc->hpr_lookup) {
6028 		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
6029 		goto out;
6030 	}
6031 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
6032 	dinitprintk(ioc,
6033 		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
6034 			     ioc->hi_priority,
6035 			     ioc->hi_priority_depth, ioc->hi_priority_smid));
6036 
6037 	/* initialize internal queue smid's */
6038 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
6039 	    sizeof(struct request_tracker), GFP_KERNEL);
6040 	if (!ioc->internal_lookup) {
6041 		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
6042 		goto out;
6043 	}
6044 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
6045 	dinitprintk(ioc,
6046 		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
6047 			     ioc->internal,
6048 			     ioc->internal_depth, ioc->internal_smid));
6049 
6050 	ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
6051 	    sizeof(u16), GFP_KERNEL);
6052 	if (!ioc->io_queue_num)
6053 		goto out;
6054 	/*
6055 	 * The number of NVMe page sized blocks needed is:
6056 	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
6057 	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
6058 	 * that is placed in the main message frame.  8 is the size of each PRP
6059 	 * entry or PRP list pointer entry.  8 is subtracted from page_size
6060 	 * because of the PRP list pointer entry at the end of a page, so this
6061 	 * is not counted as a PRP entry.  The 1 added page is a round up.
6062 	 *
6063 	 * To avoid allocation failures due to the amount of memory that could
6064 	 * be required for NVMe PRP's, only each set of NVMe blocks will be
6065 	 * contiguous, so a new set is allocated for each possible I/O.
6066 	 */
6067 
6068 	ioc->chains_per_prp_buffer = 0;
6069 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
6070 		nvme_blocks_needed =
6071 			(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
6072 		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
6073 		nvme_blocks_needed++;
6074 
6075 		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
6076 		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6077 		if (!ioc->pcie_sg_lookup) {
6078 			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
6079 			goto out;
6080 		}
6081 		sz = nvme_blocks_needed * ioc->page_size;
6082 		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
6083 		if (rc == -ENOMEM)
6084 			return -ENOMEM;
6085 		else if (rc == -EAGAIN)
6086 			goto try_32bit_dma;
6087 		total_sz += sz * ioc->scsiio_depth;
6088 	}
6089 
6090 	rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6091 	if (rc == -ENOMEM)
6092 		return -ENOMEM;
6093 	else if (rc == -EAGAIN)
6094 		goto try_32bit_dma;
6095 	total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
6096 		ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
6097 	dinitprintk(ioc,
6098 	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
6099 	    ioc->chain_depth, ioc->chain_segment_sz,
6100 	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
6101 	/* sense buffers, 4 byte align */
6102 	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
6103 	rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6104 	if (rc  == -ENOMEM)
6105 		return -ENOMEM;
6106 	else if (rc == -EAGAIN)
6107 		goto try_32bit_dma;
6108 	total_sz += sense_sz;
6109 	ioc_info(ioc,
6110 	    "sense pool(0x%p)- dma(0x%llx): depth(%d),"
6111 	    "element_size(%d), pool_size(%d kB)\n",
6112 	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
6113 	    SCSI_SENSE_BUFFERSIZE, sz / 1024);
6114 	/* reply pool, 4 byte align */
6115 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
6116 	rc = _base_allocate_reply_pool(ioc, sz);
6117 	if (rc == -ENOMEM)
6118 		return -ENOMEM;
6119 	else if (rc == -EAGAIN)
6120 		goto try_32bit_dma;
6121 	total_sz += sz;
6122 
6123 	/* reply free queue, 16 byte align */
6124 	sz = ioc->reply_free_queue_depth * 4;
6125 	rc = _base_allocate_reply_free_dma_pool(ioc, sz);
6126 	if (rc  == -ENOMEM)
6127 		return -ENOMEM;
6128 	else if (rc == -EAGAIN)
6129 		goto try_32bit_dma;
6130 	dinitprintk(ioc,
6131 		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
6132 			     (unsigned long long)ioc->reply_free_dma));
6133 	total_sz += sz;
6134 	if (ioc->rdpq_array_enable) {
6135 		reply_post_free_array_sz = ioc->reply_queue_count *
6136 		    sizeof(Mpi2IOCInitRDPQArrayEntry);
6137 		rc = _base_allocate_reply_post_free_array(ioc,
6138 		    reply_post_free_array_sz);
6139 		if (rc == -ENOMEM)
6140 			return -ENOMEM;
6141 		else if (rc == -EAGAIN)
6142 			goto try_32bit_dma;
6143 	}
6144 	ioc->config_page_sz = 512;
6145 	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
6146 			ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6147 	if (!ioc->config_page) {
6148 		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
6149 		goto out;
6150 	}
6151 
6152 	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
6153 	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
6154 	    ioc->config_page_sz);
6155 	total_sz += ioc->config_page_sz;
6156 
6157 	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
6158 		 total_sz / 1024);
6159 	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
6160 		 ioc->shost->can_queue, facts->RequestCredit);
6161 	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
6162 		 ioc->shost->sg_tablesize);
6163 	return 0;
6164 
6165 try_32bit_dma:
6166 	_base_release_memory_pools(ioc);
6167 	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
6168 		/* Change dma coherent mask to 32 bit and reallocate */
6169 		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6170 			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
6171 			    pci_name(ioc->pdev));
6172 			return -ENODEV;
6173 		}
6174 	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
6175 		return -ENOMEM;
6176 	goto retry_allocation;
6177 
6178  out:
6179 	return -ENOMEM;
6180 }
6181 
6182 /**
6183  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
6184  * @ioc: Pointer to MPT_ADAPTER structure
6185  * @cooked: Request raw or cooked IOC state
6186  *
6187  * Return: all IOC Doorbell register bits if cooked==0, else just the
6188  * Doorbell bits in MPI_IOC_STATE_MASK.
6189  */
6190 u32
6191 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6192 {
6193 	u32 s, sc;
6194 
6195 	s = ioc->base_readl(&ioc->chip->Doorbell);
6196 	sc = s & MPI2_IOC_STATE_MASK;
6197 	return cooked ? sc : s;
6198 }
6199 
6200 /**
6201  * _base_wait_on_iocstate - waiting on a particular ioc state
6202  * @ioc: ?
6203  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
6204  * @timeout: timeout in second
6205  *
6206  * Return: 0 for success, non-zero for failure.
6207  */
6208 static int
6209 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6210 {
6211 	u32 count, cntdn;
6212 	u32 current_state;
6213 
6214 	count = 0;
6215 	cntdn = 1000 * timeout;
6216 	do {
6217 		current_state = mpt3sas_base_get_iocstate(ioc, 1);
6218 		if (current_state == ioc_state)
6219 			return 0;
6220 		if (count && current_state == MPI2_IOC_STATE_FAULT)
6221 			break;
6222 		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
6223 			break;
6224 
6225 		usleep_range(1000, 1500);
6226 		count++;
6227 	} while (--cntdn);
6228 
6229 	return current_state;
6230 }
6231 
6232 /**
6233  * _base_dump_reg_set -	This function will print hexdump of register set.
6234  * @ioc: per adapter object
6235  *
6236  * Returns nothing.
6237  */
6238 static inline void
6239 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6240 {
6241 	unsigned int i, sz = 256;
6242 	u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6243 
6244 	ioc_info(ioc, "System Register set:\n");
6245 	for (i = 0; i < (sz / sizeof(u32)); i++)
6246 		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
6247 }
6248 
6249 /**
6250  * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
6251  * a write to the doorbell)
6252  * @ioc: per adapter object
6253  * @timeout: timeout in seconds
6254  *
6255  * Return: 0 for success, non-zero for failure.
6256  *
6257  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
6258  */
6259 
6260 static int
6261 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6262 {
6263 	u32 cntdn, count;
6264 	u32 int_status;
6265 
6266 	count = 0;
6267 	cntdn = 1000 * timeout;
6268 	do {
6269 		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6270 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6271 			dhsprintk(ioc,
6272 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6273 					   __func__, count, timeout));
6274 			return 0;
6275 		}
6276 
6277 		usleep_range(1000, 1500);
6278 		count++;
6279 	} while (--cntdn);
6280 
6281 	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6282 		__func__, count, int_status);
6283 	return -EFAULT;
6284 }
6285 
6286 static int
6287 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6288 {
6289 	u32 cntdn, count;
6290 	u32 int_status;
6291 
6292 	count = 0;
6293 	cntdn = 2000 * timeout;
6294 	do {
6295 		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6296 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6297 			dhsprintk(ioc,
6298 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6299 					   __func__, count, timeout));
6300 			return 0;
6301 		}
6302 
6303 		udelay(500);
6304 		count++;
6305 	} while (--cntdn);
6306 
6307 	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6308 		__func__, count, int_status);
6309 	return -EFAULT;
6310 
6311 }
6312 
6313 /**
6314  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
6315  * @ioc: per adapter object
6316  * @timeout: timeout in second
6317  *
6318  * Return: 0 for success, non-zero for failure.
6319  *
6320  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
6321  * doorbell.
6322  */
6323 static int
6324 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
6325 {
6326 	u32 cntdn, count;
6327 	u32 int_status;
6328 	u32 doorbell;
6329 
6330 	count = 0;
6331 	cntdn = 1000 * timeout;
6332 	do {
6333 		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6334 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
6335 			dhsprintk(ioc,
6336 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6337 					   __func__, count, timeout));
6338 			return 0;
6339 		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6340 			doorbell = ioc->base_readl(&ioc->chip->Doorbell);
6341 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
6342 			    MPI2_IOC_STATE_FAULT) {
6343 				mpt3sas_print_fault_code(ioc, doorbell);
6344 				return -EFAULT;
6345 			}
6346 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
6347 			    MPI2_IOC_STATE_COREDUMP) {
6348 				mpt3sas_print_coredump_info(ioc, doorbell);
6349 				return -EFAULT;
6350 			}
6351 		} else if (int_status == 0xFFFFFFFF)
6352 			goto out;
6353 
6354 		usleep_range(1000, 1500);
6355 		count++;
6356 	} while (--cntdn);
6357 
6358  out:
6359 	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6360 		__func__, count, int_status);
6361 	return -EFAULT;
6362 }
6363 
6364 /**
6365  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
6366  * @ioc: per adapter object
6367  * @timeout: timeout in second
6368  *
6369  * Return: 0 for success, non-zero for failure.
6370  */
6371 static int
6372 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6373 {
6374 	u32 cntdn, count;
6375 	u32 doorbell_reg;
6376 
6377 	count = 0;
6378 	cntdn = 1000 * timeout;
6379 	do {
6380 		doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
6381 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
6382 			dhsprintk(ioc,
6383 				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6384 					   __func__, count, timeout));
6385 			return 0;
6386 		}
6387 
6388 		usleep_range(1000, 1500);
6389 		count++;
6390 	} while (--cntdn);
6391 
6392 	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6393 		__func__, count, doorbell_reg);
6394 	return -EFAULT;
6395 }
6396 
6397 /**
6398  * _base_send_ioc_reset - send doorbell reset
6399  * @ioc: per adapter object
6400  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
6401  * @timeout: timeout in second
6402  *
6403  * Return: 0 for success, non-zero for failure.
6404  */
6405 static int
6406 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
6407 {
6408 	u32 ioc_state;
6409 	int r = 0;
6410 	unsigned long flags;
6411 
6412 	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
6413 		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
6414 		return -EFAULT;
6415 	}
6416 
6417 	if (!(ioc->facts.IOCCapabilities &
6418 	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
6419 		return -EFAULT;
6420 
6421 	ioc_info(ioc, "sending message unit reset !!\n");
6422 
6423 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
6424 	    &ioc->chip->Doorbell);
6425 	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6426 		r = -EFAULT;
6427 		goto out;
6428 	}
6429 
6430 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6431 	if (ioc_state) {
6432 		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6433 			__func__, ioc_state);
6434 		r = -EFAULT;
6435 		goto out;
6436 	}
6437  out:
6438 	if (r != 0) {
6439 		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6440 		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6441 		/*
6442 		 * Wait for IOC state CoreDump to clear only during
6443 		 * HBA initialization & release time.
6444 		 */
6445 		if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6446 		    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
6447 		    ioc->fault_reset_work_q == NULL)) {
6448 			spin_unlock_irqrestore(
6449 			    &ioc->ioc_reset_in_progress_lock, flags);
6450 			mpt3sas_print_coredump_info(ioc, ioc_state);
6451 			mpt3sas_base_wait_for_coredump_completion(ioc,
6452 			    __func__);
6453 			spin_lock_irqsave(
6454 			    &ioc->ioc_reset_in_progress_lock, flags);
6455 		}
6456 		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6457 	}
6458 	ioc_info(ioc, "message unit reset: %s\n",
6459 		 r == 0 ? "SUCCESS" : "FAILED");
6460 	return r;
6461 }
6462 
6463 /**
6464  * mpt3sas_wait_for_ioc - IOC's operational state is checked here.
6465  * @ioc: per adapter object
6466  * @timeout: timeout in seconds
6467  *
6468  * Return: Waits up to timeout seconds for the IOC to
6469  * become operational. Returns 0 if IOC is present
6470  * and operational; otherwise returns -EFAULT.
6471  */
6472 
6473 int
6474 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6475 {
6476 	int wait_state_count = 0;
6477 	u32 ioc_state;
6478 
6479 	do {
6480 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6481 		if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
6482 			break;
6483 		ssleep(1);
6484 		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
6485 				__func__, ++wait_state_count);
6486 	} while (--timeout);
6487 	if (!timeout) {
6488 		ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
6489 		return -EFAULT;
6490 	}
6491 	if (wait_state_count)
6492 		ioc_info(ioc, "ioc is operational\n");
6493 	return 0;
6494 }
6495 
6496 /**
6497  * _base_handshake_req_reply_wait - send request thru doorbell interface
6498  * @ioc: per adapter object
6499  * @request_bytes: request length
6500  * @request: pointer having request payload
6501  * @reply_bytes: reply length
6502  * @reply: pointer to reply payload
6503  * @timeout: timeout in second
6504  *
6505  * Return: 0 for success, non-zero for failure.
6506  */
6507 static int
6508 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
6509 	u32 *request, int reply_bytes, u16 *reply, int timeout)
6510 {
6511 	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
6512 	int i;
6513 	u8 failed;
6514 	__le32 *mfp;
6515 
6516 	/* make sure doorbell is not in use */
6517 	if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
6518 		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
6519 		return -EFAULT;
6520 	}
6521 
6522 	/* clear pending doorbell interrupts from previous state changes */
6523 	if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
6524 	    MPI2_HIS_IOC2SYS_DB_STATUS)
6525 		writel(0, &ioc->chip->HostInterruptStatus);
6526 
6527 	/* send message to ioc */
6528 	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
6529 	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
6530 	    &ioc->chip->Doorbell);
6531 
6532 	if ((_base_spin_on_doorbell_int(ioc, 5))) {
6533 		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6534 			__LINE__);
6535 		return -EFAULT;
6536 	}
6537 	writel(0, &ioc->chip->HostInterruptStatus);
6538 
6539 	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
6540 		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
6541 			__LINE__);
6542 		return -EFAULT;
6543 	}
6544 
6545 	/* send message 32-bits at a time */
6546 	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
6547 		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
6548 		if ((_base_wait_for_doorbell_ack(ioc, 5)))
6549 			failed = 1;
6550 	}
6551 
6552 	if (failed) {
6553 		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
6554 			__LINE__);
6555 		return -EFAULT;
6556 	}
6557 
6558 	/* now wait for the reply */
6559 	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
6560 		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6561 			__LINE__);
6562 		return -EFAULT;
6563 	}
6564 
6565 	/* read the first two 16-bits, it gives the total length of the reply */
6566 	reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
6567 	    & MPI2_DOORBELL_DATA_MASK);
6568 	writel(0, &ioc->chip->HostInterruptStatus);
6569 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
6570 		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6571 			__LINE__);
6572 		return -EFAULT;
6573 	}
6574 	reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
6575 	    & MPI2_DOORBELL_DATA_MASK);
6576 	writel(0, &ioc->chip->HostInterruptStatus);
6577 
6578 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
6579 		if ((_base_wait_for_doorbell_int(ioc, 5))) {
6580 			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
6581 				__LINE__);
6582 			return -EFAULT;
6583 		}
6584 		if (i >=  reply_bytes/2) /* overflow case */
6585 			ioc->base_readl(&ioc->chip->Doorbell);
6586 		else
6587 			reply[i] = le16_to_cpu(
6588 			    ioc->base_readl(&ioc->chip->Doorbell)
6589 			    & MPI2_DOORBELL_DATA_MASK);
6590 		writel(0, &ioc->chip->HostInterruptStatus);
6591 	}
6592 
6593 	_base_wait_for_doorbell_int(ioc, 5);
6594 	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
6595 		dhsprintk(ioc,
6596 			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
6597 				   __LINE__));
6598 	}
6599 	writel(0, &ioc->chip->HostInterruptStatus);
6600 
6601 	if (ioc->logging_level & MPT_DEBUG_INIT) {
6602 		mfp = (__le32 *)reply;
6603 		pr_info("\toffset:data\n");
6604 		for (i = 0; i < reply_bytes/4; i++)
6605 			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
6606 			    le32_to_cpu(mfp[i]));
6607 	}
6608 	return 0;
6609 }
6610 
6611 /**
6612  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
6613  * @ioc: per adapter object
6614  * @mpi_reply: the reply payload from FW
6615  * @mpi_request: the request payload sent to FW
6616  *
6617  * The SAS IO Unit Control Request message allows the host to perform low-level
6618  * operations, such as resets on the PHYs of the IO Unit, also allows the host
6619  * to obtain the IOC assigned device handles for a device if it has other
6620  * identifying information about the device, in addition allows the host to
6621  * remove IOC resources associated with the device.
6622  *
6623  * Return: 0 for success, non-zero for failure.
6624  */
6625 int
6626 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
6627 	Mpi2SasIoUnitControlReply_t *mpi_reply,
6628 	Mpi2SasIoUnitControlRequest_t *mpi_request)
6629 {
6630 	u16 smid;
6631 	u8 issue_reset = 0;
6632 	int rc;
6633 	void *request;
6634 
6635 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6636 
6637 	mutex_lock(&ioc->base_cmds.mutex);
6638 
6639 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6640 		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6641 		rc = -EAGAIN;
6642 		goto out;
6643 	}
6644 
6645 	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6646 	if (rc)
6647 		goto out;
6648 
6649 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6650 	if (!smid) {
6651 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6652 		rc = -EAGAIN;
6653 		goto out;
6654 	}
6655 
6656 	rc = 0;
6657 	ioc->base_cmds.status = MPT3_CMD_PENDING;
6658 	request = mpt3sas_base_get_msg_frame(ioc, smid);
6659 	ioc->base_cmds.smid = smid;
6660 	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
6661 	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6662 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
6663 		ioc->ioc_link_reset_in_progress = 1;
6664 	init_completion(&ioc->base_cmds.done);
6665 	ioc->put_smid_default(ioc, smid);
6666 	wait_for_completion_timeout(&ioc->base_cmds.done,
6667 	    msecs_to_jiffies(10000));
6668 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
6669 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
6670 	    ioc->ioc_link_reset_in_progress)
6671 		ioc->ioc_link_reset_in_progress = 0;
6672 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6673 		mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
6674 		    mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
6675 		    issue_reset);
6676 		goto issue_host_reset;
6677 	}
6678 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6679 		memcpy(mpi_reply, ioc->base_cmds.reply,
6680 		    sizeof(Mpi2SasIoUnitControlReply_t));
6681 	else
6682 		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
6683 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6684 	goto out;
6685 
6686  issue_host_reset:
6687 	if (issue_reset)
6688 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6689 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6690 	rc = -EFAULT;
6691  out:
6692 	mutex_unlock(&ioc->base_cmds.mutex);
6693 	return rc;
6694 }
6695 
6696 /**
6697  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
6698  * @ioc: per adapter object
6699  * @mpi_reply: the reply payload from FW
6700  * @mpi_request: the request payload sent to FW
6701  *
6702  * The SCSI Enclosure Processor request message causes the IOC to
6703  * communicate with SES devices to control LED status signals.
6704  *
6705  * Return: 0 for success, non-zero for failure.
6706  */
6707 int
6708 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
6709 	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
6710 {
6711 	u16 smid;
6712 	u8 issue_reset = 0;
6713 	int rc;
6714 	void *request;
6715 
6716 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6717 
6718 	mutex_lock(&ioc->base_cmds.mutex);
6719 
6720 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
6721 		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
6722 		rc = -EAGAIN;
6723 		goto out;
6724 	}
6725 
6726 	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
6727 	if (rc)
6728 		goto out;
6729 
6730 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6731 	if (!smid) {
6732 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6733 		rc = -EAGAIN;
6734 		goto out;
6735 	}
6736 
6737 	rc = 0;
6738 	ioc->base_cmds.status = MPT3_CMD_PENDING;
6739 	request = mpt3sas_base_get_msg_frame(ioc, smid);
6740 	ioc->base_cmds.smid = smid;
6741 	memset(request, 0, ioc->request_sz);
6742 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
6743 	init_completion(&ioc->base_cmds.done);
6744 	ioc->put_smid_default(ioc, smid);
6745 	wait_for_completion_timeout(&ioc->base_cmds.done,
6746 	    msecs_to_jiffies(10000));
6747 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6748 		mpt3sas_check_cmd_timeout(ioc,
6749 		    ioc->base_cmds.status, mpi_request,
6750 		    sizeof(Mpi2SepRequest_t)/4, issue_reset);
6751 		goto issue_host_reset;
6752 	}
6753 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
6754 		memcpy(mpi_reply, ioc->base_cmds.reply,
6755 		    sizeof(Mpi2SepReply_t));
6756 	else
6757 		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
6758 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6759 	goto out;
6760 
6761  issue_host_reset:
6762 	if (issue_reset)
6763 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6764 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6765 	rc = -EFAULT;
6766  out:
6767 	mutex_unlock(&ioc->base_cmds.mutex);
6768 	return rc;
6769 }
6770 
6771 /**
6772  * _base_get_port_facts - obtain port facts reply and save in ioc
6773  * @ioc: per adapter object
6774  * @port: ?
6775  *
6776  * Return: 0 for success, non-zero for failure.
6777  */
6778 static int
6779 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
6780 {
6781 	Mpi2PortFactsRequest_t mpi_request;
6782 	Mpi2PortFactsReply_t mpi_reply;
6783 	struct mpt3sas_port_facts *pfacts;
6784 	int mpi_reply_sz, mpi_request_sz, r;
6785 
6786 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6787 
6788 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
6789 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
6790 	memset(&mpi_request, 0, mpi_request_sz);
6791 	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
6792 	mpi_request.PortNumber = port;
6793 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6794 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6795 
6796 	if (r != 0) {
6797 		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6798 		return r;
6799 	}
6800 
6801 	pfacts = &ioc->pfacts[port];
6802 	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
6803 	pfacts->PortNumber = mpi_reply.PortNumber;
6804 	pfacts->VP_ID = mpi_reply.VP_ID;
6805 	pfacts->VF_ID = mpi_reply.VF_ID;
6806 	pfacts->MaxPostedCmdBuffers =
6807 	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
6808 
6809 	return 0;
6810 }
6811 
6812 /**
6813  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
6814  * @ioc: per adapter object
6815  * @timeout:
6816  *
6817  * Return: 0 for success, non-zero for failure.
6818  */
6819 static int
6820 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
6821 {
6822 	u32 ioc_state;
6823 	int rc;
6824 
6825 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6826 
6827 	if (ioc->pci_error_recovery) {
6828 		dfailprintk(ioc,
6829 			    ioc_info(ioc, "%s: host in pci error recovery\n",
6830 				     __func__));
6831 		return -EFAULT;
6832 	}
6833 
6834 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6835 	dhsprintk(ioc,
6836 		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6837 			   __func__, ioc_state));
6838 
6839 	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6840 	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6841 		return 0;
6842 
6843 	if (ioc_state & MPI2_DOORBELL_USED) {
6844 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6845 		goto issue_diag_reset;
6846 	}
6847 
6848 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6849 		mpt3sas_print_fault_code(ioc, ioc_state &
6850 		    MPI2_DOORBELL_DATA_MASK);
6851 		goto issue_diag_reset;
6852 	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6853 	    MPI2_IOC_STATE_COREDUMP) {
6854 		ioc_info(ioc,
6855 		    "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
6856 		    __func__, ioc_state);
6857 		return -EFAULT;
6858 	}
6859 
6860 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6861 	if (ioc_state) {
6862 		dfailprintk(ioc,
6863 			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6864 				     __func__, ioc_state));
6865 		return -EFAULT;
6866 	}
6867 
6868  issue_diag_reset:
6869 	rc = _base_diag_reset(ioc);
6870 	return rc;
6871 }
6872 
6873 /**
6874  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
6875  * @ioc: per adapter object
6876  *
6877  * Return: 0 for success, non-zero for failure.
6878  */
6879 static int
6880 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
6881 {
6882 	Mpi2IOCFactsRequest_t mpi_request;
6883 	Mpi2IOCFactsReply_t mpi_reply;
6884 	struct mpt3sas_facts *facts;
6885 	int mpi_reply_sz, mpi_request_sz, r;
6886 
6887 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6888 
6889 	r = _base_wait_for_iocstate(ioc, 10);
6890 	if (r) {
6891 		dfailprintk(ioc,
6892 			    ioc_info(ioc, "%s: failed getting to correct state\n",
6893 				     __func__));
6894 		return r;
6895 	}
6896 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
6897 	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
6898 	memset(&mpi_request, 0, mpi_request_sz);
6899 	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
6900 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6901 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6902 
6903 	if (r != 0) {
6904 		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6905 		return r;
6906 	}
6907 
6908 	facts = &ioc->facts;
6909 	memset(facts, 0, sizeof(struct mpt3sas_facts));
6910 	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
6911 	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
6912 	facts->VP_ID = mpi_reply.VP_ID;
6913 	facts->VF_ID = mpi_reply.VF_ID;
6914 	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
6915 	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
6916 	facts->WhoInit = mpi_reply.WhoInit;
6917 	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
6918 	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
6919 	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
6920 	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
6921 		ioc->combined_reply_queue = 0;
6922 	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
6923 	facts->MaxReplyDescriptorPostQueueDepth =
6924 	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
6925 	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
6926 	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
6927 	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
6928 		ioc->ir_firmware = 1;
6929 	if ((facts->IOCCapabilities &
6930 	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
6931 		ioc->rdpq_array_capable = 1;
6932 	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
6933 	    && ioc->is_aero_ioc)
6934 		ioc->atomic_desc_capable = 1;
6935 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
6936 	facts->IOCRequestFrameSize =
6937 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
6938 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6939 		facts->IOCMaxChainSegmentSize =
6940 			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
6941 	}
6942 	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
6943 	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
6944 	ioc->shost->max_id = -1;
6945 	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
6946 	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
6947 	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
6948 	facts->HighPriorityCredit =
6949 	    le16_to_cpu(mpi_reply.HighPriorityCredit);
6950 	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
6951 	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
6952 	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
6953 
6954 	/*
6955 	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
6956 	 */
6957 	ioc->page_size = 1 << facts->CurrentHostPageSize;
6958 	if (ioc->page_size == 1) {
6959 		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
6960 		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
6961 	}
6962 	dinitprintk(ioc,
6963 		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
6964 			     facts->CurrentHostPageSize));
6965 
6966 	dinitprintk(ioc,
6967 		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
6968 			     facts->RequestCredit, facts->MaxChainDepth));
6969 	dinitprintk(ioc,
6970 		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
6971 			     facts->IOCRequestFrameSize * 4,
6972 			     facts->ReplyFrameSize * 4));
6973 	return 0;
6974 }
6975 
6976 /**
6977  * _base_send_ioc_init - send ioc_init to firmware
6978  * @ioc: per adapter object
6979  *
6980  * Return: 0 for success, non-zero for failure.
6981  */
6982 static int
6983 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
6984 {
6985 	Mpi2IOCInitRequest_t mpi_request;
6986 	Mpi2IOCInitReply_t mpi_reply;
6987 	int i, r = 0;
6988 	ktime_t current_time;
6989 	u16 ioc_status;
6990 	u32 reply_post_free_array_sz = 0;
6991 
6992 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6993 
6994 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
6995 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
6996 	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
6997 	mpi_request.VF_ID = 0; /* TODO */
6998 	mpi_request.VP_ID = 0;
6999 	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
7000 	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
7001 	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
7002 
7003 	if (_base_is_controller_msix_enabled(ioc))
7004 		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
7005 	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
7006 	mpi_request.ReplyDescriptorPostQueueDepth =
7007 	    cpu_to_le16(ioc->reply_post_queue_depth);
7008 	mpi_request.ReplyFreeQueueDepth =
7009 	    cpu_to_le16(ioc->reply_free_queue_depth);
7010 
7011 	mpi_request.SenseBufferAddressHigh =
7012 	    cpu_to_le32((u64)ioc->sense_dma >> 32);
7013 	mpi_request.SystemReplyAddressHigh =
7014 	    cpu_to_le32((u64)ioc->reply_dma >> 32);
7015 	mpi_request.SystemRequestFrameBaseAddress =
7016 	    cpu_to_le64((u64)ioc->request_dma);
7017 	mpi_request.ReplyFreeQueueAddress =
7018 	    cpu_to_le64((u64)ioc->reply_free_dma);
7019 
7020 	if (ioc->rdpq_array_enable) {
7021 		reply_post_free_array_sz = ioc->reply_queue_count *
7022 		    sizeof(Mpi2IOCInitRDPQArrayEntry);
7023 		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
7024 		for (i = 0; i < ioc->reply_queue_count; i++)
7025 			ioc->reply_post_free_array[i].RDPQBaseAddress =
7026 			    cpu_to_le64(
7027 				(u64)ioc->reply_post[i].reply_post_free_dma);
7028 		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
7029 		mpi_request.ReplyDescriptorPostQueueAddress =
7030 		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
7031 	} else {
7032 		mpi_request.ReplyDescriptorPostQueueAddress =
7033 		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
7034 	}
7035 
7036 	/*
7037 	 * Set the flag to enable CoreDump state feature in IOC firmware.
7038 	 */
7039 	mpi_request.ConfigurationFlags |=
7040 	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
7041 
7042 	/* This time stamp specifies number of milliseconds
7043 	 * since epoch ~ midnight January 1, 1970.
7044 	 */
7045 	current_time = ktime_get_real();
7046 	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
7047 
7048 	if (ioc->logging_level & MPT_DEBUG_INIT) {
7049 		__le32 *mfp;
7050 		int i;
7051 
7052 		mfp = (__le32 *)&mpi_request;
7053 		ioc_info(ioc, "\toffset:data\n");
7054 		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
7055 			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7056 			    le32_to_cpu(mfp[i]));
7057 	}
7058 
7059 	r = _base_handshake_req_reply_wait(ioc,
7060 	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
7061 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7062 
7063 	if (r != 0) {
7064 		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7065 		return r;
7066 	}
7067 
7068 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7069 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
7070 	    mpi_reply.IOCLogInfo) {
7071 		ioc_err(ioc, "%s: failed\n", __func__);
7072 		r = -EIO;
7073 	}
7074 
7075 	/* Reset TimeSync Counter*/
7076 	ioc->timestamp_update_count = 0;
7077 	return r;
7078 }
7079 
7080 /**
7081  * mpt3sas_port_enable_done - command completion routine for port enable
7082  * @ioc: per adapter object
7083  * @smid: system request message index
7084  * @msix_index: MSIX table index supplied by the OS
7085  * @reply: reply message frame(lower 32bit addr)
7086  *
7087  * Return: 1 meaning mf should be freed from _base_interrupt
7088  *          0 means the mf is freed from this function.
7089  */
7090 u8
7091 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7092 	u32 reply)
7093 {
7094 	MPI2DefaultReply_t *mpi_reply;
7095 	u16 ioc_status;
7096 
7097 	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7098 		return 1;
7099 
7100 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7101 	if (!mpi_reply)
7102 		return 1;
7103 
7104 	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
7105 		return 1;
7106 
7107 	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7108 	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7109 	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7110 	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7111 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7112 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7113 		ioc->port_enable_failed = 1;
7114 
7115 	if (ioc->is_driver_loading) {
7116 		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
7117 			mpt3sas_port_enable_complete(ioc);
7118 			return 1;
7119 		} else {
7120 			ioc->start_scan_failed = ioc_status;
7121 			ioc->start_scan = 0;
7122 			return 1;
7123 		}
7124 	}
7125 	complete(&ioc->port_enable_cmds.done);
7126 	return 1;
7127 }
7128 
7129 /**
7130  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
7131  * @ioc: per adapter object
7132  *
7133  * Return: 0 for success, non-zero for failure.
7134  */
7135 static int
7136 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
7137 {
7138 	Mpi2PortEnableRequest_t *mpi_request;
7139 	Mpi2PortEnableReply_t *mpi_reply;
7140 	int r = 0;
7141 	u16 smid;
7142 	u16 ioc_status;
7143 
7144 	ioc_info(ioc, "sending port enable !!\n");
7145 
7146 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7147 		ioc_err(ioc, "%s: internal command already in use\n", __func__);
7148 		return -EAGAIN;
7149 	}
7150 
7151 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7152 	if (!smid) {
7153 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7154 		return -EAGAIN;
7155 	}
7156 
7157 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7158 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7159 	ioc->port_enable_cmds.smid = smid;
7160 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7161 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7162 
7163 	init_completion(&ioc->port_enable_cmds.done);
7164 	ioc->put_smid_default(ioc, smid);
7165 	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7166 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
7167 		ioc_err(ioc, "%s: timeout\n", __func__);
7168 		_debug_dump_mf(mpi_request,
7169 		    sizeof(Mpi2PortEnableRequest_t)/4);
7170 		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
7171 			r = -EFAULT;
7172 		else
7173 			r = -ETIME;
7174 		goto out;
7175 	}
7176 
7177 	mpi_reply = ioc->port_enable_cmds.reply;
7178 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7179 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7180 		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
7181 			__func__, ioc_status);
7182 		r = -EFAULT;
7183 		goto out;
7184 	}
7185 
7186  out:
7187 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7188 	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
7189 	return r;
7190 }
7191 
7192 /**
7193  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7194  * @ioc: per adapter object
7195  *
7196  * Return: 0 for success, non-zero for failure.
7197  */
7198 int
7199 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7200 {
7201 	Mpi2PortEnableRequest_t *mpi_request;
7202 	u16 smid;
7203 
7204 	ioc_info(ioc, "sending port enable !!\n");
7205 
7206 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7207 		ioc_err(ioc, "%s: internal command already in use\n", __func__);
7208 		return -EAGAIN;
7209 	}
7210 
7211 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7212 	if (!smid) {
7213 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7214 		return -EAGAIN;
7215 	}
7216 
7217 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7218 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7219 	ioc->port_enable_cmds.smid = smid;
7220 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7221 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7222 
7223 	ioc->put_smid_default(ioc, smid);
7224 	return 0;
7225 }
7226 
7227 /**
7228  * _base_determine_wait_on_discovery - desposition
7229  * @ioc: per adapter object
7230  *
7231  * Decide whether to wait on discovery to complete. Used to either
7232  * locate boot device, or report volumes ahead of physical devices.
7233  *
7234  * Return: 1 for wait, 0 for don't wait.
7235  */
7236 static int
7237 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7238 {
7239 	/* We wait for discovery to complete if IR firmware is loaded.
7240 	 * The sas topology events arrive before PD events, so we need time to
7241 	 * turn on the bit in ioc->pd_handles to indicate PD
7242 	 * Also, it maybe required to report Volumes ahead of physical
7243 	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
7244 	 */
7245 	if (ioc->ir_firmware)
7246 		return 1;
7247 
7248 	/* if no Bios, then we don't need to wait */
7249 	if (!ioc->bios_pg3.BiosVersion)
7250 		return 0;
7251 
7252 	/* Bios is present, then we drop down here.
7253 	 *
7254 	 * If there any entries in the Bios Page 2, then we wait
7255 	 * for discovery to complete.
7256 	 */
7257 
7258 	/* Current Boot Device */
7259 	if ((ioc->bios_pg2.CurrentBootDeviceForm &
7260 	    MPI2_BIOSPAGE2_FORM_MASK) ==
7261 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7262 	/* Request Boot Device */
7263 	   (ioc->bios_pg2.ReqBootDeviceForm &
7264 	    MPI2_BIOSPAGE2_FORM_MASK) ==
7265 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7266 	/* Alternate Request Boot Device */
7267 	   (ioc->bios_pg2.ReqAltBootDeviceForm &
7268 	    MPI2_BIOSPAGE2_FORM_MASK) ==
7269 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
7270 		return 0;
7271 
7272 	return 1;
7273 }
7274 
7275 /**
7276  * _base_unmask_events - turn on notification for this event
7277  * @ioc: per adapter object
7278  * @event: firmware event
7279  *
7280  * The mask is stored in ioc->event_masks.
7281  */
7282 static void
7283 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7284 {
7285 	u32 desired_event;
7286 
7287 	if (event >= 128)
7288 		return;
7289 
7290 	desired_event = (1 << (event % 32));
7291 
7292 	if (event < 32)
7293 		ioc->event_masks[0] &= ~desired_event;
7294 	else if (event < 64)
7295 		ioc->event_masks[1] &= ~desired_event;
7296 	else if (event < 96)
7297 		ioc->event_masks[2] &= ~desired_event;
7298 	else if (event < 128)
7299 		ioc->event_masks[3] &= ~desired_event;
7300 }
7301 
7302 /**
7303  * _base_event_notification - send event notification
7304  * @ioc: per adapter object
7305  *
7306  * Return: 0 for success, non-zero for failure.
7307  */
7308 static int
7309 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
7310 {
7311 	Mpi2EventNotificationRequest_t *mpi_request;
7312 	u16 smid;
7313 	int r = 0;
7314 	int i;
7315 
7316 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7317 
7318 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7319 		ioc_err(ioc, "%s: internal command already in use\n", __func__);
7320 		return -EAGAIN;
7321 	}
7322 
7323 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7324 	if (!smid) {
7325 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7326 		return -EAGAIN;
7327 	}
7328 	ioc->base_cmds.status = MPT3_CMD_PENDING;
7329 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7330 	ioc->base_cmds.smid = smid;
7331 	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
7332 	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
7333 	mpi_request->VF_ID = 0; /* TODO */
7334 	mpi_request->VP_ID = 0;
7335 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7336 		mpi_request->EventMasks[i] =
7337 		    cpu_to_le32(ioc->event_masks[i]);
7338 	init_completion(&ioc->base_cmds.done);
7339 	ioc->put_smid_default(ioc, smid);
7340 	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
7341 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7342 		ioc_err(ioc, "%s: timeout\n", __func__);
7343 		_debug_dump_mf(mpi_request,
7344 		    sizeof(Mpi2EventNotificationRequest_t)/4);
7345 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
7346 			r = -EFAULT;
7347 		else
7348 			r = -ETIME;
7349 	} else
7350 		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
7351 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7352 	return r;
7353 }
7354 
7355 /**
7356  * mpt3sas_base_validate_event_type - validating event types
7357  * @ioc: per adapter object
7358  * @event_type: firmware event
7359  *
7360  * This will turn on firmware event notification when application
7361  * ask for that event. We don't mask events that are already enabled.
7362  */
7363 void
7364 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7365 {
7366 	int i, j;
7367 	u32 event_mask, desired_event;
7368 	u8 send_update_to_fw;
7369 
7370 	for (i = 0, send_update_to_fw = 0; i <
7371 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
7372 		event_mask = ~event_type[i];
7373 		desired_event = 1;
7374 		for (j = 0; j < 32; j++) {
7375 			if (!(event_mask & desired_event) &&
7376 			    (ioc->event_masks[i] & desired_event)) {
7377 				ioc->event_masks[i] &= ~desired_event;
7378 				send_update_to_fw = 1;
7379 			}
7380 			desired_event = (desired_event << 1);
7381 		}
7382 	}
7383 
7384 	if (!send_update_to_fw)
7385 		return;
7386 
7387 	mutex_lock(&ioc->base_cmds.mutex);
7388 	_base_event_notification(ioc);
7389 	mutex_unlock(&ioc->base_cmds.mutex);
7390 }
7391 
7392 /**
7393  * _base_diag_reset - the "big hammer" start of day reset
7394  * @ioc: per adapter object
7395  *
7396  * Return: 0 for success, non-zero for failure.
7397  */
7398 static int
7399 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
7400 {
7401 	u32 host_diagnostic;
7402 	u32 ioc_state;
7403 	u32 count;
7404 	u32 hcb_size;
7405 
7406 	ioc_info(ioc, "sending diag reset !!\n");
7407 
7408 	pci_cfg_access_lock(ioc->pdev);
7409 
7410 	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
7411 
7412 	count = 0;
7413 	do {
7414 		/* Write magic sequence to WriteSequence register
7415 		 * Loop until in diagnostic mode
7416 		 */
7417 		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
7418 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7419 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
7420 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
7421 		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
7422 		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
7423 		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
7424 		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
7425 
7426 		/* wait 100 msec */
7427 		msleep(100);
7428 
7429 		if (count++ > 20) {
7430 			ioc_info(ioc,
7431 			    "Stop writing magic sequence after 20 retries\n");
7432 			_base_dump_reg_set(ioc);
7433 			goto out;
7434 		}
7435 
7436 		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7437 		drsprintk(ioc,
7438 			  ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
7439 				   count, host_diagnostic));
7440 
7441 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
7442 
7443 	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
7444 
7445 	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
7446 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
7447 	     &ioc->chip->HostDiagnostic);
7448 
7449 	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
7450 	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
7451 
7452 	/* Approximately 300 second max wait */
7453 	for (count = 0; count < (300000000 /
7454 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
7455 
7456 		host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7457 
7458 		if (host_diagnostic == 0xFFFFFFFF) {
7459 			ioc_info(ioc,
7460 			    "Invalid host diagnostic register value\n");
7461 			_base_dump_reg_set(ioc);
7462 			goto out;
7463 		}
7464 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
7465 			break;
7466 
7467 		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
7468 	}
7469 
7470 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
7471 
7472 		drsprintk(ioc,
7473 			  ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
7474 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
7475 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
7476 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
7477 
7478 		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
7479 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
7480 		    &ioc->chip->HCBSize);
7481 	}
7482 
7483 	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
7484 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
7485 	    &ioc->chip->HostDiagnostic);
7486 
7487 	drsprintk(ioc,
7488 		  ioc_info(ioc, "disable writes to the diagnostic register\n"));
7489 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7490 
7491 	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
7492 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
7493 	if (ioc_state) {
7494 		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7495 			__func__, ioc_state);
7496 		_base_dump_reg_set(ioc);
7497 		goto out;
7498 	}
7499 
7500 	pci_cfg_access_unlock(ioc->pdev);
7501 	ioc_info(ioc, "diag reset: SUCCESS\n");
7502 	return 0;
7503 
7504  out:
7505 	pci_cfg_access_unlock(ioc->pdev);
7506 	ioc_err(ioc, "diag reset: FAILED\n");
7507 	return -EFAULT;
7508 }
7509 
7510 /**
7511  * _base_make_ioc_ready - put controller in READY state
7512  * @ioc: per adapter object
7513  * @type: FORCE_BIG_HAMMER or SOFT_RESET
7514  *
7515  * Return: 0 for success, non-zero for failure.
7516  */
7517 static int
7518 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
7519 {
7520 	u32 ioc_state;
7521 	int rc;
7522 	int count;
7523 
7524 	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7525 
7526 	if (ioc->pci_error_recovery)
7527 		return 0;
7528 
7529 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7530 	dhsprintk(ioc,
7531 		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7532 			   __func__, ioc_state));
7533 
7534 	/* if in RESET state, it should move to READY state shortly */
7535 	count = 0;
7536 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
7537 		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
7538 		    MPI2_IOC_STATE_READY) {
7539 			if (count++ == 10) {
7540 				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7541 					__func__, ioc_state);
7542 				return -EFAULT;
7543 			}
7544 			ssleep(1);
7545 			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7546 		}
7547 	}
7548 
7549 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
7550 		return 0;
7551 
7552 	if (ioc_state & MPI2_DOORBELL_USED) {
7553 		ioc_info(ioc, "unexpected doorbell active!\n");
7554 		goto issue_diag_reset;
7555 	}
7556 
7557 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
7558 		mpt3sas_print_fault_code(ioc, ioc_state &
7559 		    MPI2_DOORBELL_DATA_MASK);
7560 		goto issue_diag_reset;
7561 	}
7562 
7563 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
7564 		/*
7565 		 * if host reset is invoked while watch dog thread is waiting
7566 		 * for IOC state to be changed to Fault state then driver has
7567 		 * to wait here for CoreDump state to clear otherwise reset
7568 		 * will be issued to the FW and FW move the IOC state to
7569 		 * reset state without copying the FW logs to coredump region.
7570 		 */
7571 		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
7572 			mpt3sas_print_coredump_info(ioc, ioc_state &
7573 			    MPI2_DOORBELL_DATA_MASK);
7574 			mpt3sas_base_wait_for_coredump_completion(ioc,
7575 			    __func__);
7576 		}
7577 		goto issue_diag_reset;
7578 	}
7579 
7580 	if (type == FORCE_BIG_HAMMER)
7581 		goto issue_diag_reset;
7582 
7583 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
7584 		if (!(_base_send_ioc_reset(ioc,
7585 		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
7586 			return 0;
7587 	}
7588 
7589  issue_diag_reset:
7590 	rc = _base_diag_reset(ioc);
7591 	return rc;
7592 }
7593 
7594 /**
7595  * _base_make_ioc_operational - put controller in OPERATIONAL state
7596  * @ioc: per adapter object
7597  *
7598  * Return: 0 for success, non-zero for failure.
7599  */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long	flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* clean the delayed volume target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* clean the delayed sc list */
	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	/* clean the delayed event ack list */
	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue: rebuild the free tracker list from scratch */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue: rebuild the free tracker list from scratch */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue: one DMA address per reply frame */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues (first-time load only) */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		/* mark every descriptor in the queue as unused */
		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		/* without MSI-X only the first reply queue exists */
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check IOC state for fault state & issue
		 * diag reset during host reset. This check is need
		 * only during driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		/* recover from a possible FAULT state, then retry once */
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
				MPI2_RPHI_MSIX_INDEX_SHIFT,
				&ioc->chip->ReplyPostHostIndex);

		/* without MSI-X only the first reply queue exists */
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	/* queues are primed; safe to let the controller interrupt us */
	mpt3sas_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	_base_static_config_pages(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (ioc->is_driver_loading) {

		/* warpdrive: honor the OEM request to hide SSDs */
		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	/* host reset path: re-enable the ports ourselves */
	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
7777 
7778 /**
7779  * mpt3sas_base_free_resources - free resources controller resources
7780  * @ioc: per adapter object
7781  */
7782 void
7783 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
7784 {
7785 	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7786 
7787 	/* synchronizing freeing resource with pci_access_mutex lock */
7788 	mutex_lock(&ioc->pci_access_mutex);
7789 	if (ioc->chip_phys && ioc->chip) {
7790 		mpt3sas_base_mask_interrupts(ioc);
7791 		ioc->shost_recovery = 1;
7792 		_base_make_ioc_ready(ioc, SOFT_RESET);
7793 		ioc->shost_recovery = 0;
7794 	}
7795 
7796 	mpt3sas_base_unmap_resources(ioc);
7797 	mutex_unlock(&ioc->pci_access_mutex);
7798 	return;
7799 }
7800 
7801 /**
7802  * mpt3sas_base_attach - attach controller instance
7803  * @ioc: per adapter object
7804  *
7805  * Return: 0 for success, non-zero for failure.
7806  */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/*
	 * setup cpu_msix_table: sized by the highest online CPU id + 1 so
	 * it can be indexed directly by CPU number.
	 */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* warpdrive keeps a per-queue reply post host index register table */
	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = false;
	ioc->dma_mask = 64;
	/* Aero controllers need the retrying register read variant */
	if (ioc->is_aero_ioc)
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl = &_base_readl;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	/* on failure, try to recover from a FAULT state and retry once */
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	/* select SGE construction helpers per MPI generation */
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
					&_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
	/* select request-descriptor posting helpers (atomic vs doorbell) */
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
				&_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
				&_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
				&_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}
	/*
	 * These function pointers are for requests that don't require
	 * IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* gather per-port facts; same recover-and-retry idiom as above */
	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = _base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	/* irq polling budget: module parameter or a quarter of queue depth */
	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list (1 bit per device handle) */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	/* all the reply buffer allocations above are checked together here */
	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* mask all events by default ... */
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* ... here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	/* gen3.5 controllers additionally report NVMe/PCIe device events */
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	/*
	 * Copy current copy of IOCFacts in prev_fw_facts
	 * and it will be used during online firmware upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->ioc_coredump_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	/* attach failed: unwind everything allocated above */
	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
8099 
8100 
8101 /**
8102  * mpt3sas_base_detach - remove controller instance
8103  * @ioc: per adapter object
8104  */
8105 void
8106 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
8107 {
8108 	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8109 
8110 	mpt3sas_base_stop_watchdog(ioc);
8111 	mpt3sas_base_free_resources(ioc);
8112 	_base_release_memory_pools(ioc);
8113 	mpt3sas_free_enclosure_list(ioc);
8114 	pci_set_drvdata(ioc->pdev, NULL);
8115 	kfree(ioc->cpu_msix_table);
8116 	if (ioc->is_warpdrive)
8117 		kfree(ioc->reply_post_host_index);
8118 	kfree(ioc->pd_handles);
8119 	kfree(ioc->blocking_handles);
8120 	kfree(ioc->device_remove_in_progress);
8121 	kfree(ioc->pend_os_device_add);
8122 	kfree(ioc->pfacts);
8123 	kfree(ioc->ctl_cmds.reply);
8124 	kfree(ioc->ctl_cmds.sense);
8125 	kfree(ioc->base_cmds.reply);
8126 	kfree(ioc->port_enable_cmds.reply);
8127 	kfree(ioc->tm_cmds.reply);
8128 	kfree(ioc->transport_cmds.reply);
8129 	kfree(ioc->scsih_cmds.reply);
8130 	kfree(ioc->config_cmds.reply);
8131 }
8132 
8133 /**
8134  * _base_pre_reset_handler - pre reset handler
8135  * @ioc: per adapter object
8136  */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* notify the scsih and ctl modules before the IOC is reset */
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
8143 
8144 /**
8145  * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
8146  * @ioc: per adapter object
8147  */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
	/*
	 * For each internal command type still pending, mark it as aborted
	 * by reset (MPT3_CMD_RESET), release its smid, and complete it so
	 * the waiter wakes up and sees the RESET status.
	 */
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			/*
			 * During driver load nobody is waiting on the
			 * completion; fail the async scan instead.
			 */
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
			ioc->port_enable_cmds.status =
				MPT3_CMD_NOT_USED;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		/* USHRT_MAX flags the config smid as invalid/aborted */
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
8184 
8185 /**
8186  * _base_clear_outstanding_commands - clear all outstanding commands
8187  * @ioc: per adapter object
8188  */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	/* fail back outstanding SCSI/TM, ioctl, then internal MPT requests */
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}
8195 
8196 /**
8197  * _base_reset_done_handler - reset done handler
8198  * @ioc: per adapter object
8199  */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* notify the scsih and ctl modules that the reset has completed */
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
8206 
8207 /**
8208  * mpt3sas_wait_for_commands_to_complete - reset controller
8209  * @ioc: Pointer to MPT_ADAPTER structure
8210  *
8211  * This function is waiting 10s for all pending commands to complete
8212  * prior to putting controller in reset.
8213  */
8214 void
8215 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
8216 {
8217 	u32 ioc_state;
8218 
8219 	ioc->pending_io_count = 0;
8220 
8221 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8222 	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
8223 		return;
8224 
8225 	/* pending command count */
8226 	ioc->pending_io_count = scsi_host_busy(ioc->shost);
8227 
8228 	if (!ioc->pending_io_count)
8229 		return;
8230 
8231 	/* wait for pending commands to complete */
8232 	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
8233 }
8234 
8235 /**
8236  * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
8237  *     attributes during online firmware upgrade and update the corresponding
8238  *     IOC variables accordingly.
8239  *
8240  * @ioc: Pointer to MPT_ADAPTER structure
8241  */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	/*
	 * If the new firmware supports more device handles, grow every
	 * handle bitmap to match.  Each krealloc preserves the existing
	 * bits; only the newly added tail is zeroed.  The corresponding
	 * ioc->*_sz field is updated only after its buffer(s) have been
	 * successfully grown, so a partial failure leaves a consistent
	 * (old-size) view behind.
	 */
	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		/* one bit per device handle, rounded up to whole bytes */
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		/* zero only the newly added tail of the bitmap */
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for "
			    "blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for "
			    "device_remove_in_progress of sz: %d\n "
			    , pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	/* remember the new facts for the next firmware upgrade comparison */
	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
8313 
8314 /**
8315  * mpt3sas_base_hard_reset_handler - reset controller
8316  * @ioc: Pointer to MPT_ADAPTER structure
8317  * @type: FORCE_BIG_HAMMER or SOFT_RESET
8318  *
8319  * Return: 0 for success, non-zero for failure.
8320  */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	/* PCI error recovery owns the device; report success and bail */
	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/*
	 * If a trace diag buffer is registered but not yet released,
	 * this reset should fire a master trigger afterwards; also note
	 * whether the IOC was in FAULT/COREDUMP so the right trigger
	 * reason (and doorbell data) is recorded.
	 */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP) {
			is_fault = 1;
			ioc->htb_rel.trigger_info_dwords[1] =
			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
		}
	}
	/* quiesce: notify modules, drain I/O, silence interrupts, reset */
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	/* grow handle bitmaps if the (possibly new) FW reports more handles */
	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware"
		    " image and it requires system reboot\n");
		goto out;
	}
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware."
		      "Please reboot the system and ensure that the correct"
		      " firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	/* fire the deferred master trigger only after a successful reset */
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}
8418