1 /*
2  * This is the Fusion MPT base driver providing common API layer interface
3  * for access to MPT (Message Passing Technology) firmware.
4  *
5  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6  * Copyright (C) 2012-2013  LSI Corporation
7  *  (mailto:DL-MPTFusionLinux@lsi.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/kernel.h>
46 #include <linux/module.h>
47 #include <linux/errno.h>
48 #include <linux/init.h>
49 #include <linux/slab.h>
50 #include <linux/types.h>
51 #include <linux/pci.h>
52 #include <linux/kdev_t.h>
53 #include <linux/blkdev.h>
54 #include <linux/delay.h>
55 #include <linux/interrupt.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/io.h>
58 #include <linux/time.h>
59 #include <linux/kthread.h>
60 #include <linux/aer.h>
61 
62 
63 #include "mpt3sas_base.h"
64 
/* Table of completion callbacks, indexed by the cb_idx handed out at
 * registration time; consulted from the interrupt path. */
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int max_msix_vectors = 8;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors - (default=8)");

/* Registered via module_param_call() below so that runtime changes are
 * propagated to every adapter on the ioc list. */
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");
94 
95 /**
96  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
97  *
98  */
99 static int
100 _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
101 {
102 	int ret = param_set_int(val, kp);
103 	struct MPT3SAS_ADAPTER *ioc;
104 
105 	if (ret)
106 		return ret;
107 
108 	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
109 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
110 		ioc->fwfault_debug = mpt3sas_fwfault_debug;
111 	return 0;
112 }
113 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
114 	param_get_int, &mpt3sas_fwfault_debug, 0644);
115 
116 /**
117  *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
118  * @arg: input argument, used to derive ioc
119  *
120  * Return 0 if controller is removed from pci subsystem.
121  * Return -1 for other case.
122  */
123 static int mpt3sas_remove_dead_ioc_func(void *arg)
124 {
125 	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
126 	struct pci_dev *pdev;
127 
128 	if ((ioc == NULL))
129 		return -1;
130 
131 	pdev = ioc->pdev;
132 	if ((pdev == NULL))
133 		return -1;
134 	pci_stop_and_remove_bus_device(pdev);
135 	return 0;
136 }
137 
138 /**
139  * _base_fault_reset_work - workq handling ioc fault conditions
140  * @work: input argument, used to derive ioc
141  * Context: sleep.
142  *
143  * Return nothing.
144  */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long	 flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery)
		goto rearm_timer; /* a reset is already in progress - just re-arm */
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	/*
	 * All state bits set: presumably the doorbell read returned all-ones
	 * because the PCI read failed, i.e. the IOC is unreachable (dead).
	 * NOTE(review): confirm against the MPI2_IOC_STATE_* encoding.
	 */
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host via a kthread, since the PCI removal
		 * path may sleep. */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "mpt3sas_dead_ioc_%d", ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		return; /* don't rearm timer */
	}

	/* IOC alive but not operational (e.g. FAULT): try a hard reset. */
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* reset failed and still not operational: give up polling */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	/* re-arm the poll; fault_reset_work_q is re-checked under the lock
	 * because mpt3sas_base_stop_watchdog() clears it under this lock */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
215 
216 /**
217  * mpt3sas_base_start_watchdog - start the fault_reset_work_q
218  * @ioc: per adapter object
219  * Context: sleep.
220  *
221  * Return nothing.
222  */
223 void
224 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
225 {
226 	unsigned long	 flags;
227 
228 	if (ioc->fault_reset_work_q)
229 		return;
230 
231 	/* initialize fault polling */
232 
233 	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
234 	snprintf(ioc->fault_reset_work_q_name,
235 	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
236 	ioc->fault_reset_work_q =
237 		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
238 	if (!ioc->fault_reset_work_q) {
239 		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
240 		    ioc->name, __func__, __LINE__);
241 			return;
242 	}
243 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
244 	if (ioc->fault_reset_work_q)
245 		queue_delayed_work(ioc->fault_reset_work_q,
246 		    &ioc->fault_reset_work,
247 		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
248 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
249 }
250 
251 /**
252  * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
253  * @ioc: per adapter object
254  * Context: sleep.
255  *
256  * Return nothing.
257  */
258 void
259 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
260 {
261 	unsigned long flags;
262 	struct workqueue_struct *wq;
263 
264 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
265 	wq = ioc->fault_reset_work_q;
266 	ioc->fault_reset_work_q = NULL;
267 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
268 	if (wq) {
269 		if (!cancel_delayed_work(&ioc->fault_reset_work))
270 			flush_workqueue(wq);
271 		destroy_workqueue(wq);
272 	}
273 }
274 
275 /**
276  * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
277  * @ioc: per adapter object
278  * @fault_code: fault code
279  *
280  * Return nothing.
281  */
282 void
283 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
284 {
285 	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
286 	    ioc->name, fault_code);
287 }
288 
289 /**
290  * mpt3sas_halt_firmware - halt's mpt controller firmware
291  * @ioc: per adapter object
292  *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
294  * to the doorbell register will halt controller firmware. With
295  * the purpose to stop both driver and firmware, the enduser can
296  * obtain a ring buffer from controller UART.
297  */
298 void
299 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
300 {
301 	u32 doorbell;
302 
303 	if (!ioc->fwfault_debug)
304 		return;
305 
306 	dump_stack();
307 
308 	doorbell = readl(&ioc->chip->Doorbell);
309 	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
310 		mpt3sas_base_fault_info(ioc , doorbell);
311 	else {
312 		writel(0xC0FFEE00, &ioc->chip->Doorbell);
313 		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
314 			ioc->name);
315 	}
316 
317 	if (ioc->fwfault_debug == 2)
318 		for (;;)
319 			;
320 	else
321 		panic("panic in %s\n", __func__);
322 }
323 
324 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING
325 /**
326  * _base_sas_ioc_info - verbose translation of the ioc status
327  * @ioc: per adapter object
328  * @mpi_reply: reply mf payload returned from firmware
329  * @request_hdr: request mf
330  *
331  * Return nothing.
332  */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	/* presumably a routine result while probing config pages; skip */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	/* Map the IOC status code to a human-readable string.  Codes with
	 * no mapping leave desc NULL and are not reported. */
	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* SCSI statuses are reported elsewhere; intentionally no desc */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* Pick the request frame size for the dump below based on the
	 * originating function. */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		ioc->name, desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4); /* dump in 32-bit words */
}
541 
542 /**
 * _base_display_event_data - verbose translation of firmware async events
544  * @ioc: per adapter object
545  * @mpi_reply: reply mf payload returned from firmware
546  *
547  * Return nothing.
548  */
549 static void
550 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
551 	Mpi2EventNotificationReply_t *mpi_reply)
552 {
553 	char *desc = NULL;
554 	u16 event;
555 
556 	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
557 		return;
558 
559 	event = le16_to_cpu(mpi_reply->Event);
560 
561 	switch (event) {
562 	case MPI2_EVENT_LOG_DATA:
563 		desc = "Log Data";
564 		break;
565 	case MPI2_EVENT_STATE_CHANGE:
566 		desc = "Status Change";
567 		break;
568 	case MPI2_EVENT_HARD_RESET_RECEIVED:
569 		desc = "Hard Reset Received";
570 		break;
571 	case MPI2_EVENT_EVENT_CHANGE:
572 		desc = "Event Change";
573 		break;
574 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
575 		desc = "Device Status Change";
576 		break;
577 	case MPI2_EVENT_IR_OPERATION_STATUS:
578 		desc = "IR Operation Status";
579 		break;
580 	case MPI2_EVENT_SAS_DISCOVERY:
581 	{
582 		Mpi2EventDataSasDiscovery_t *event_data =
583 		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
584 		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
585 		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
586 		    "start" : "stop");
587 		if (event_data->DiscoveryStatus)
588 			pr_info("discovery_status(0x%08x)",
589 			    le32_to_cpu(event_data->DiscoveryStatus));
590 			pr_info("\n");
591 		return;
592 	}
593 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
594 		desc = "SAS Broadcast Primitive";
595 		break;
596 	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
597 		desc = "SAS Init Device Status Change";
598 		break;
599 	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
600 		desc = "SAS Init Table Overflow";
601 		break;
602 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
603 		desc = "SAS Topology Change List";
604 		break;
605 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
606 		desc = "SAS Enclosure Device Status Change";
607 		break;
608 	case MPI2_EVENT_IR_VOLUME:
609 		desc = "IR Volume";
610 		break;
611 	case MPI2_EVENT_IR_PHYSICAL_DISK:
612 		desc = "IR Physical Disk";
613 		break;
614 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
615 		desc = "IR Configuration Change List";
616 		break;
617 	case MPI2_EVENT_LOG_ENTRY_ADDED:
618 		desc = "Log Entry Added";
619 		break;
620 	}
621 
622 	if (!desc)
623 		return;
624 
625 	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
626 }
627 #endif
628 
629 /**
630  * _base_sas_log_info - verbose translation of firmware log info
631  * @ioc: per adapter object
632  * @log_info: log info
633  *
634  * Return nothing.
635  */
636 static void
637 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
638 {
639 	union loginfo_type {
640 		u32	loginfo;
641 		struct {
642 			u32	subcode:16;
643 			u32	code:8;
644 			u32	originator:4;
645 			u32	bus_type:4;
646 		} dw;
647 	};
648 	union loginfo_type sas_loginfo;
649 	char *originator_str = NULL;
650 
651 	sas_loginfo.loginfo = log_info;
652 	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
653 		return;
654 
655 	/* each nexus loss loginfo */
656 	if (log_info == 0x31170000)
657 		return;
658 
659 	/* eat the loginfos associated with task aborts */
660 	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
661 	    0x31140000 || log_info == 0x31130000))
662 		return;
663 
664 	switch (sas_loginfo.dw.originator) {
665 	case 0:
666 		originator_str = "IOP";
667 		break;
668 	case 1:
669 		originator_str = "PL";
670 		break;
671 	case 2:
672 		originator_str = "IR";
673 		break;
674 	}
675 
676 	pr_warn(MPT3SAS_FMT
677 		"log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
678 		ioc->name, log_info,
679 	     originator_str, sas_loginfo.dw.code,
680 	     sas_loginfo.dw.subcode);
681 }
682 
683 /**
684  * _base_display_reply_info -
685  * @ioc: per adapter object
686  * @smid: system request message index
687  * @msix_index: MSIX table index supplied by the OS
688  * @reply: reply message frame(lower 32bit addr)
689  *
690  * Return nothing.
691  */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	/* verbose decode of non-success IOC status when reply debugging on */
	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc , mpi_reply,
		   mpt3sas_base_get_msg_frame(ioc, smid));
	}
#endif
	/* decode any firmware log info attached to the reply */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	/* feed status/loginfo into the diag-trigger mechanism */
	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}
724 
725 /**
726  * mpt3sas_base_done - base internal command completion routine
727  * @ioc: per adapter object
728  * @smid: system request message index
729  * @msix_index: MSIX table index supplied by the OS
730  * @reply: reply message frame(lower 32bit addr)
731  *
732  * Return 1 meaning mf should be freed from _base_interrupt
733  *        0 means the mf is freed from this function.
734  */
735 u8
736 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
737 	u32 reply)
738 {
739 	MPI2DefaultReply_t *mpi_reply;
740 
741 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
742 	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
743 		return 1;
744 
745 	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
746 		return 1;
747 
748 	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
749 	if (mpi_reply) {
750 		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
751 		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
752 	}
753 	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
754 
755 	complete(&ioc->base_cmds.done);
756 	return 1;
757 }
758 
759 /**
 * _base_async_event - main callback handler for firmware async events
761  * @ioc: per adapter object
762  * @msix_index: MSIX table index supplied by the OS
763  * @reply: reply message frame(lower 32bit addr)
764  *
765  * Return 1 meaning mf should be freed from _base_interrupt
766  *        0 means the mf is freed from this function.
767  */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
	_base_display_event_data(ioc, mpi_reply);
#endif
	/* only some events require an explicit EVENT_ACK back to firmware */
	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		/* no free request frame: the ACK is dropped, but the event
		 * is still dispatched to the callbacks below */
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		goto out;
	}

	/* build and send the EVENT_ACK request frame */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	mpt3sas_base_put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}
811 
812 /**
813  * _base_get_cb_idx - obtain the callback index
814  * @ioc: per adapter object
815  * @smid: system request message index
816  *
817  * Return callback index.
818  */
819 static u8
820 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
821 {
822 	int i;
823 	u8 cb_idx;
824 
825 	if (smid < ioc->hi_priority_smid) {
826 		i = smid - 1;
827 		cb_idx = ioc->scsi_lookup[i].cb_idx;
828 	} else if (smid < ioc->internal_smid) {
829 		i = smid - ioc->hi_priority_smid;
830 		cb_idx = ioc->hpr_lookup[i].cb_idx;
831 	} else if (smid <= ioc->hba_queue_depth) {
832 		i = smid - ioc->internal_smid;
833 		cb_idx = ioc->internal_lookup[i].cb_idx;
834 	} else
835 		cb_idx = 0xFF;
836 	return cb_idx;
837 }
838 
839 /**
840  * _base_mask_interrupts - disable interrupts
841  * @ioc: per adapter object
842  *
843  * Disabling ResetIRQ, Reply and Doorbell Interrupts
844  *
845  * Return nothing.
846  */
847 static void
848 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
849 {
850 	u32 him_register;
851 
852 	ioc->mask_interrupts = 1;
853 	him_register = readl(&ioc->chip->HostInterruptMask);
854 	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
855 	writel(him_register, &ioc->chip->HostInterruptMask);
856 	readl(&ioc->chip->HostInterruptMask);
857 }
858 
859 /**
860  * _base_unmask_interrupts - enable interrupts
861  * @ioc: per adapter object
862  *
863  * Enabling only Reply Interrupts
864  *
865  * Return nothing.
866  */
867 static void
868 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
869 {
870 	u32 him_register;
871 
872 	him_register = readl(&ioc->chip->HostInterruptMask);
873 	him_register &= ~MPI2_HIM_RIM;
874 	writel(him_register, &ioc->chip->HostInterruptMask);
875 	ioc->mask_interrupts = 0;
876 }
877 
/* 64-bit reply post descriptor, addressable either as one word or as its
 * two 32-bit halves (used to detect the all-ones "unused" pattern). */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
885 
886 /**
887  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
888  * @irq: irq number (not used)
889  * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
890  * @r: pt_regs pointer (not used)
891  *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
893  */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;	/* lower 32 bits of the reply frame DMA address */
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	/* allow only one pass through this queue at a time */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	/* an UNUSED descriptor means the queue is empty - not our interrupt */
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* all-ones halves: descriptor not written yet, stop here */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			/* success path: no reply frame, complete via callback */
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			/* sanity-check the reply frame address range */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				/* smid of zero marks an async event */
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				/* order the free-list write before telling
				 * the hardware about the new index */
				wmb();
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		/* mark the descriptor unused and advance with wrap-around */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	/* order descriptor updates before publishing the new host index */
	wmb();
	writel(reply_q->reply_post_host_index | (msix_index <<
	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}
1011 
1012 /**
1013  * _base_is_controller_msix_enabled - is controller support muli-reply queues
1014  * @ioc: per adapter object
1015  *
1016  */
1017 static inline int
1018 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1019 {
1020 	return (ioc->facts.IOCCapabilities &
1021 	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1022 }
1023 
1024 /**
1025  * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1026  * @ioc: per adapter object
1027  * Context: ISR conext
1028  *
1029  * Called when a Task Management request has completed. We want
1030  * to flush the other reply queues so all the outstanding IO has been
1031  * completed back to OS before we process the TM completetion.
1032  *
1033  * Return nothing.
1034  */
1035 void
1036 mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1037 {
1038 	struct adapter_reply_queue *reply_q;
1039 
1040 	/* If MSIX capability is turned off
1041 	 * then multi-queues are not enabled
1042 	 */
1043 	if (!_base_is_controller_msix_enabled(ioc))
1044 		return;
1045 
1046 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1047 		if (ioc->shost_recovery)
1048 			return;
1049 		/* TMs are on msix_index == 0 */
1050 		if (reply_q->msix_index == 0)
1051 			continue;
1052 		_base_interrupt(reply_q->vector, (void *)reply_q);
1053 	}
1054 }
1055 
1056 /**
1057  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
1058  * @cb_idx: callback index
1059  *
1060  * Return nothing.
1061  */
1062 void
1063 mpt3sas_base_release_callback_handler(u8 cb_idx)
1064 {
1065 	mpt_callbacks[cb_idx] = NULL;
1066 }
1067 
1068 /**
1069  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
1070  * @cb_func: callback function
1071  *
1072  * Returns cb_func.
1073  */
1074 u8
1075 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1076 {
1077 	u8 cb_idx;
1078 
1079 	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1080 		if (mpt_callbacks[cb_idx] == NULL)
1081 			break;
1082 
1083 	mpt_callbacks[cb_idx] = cb_func;
1084 	return cb_idx;
1085 }
1086 
1087 /**
1088  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1089  *
1090  * Return nothing.
1091  */
1092 void
1093 mpt3sas_base_initialize_callback_handler(void)
1094 {
1095 	u8 cb_idx;
1096 
1097 	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1098 		mpt3sas_base_release_callback_handler(cb_idx);
1099 }
1100 
1101 
1102 /**
1103  * _base_build_zero_len_sge - build zero length sg entry
1104  * @ioc: per adapter object
1105  * @paddr: virtual address for SGE
1106  *
1107  * Create a zero length scatter gather entry to insure the IOCs hardware has
1108  * something to use if the target device goes brain dead and tries
1109  * to send data even when none is asked for.
1110  *
1111  * Return nothing.
1112  */
1113 static void
1114 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1115 {
1116 	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1117 	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1118 	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1119 	    MPI2_SGE_FLAGS_SHIFT);
1120 	ioc->base_add_sg_single(paddr, flags_length, -1);
1121 }
1122 
1123 /**
1124  * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1125  * @paddr: virtual address for SGE
1126  * @flags_length: SGE flags and data transfer length
1127  * @dma_addr: Physical address
1128  *
1129  * Return nothing.
1130  */
1131 static void
1132 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1133 {
1134 	Mpi2SGESimple32_t *sgel = paddr;
1135 
1136 	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1137 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1138 	sgel->FlagsLength = cpu_to_le32(flags_length);
1139 	sgel->Address = cpu_to_le32(dma_addr);
1140 }
1141 
1142 
1143 /**
1144  * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1145  * @paddr: virtual address for SGE
1146  * @flags_length: SGE flags and data transfer length
1147  * @dma_addr: Physical address
1148  *
1149  * Return nothing.
1150  */
1151 static void
1152 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1153 {
1154 	Mpi2SGESimple64_t *sgel = paddr;
1155 
1156 	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1157 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1158 	sgel->FlagsLength = cpu_to_le32(flags_length);
1159 	sgel->Address = cpu_to_le64(dma_addr);
1160 }
1161 
1162 /**
1163  * _base_get_chain_buffer_tracker - obtain chain tracker
1164  * @ioc: per adapter object
1165  * @smid: smid associated to an IO request
1166  *
1167  * Returns chain tracker(from ioc->free_chain_list)
1168  */
1169 static struct chain_tracker *
1170 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1171 {
1172 	struct chain_tracker *chain_req;
1173 	unsigned long flags;
1174 
1175 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1176 	if (list_empty(&ioc->free_chain_list)) {
1177 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1178 		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1179 			"chain buffers not available\n", ioc->name));
1180 		return NULL;
1181 	}
1182 	chain_req = list_entry(ioc->free_chain_list.next,
1183 	    struct chain_tracker, tracker_list);
1184 	list_del_init(&chain_req->tracker_list);
1185 	list_add_tail(&chain_req->tracker_list,
1186 	    &ioc->scsi_lookup[smid - 1].chain_list);
1187 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1188 	return chain_req;
1189 }
1190 
1191 
1192 /**
1193  * _base_build_sg - build generic sg
1194  * @ioc: per adapter object
1195  * @psge: virtual address for SGE
1196  * @data_out_dma: physical address for WRITES
1197  * @data_out_sz: data xfer size for WRITES
1198  * @data_in_dma: physical address for READS
1199  * @data_in_sz: data xfer size for READS
1200  *
1201  * Return nothing.
1202  */
1203 static void
1204 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1205 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1206 	size_t data_in_sz)
1207 {
1208 	u32 sgl_flags;
1209 
1210 	if (!data_out_sz && !data_in_sz) {
1211 		_base_build_zero_len_sge(ioc, psge);
1212 		return;
1213 	}
1214 
1215 	if (data_out_sz && data_in_sz) {
1216 		/* WRITE sgel first */
1217 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1218 		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1219 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1220 		ioc->base_add_sg_single(psge, sgl_flags |
1221 		    data_out_sz, data_out_dma);
1222 
1223 		/* incr sgel */
1224 		psge += ioc->sge_size;
1225 
1226 		/* READ sgel last */
1227 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1228 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1229 		    MPI2_SGE_FLAGS_END_OF_LIST);
1230 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1231 		ioc->base_add_sg_single(psge, sgl_flags |
1232 		    data_in_sz, data_in_dma);
1233 	} else if (data_out_sz) /* WRITE */ {
1234 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1235 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1236 		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1237 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1238 		ioc->base_add_sg_single(psge, sgl_flags |
1239 		    data_out_sz, data_out_dma);
1240 	} else if (data_in_sz) /* READ */ {
1241 		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1242 		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1243 		    MPI2_SGE_FLAGS_END_OF_LIST);
1244 		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1245 		ioc->base_add_sg_single(psge, sgl_flags |
1246 		    data_in_sz, data_in_dma);
1247 	}
1248 }
1249 
1250 /* IEEE format sgls */
1251 
1252 /**
1253  * _base_add_sg_single_ieee - add sg element for IEEE format
1254  * @paddr: virtual address for SGE
1255  * @flags: SGE flags
1256  * @chain_offset: number of 128 byte elements from start of segment
1257  * @length: data transfer length
1258  * @dma_addr: Physical address
1259  *
1260  * Return nothing.
1261  */
1262 static void
1263 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1264 	dma_addr_t dma_addr)
1265 {
1266 	Mpi25IeeeSgeChain64_t *sgel = paddr;
1267 
1268 	sgel->Flags = flags;
1269 	sgel->NextChainOffset = chain_offset;
1270 	sgel->Length = cpu_to_le32(length);
1271 	sgel->Address = cpu_to_le64(dma_addr);
1272 }
1273 
1274 /**
1275  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1276  * @ioc: per adapter object
1277  * @paddr: virtual address for SGE
1278  *
1279  * Create a zero length scatter gather entry to insure the IOCs hardware has
1280  * something to use if the target device goes brain dead and tries
1281  * to send data even when none is asked for.
1282  *
1283  * Return nothing.
1284  */
1285 static void
1286 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1287 {
1288 	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1289 		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1290 		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
1291 	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1292 }
1293 
1294 /**
1295  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
1296  * @ioc: per adapter object
1297  * @scmd: scsi command
1298  * @smid: system request message index
1299  * Context: none.
1300  *
1301  * The main routine that builds scatter gather table from a given
1302  * scsi request sent via the .queuecommand main handler.
1303  *
1304  * Returns 0 success, anything else error
1305  */
1306 static int
1307 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
1308 	struct scsi_cmnd *scmd, u16 smid)
1309 {
1310 	Mpi2SCSIIORequest_t *mpi_request;
1311 	dma_addr_t chain_dma;
1312 	struct scatterlist *sg_scmd;
1313 	void *sg_local, *chain;
1314 	u32 chain_offset;
1315 	u32 chain_length;
1316 	int sges_left;
1317 	u32 sges_in_segment;
1318 	u8 simple_sgl_flags;
1319 	u8 simple_sgl_flags_last;
1320 	u8 chain_sgl_flags;
1321 	struct chain_tracker *chain_req;
1322 
1323 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1324 
1325 	/* init scatter gather flags */
1326 	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1327 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1328 	simple_sgl_flags_last = simple_sgl_flags |
1329 	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1330 	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1331 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1332 
1333 	sg_scmd = scsi_sglist(scmd);
1334 	sges_left = scsi_dma_map(scmd);
1335 	if (!sges_left) {
1336 		sdev_printk(KERN_ERR, scmd->device,
1337 			"pci_map_sg failed: request for %d bytes!\n",
1338 			scsi_bufflen(scmd));
1339 		return -ENOMEM;
1340 	}
1341 
1342 	sg_local = &mpi_request->SGL;
1343 	sges_in_segment = (ioc->request_sz -
1344 	    offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
1345 	if (sges_left <= sges_in_segment)
1346 		goto fill_in_last_segment;
1347 
1348 	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
1349 	    (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
1350 
1351 	/* fill in main message segment when there is a chain following */
1352 	while (sges_in_segment > 1) {
1353 		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1354 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1355 		sg_scmd = sg_next(sg_scmd);
1356 		sg_local += ioc->sge_size_ieee;
1357 		sges_left--;
1358 		sges_in_segment--;
1359 	}
1360 
1361 	/* initializing the pointers */
1362 	chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1363 	if (!chain_req)
1364 		return -1;
1365 	chain = chain_req->chain_buffer;
1366 	chain_dma = chain_req->chain_buffer_dma;
1367 	do {
1368 		sges_in_segment = (sges_left <=
1369 		    ioc->max_sges_in_chain_message) ? sges_left :
1370 		    ioc->max_sges_in_chain_message;
1371 		chain_offset = (sges_left == sges_in_segment) ?
1372 		    0 : sges_in_segment;
1373 		chain_length = sges_in_segment * ioc->sge_size_ieee;
1374 		if (chain_offset)
1375 			chain_length += ioc->sge_size_ieee;
1376 		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
1377 		    chain_offset, chain_length, chain_dma);
1378 
1379 		sg_local = chain;
1380 		if (!chain_offset)
1381 			goto fill_in_last_segment;
1382 
1383 		/* fill in chain segments */
1384 		while (sges_in_segment) {
1385 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1386 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1387 			sg_scmd = sg_next(sg_scmd);
1388 			sg_local += ioc->sge_size_ieee;
1389 			sges_left--;
1390 			sges_in_segment--;
1391 		}
1392 
1393 		chain_req = _base_get_chain_buffer_tracker(ioc, smid);
1394 		if (!chain_req)
1395 			return -1;
1396 		chain = chain_req->chain_buffer;
1397 		chain_dma = chain_req->chain_buffer_dma;
1398 	} while (1);
1399 
1400 
1401  fill_in_last_segment:
1402 
1403 	/* fill the last segment */
1404 	while (sges_left) {
1405 		if (sges_left == 1)
1406 			_base_add_sg_single_ieee(sg_local,
1407 			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
1408 			    sg_dma_address(sg_scmd));
1409 		else
1410 			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
1411 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1412 		sg_scmd = sg_next(sg_scmd);
1413 		sg_local += ioc->sge_size_ieee;
1414 		sges_left--;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 /**
1421  * _base_build_sg_ieee - build generic sg for IEEE format
1422  * @ioc: per adapter object
1423  * @psge: virtual address for SGE
1424  * @data_out_dma: physical address for WRITES
1425  * @data_out_sz: data xfer size for WRITES
1426  * @data_in_dma: physical address for READS
1427  * @data_in_sz: data xfer size for READS
1428  *
1429  * Return nothing.
1430  */
1431 static void
1432 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
1433 	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1434 	size_t data_in_sz)
1435 {
1436 	u8 sgl_flags;
1437 
1438 	if (!data_out_sz && !data_in_sz) {
1439 		_base_build_zero_len_sge_ieee(ioc, psge);
1440 		return;
1441 	}
1442 
1443 	if (data_out_sz && data_in_sz) {
1444 		/* WRITE sgel first */
1445 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1446 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1447 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1448 		    data_out_dma);
1449 
1450 		/* incr sgel */
1451 		psge += ioc->sge_size_ieee;
1452 
1453 		/* READ sgel last */
1454 		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
1455 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1456 		    data_in_dma);
1457 	} else if (data_out_sz) /* WRITE */ {
1458 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1459 		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1460 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1461 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
1462 		    data_out_dma);
1463 	} else if (data_in_sz) /* READ */ {
1464 		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1465 		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
1466 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
1467 		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
1468 		    data_in_dma);
1469 	}
1470 }
1471 
1472 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1473 
1474 /**
1475  * _base_config_dma_addressing - set dma addressing
1476  * @ioc: per adapter object
1477  * @pdev: PCI device struct
1478  *
1479  * Returns 0 for success, non-zero for failure.
1480  */
1481 static int
1482 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
1483 {
1484 	struct sysinfo s;
1485 	char *desc = NULL;
1486 
1487 	if (sizeof(dma_addr_t) > 4) {
1488 		const uint64_t required_mask =
1489 		    dma_get_required_mask(&pdev->dev);
1490 		if ((required_mask > DMA_BIT_MASK(32)) &&
1491 		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1492 		    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1493 			ioc->base_add_sg_single = &_base_add_sg_single_64;
1494 			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1495 			desc = "64";
1496 			goto out;
1497 		}
1498 	}
1499 
1500 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1501 	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1502 		ioc->base_add_sg_single = &_base_add_sg_single_32;
1503 		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1504 		desc = "32";
1505 	} else
1506 		return -ENODEV;
1507 
1508  out:
1509 	si_meminfo(&s);
1510 	pr_info(MPT3SAS_FMT
1511 		"%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1512 		ioc->name, desc, convert_to_kb(s.totalram));
1513 
1514 	return 0;
1515 }
1516 
1517 /**
1518  * _base_check_enable_msix - checks MSIX capabable.
1519  * @ioc: per adapter object
1520  *
1521  * Check to see if card is capable of MSIX, and set number
1522  * of available msix vectors
1523  */
1524 static int
1525 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1526 {
1527 	int base;
1528 	u16 message_control;
1529 
1530 	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1531 	if (!base) {
1532 		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
1533 			ioc->name));
1534 		return -EINVAL;
1535 	}
1536 
1537 	/* get msix vector count */
1538 
1539 	pci_read_config_word(ioc->pdev, base + 2, &message_control);
1540 	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1541 	if (ioc->msix_vector_count > 8)
1542 		ioc->msix_vector_count = 8;
1543 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
1544 		"msix is supported, vector_count(%d)\n",
1545 		ioc->name, ioc->msix_vector_count));
1546 	return 0;
1547 }
1548 
1549 /**
1550  * _base_free_irq - free irq
1551  * @ioc: per adapter object
1552  *
1553  * Freeing respective reply_queue from the list.
1554  */
1555 static void
1556 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
1557 {
1558 	struct adapter_reply_queue *reply_q, *next;
1559 
1560 	if (list_empty(&ioc->reply_queue_list))
1561 		return;
1562 
1563 	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1564 		list_del(&reply_q->list);
1565 		synchronize_irq(reply_q->vector);
1566 		free_irq(reply_q->vector, reply_q);
1567 		kfree(reply_q);
1568 	}
1569 }
1570 
1571 /**
1572  * _base_request_irq - request irq
1573  * @ioc: per adapter object
1574  * @index: msix index into vector table
1575  * @vector: irq vector
1576  *
1577  * Inserting respective reply_queue into the list.
1578  */
1579 static int
1580 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
1581 {
1582 	struct adapter_reply_queue *reply_q;
1583 	int r;
1584 
1585 	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1586 	if (!reply_q) {
1587 		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
1588 		    ioc->name, (int)sizeof(struct adapter_reply_queue));
1589 		return -ENOMEM;
1590 	}
1591 	reply_q->ioc = ioc;
1592 	reply_q->msix_index = index;
1593 	reply_q->vector = vector;
1594 	atomic_set(&reply_q->busy, 0);
1595 	if (ioc->msix_enable)
1596 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1597 		    MPT3SAS_DRIVER_NAME, ioc->id, index);
1598 	else
1599 		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1600 		    MPT3SAS_DRIVER_NAME, ioc->id);
1601 	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1602 	    reply_q);
1603 	if (r) {
1604 		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
1605 		    reply_q->name, vector);
1606 		kfree(reply_q);
1607 		return -EBUSY;
1608 	}
1609 
1610 	INIT_LIST_HEAD(&reply_q->list);
1611 	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1612 	return 0;
1613 }
1614 
1615 /**
1616  * _base_assign_reply_queues - assigning msix index for each cpu
1617  * @ioc: per adapter object
1618  *
1619  * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
1620  *
1621  * It would nice if we could call irq_set_affinity, however it is not
1622  * an exported symbol
1623  */
1624 static void
1625 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
1626 {
1627 	struct adapter_reply_queue *reply_q;
1628 	int cpu_id;
1629 	int cpu_grouping, loop, grouping, grouping_mod;
1630 	int reply_queue;
1631 
1632 	if (!_base_is_controller_msix_enabled(ioc))
1633 		return;
1634 
1635 	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1636 
1637 	/* NUMA Hardware bug workaround - drop to less reply queues */
1638 	if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
1639 		ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
1640 		reply_queue = 0;
1641 		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1642 			reply_q->msix_index = reply_queue;
1643 			if (++reply_queue == ioc->reply_queue_count)
1644 				reply_queue = 0;
1645 		}
1646 	}
1647 
1648 	/* when there are more cpus than available msix vectors,
1649 	 * then group cpus togeather on same irq
1650 	 */
1651 	if (ioc->cpu_count > ioc->msix_vector_count) {
1652 		grouping = ioc->cpu_count / ioc->msix_vector_count;
1653 		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1654 		if (grouping < 2 || (grouping == 2 && !grouping_mod))
1655 			cpu_grouping = 2;
1656 		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1657 			cpu_grouping = 4;
1658 		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1659 			cpu_grouping = 8;
1660 		else
1661 			cpu_grouping = 16;
1662 	} else
1663 		cpu_grouping = 0;
1664 
1665 	loop = 0;
1666 	reply_q = list_entry(ioc->reply_queue_list.next,
1667 	     struct adapter_reply_queue, list);
1668 	for_each_online_cpu(cpu_id) {
1669 		if (!cpu_grouping) {
1670 			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1671 			reply_q = list_entry(reply_q->list.next,
1672 			    struct adapter_reply_queue, list);
1673 		} else {
1674 			if (loop < cpu_grouping) {
1675 				ioc->cpu_msix_table[cpu_id] =
1676 				    reply_q->msix_index;
1677 				loop++;
1678 			} else {
1679 				reply_q = list_entry(reply_q->list.next,
1680 				    struct adapter_reply_queue, list);
1681 				ioc->cpu_msix_table[cpu_id] =
1682 				    reply_q->msix_index;
1683 				loop = 1;
1684 			}
1685 		}
1686 	}
1687 }
1688 
1689 /**
1690  * _base_disable_msix - disables msix
1691  * @ioc: per adapter object
1692  *
1693  */
1694 static void
1695 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1696 {
1697 	if (!ioc->msix_enable)
1698 		return;
1699 	pci_disable_msix(ioc->pdev);
1700 	ioc->msix_enable = 0;
1701 }
1702 
1703 /**
1704  * _base_enable_msix - enables msix, failback to io_apic
1705  * @ioc: per adapter object
1706  *
1707  */
1708 static int
1709 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1710 {
1711 	struct msix_entry *entries, *a;
1712 	int r;
1713 	int i;
1714 	u8 try_msix = 0;
1715 
1716 	if (msix_disable == -1 || msix_disable == 0)
1717 		try_msix = 1;
1718 
1719 	if (!try_msix)
1720 		goto try_ioapic;
1721 
1722 	if (_base_check_enable_msix(ioc) != 0)
1723 		goto try_ioapic;
1724 
1725 	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1726 	    ioc->msix_vector_count);
1727 
1728 	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
1729 	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1730 	  ioc->cpu_count, max_msix_vectors);
1731 
1732 	if (max_msix_vectors > 0) {
1733 		ioc->reply_queue_count = min_t(int, max_msix_vectors,
1734 			ioc->reply_queue_count);
1735 		ioc->msix_vector_count = ioc->reply_queue_count;
1736 	}
1737 
1738 	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1739 	    GFP_KERNEL);
1740 	if (!entries) {
1741 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
1742 			"kcalloc failed @ at %s:%d/%s() !!!\n",
1743 			ioc->name, __FILE__, __LINE__, __func__));
1744 		goto try_ioapic;
1745 	}
1746 
1747 	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1748 		a->entry = i;
1749 
1750 	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1751 	if (r) {
1752 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
1753 			"pci_enable_msix failed (r=%d) !!!\n",
1754 			ioc->name, r));
1755 		kfree(entries);
1756 		goto try_ioapic;
1757 	}
1758 
1759 	ioc->msix_enable = 1;
1760 	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1761 		r = _base_request_irq(ioc, i, a->vector);
1762 		if (r) {
1763 			_base_free_irq(ioc);
1764 			_base_disable_msix(ioc);
1765 			kfree(entries);
1766 			goto try_ioapic;
1767 		}
1768 	}
1769 
1770 	kfree(entries);
1771 	return 0;
1772 
1773 /* failback to io_apic interrupt routing */
1774  try_ioapic:
1775 
1776 	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1777 
1778 	return r;
1779 }
1780 
1781 /**
1782  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
1783  * @ioc: per adapter object
1784  *
1785  * Returns 0 for success, non-zero for failure.
1786  */
1787 int
1788 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1789 {
1790 	struct pci_dev *pdev = ioc->pdev;
1791 	u32 memap_sz;
1792 	u32 pio_sz;
1793 	int i, r = 0;
1794 	u64 pio_chip = 0;
1795 	u64 chip_phys = 0;
1796 	struct adapter_reply_queue *reply_q;
1797 
1798 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
1799 	    ioc->name, __func__));
1800 
1801 	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1802 	if (pci_enable_device_mem(pdev)) {
1803 		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
1804 			ioc->name);
1805 		ioc->bars = 0;
1806 		return -ENODEV;
1807 	}
1808 
1809 
1810 	if (pci_request_selected_regions(pdev, ioc->bars,
1811 	    MPT3SAS_DRIVER_NAME)) {
1812 		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
1813 			ioc->name);
1814 		ioc->bars = 0;
1815 		r = -ENODEV;
1816 		goto out_fail;
1817 	}
1818 
1819 /* AER (Advanced Error Reporting) hooks */
1820 	pci_enable_pcie_error_reporting(pdev);
1821 
1822 	pci_set_master(pdev);
1823 
1824 
1825 	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1826 		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
1827 		    ioc->name, pci_name(pdev));
1828 		r = -ENODEV;
1829 		goto out_fail;
1830 	}
1831 
1832 	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1833 		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1834 			if (pio_sz)
1835 				continue;
1836 			pio_chip = (u64)pci_resource_start(pdev, i);
1837 			pio_sz = pci_resource_len(pdev, i);
1838 		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
1839 			if (memap_sz)
1840 				continue;
1841 			ioc->chip_phys = pci_resource_start(pdev, i);
1842 			chip_phys = (u64)ioc->chip_phys;
1843 			memap_sz = pci_resource_len(pdev, i);
1844 			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1845 			if (ioc->chip == NULL) {
1846 				pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
1847 					ioc->name);
1848 				r = -EINVAL;
1849 				goto out_fail;
1850 			}
1851 		}
1852 	}
1853 
1854 	_base_mask_interrupts(ioc);
1855 	r = _base_enable_msix(ioc);
1856 	if (r)
1857 		goto out_fail;
1858 
1859 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1860 		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
1861 		    reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1862 		    "IO-APIC enabled"), reply_q->vector);
1863 
1864 	pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1865 	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1866 	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
1867 	    ioc->name, (unsigned long long)pio_chip, pio_sz);
1868 
1869 	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
1870 	pci_save_state(pdev);
1871 	return 0;
1872 
1873  out_fail:
1874 	if (ioc->chip_phys)
1875 		iounmap(ioc->chip);
1876 	ioc->chip_phys = 0;
1877 	pci_release_selected_regions(ioc->pdev, ioc->bars);
1878 	pci_disable_pcie_error_reporting(pdev);
1879 	pci_disable_device(pdev);
1880 	return r;
1881 }
1882 
1883 /**
1884  * mpt3sas_base_get_msg_frame - obtain request mf pointer
1885  * @ioc: per adapter object
1886  * @smid: system request message index(smid zero is invalid)
1887  *
1888  * Returns virt pointer to message frame.
1889  */
1890 void *
1891 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1892 {
1893 	return (void *)(ioc->request + (smid * ioc->request_sz));
1894 }
1895 
1896 /**
1897  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
1898  * @ioc: per adapter object
1899  * @smid: system request message index
1900  *
1901  * Returns virt pointer to sense buffer.
1902  */
1903 void *
1904 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1905 {
1906 	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1907 }
1908 
1909 /**
1910  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
1911  * @ioc: per adapter object
1912  * @smid: system request message index
1913  *
1914  * Returns phys pointer to the low 32bit address of the sense buffer.
1915  */
1916 __le32
1917 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1918 {
1919 	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
1920 	    SCSI_SENSE_BUFFERSIZE));
1921 }
1922 
1923 /**
1924  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
1925  * @ioc: per adapter object
1926  * @phys_addr: lower 32 physical addr of the reply
1927  *
1928  * Converts 32bit lower physical addr into a virt address.
1929  */
1930 void *
1931 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
1932 {
1933 	if (!phys_addr)
1934 		return NULL;
1935 	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1936 }
1937 
1938 /**
1939  * mpt3sas_base_get_smid - obtain a free smid from internal queue
1940  * @ioc: per adapter object
1941  * @cb_idx: callback index
1942  *
1943  * Returns smid (zero is invalid)
1944  */
1945 u16
1946 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
1947 {
1948 	unsigned long flags;
1949 	struct request_tracker *request;
1950 	u16 smid;
1951 
1952 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1953 	if (list_empty(&ioc->internal_free_list)) {
1954 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1955 		pr_err(MPT3SAS_FMT "%s: smid not available\n",
1956 		    ioc->name, __func__);
1957 		return 0;
1958 	}
1959 
1960 	request = list_entry(ioc->internal_free_list.next,
1961 	    struct request_tracker, tracker_list);
1962 	request->cb_idx = cb_idx;
1963 	smid = request->smid;
1964 	list_del(&request->tracker_list);
1965 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1966 	return smid;
1967 }
1968 
1969 /**
1970  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1971  * @ioc: per adapter object
1972  * @cb_idx: callback index
1973  * @scmd: pointer to scsi command object
1974  *
1975  * Returns smid (zero is invalid)
1976  */
1977 u16
1978 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
1979 	struct scsi_cmnd *scmd)
1980 {
1981 	unsigned long flags;
1982 	struct scsiio_tracker *request;
1983 	u16 smid;
1984 
1985 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1986 	if (list_empty(&ioc->free_list)) {
1987 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1988 		pr_err(MPT3SAS_FMT "%s: smid not available\n",
1989 		    ioc->name, __func__);
1990 		return 0;
1991 	}
1992 
1993 	request = list_entry(ioc->free_list.next,
1994 	    struct scsiio_tracker, tracker_list);
1995 	request->scmd = scmd;
1996 	request->cb_idx = cb_idx;
1997 	smid = request->smid;
1998 	list_del(&request->tracker_list);
1999 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2000 	return smid;
2001 }
2002 
2003 /**
2004  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2005  * @ioc: per adapter object
2006  * @cb_idx: callback index
2007  *
2008  * Returns smid (zero is invalid)
2009  */
2010 u16
2011 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2012 {
2013 	unsigned long flags;
2014 	struct request_tracker *request;
2015 	u16 smid;
2016 
2017 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2018 	if (list_empty(&ioc->hpr_free_list)) {
2019 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2020 		return 0;
2021 	}
2022 
2023 	request = list_entry(ioc->hpr_free_list.next,
2024 	    struct request_tracker, tracker_list);
2025 	request->cb_idx = cb_idx;
2026 	smid = request->smid;
2027 	list_del(&request->tracker_list);
2028 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2029 	return smid;
2030 }
2031 
2032 /**
2033  * mpt3sas_base_free_smid - put smid back on free_list
2034  * @ioc: per adapter object
2035  * @smid: system request message index
2036  *
2037  * Return nothing.
2038  */
2039 void
2040 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2041 {
2042 	unsigned long flags;
2043 	int i;
2044 	struct chain_tracker *chain_req, *next;
2045 
2046 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2047 	if (smid < ioc->hi_priority_smid) {
2048 		/* scsiio queue */
2049 		i = smid - 1;
2050 		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
2051 			list_for_each_entry_safe(chain_req, next,
2052 			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
2053 				list_del_init(&chain_req->tracker_list);
2054 				list_add(&chain_req->tracker_list,
2055 				    &ioc->free_chain_list);
2056 			}
2057 		}
2058 		ioc->scsi_lookup[i].cb_idx = 0xFF;
2059 		ioc->scsi_lookup[i].scmd = NULL;
2060 		list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
2061 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2062 
2063 		/*
2064 		 * See _wait_for_commands_to_complete() call with regards
2065 		 * to this code.
2066 		 */
2067 		if (ioc->shost_recovery && ioc->pending_io_count) {
2068 			if (ioc->pending_io_count == 1)
2069 				wake_up(&ioc->reset_wq);
2070 			ioc->pending_io_count--;
2071 		}
2072 		return;
2073 	} else if (smid < ioc->internal_smid) {
2074 		/* hi-priority */
2075 		i = smid - ioc->hi_priority_smid;
2076 		ioc->hpr_lookup[i].cb_idx = 0xFF;
2077 		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
2078 	} else if (smid <= ioc->hba_queue_depth) {
2079 		/* internal queue */
2080 		i = smid - ioc->internal_smid;
2081 		ioc->internal_lookup[i].cb_idx = 0xFF;
2082 		list_add(&ioc->internal_lookup[i].tracker_list,
2083 		    &ioc->internal_free_list);
2084 	}
2085 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2086 }
2087 
2088 /**
2089  * _base_writeq - 64 bit write to MMIO
2090  * @ioc: per adapter object
2091  * @b: data payload
2092  * @addr: address in MMIO space
2093  * @writeq_lock: spin lock
2094  *
2095  * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
2096  * care of 32 bit environment where its not quarenteed to send the entire word
2097  * in one transfer.
2098  */
2099 #if defined(writeq) && defined(CONFIG_64BIT)
2100 static inline void
2101 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2102 {
2103 	writeq(cpu_to_le64(b), addr);
2104 }
2105 #else
2106 static inline void
2107 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
2108 {
2109 	unsigned long flags;
2110 	__u64 data_out = cpu_to_le64(b);
2111 
2112 	spin_lock_irqsave(writeq_lock, flags);
2113 	writel((u32)(data_out), addr);
2114 	writel((u32)(data_out >> 32), (addr + 4));
2115 	spin_unlock_irqrestore(writeq_lock, flags);
2116 }
2117 #endif
2118 
2119 static inline u8
2120 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
2121 {
2122 	return ioc->cpu_msix_table[raw_smp_processor_id()];
2123 }
2124 
2125 /**
2126  * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
2127  * @ioc: per adapter object
2128  * @smid: system request message index
2129  * @handle: device handle
2130  *
2131  * Return nothing.
2132  */
2133 void
2134 mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
2135 {
2136 	Mpi2RequestDescriptorUnion_t descriptor;
2137 	u64 *request = (u64 *)&descriptor;
2138 
2139 
2140 	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2141 	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
2142 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2143 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2144 	descriptor.SCSIIO.LMID = 0;
2145 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2146 	    &ioc->scsi_lookup_lock);
2147 }
2148 
2149 /**
2150  * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
2151  * @ioc: per adapter object
2152  * @smid: system request message index
2153  * @handle: device handle
2154  *
2155  * Return nothing.
2156  */
2157 void
2158 mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2159 	u16 handle)
2160 {
2161 	Mpi2RequestDescriptorUnion_t descriptor;
2162 	u64 *request = (u64 *)&descriptor;
2163 
2164 	descriptor.SCSIIO.RequestFlags =
2165 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2166 	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2167 	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2168 	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2169 	descriptor.SCSIIO.LMID = 0;
2170 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2171 	    &ioc->scsi_lookup_lock);
2172 }
2173 
2174 /**
2175  * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
2176  * @ioc: per adapter object
2177  * @smid: system request message index
2178  *
2179  * Return nothing.
2180  */
2181 void
2182 mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2183 {
2184 	Mpi2RequestDescriptorUnion_t descriptor;
2185 	u64 *request = (u64 *)&descriptor;
2186 
2187 	descriptor.HighPriority.RequestFlags =
2188 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2189 	descriptor.HighPriority.MSIxIndex =  0;
2190 	descriptor.HighPriority.SMID = cpu_to_le16(smid);
2191 	descriptor.HighPriority.LMID = 0;
2192 	descriptor.HighPriority.Reserved1 = 0;
2193 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2194 	    &ioc->scsi_lookup_lock);
2195 }
2196 
2197 /**
2198  * mpt3sas_base_put_smid_default - Default, primarily used for config pages
2199  * @ioc: per adapter object
2200  * @smid: system request message index
2201  *
2202  * Return nothing.
2203  */
2204 void
2205 mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2206 {
2207 	Mpi2RequestDescriptorUnion_t descriptor;
2208 	u64 *request = (u64 *)&descriptor;
2209 
2210 	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2211 	descriptor.Default.MSIxIndex =  _base_get_msix_index(ioc);
2212 	descriptor.Default.SMID = cpu_to_le16(smid);
2213 	descriptor.Default.LMID = 0;
2214 	descriptor.Default.DescriptorTypeDependent = 0;
2215 	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2216 	    &ioc->scsi_lookup_lock);
2217 }
2218 
2219 
2220 
2221 /**
2222  * _base_display_ioc_capabilities - Disply IOC's capabilities.
2223  * @ioc: per adapter object
2224  *
2225  * Return nothing.
2226  */
2227 static void
2228 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2229 {
2230 	int i = 0;
2231 	char desc[16];
2232 	u32 iounit_pg1_flags;
2233 	u32 bios_version;
2234 
2235 	bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2236 	strncpy(desc, ioc->manu_pg0.ChipName, 16);
2237 	pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2238 	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2239 	    ioc->name, desc,
2240 	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2241 	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2242 	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2243 	   ioc->facts.FWVersion.Word & 0x000000FF,
2244 	   ioc->pdev->revision,
2245 	   (bios_version & 0xFF000000) >> 24,
2246 	   (bios_version & 0x00FF0000) >> 16,
2247 	   (bios_version & 0x0000FF00) >> 8,
2248 	    bios_version & 0x000000FF);
2249 
2250 	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2251 
2252 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2253 		pr_info("Initiator");
2254 		i++;
2255 	}
2256 
2257 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2258 		pr_info("%sTarget", i ? "," : "");
2259 		i++;
2260 	}
2261 
2262 	i = 0;
2263 	pr_info("), ");
2264 	pr_info("Capabilities=(");
2265 
2266 	if (ioc->facts.IOCCapabilities &
2267 		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2268 			pr_info("Raid");
2269 			i++;
2270 	}
2271 
2272 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2273 		pr_info("%sTLR", i ? "," : "");
2274 		i++;
2275 	}
2276 
2277 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2278 		pr_info("%sMulticast", i ? "," : "");
2279 		i++;
2280 	}
2281 
2282 	if (ioc->facts.IOCCapabilities &
2283 	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2284 		pr_info("%sBIDI Target", i ? "," : "");
2285 		i++;
2286 	}
2287 
2288 	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
2289 		pr_info("%sEEDP", i ? "," : "");
2290 		i++;
2291 	}
2292 
2293 	if (ioc->facts.IOCCapabilities &
2294 	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
2295 		pr_info("%sSnapshot Buffer", i ? "," : "");
2296 		i++;
2297 	}
2298 
2299 	if (ioc->facts.IOCCapabilities &
2300 	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
2301 		pr_info("%sDiag Trace Buffer", i ? "," : "");
2302 		i++;
2303 	}
2304 
2305 	if (ioc->facts.IOCCapabilities &
2306 	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
2307 		pr_info("%sDiag Extended Buffer", i ? "," : "");
2308 		i++;
2309 	}
2310 
2311 	if (ioc->facts.IOCCapabilities &
2312 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
2313 		pr_info("%sTask Set Full", i ? "," : "");
2314 		i++;
2315 	}
2316 
2317 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2318 	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
2319 		pr_info("%sNCQ", i ? "," : "");
2320 		i++;
2321 	}
2322 
2323 	pr_info(")\n");
2324 }
2325 
2326 /**
2327  * mpt3sas_base_update_missing_delay - change the missing delay timers
2328  * @ioc: per adapter object
2329  * @device_missing_delay: amount of time till device is reported missing
2330  * @io_missing_delay: interval IO is returned when there is a missing device
2331  *
2332  * Return nothing.
2333  *
2334  * Passed on the command line, this function will modify the device missing
2335  * delay, as well as the io missing delay. This should be called at driver
2336  * load time.
2337  */
2338 void
2339 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
2340 	u16 device_missing_delay, u8 io_missing_delay)
2341 {
2342 	u16 dmd, dmd_new, dmd_orignal;
2343 	u8 io_missing_delay_original;
2344 	u16 sz;
2345 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
2346 	Mpi2ConfigReply_t mpi_reply;
2347 	u8 num_phys = 0;
2348 	u16 ioc_status;
2349 
2350 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
2351 	if (!num_phys)
2352 		return;
2353 
2354 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
2355 	    sizeof(Mpi2SasIOUnit1PhyData_t));
2356 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
2357 	if (!sas_iounit_pg1) {
2358 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2359 		    ioc->name, __FILE__, __LINE__, __func__);
2360 		goto out;
2361 	}
2362 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
2363 	    sas_iounit_pg1, sz))) {
2364 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2365 		    ioc->name, __FILE__, __LINE__, __func__);
2366 		goto out;
2367 	}
2368 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2369 	    MPI2_IOCSTATUS_MASK;
2370 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2371 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
2372 		    ioc->name, __FILE__, __LINE__, __func__);
2373 		goto out;
2374 	}
2375 
2376 	/* device missing delay */
2377 	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
2378 	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2379 		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2380 	else
2381 		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2382 	dmd_orignal = dmd;
2383 	if (device_missing_delay > 0x7F) {
2384 		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
2385 		    device_missing_delay;
2386 		dmd = dmd / 16;
2387 		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
2388 	} else
2389 		dmd = device_missing_delay;
2390 	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
2391 
2392 	/* io missing delay */
2393 	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
2394 	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
2395 
2396 	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
2397 	    sz)) {
2398 		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
2399 			dmd_new = (dmd &
2400 			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
2401 		else
2402 			dmd_new =
2403 		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
2404 		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
2405 			ioc->name, dmd_orignal, dmd_new);
2406 		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
2407 			ioc->name, io_missing_delay_original,
2408 		    io_missing_delay);
2409 		ioc->device_missing_delay = dmd_new;
2410 		ioc->io_missing_delay = io_missing_delay;
2411 	}
2412 
2413 out:
2414 	kfree(sas_iounit_pg1);
2415 }
2416 /**
2417  * _base_static_config_pages - static start of day config pages
2418  * @ioc: per adapter object
2419  *
2420  * Return nothing.
2421  */
2422 static void
2423 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
2424 {
2425 	Mpi2ConfigReply_t mpi_reply;
2426 	u32 iounit_pg1_flags;
2427 
2428 	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
2429 	if (ioc->ir_firmware)
2430 		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
2431 		    &ioc->manu_pg10);
2432 
2433 	/*
2434 	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
2435 	 * flag unset in NVDATA.
2436 	 */
2437 	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
2438 	if (ioc->manu_pg11.EEDPTagMode == 0) {
2439 		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
2440 		    ioc->name);
2441 		ioc->manu_pg11.EEDPTagMode &= ~0x3;
2442 		ioc->manu_pg11.EEDPTagMode |= 0x1;
2443 		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
2444 		    &ioc->manu_pg11);
2445 	}
2446 
2447 	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
2448 	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
2449 	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
2450 	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
2451 	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2452 	_base_display_ioc_capabilities(ioc);
2453 
2454 	/*
2455 	 * Enable task_set_full handling in iounit_pg1 when the
2456 	 * facts capabilities indicate that its supported.
2457 	 */
2458 	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
2459 	if ((ioc->facts.IOCCapabilities &
2460 	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
2461 		iounit_pg1_flags &=
2462 		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2463 	else
2464 		iounit_pg1_flags |=
2465 		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
2466 	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
2467 	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2468 }
2469 
2470 /**
2471  * _base_release_memory_pools - release memory
2472  * @ioc: per adapter object
2473  *
2474  * Free memory allocated from _base_allocate_memory_pools.
2475  *
2476  * Return nothing.
2477  */
2478 static void
2479 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
2480 {
2481 	int i;
2482 
2483 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2484 	    __func__));
2485 
2486 	if (ioc->request) {
2487 		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
2488 		    ioc->request,  ioc->request_dma);
2489 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2490 			"request_pool(0x%p): free\n",
2491 			ioc->name, ioc->request));
2492 		ioc->request = NULL;
2493 	}
2494 
2495 	if (ioc->sense) {
2496 		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
2497 		if (ioc->sense_dma_pool)
2498 			pci_pool_destroy(ioc->sense_dma_pool);
2499 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2500 			"sense_pool(0x%p): free\n",
2501 			ioc->name, ioc->sense));
2502 		ioc->sense = NULL;
2503 	}
2504 
2505 	if (ioc->reply) {
2506 		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
2507 		if (ioc->reply_dma_pool)
2508 			pci_pool_destroy(ioc->reply_dma_pool);
2509 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2510 			"reply_pool(0x%p): free\n",
2511 			ioc->name, ioc->reply));
2512 		ioc->reply = NULL;
2513 	}
2514 
2515 	if (ioc->reply_free) {
2516 		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
2517 		    ioc->reply_free_dma);
2518 		if (ioc->reply_free_dma_pool)
2519 			pci_pool_destroy(ioc->reply_free_dma_pool);
2520 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2521 			"reply_free_pool(0x%p): free\n",
2522 			ioc->name, ioc->reply_free));
2523 		ioc->reply_free = NULL;
2524 	}
2525 
2526 	if (ioc->reply_post_free) {
2527 		pci_pool_free(ioc->reply_post_free_dma_pool,
2528 		    ioc->reply_post_free, ioc->reply_post_free_dma);
2529 		if (ioc->reply_post_free_dma_pool)
2530 			pci_pool_destroy(ioc->reply_post_free_dma_pool);
2531 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2532 		    "reply_post_free_pool(0x%p): free\n", ioc->name,
2533 		    ioc->reply_post_free));
2534 		ioc->reply_post_free = NULL;
2535 	}
2536 
2537 	if (ioc->config_page) {
2538 		dexitprintk(ioc, pr_info(MPT3SAS_FMT
2539 		    "config_page(0x%p): free\n", ioc->name,
2540 		    ioc->config_page));
2541 		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
2542 		    ioc->config_page, ioc->config_page_dma);
2543 	}
2544 
2545 	if (ioc->scsi_lookup) {
2546 		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
2547 		ioc->scsi_lookup = NULL;
2548 	}
2549 	kfree(ioc->hpr_lookup);
2550 	kfree(ioc->internal_lookup);
2551 	if (ioc->chain_lookup) {
2552 		for (i = 0; i < ioc->chain_depth; i++) {
2553 			if (ioc->chain_lookup[i].chain_buffer)
2554 				pci_pool_free(ioc->chain_dma_pool,
2555 				    ioc->chain_lookup[i].chain_buffer,
2556 				    ioc->chain_lookup[i].chain_buffer_dma);
2557 		}
2558 		if (ioc->chain_dma_pool)
2559 			pci_pool_destroy(ioc->chain_dma_pool);
2560 		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
2561 		ioc->chain_lookup = NULL;
2562 	}
2563 }
2564 
2565 /**
2566  * _base_allocate_memory_pools - allocate start of day memory pools
2567  * @ioc: per adapter object
2568  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2569  *
2570  * Returns 0 success, anything else error
2571  */
2572 static int
2573 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
2574 {
2575 	struct mpt3sas_facts *facts;
2576 	u16 max_sge_elements;
2577 	u16 chains_needed_per_io;
2578 	u32 sz, total_sz, reply_post_free_sz;
2579 	u32 retry_sz;
2580 	u16 max_request_credit;
2581 	unsigned short sg_tablesize;
2582 	u16 sge_size;
2583 	int i;
2584 
2585 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2586 	    __func__));
2587 
2588 
2589 	retry_sz = 0;
2590 	facts = &ioc->facts;
2591 
2592 	/* command line tunables for max sgl entries */
2593 	if (max_sgl_entries != -1)
2594 		sg_tablesize = max_sgl_entries;
2595 	else
2596 		sg_tablesize = MPT3SAS_SG_DEPTH;
2597 
2598 	if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
2599 		sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
2600 	else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
2601 		sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
2602 	ioc->shost->sg_tablesize = sg_tablesize;
2603 
2604 	ioc->hi_priority_depth = facts->HighPriorityCredit;
2605 	ioc->internal_depth = ioc->hi_priority_depth + (5);
2606 	/* command line tunables  for max controller queue depth */
2607 	if (max_queue_depth != -1 && max_queue_depth != 0) {
2608 		max_request_credit = min_t(u16, max_queue_depth +
2609 		    ioc->hi_priority_depth + ioc->internal_depth,
2610 		    facts->RequestCredit);
2611 		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
2612 			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
2613 	} else
2614 		max_request_credit = min_t(u16, facts->RequestCredit,
2615 		    MAX_HBA_QUEUE_DEPTH);
2616 
2617 	ioc->hba_queue_depth = max_request_credit;
2618 
2619 	/* request frame size */
2620 	ioc->request_sz = facts->IOCRequestFrameSize * 4;
2621 
2622 	/* reply frame size */
2623 	ioc->reply_sz = facts->ReplyFrameSize * 4;
2624 
2625 	/* calculate the max scatter element size */
2626 	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
2627 
2628  retry_allocation:
2629 	total_sz = 0;
2630 	/* calculate number of sg elements left over in the 1st frame */
2631 	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
2632 	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
2633 	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
2634 
2635 	/* now do the same for a chain buffer */
2636 	max_sge_elements = ioc->request_sz - sge_size;
2637 	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
2638 
2639 	/*
2640 	 *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
2641 	 */
2642 	chains_needed_per_io = ((ioc->shost->sg_tablesize -
2643 	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
2644 	    + 1;
2645 	if (chains_needed_per_io > facts->MaxChainDepth) {
2646 		chains_needed_per_io = facts->MaxChainDepth;
2647 		ioc->shost->sg_tablesize = min_t(u16,
2648 		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
2649 		* chains_needed_per_io), ioc->shost->sg_tablesize);
2650 	}
2651 	ioc->chains_needed_per_io = chains_needed_per_io;
2652 
2653 	/* reply free queue sizing - taking into account for 64 FW events */
2654 	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2655 
2656 	/* calculate reply descriptor post queue depth */
2657 	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
2658 				ioc->reply_free_queue_depth +  1 ;
2659 	/* align the reply post queue on the next 16 count boundary */
2660 	if (ioc->reply_post_queue_depth % 16)
2661 		ioc->reply_post_queue_depth += 16 -
2662 		(ioc->reply_post_queue_depth % 16);
2663 
2664 
2665 	if (ioc->reply_post_queue_depth >
2666 	    facts->MaxReplyDescriptorPostQueueDepth) {
2667 		ioc->reply_post_queue_depth =
2668 				facts->MaxReplyDescriptorPostQueueDepth -
2669 		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
2670 		ioc->hba_queue_depth =
2671 				((ioc->reply_post_queue_depth - 64) / 2) - 1;
2672 		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
2673 	}
2674 
2675 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
2676 	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2677 	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2678 	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2679 	    ioc->chains_needed_per_io));
2680 
2681 	ioc->scsiio_depth = ioc->hba_queue_depth -
2682 	    ioc->hi_priority_depth - ioc->internal_depth;
2683 
2684 	/* set the scsi host can_queue depth
2685 	 * with some internal commands that could be outstanding
2686 	 */
2687 	ioc->shost->can_queue = ioc->scsiio_depth;
2688 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2689 		"scsi host: can_queue depth (%d)\n",
2690 		ioc->name, ioc->shost->can_queue));
2691 
2692 
2693 	/* contiguous pool for request and chains, 16 byte align, one extra "
2694 	 * "frame for smid=0
2695 	 */
2696 	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2697 	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
2698 
2699 	/* hi-priority queue */
2700 	sz += (ioc->hi_priority_depth * ioc->request_sz);
2701 
2702 	/* internal queue */
2703 	sz += (ioc->internal_depth * ioc->request_sz);
2704 
2705 	ioc->request_dma_sz = sz;
2706 	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2707 	if (!ioc->request) {
2708 		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
2709 		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2710 		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2711 		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2712 		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
2713 			goto out;
2714 		retry_sz += 64;
2715 		ioc->hba_queue_depth = max_request_credit - retry_sz;
2716 		goto retry_allocation;
2717 	}
2718 
2719 	if (retry_sz)
2720 		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
2721 		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2722 		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
2723 		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2724 
2725 	/* hi-priority queue */
2726 	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2727 	    ioc->request_sz);
2728 	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2729 	    ioc->request_sz);
2730 
2731 	/* internal queue */
2732 	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2733 	    ioc->request_sz);
2734 	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2735 	    ioc->request_sz);
2736 
2737 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2738 		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
2739 		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2740 	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2741 
2742 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
2743 	    ioc->name, (unsigned long long) ioc->request_dma));
2744 	total_sz += sz;
2745 
2746 	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
2747 	ioc->scsi_lookup_pages = get_order(sz);
2748 	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
2749 	    GFP_KERNEL, ioc->scsi_lookup_pages);
2750 	if (!ioc->scsi_lookup) {
2751 		pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
2752 			ioc->name, (int)sz);
2753 		goto out;
2754 	}
2755 
2756 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
2757 		ioc->name, ioc->request, ioc->scsiio_depth));
2758 
2759 	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
2760 	sz = ioc->chain_depth * sizeof(struct chain_tracker);
2761 	ioc->chain_pages = get_order(sz);
2762 	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
2763 	    GFP_KERNEL, ioc->chain_pages);
2764 	if (!ioc->chain_lookup) {
2765 		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
2766 			ioc->name);
2767 		goto out;
2768 	}
2769 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
2770 	    ioc->request_sz, 16, 0);
2771 	if (!ioc->chain_dma_pool) {
2772 		pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
2773 			ioc->name);
2774 		goto out;
2775 	}
2776 	for (i = 0; i < ioc->chain_depth; i++) {
2777 		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
2778 		    ioc->chain_dma_pool , GFP_KERNEL,
2779 		    &ioc->chain_lookup[i].chain_buffer_dma);
2780 		if (!ioc->chain_lookup[i].chain_buffer) {
2781 			ioc->chain_depth = i;
2782 			goto chain_done;
2783 		}
2784 		total_sz += ioc->request_sz;
2785 	}
2786  chain_done:
2787 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2788 		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
2789 		ioc->name, ioc->chain_depth, ioc->request_sz,
2790 		((ioc->chain_depth *  ioc->request_sz))/1024));
2791 
2792 	/* initialize hi-priority queue smid's */
2793 	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2794 	    sizeof(struct request_tracker), GFP_KERNEL);
2795 	if (!ioc->hpr_lookup) {
2796 		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
2797 		    ioc->name);
2798 		goto out;
2799 	}
2800 	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2801 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2802 		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
2803 		ioc->name, ioc->hi_priority,
2804 	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2805 
2806 	/* initialize internal queue smid's */
2807 	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2808 	    sizeof(struct request_tracker), GFP_KERNEL);
2809 	if (!ioc->internal_lookup) {
2810 		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
2811 		    ioc->name);
2812 		goto out;
2813 	}
2814 	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2815 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2816 		"internal(0x%p): depth(%d), start smid(%d)\n",
2817 		ioc->name, ioc->internal,
2818 	    ioc->internal_depth, ioc->internal_smid));
2819 
2820 	/* sense buffers, 4 byte align */
2821 	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2822 	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2823 	    0);
2824 	if (!ioc->sense_dma_pool) {
2825 		pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
2826 		    ioc->name);
2827 		goto out;
2828 	}
2829 	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2830 	    &ioc->sense_dma);
2831 	if (!ioc->sense) {
2832 		pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
2833 		    ioc->name);
2834 		goto out;
2835 	}
2836 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2837 	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2838 	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2839 	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2840 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
2841 	    ioc->name, (unsigned long long)ioc->sense_dma));
2842 	total_sz += sz;
2843 
2844 	/* reply pool, 4 byte align */
2845 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2846 	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2847 	    0);
2848 	if (!ioc->reply_dma_pool) {
2849 		pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
2850 		    ioc->name);
2851 		goto out;
2852 	}
2853 	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2854 	    &ioc->reply_dma);
2855 	if (!ioc->reply) {
2856 		pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
2857 		    ioc->name);
2858 		goto out;
2859 	}
2860 	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
2861 	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
2862 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2863 		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
2864 		ioc->name, ioc->reply,
2865 	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2866 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
2867 	    ioc->name, (unsigned long long)ioc->reply_dma));
2868 	total_sz += sz;
2869 
2870 	/* reply free queue, 16 byte align */
2871 	sz = ioc->reply_free_queue_depth * 4;
2872 	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2873 	    ioc->pdev, sz, 16, 0);
2874 	if (!ioc->reply_free_dma_pool) {
2875 		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
2876 			ioc->name);
2877 		goto out;
2878 	}
2879 	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2880 	    &ioc->reply_free_dma);
2881 	if (!ioc->reply_free) {
2882 		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
2883 			ioc->name);
2884 		goto out;
2885 	}
2886 	memset(ioc->reply_free, 0, sz);
2887 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
2888 	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2889 	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2890 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2891 		"reply_free_dma (0x%llx)\n",
2892 		ioc->name, (unsigned long long)ioc->reply_free_dma));
2893 	total_sz += sz;
2894 
2895 	/* reply post queue, 16 byte align */
2896 	reply_post_free_sz = ioc->reply_post_queue_depth *
2897 	    sizeof(Mpi2DefaultReplyDescriptor_t);
2898 	if (_base_is_controller_msix_enabled(ioc))
2899 		sz = reply_post_free_sz * ioc->reply_queue_count;
2900 	else
2901 		sz = reply_post_free_sz;
2902 	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2903 	    ioc->pdev, sz, 16, 0);
2904 	if (!ioc->reply_post_free_dma_pool) {
2905 		pr_err(MPT3SAS_FMT
2906 			"reply_post_free pool: pci_pool_create failed\n",
2907 			ioc->name);
2908 		goto out;
2909 	}
2910 	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2911 	    GFP_KERNEL, &ioc->reply_post_free_dma);
2912 	if (!ioc->reply_post_free) {
2913 		pr_err(MPT3SAS_FMT
2914 			"reply_post_free pool: pci_pool_alloc failed\n",
2915 			ioc->name);
2916 		goto out;
2917 	}
2918 	memset(ioc->reply_post_free, 0, sz);
2919 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
2920 	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2921 	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2922 	    sz/1024));
2923 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2924 		"reply_post_free_dma = (0x%llx)\n",
2925 		ioc->name, (unsigned long long)
2926 	    ioc->reply_post_free_dma));
2927 	total_sz += sz;
2928 
2929 	ioc->config_page_sz = 512;
2930 	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2931 	    ioc->config_page_sz, &ioc->config_page_dma);
2932 	if (!ioc->config_page) {
2933 		pr_err(MPT3SAS_FMT
2934 			"config page: pci_pool_alloc failed\n",
2935 			ioc->name);
2936 		goto out;
2937 	}
2938 	dinitprintk(ioc, pr_info(MPT3SAS_FMT
2939 		"config page(0x%p): size(%d)\n",
2940 		ioc->name, ioc->config_page, ioc->config_page_sz));
2941 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
2942 		ioc->name, (unsigned long long)ioc->config_page_dma));
2943 	total_sz += ioc->config_page_sz;
2944 
2945 	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
2946 	    ioc->name, total_sz/1024);
2947 	pr_info(MPT3SAS_FMT
2948 		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
2949 	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2950 	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
2951 	    ioc->name, ioc->shost->sg_tablesize);
2952 	return 0;
2953 
2954  out:
2955 	return -ENOMEM;
2956 }
2957 
2958 /**
2959  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
2960  * @ioc: Pointer to MPT_ADAPTER structure
2961  * @cooked: Request raw or cooked IOC state
2962  *
2963  * Returns all IOC Doorbell register bits if cooked==0, else just the
2964  * Doorbell bits in MPI_IOC_STATE_MASK.
2965  */
2966 u32
2967 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
2968 {
2969 	u32 s, sc;
2970 
2971 	s = readl(&ioc->chip->Doorbell);
2972 	sc = s & MPI2_IOC_STATE_MASK;
2973 	return cooked ? sc : s;
2974 }
2975 
2976 /**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2979  * @timeout: timeout in second
2980  * @sleep_flag: CAN_SLEEP or NO_SLEEP
2981  *
2982  * Returns 0 for success, non-zero for failure.
2983  */
2984 static int
2985 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2986 	int sleep_flag)
2987 {
2988 	u32 count, cntdn;
2989 	u32 current_state;
2990 
2991 	count = 0;
2992 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2993 	do {
2994 		current_state = mpt3sas_base_get_iocstate(ioc, 1);
2995 		if (current_state == ioc_state)
2996 			return 0;
2997 		if (count && current_state == MPI2_IOC_STATE_FAULT)
2998 			break;
2999 		if (sleep_flag == CAN_SLEEP)
3000 			usleep_range(1000, 1500);
3001 		else
3002 			udelay(500);
3003 		count++;
3004 	} while (--cntdn);
3005 
3006 	return current_state;
3007 }
3008 
3009 /**
3010  * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
3011  * a write to the doorbell)
3012  * @ioc: per adapter object
3013  * @timeout: timeout in second
3014  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3015  *
3016  * Returns 0 for success, non-zero for failure.
3017  *
3018  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3019  */
3020 static int
3021 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3022 	int sleep_flag)
3023 {
3024 	u32 cntdn, count;
3025 	u32 int_status;
3026 
3027 	count = 0;
3028 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3029 	do {
3030 		int_status = readl(&ioc->chip->HostInterruptStatus);
3031 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3032 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
3033 				"%s: successful count(%d), timeout(%d)\n",
3034 				ioc->name, __func__, count, timeout));
3035 			return 0;
3036 		}
3037 		if (sleep_flag == CAN_SLEEP)
3038 			usleep_range(1000, 1500);
3039 		else
3040 			udelay(500);
3041 		count++;
3042 	} while (--cntdn);
3043 
3044 	pr_err(MPT3SAS_FMT
3045 		"%s: failed due to timeout count(%d), int_status(%x)!\n",
3046 		ioc->name, __func__, count, int_status);
3047 	return -EFAULT;
3048 }
3049 
3050 /**
3051  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3052  * @ioc: per adapter object
3053  * @timeout: timeout in second
3054  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3055  *
3056  * Returns 0 for success, non-zero for failure.
3057  *
3058  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3059  * doorbell.
3060  */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
	int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	/* ~1 ms per poll when sleeping is allowed, ~0.5 ms when busy-waiting */
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			/* the IOC has consumed our doorbell write: ack'ed */
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			/* the IOC wrote the doorbell instead; if it reports a
			 * FAULT state, dump the fault code and give up */
			doorbell = readl(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_base_fault_info(ioc , doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			/* all-ones read: adapter is likely gone (e.g.
			 * surprise removal); no point in continuing */
			goto out;

		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			udelay(500);
		count++;
	} while (--cntdn);

 out:
	pr_err(MPT3SAS_FMT
	 "%s: failed due to timeout count(%d), int_status(%x)!\n",
	 ioc->name, __func__, count, int_status);
	return -EFAULT;
}
3101 
3102 /**
3103  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3104  * @ioc: per adapter object
3105  * @timeout: timeout in second
3106  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3107  *
3108  * Returns 0 for success, non-zero for failure.
3109  *
3110  */
3111 static int
3112 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3113 	int sleep_flag)
3114 {
3115 	u32 cntdn, count;
3116 	u32 doorbell_reg;
3117 
3118 	count = 0;
3119 	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
3120 	do {
3121 		doorbell_reg = readl(&ioc->chip->Doorbell);
3122 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3123 			dhsprintk(ioc, pr_info(MPT3SAS_FMT
3124 				"%s: successful count(%d), timeout(%d)\n",
3125 				ioc->name, __func__, count, timeout));
3126 			return 0;
3127 		}
3128 		if (sleep_flag == CAN_SLEEP)
3129 			usleep_range(1000, 1500);
3130 		else
3131 			udelay(500);
3132 		count++;
3133 	} while (--cntdn);
3134 
3135 	pr_err(MPT3SAS_FMT
3136 		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3137 		ioc->name, __func__, count, doorbell_reg);
3138 	return -EFAULT;
3139 }
3140 
3141 /**
3142  * _base_send_ioc_reset - send doorbell reset
3143  * @ioc: per adapter object
3144  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3145  * @timeout: timeout in second
3146  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3147  *
3148  * Returns 0 for success, non-zero for failure.
3149  */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
	int sleep_flag)
{
	u32 ioc_state;
	int r = 0;

	/* only message-unit reset is supported via this path */
	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
		    ioc->name, __func__);
		return -EFAULT;
	}

	/* NOTE(review): message unit reset is refused when the firmware
	 * lacks EVENT_REPLAY; presumably because outstanding events would
	 * be lost across the reset — confirm against the MPI spec */
	if (!(ioc->facts.IOCCapabilities &
	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);

	/* kick off the reset by writing the function code to the doorbell,
	 * then wait for the IOC to consume it and transition to READY */
	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
	    &ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
		r = -EFAULT;
		goto out;
	}
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
	    timeout, sleep_flag);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state);
		r = -EFAULT;
		goto out;
	}
 out:
	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
	return r;
}
3189 
3190 /**
3191  * _base_handshake_req_reply_wait - send request thru doorbell interface
3192  * @ioc: per adapter object
3193  * @request_bytes: request length
3194  * @request: pointer having request payload
3195  * @reply_bytes: reply length
3196  * @reply: pointer to reply payload
3197  * @timeout: timeout in second
3198  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3199  *
3200  * Returns 0 for success, non-zero for failure.
3201  */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
{
	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
	int i;
	u8 failed;
	u16 dummy;
	__le32 *mfp;

	/* make sure doorbell is not in use */
	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		pr_err(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* clear pending doorbell interrupts from previous state changes */
	if (readl(&ioc->chip->HostInterruptStatus) &
	    MPI2_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);

	/* send message to ioc: announce a handshake carrying
	 * request_bytes/4 dwords */
	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
	    &ioc->chip->Doorbell);

	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	/* each handshake step ends by clearing the interrupt status */
	writel(0, &ioc->chip->HostInterruptStatus);

	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake ack failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* send message 32-bits at a time, waiting for the IOC to ack each
	 * dword before writing the next */
	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
		/* NOTE(review): writel() normally byte-swaps on big-endian
		 * itself, so cpu_to_le32 here looks like a double swap on
		 * BE — confirm against the handshake protocol/other users */
		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
			failed = 1;
	}

	if (failed) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake sending request failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* now wait for the reply */
	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* read the first two 16-bits, it gives the total length of the reply */
	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

	/* MsgLength is in dwords; the IOC emits MsgLength*2 16-bit words */
	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
			pr_err(MPT3SAS_FMT
				"doorbell handshake int failed (line=%d)\n",
				ioc->name, __LINE__);
			return -EFAULT;
		}
		if (i >=  reply_bytes/2) /* overflow case */
			/* caller's buffer is full: drain and discard */
			dummy = readl(&ioc->chip->Doorbell);
		else
			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
			    & MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}

	/* final int is best-effort; then make sure the doorbell is released */
	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	/* optional hexdump of the raw reply frame for init debugging */
	if (ioc->logging_level & MPT_DEBUG_INIT) {
		mfp = (__le32 *)reply;
		pr_info("\toffset:data\n");
		for (i = 0; i < reply_bytes/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}
	return 0;
}
3312 
3313 /**
3314  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
3315  * @ioc: per adapter object
3316  * @mpi_reply: the reply payload from FW
3317  * @mpi_request: the request payload sent to FW
3318  *
3319  * The SAS IO Unit Control Request message allows the host to perform low-level
3320  * operations, such as resets on the PHYs of the IO Unit, also allows the host
3321  * to obtain the IOC assigned device handles for a device if it has other
3322  * identifying information about the device, in addition allows the host to
3323  * remove IOC resources associated with the device.
3324  *
3325  * Returns 0 for success, non-zero for failure.
3326  */
3327 int
3328 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
3329 	Mpi2SasIoUnitControlReply_t *mpi_reply,
3330 	Mpi2SasIoUnitControlRequest_t *mpi_request)
3331 {
3332 	u16 smid;
3333 	u32 ioc_state;
3334 	unsigned long timeleft;
3335 	u8 issue_reset;
3336 	int rc;
3337 	void *request;
3338 	u16 wait_state_count;
3339 
3340 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3341 	    __func__));
3342 
3343 	mutex_lock(&ioc->base_cmds.mutex);
3344 
3345 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3346 		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3347 		    ioc->name, __func__);
3348 		rc = -EAGAIN;
3349 		goto out;
3350 	}
3351 
3352 	wait_state_count = 0;
3353 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3354 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3355 		if (wait_state_count++ == 10) {
3356 			pr_err(MPT3SAS_FMT
3357 			    "%s: failed due to ioc not operational\n",
3358 			    ioc->name, __func__);
3359 			rc = -EFAULT;
3360 			goto out;
3361 		}
3362 		ssleep(1);
3363 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3364 		pr_info(MPT3SAS_FMT
3365 			"%s: waiting for operational state(count=%d)\n",
3366 			ioc->name, __func__, wait_state_count);
3367 	}
3368 
3369 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3370 	if (!smid) {
3371 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3372 		    ioc->name, __func__);
3373 		rc = -EAGAIN;
3374 		goto out;
3375 	}
3376 
3377 	rc = 0;
3378 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3379 	request = mpt3sas_base_get_msg_frame(ioc, smid);
3380 	ioc->base_cmds.smid = smid;
3381 	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
3382 	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3383 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
3384 		ioc->ioc_link_reset_in_progress = 1;
3385 	init_completion(&ioc->base_cmds.done);
3386 	mpt3sas_base_put_smid_default(ioc, smid);
3387 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3388 	    msecs_to_jiffies(10000));
3389 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
3390 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
3391 	    ioc->ioc_link_reset_in_progress)
3392 		ioc->ioc_link_reset_in_progress = 0;
3393 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3394 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3395 		    ioc->name, __func__);
3396 		_debug_dump_mf(mpi_request,
3397 		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
3398 		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3399 			issue_reset = 1;
3400 		goto issue_host_reset;
3401 	}
3402 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3403 		memcpy(mpi_reply, ioc->base_cmds.reply,
3404 		    sizeof(Mpi2SasIoUnitControlReply_t));
3405 	else
3406 		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
3407 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3408 	goto out;
3409 
3410  issue_host_reset:
3411 	if (issue_reset)
3412 		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3413 		    FORCE_BIG_HAMMER);
3414 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3415 	rc = -EFAULT;
3416  out:
3417 	mutex_unlock(&ioc->base_cmds.mutex);
3418 	return rc;
3419 }
3420 
3421 /**
3422  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
3423  * @ioc: per adapter object
3424  * @mpi_reply: the reply payload from FW
3425  * @mpi_request: the request payload sent to FW
3426  *
3427  * The SCSI Enclosure Processor request message causes the IOC to
3428  * communicate with SES devices to control LED status signals.
3429  *
3430  * Returns 0 for success, non-zero for failure.
3431  */
3432 int
3433 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
3434 	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
3435 {
3436 	u16 smid;
3437 	u32 ioc_state;
3438 	unsigned long timeleft;
3439 	u8 issue_reset;
3440 	int rc;
3441 	void *request;
3442 	u16 wait_state_count;
3443 
3444 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3445 	    __func__));
3446 
3447 	mutex_lock(&ioc->base_cmds.mutex);
3448 
3449 	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
3450 		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
3451 		    ioc->name, __func__);
3452 		rc = -EAGAIN;
3453 		goto out;
3454 	}
3455 
3456 	wait_state_count = 0;
3457 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3458 	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3459 		if (wait_state_count++ == 10) {
3460 			pr_err(MPT3SAS_FMT
3461 			    "%s: failed due to ioc not operational\n",
3462 			    ioc->name, __func__);
3463 			rc = -EFAULT;
3464 			goto out;
3465 		}
3466 		ssleep(1);
3467 		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3468 		pr_info(MPT3SAS_FMT
3469 			"%s: waiting for operational state(count=%d)\n",
3470 			ioc->name,
3471 		    __func__, wait_state_count);
3472 	}
3473 
3474 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3475 	if (!smid) {
3476 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3477 		    ioc->name, __func__);
3478 		rc = -EAGAIN;
3479 		goto out;
3480 	}
3481 
3482 	rc = 0;
3483 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3484 	request = mpt3sas_base_get_msg_frame(ioc, smid);
3485 	ioc->base_cmds.smid = smid;
3486 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
3487 	init_completion(&ioc->base_cmds.done);
3488 	mpt3sas_base_put_smid_default(ioc, smid);
3489 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3490 	    msecs_to_jiffies(10000));
3491 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3492 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3493 		    ioc->name, __func__);
3494 		_debug_dump_mf(mpi_request,
3495 		    sizeof(Mpi2SepRequest_t)/4);
3496 		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
3497 			issue_reset = 1;
3498 		goto issue_host_reset;
3499 	}
3500 	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
3501 		memcpy(mpi_reply, ioc->base_cmds.reply,
3502 		    sizeof(Mpi2SepReply_t));
3503 	else
3504 		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
3505 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3506 	goto out;
3507 
3508  issue_host_reset:
3509 	if (issue_reset)
3510 		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
3511 		    FORCE_BIG_HAMMER);
3512 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3513 	rc = -EFAULT;
3514  out:
3515 	mutex_unlock(&ioc->base_cmds.mutex);
3516 	return rc;
3517 }
3518 
3519 /**
3520  * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number to query
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3523  *
3524  * Returns 0 for success, non-zero for failure.
3525  */
3526 static int
3527 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
3528 {
3529 	Mpi2PortFactsRequest_t mpi_request;
3530 	Mpi2PortFactsReply_t mpi_reply;
3531 	struct mpt3sas_port_facts *pfacts;
3532 	int mpi_reply_sz, mpi_request_sz, r;
3533 
3534 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3535 	    __func__));
3536 
3537 	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
3538 	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
3539 	memset(&mpi_request, 0, mpi_request_sz);
3540 	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
3541 	mpi_request.PortNumber = port;
3542 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
3543 	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
3544 
3545 	if (r != 0) {
3546 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3547 		    ioc->name, __func__, r);
3548 		return r;
3549 	}
3550 
3551 	pfacts = &ioc->pfacts[port];
3552 	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
3553 	pfacts->PortNumber = mpi_reply.PortNumber;
3554 	pfacts->VP_ID = mpi_reply.VP_ID;
3555 	pfacts->VF_ID = mpi_reply.VF_ID;
3556 	pfacts->MaxPostedCmdBuffers =
3557 	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
3558 
3559 	return 0;
3560 }
3561 
3562 /**
3563  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3564  * @ioc: per adapter object
3565  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3566  *
3567  * Returns 0 for success, non-zero for failure.
3568  */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* issue IOC_FACTS over the doorbell handshake interface */
	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		return r;
	}

	/* copy the reply into ioc->facts, converting multi-byte fields
	 * from little-endian wire order to host order */
	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	/* remember whether integrated-RAID firmware is running */
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	/* NOTE(review): -1 presumably means "no target id limit" to the
	 * SCSI midlayer — confirm against scsi_host usage elsewhere */
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);

	/* frame sizes are reported in dwords, hence the *4 for bytes */
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"hba queue depth(%d), max chains per io(%d)\n",
		ioc->name, facts->RequestCredit,
	    facts->MaxChainDepth));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"request frame size(%d), reply frame size(%d)\n", ioc->name,
	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
	return 0;
}
3634 
3635 /**
3636  * _base_send_ioc_init - send ioc_init to firmware
3637  * @ioc: per adapter object
3638  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3639  *
3640  * Returns 0 for success, non-zero for failure.
3641  */
3642 static int
3643 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3644 {
3645 	Mpi2IOCInitRequest_t mpi_request;
3646 	Mpi2IOCInitReply_t mpi_reply;
3647 	int r;
3648 	struct timeval current_time;
3649 	u16 ioc_status;
3650 
3651 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3652 	    __func__));
3653 
3654 	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
3655 	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
3656 	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
3657 	mpi_request.VF_ID = 0; /* TODO */
3658 	mpi_request.VP_ID = 0;
3659 	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3660 	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3661 
3662 	if (_base_is_controller_msix_enabled(ioc))
3663 		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3664 	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3665 	mpi_request.ReplyDescriptorPostQueueDepth =
3666 	    cpu_to_le16(ioc->reply_post_queue_depth);
3667 	mpi_request.ReplyFreeQueueDepth =
3668 	    cpu_to_le16(ioc->reply_free_queue_depth);
3669 
3670 	mpi_request.SenseBufferAddressHigh =
3671 	    cpu_to_le32((u64)ioc->sense_dma >> 32);
3672 	mpi_request.SystemReplyAddressHigh =
3673 	    cpu_to_le32((u64)ioc->reply_dma >> 32);
3674 	mpi_request.SystemRequestFrameBaseAddress =
3675 	    cpu_to_le64((u64)ioc->request_dma);
3676 	mpi_request.ReplyFreeQueueAddress =
3677 	    cpu_to_le64((u64)ioc->reply_free_dma);
3678 	mpi_request.ReplyDescriptorPostQueueAddress =
3679 	    cpu_to_le64((u64)ioc->reply_post_free_dma);
3680 
3681 
3682 	/* This time stamp specifies number of milliseconds
3683 	 * since epoch ~ midnight January 1, 1970.
3684 	 */
3685 	do_gettimeofday(&current_time);
3686 	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
3687 	    (current_time.tv_usec / 1000));
3688 
3689 	if (ioc->logging_level & MPT_DEBUG_INIT) {
3690 		__le32 *mfp;
3691 		int i;
3692 
3693 		mfp = (__le32 *)&mpi_request;
3694 		pr_info("\toffset:data\n");
3695 		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
3696 			pr_info("\t[0x%02x]:%08x\n", i*4,
3697 			    le32_to_cpu(mfp[i]));
3698 	}
3699 
3700 	r = _base_handshake_req_reply_wait(ioc,
3701 	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
3702 	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
3703 	    sleep_flag);
3704 
3705 	if (r != 0) {
3706 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
3707 		    ioc->name, __func__, r);
3708 		return r;
3709 	}
3710 
3711 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
3712 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
3713 	    mpi_reply.IOCLogInfo) {
3714 		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
3715 		r = -EIO;
3716 	}
3717 
3718 	return 0;
3719 }
3720 
3721 /**
3722  * mpt3sas_port_enable_done - command completion routine for port enable
3723  * @ioc: per adapter object
3724  * @smid: system request message index
3725  * @msix_index: MSIX table index supplied by the OS
3726  * @reply: reply message frame(lower 32bit addr)
3727  *
3728  * Return 1 meaning mf should be freed from _base_interrupt
3729  *        0 means the mf is freed from this function.
3730  */
3731 u8
3732 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3733 	u32 reply)
3734 {
3735 	MPI2DefaultReply_t *mpi_reply;
3736 	u16 ioc_status;
3737 
3738 	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
3739 		return 1;
3740 
3741 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
3742 	if (!mpi_reply)
3743 		return 1;
3744 
3745 	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
3746 		return 1;
3747 
3748 	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
3749 	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
3750 	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
3751 	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
3752 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
3753 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
3754 		ioc->port_enable_failed = 1;
3755 
3756 	if (ioc->is_driver_loading) {
3757 		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3758 			mpt3sas_port_enable_complete(ioc);
3759 			return 1;
3760 		} else {
3761 			ioc->start_scan_failed = ioc_status;
3762 			ioc->start_scan = 0;
3763 			return 1;
3764 		}
3765 	}
3766 	complete(&ioc->port_enable_cmds.done);
3767 	return 1;
3768 }
3769 
3770 /**
3771  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
3772  * @ioc: per adapter object
3773  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3774  *
3775  * Returns 0 for success, non-zero for failure.
3776  */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	unsigned long timeleft;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

	/* only one port enable may be outstanding at a time */
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	/* wait up to 300 seconds; discovery on large topologies is slow */
	init_completion(&ioc->port_enable_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
	    300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2PortEnableRequest_t)/4);
		/* distinguish "interrupted by host reset" from plain timeout */
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
		    ioc->name, __func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
	    "SUCCESS" : "FAILED"));
	return r;
}
3839 
3840 /**
3841  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
3842  * @ioc: per adapter object
3843  *
3844  * Returns 0 for success, non-zero for failure.
3845  */
3846 int
3847 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
3848 {
3849 	Mpi2PortEnableRequest_t *mpi_request;
3850 	u16 smid;
3851 
3852 	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
3853 
3854 	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
3855 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3856 		    ioc->name, __func__);
3857 		return -EAGAIN;
3858 	}
3859 
3860 	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
3861 	if (!smid) {
3862 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3863 		    ioc->name, __func__);
3864 		return -EAGAIN;
3865 	}
3866 
3867 	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
3868 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3869 	ioc->port_enable_cmds.smid = smid;
3870 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3871 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3872 
3873 	mpt3sas_base_put_smid_default(ioc, smid);
3874 	return 0;
3875 }
3876 
3877 /**
3878  * _base_determine_wait_on_discovery - desposition
3879  * @ioc: per adapter object
3880  *
3881  * Decide whether to wait on discovery to complete. Used to either
3882  * locate boot device, or report volumes ahead of physical devices.
3883  *
3884  * Returns 1 for wait, 0 for don't wait
3885  */
3886 static int
3887 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
3888 {
3889 	/* We wait for discovery to complete if IR firmware is loaded.
3890 	 * The sas topology events arrive before PD events, so we need time to
3891 	 * turn on the bit in ioc->pd_handles to indicate PD
3892 	 * Also, it maybe required to report Volumes ahead of physical
3893 	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
3894 	 */
3895 	if (ioc->ir_firmware)
3896 		return 1;
3897 
3898 	/* if no Bios, then we don't need to wait */
3899 	if (!ioc->bios_pg3.BiosVersion)
3900 		return 0;
3901 
3902 	/* Bios is present, then we drop down here.
3903 	 *
3904 	 * If there any entries in the Bios Page 2, then we wait
3905 	 * for discovery to complete.
3906 	 */
3907 
3908 	/* Current Boot Device */
3909 	if ((ioc->bios_pg2.CurrentBootDeviceForm &
3910 	    MPI2_BIOSPAGE2_FORM_MASK) ==
3911 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3912 	/* Request Boot Device */
3913 	   (ioc->bios_pg2.ReqBootDeviceForm &
3914 	    MPI2_BIOSPAGE2_FORM_MASK) ==
3915 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
3916 	/* Alternate Request Boot Device */
3917 	   (ioc->bios_pg2.ReqAltBootDeviceForm &
3918 	    MPI2_BIOSPAGE2_FORM_MASK) ==
3919 	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
3920 		return 0;
3921 
3922 	return 1;
3923 }
3924 
3925 /**
3926  * _base_unmask_events - turn on notification for this event
3927  * @ioc: per adapter object
3928  * @event: firmware event
3929  *
3930  * The mask is stored in ioc->event_masks.
3931  */
3932 static void
3933 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
3934 {
3935 	u32 desired_event;
3936 
3937 	if (event >= 128)
3938 		return;
3939 
3940 	desired_event = (1 << (event % 32));
3941 
3942 	if (event < 32)
3943 		ioc->event_masks[0] &= ~desired_event;
3944 	else if (event < 64)
3945 		ioc->event_masks[1] &= ~desired_event;
3946 	else if (event < 96)
3947 		ioc->event_masks[2] &= ~desired_event;
3948 	else if (event < 128)
3949 		ioc->event_masks[3] &= ~desired_event;
3950 }
3951 
3952 /**
3953  * _base_event_notification - send event notification
3954  * @ioc: per adapter object
3955  * @sleep_flag: CAN_SLEEP or NO_SLEEP
3956  *
3957  * Returns 0 for success, non-zero for failure.
3958  */
3959 static int
3960 _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
3961 {
3962 	Mpi2EventNotificationRequest_t *mpi_request;
3963 	unsigned long timeleft;
3964 	u16 smid;
3965 	int r = 0;
3966 	int i;
3967 
3968 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
3969 	    __func__));
3970 
3971 	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
3972 		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
3973 		    ioc->name, __func__);
3974 		return -EAGAIN;
3975 	}
3976 
3977 	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3978 	if (!smid) {
3979 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
3980 		    ioc->name, __func__);
3981 		return -EAGAIN;
3982 	}
3983 	ioc->base_cmds.status = MPT3_CMD_PENDING;
3984 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3985 	ioc->base_cmds.smid = smid;
3986 	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3987 	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3988 	mpi_request->VF_ID = 0; /* TODO */
3989 	mpi_request->VP_ID = 0;
3990 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3991 		mpi_request->EventMasks[i] =
3992 		    cpu_to_le32(ioc->event_masks[i]);
3993 	init_completion(&ioc->base_cmds.done);
3994 	mpt3sas_base_put_smid_default(ioc, smid);
3995 	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3996 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
3997 		pr_err(MPT3SAS_FMT "%s: timeout\n",
3998 		    ioc->name, __func__);
3999 		_debug_dump_mf(mpi_request,
4000 		    sizeof(Mpi2EventNotificationRequest_t)/4);
4001 		if (ioc->base_cmds.status & MPT3_CMD_RESET)
4002 			r = -EFAULT;
4003 		else
4004 			r = -ETIME;
4005 	} else
4006 		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
4007 		    ioc->name, __func__));
4008 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4009 	return r;
4010 }
4011 
4012 /**
4013  * mpt3sas_base_validate_event_type - validating event types
4014  * @ioc: per adapter object
4015  * @event: firmware event
4016  *
4017  * This will turn on firmware event notification when application
4018  * ask for that event. We don't mask events that are already enabled.
4019  */
4020 void
4021 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4022 {
4023 	int i, j;
4024 	u32 event_mask, desired_event;
4025 	u8 send_update_to_fw;
4026 
4027 	for (i = 0, send_update_to_fw = 0; i <
4028 	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4029 		event_mask = ~event_type[i];
4030 		desired_event = 1;
4031 		for (j = 0; j < 32; j++) {
4032 			if (!(event_mask & desired_event) &&
4033 			    (ioc->event_masks[i] & desired_event)) {
4034 				ioc->event_masks[i] &= ~desired_event;
4035 				send_update_to_fw = 1;
4036 			}
4037 			desired_event = (desired_event << 1);
4038 		}
4039 	}
4040 
4041 	if (!send_update_to_fw)
4042 		return;
4043 
4044 	mutex_lock(&ioc->base_cmds.mutex);
4045 	_base_event_notification(ioc, CAN_SLEEP);
4046 	mutex_unlock(&ioc->base_cmds.mutex);
4047 }
4048 
4049 /**
4050  * _base_diag_reset - the "big hammer" start of day reset
4051  * @ioc: per adapter object
4052  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4053  *
4054  * Returns 0 for success, non-zero for failure.
4055  */
4056 static int
4057 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4058 {
4059 	u32 host_diagnostic;
4060 	u32 ioc_state;
4061 	u32 count;
4062 	u32 hcb_size;
4063 
4064 	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
4065 
4066 	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
4067 	    ioc->name));
4068 
4069 	count = 0;
4070 	do {
4071 		/* Write magic sequence to WriteSequence register
4072 		 * Loop until in diagnostic mode
4073 		 */
4074 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4075 			"write magic sequence\n", ioc->name));
4076 		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4077 		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
4078 		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
4079 		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
4080 		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
4081 		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
4082 		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4083 
4084 		/* wait 100 msec */
4085 		if (sleep_flag == CAN_SLEEP)
4086 			msleep(100);
4087 		else
4088 			mdelay(100);
4089 
4090 		if (count++ > 20)
4091 			goto out;
4092 
4093 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4094 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4095 			"wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
4096 		    ioc->name, count, host_diagnostic));
4097 
4098 	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
4099 
4100 	hcb_size = readl(&ioc->chip->HCBSize);
4101 
4102 	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
4103 	    ioc->name));
4104 	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
4105 	     &ioc->chip->HostDiagnostic);
4106 
4107 	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
4108 	if (sleep_flag == CAN_SLEEP)
4109 		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4110 	else
4111 		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4112 
4113 	/* Approximately 300 second max wait */
4114 	for (count = 0; count < (300000000 /
4115 		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
4116 
4117 		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
4118 
4119 		if (host_diagnostic == 0xFFFFFFFF)
4120 			goto out;
4121 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4122 			break;
4123 
4124 		/* Wait to pass the second read delay window */
4125 		if (sleep_flag == CAN_SLEEP)
4126 			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4127 								/ 1000);
4128 		else
4129 			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4130 								/ 1000);
4131 	}
4132 
4133 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
4134 
4135 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4136 		"restart the adapter assuming the HCB Address points to good F/W\n",
4137 		    ioc->name));
4138 		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
4139 		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
4140 		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
4141 
4142 		drsprintk(ioc, pr_info(MPT3SAS_FMT
4143 		    "re-enable the HCDW\n", ioc->name));
4144 		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
4145 		    &ioc->chip->HCBSize);
4146 	}
4147 
4148 	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
4149 	    ioc->name));
4150 	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
4151 	    &ioc->chip->HostDiagnostic);
4152 
4153 	drsprintk(ioc, pr_info(MPT3SAS_FMT
4154 		"disable writes to the diagnostic register\n", ioc->name));
4155 	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
4156 
4157 	drsprintk(ioc, pr_info(MPT3SAS_FMT
4158 		"Wait for FW to go to the READY state\n", ioc->name));
4159 	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
4160 	    sleep_flag);
4161 	if (ioc_state) {
4162 		pr_err(MPT3SAS_FMT
4163 			"%s: failed going to ready state (ioc_state=0x%x)\n",
4164 			ioc->name, __func__, ioc_state);
4165 		goto out;
4166 	}
4167 
4168 	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
4169 	return 0;
4170 
4171  out:
4172 	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
4173 	return -EFAULT;
4174 }
4175 
4176 /**
4177  * _base_make_ioc_ready - put controller in READY state
4178  * @ioc: per adapter object
4179  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4180  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4181  *
4182  * Returns 0 for success, non-zero for failure.
4183  */
4184 static int
4185 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4186 	enum reset_type type)
4187 {
4188 	u32 ioc_state;
4189 	int rc;
4190 	int count;
4191 
4192 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4193 	    __func__));
4194 
4195 	if (ioc->pci_error_recovery)
4196 		return 0;
4197 
4198 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4199 	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
4200 	    ioc->name, __func__, ioc_state));
4201 
4202 	/* if in RESET state, it should move to READY state shortly */
4203 	count = 0;
4204 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
4205 		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
4206 		    MPI2_IOC_STATE_READY) {
4207 			if (count++ == 10) {
4208 				pr_err(MPT3SAS_FMT
4209 					"%s: failed going to ready state (ioc_state=0x%x)\n",
4210 				    ioc->name, __func__, ioc_state);
4211 				return -EFAULT;
4212 			}
4213 			if (sleep_flag == CAN_SLEEP)
4214 				ssleep(1);
4215 			else
4216 				mdelay(1000);
4217 			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4218 		}
4219 	}
4220 
4221 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
4222 		return 0;
4223 
4224 	if (ioc_state & MPI2_DOORBELL_USED) {
4225 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
4226 			"unexpected doorbell active!\n",
4227 			ioc->name));
4228 		goto issue_diag_reset;
4229 	}
4230 
4231 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
4232 		mpt3sas_base_fault_info(ioc, ioc_state &
4233 		    MPI2_DOORBELL_DATA_MASK);
4234 		goto issue_diag_reset;
4235 	}
4236 
4237 	if (type == FORCE_BIG_HAMMER)
4238 		goto issue_diag_reset;
4239 
4240 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
4241 		if (!(_base_send_ioc_reset(ioc,
4242 		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
4243 			return 0;
4244 	}
4245 
4246  issue_diag_reset:
4247 	rc = _base_diag_reset(ioc, CAN_SLEEP);
4248 	return rc;
4249 }
4250 
4251 /**
4252  * _base_make_ioc_operational - put controller in OPERATIONAL state
4253  * @ioc: per adapter object
4254  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4255  *
4256  * Returns 0 for success, non-zero for failure.
4257  */
4258 static int
4259 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4260 {
4261 	int r, i;
4262 	unsigned long	flags;
4263 	u32 reply_address;
4264 	u16 smid;
4265 	struct _tr_list *delayed_tr, *delayed_tr_next;
4266 	struct adapter_reply_queue *reply_q;
4267 	long reply_post_free;
4268 	u32 reply_post_free_sz;
4269 
4270 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4271 	    __func__));
4272 
4273 	/* clean the delayed target reset list */
4274 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4275 	    &ioc->delayed_tr_list, list) {
4276 		list_del(&delayed_tr->list);
4277 		kfree(delayed_tr);
4278 	}
4279 
4280 
4281 	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
4282 	    &ioc->delayed_tr_volume_list, list) {
4283 		list_del(&delayed_tr->list);
4284 		kfree(delayed_tr);
4285 	}
4286 
4287 	/* initialize the scsi lookup free list */
4288 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4289 	INIT_LIST_HEAD(&ioc->free_list);
4290 	smid = 1;
4291 	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
4292 		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
4293 		ioc->scsi_lookup[i].cb_idx = 0xFF;
4294 		ioc->scsi_lookup[i].smid = smid;
4295 		ioc->scsi_lookup[i].scmd = NULL;
4296 		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
4297 		    &ioc->free_list);
4298 	}
4299 
4300 	/* hi-priority queue */
4301 	INIT_LIST_HEAD(&ioc->hpr_free_list);
4302 	smid = ioc->hi_priority_smid;
4303 	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
4304 		ioc->hpr_lookup[i].cb_idx = 0xFF;
4305 		ioc->hpr_lookup[i].smid = smid;
4306 		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
4307 		    &ioc->hpr_free_list);
4308 	}
4309 
4310 	/* internal queue */
4311 	INIT_LIST_HEAD(&ioc->internal_free_list);
4312 	smid = ioc->internal_smid;
4313 	for (i = 0; i < ioc->internal_depth; i++, smid++) {
4314 		ioc->internal_lookup[i].cb_idx = 0xFF;
4315 		ioc->internal_lookup[i].smid = smid;
4316 		list_add_tail(&ioc->internal_lookup[i].tracker_list,
4317 		    &ioc->internal_free_list);
4318 	}
4319 
4320 	/* chain pool */
4321 	INIT_LIST_HEAD(&ioc->free_chain_list);
4322 	for (i = 0; i < ioc->chain_depth; i++)
4323 		list_add_tail(&ioc->chain_lookup[i].tracker_list,
4324 		    &ioc->free_chain_list);
4325 
4326 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4327 
4328 	/* initialize Reply Free Queue */
4329 	for (i = 0, reply_address = (u32)ioc->reply_dma ;
4330 	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
4331 	    ioc->reply_sz)
4332 		ioc->reply_free[i] = cpu_to_le32(reply_address);
4333 
4334 	/* initialize reply queues */
4335 	if (ioc->is_driver_loading)
4336 		_base_assign_reply_queues(ioc);
4337 
4338 	/* initialize Reply Post Free Queue */
4339 	reply_post_free = (long)ioc->reply_post_free;
4340 	reply_post_free_sz = ioc->reply_post_queue_depth *
4341 	    sizeof(Mpi2DefaultReplyDescriptor_t);
4342 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4343 		reply_q->reply_post_host_index = 0;
4344 		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4345 		    reply_post_free;
4346 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
4347 			reply_q->reply_post_free[i].Words =
4348 			    cpu_to_le64(ULLONG_MAX);
4349 		if (!_base_is_controller_msix_enabled(ioc))
4350 			goto skip_init_reply_post_free_queue;
4351 		reply_post_free += reply_post_free_sz;
4352 	}
4353  skip_init_reply_post_free_queue:
4354 
4355 	r = _base_send_ioc_init(ioc, sleep_flag);
4356 	if (r)
4357 		return r;
4358 
4359 	/* initialize reply free host index */
4360 	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
4361 	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
4362 
4363 	/* initialize reply post host index */
4364 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4365 		writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
4366 		    &ioc->chip->ReplyPostHostIndex);
4367 		if (!_base_is_controller_msix_enabled(ioc))
4368 			goto skip_init_reply_post_host_index;
4369 	}
4370 
4371  skip_init_reply_post_host_index:
4372 
4373 	_base_unmask_interrupts(ioc);
4374 	r = _base_event_notification(ioc, sleep_flag);
4375 	if (r)
4376 		return r;
4377 
4378 	if (sleep_flag == CAN_SLEEP)
4379 		_base_static_config_pages(ioc);
4380 
4381 
4382 	if (ioc->is_driver_loading) {
4383 		ioc->wait_for_discovery_to_complete =
4384 		    _base_determine_wait_on_discovery(ioc);
4385 
4386 		return r; /* scan_start and scan_finished support */
4387 	}
4388 
4389 	r = _base_send_port_enable(ioc, sleep_flag);
4390 	if (r)
4391 		return r;
4392 
4393 	return r;
4394 }
4395 
4396 /**
4397  * mpt3sas_base_free_resources - free resources controller resources
4398  * @ioc: per adapter object
4399  *
4400  * Return nothing.
4401  */
4402 void
4403 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
4404 {
4405 	struct pci_dev *pdev = ioc->pdev;
4406 
4407 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4408 	    __func__));
4409 
4410 	if (ioc->chip_phys && ioc->chip) {
4411 		_base_mask_interrupts(ioc);
4412 		ioc->shost_recovery = 1;
4413 		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4414 		ioc->shost_recovery = 0;
4415 	}
4416 
4417 	_base_free_irq(ioc);
4418 	_base_disable_msix(ioc);
4419 
4420 	if (ioc->chip_phys && ioc->chip)
4421 		iounmap(ioc->chip);
4422 	ioc->chip_phys = 0;
4423 
4424 	if (pci_is_enabled(pdev)) {
4425 		pci_release_selected_regions(ioc->pdev, ioc->bars);
4426 		pci_disable_pcie_error_reporting(pdev);
4427 		pci_disable_device(pdev);
4428 	}
4429 	return;
4430 }
4431 
4432 /**
4433  * mpt3sas_base_attach - attach controller instance
4434  * @ioc: per adapter object
4435  *
4436  * Returns 0 for success, non-zero for failure.
4437  */
4438 int
4439 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
4440 {
4441 	int r, i;
4442 	int cpu_id, last_cpu_id = 0;
4443 
4444 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4445 	    __func__));
4446 
4447 	/* setup cpu_msix_table */
4448 	ioc->cpu_count = num_online_cpus();
4449 	for_each_online_cpu(cpu_id)
4450 		last_cpu_id = cpu_id;
4451 	ioc->cpu_msix_table_sz = last_cpu_id + 1;
4452 	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4453 	ioc->reply_queue_count = 1;
4454 	if (!ioc->cpu_msix_table) {
4455 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
4456 			"allocation for cpu_msix_table failed!!!\n",
4457 			ioc->name));
4458 		r = -ENOMEM;
4459 		goto out_free_resources;
4460 	}
4461 
4462 	r = mpt3sas_base_map_resources(ioc);
4463 	if (r)
4464 		goto out_free_resources;
4465 
4466 
4467 	pci_set_drvdata(ioc->pdev, ioc->shost);
4468 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4469 	if (r)
4470 		goto out_free_resources;
4471 
4472 	/*
4473 	 * In SAS3.0,
4474 	 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
4475 	 * Target Status - all require the IEEE formated scatter gather
4476 	 * elements.
4477 	 */
4478 
4479 	ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
4480 	ioc->build_sg = &_base_build_sg_ieee;
4481 	ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
4482 	ioc->mpi25 = 1;
4483 	ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
4484 
4485 	/*
4486 	 * These function pointers for other requests that don't
4487 	 * the require IEEE scatter gather elements.
4488 	 *
4489 	 * For example Configuration Pages and SAS IOUNIT Control don't.
4490 	 */
4491 	ioc->build_sg_mpi = &_base_build_sg;
4492 	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
4493 
4494 	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
4495 	if (r)
4496 		goto out_free_resources;
4497 
4498 	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
4499 	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
4500 	if (!ioc->pfacts) {
4501 		r = -ENOMEM;
4502 		goto out_free_resources;
4503 	}
4504 
4505 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
4506 		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
4507 		if (r)
4508 			goto out_free_resources;
4509 	}
4510 
4511 	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
4512 	if (r)
4513 		goto out_free_resources;
4514 
4515 	init_waitqueue_head(&ioc->reset_wq);
4516 
4517 	/* allocate memory pd handle bitmask list */
4518 	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
4519 	if (ioc->facts.MaxDevHandle % 8)
4520 		ioc->pd_handles_sz++;
4521 	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
4522 	    GFP_KERNEL);
4523 	if (!ioc->pd_handles) {
4524 		r = -ENOMEM;
4525 		goto out_free_resources;
4526 	}
4527 	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
4528 	    GFP_KERNEL);
4529 	if (!ioc->blocking_handles) {
4530 		r = -ENOMEM;
4531 		goto out_free_resources;
4532 	}
4533 
4534 	ioc->fwfault_debug = mpt3sas_fwfault_debug;
4535 
4536 	/* base internal command bits */
4537 	mutex_init(&ioc->base_cmds.mutex);
4538 	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4539 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4540 
4541 	/* port_enable command bits */
4542 	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4543 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4544 
4545 	/* transport internal command bits */
4546 	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4547 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
4548 	mutex_init(&ioc->transport_cmds.mutex);
4549 
4550 	/* scsih internal command bits */
4551 	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4552 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
4553 	mutex_init(&ioc->scsih_cmds.mutex);
4554 
4555 	/* task management internal command bits */
4556 	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4557 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
4558 	mutex_init(&ioc->tm_cmds.mutex);
4559 
4560 	/* config page internal command bits */
4561 	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4562 	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
4563 	mutex_init(&ioc->config_cmds.mutex);
4564 
4565 	/* ctl module internal command bits */
4566 	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
4567 	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4568 	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
4569 	mutex_init(&ioc->ctl_cmds.mutex);
4570 
4571 	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
4572 	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
4573 	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
4574 	    !ioc->ctl_cmds.sense) {
4575 		r = -ENOMEM;
4576 		goto out_free_resources;
4577 	}
4578 
4579 	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4580 		ioc->event_masks[i] = -1;
4581 
4582 	/* here we enable the events we care about */
4583 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
4584 	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
4585 	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
4586 	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
4587 	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
4588 	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
4589 	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
4590 	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
4591 	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
4592 	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
4593 
4594 	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
4595 	if (r)
4596 		goto out_free_resources;
4597 
4598 	return 0;
4599 
4600  out_free_resources:
4601 
4602 	ioc->remove_host = 1;
4603 
4604 	mpt3sas_base_free_resources(ioc);
4605 	_base_release_memory_pools(ioc);
4606 	pci_set_drvdata(ioc->pdev, NULL);
4607 	kfree(ioc->cpu_msix_table);
4608 	kfree(ioc->pd_handles);
4609 	kfree(ioc->blocking_handles);
4610 	kfree(ioc->tm_cmds.reply);
4611 	kfree(ioc->transport_cmds.reply);
4612 	kfree(ioc->scsih_cmds.reply);
4613 	kfree(ioc->config_cmds.reply);
4614 	kfree(ioc->base_cmds.reply);
4615 	kfree(ioc->port_enable_cmds.reply);
4616 	kfree(ioc->ctl_cmds.reply);
4617 	kfree(ioc->ctl_cmds.sense);
4618 	kfree(ioc->pfacts);
4619 	ioc->ctl_cmds.reply = NULL;
4620 	ioc->base_cmds.reply = NULL;
4621 	ioc->tm_cmds.reply = NULL;
4622 	ioc->scsih_cmds.reply = NULL;
4623 	ioc->transport_cmds.reply = NULL;
4624 	ioc->config_cmds.reply = NULL;
4625 	ioc->pfacts = NULL;
4626 	return r;
4627 }
4628 
4629 
4630 /**
4631  * mpt3sas_base_detach - remove controller instance
4632  * @ioc: per adapter object
4633  *
4634  * Return nothing.
4635  */
4636 void
4637 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
4638 {
4639 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4640 	    __func__));
4641 
4642 	mpt3sas_base_stop_watchdog(ioc);
4643 	mpt3sas_base_free_resources(ioc);
4644 	_base_release_memory_pools(ioc);
4645 	pci_set_drvdata(ioc->pdev, NULL);
4646 	kfree(ioc->cpu_msix_table);
4647 	kfree(ioc->pd_handles);
4648 	kfree(ioc->blocking_handles);
4649 	kfree(ioc->pfacts);
4650 	kfree(ioc->ctl_cmds.reply);
4651 	kfree(ioc->ctl_cmds.sense);
4652 	kfree(ioc->base_cmds.reply);
4653 	kfree(ioc->port_enable_cmds.reply);
4654 	kfree(ioc->tm_cmds.reply);
4655 	kfree(ioc->transport_cmds.reply);
4656 	kfree(ioc->scsih_cmds.reply);
4657 	kfree(ioc->config_cmds.reply);
4658 }
4659 
4660 /**
4661  * _base_reset_handler - reset callback handler (for base)
4662  * @ioc: per adapter object
4663  * @reset_phase: phase
4664  *
4665  * The handler for doing any required cleanup or initialization.
4666  *
4667  * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
4668  * MPT3_IOC_DONE_RESET
4669  *
4670  * Return nothing.
4671  */
4672 static void
4673 _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
4674 {
4675 	mpt3sas_scsih_reset_handler(ioc, reset_phase);
4676 	mpt3sas_ctl_reset_handler(ioc, reset_phase);
4677 	switch (reset_phase) {
4678 	case MPT3_IOC_PRE_RESET:
4679 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
4680 		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
4681 		break;
4682 	case MPT3_IOC_AFTER_RESET:
4683 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
4684 		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
4685 		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
4686 			ioc->transport_cmds.status |= MPT3_CMD_RESET;
4687 			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
4688 			complete(&ioc->transport_cmds.done);
4689 		}
4690 		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4691 			ioc->base_cmds.status |= MPT3_CMD_RESET;
4692 			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
4693 			complete(&ioc->base_cmds.done);
4694 		}
4695 		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4696 			ioc->port_enable_failed = 1;
4697 			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
4698 			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
4699 			if (ioc->is_driver_loading) {
4700 				ioc->start_scan_failed =
4701 				    MPI2_IOCSTATUS_INTERNAL_ERROR;
4702 				ioc->start_scan = 0;
4703 				ioc->port_enable_cmds.status =
4704 				    MPT3_CMD_NOT_USED;
4705 			} else
4706 				complete(&ioc->port_enable_cmds.done);
4707 		}
4708 		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
4709 			ioc->config_cmds.status |= MPT3_CMD_RESET;
4710 			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
4711 			ioc->config_cmds.smid = USHRT_MAX;
4712 			complete(&ioc->config_cmds.done);
4713 		}
4714 		break;
4715 	case MPT3_IOC_DONE_RESET:
4716 		dtmprintk(ioc, pr_info(MPT3SAS_FMT
4717 			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
4718 		break;
4719 	}
4720 }
4721 
4722 /**
4723  * _wait_for_commands_to_complete - reset controller
4724  * @ioc: Pointer to MPT_ADAPTER structure
4725  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4726  *
4727  * This function waiting(3s) for all pending commands to complete
4728  * prior to putting controller in reset.
4729  */
4730 static void
4731 _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4732 {
4733 	u32 ioc_state;
4734 	unsigned long flags;
4735 	u16 i;
4736 
4737 	ioc->pending_io_count = 0;
4738 	if (sleep_flag != CAN_SLEEP)
4739 		return;
4740 
4741 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4742 	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
4743 		return;
4744 
4745 	/* pending command count */
4746 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4747 	for (i = 0; i < ioc->scsiio_depth; i++)
4748 		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
4749 			ioc->pending_io_count++;
4750 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4751 
4752 	if (!ioc->pending_io_count)
4753 		return;
4754 
4755 	/* wait for pending commands to complete */
4756 	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
4757 }
4758 
4759 /**
4760  * mpt3sas_base_hard_reset_handler - reset controller
4761  * @ioc: Pointer to MPT_ADAPTER structure
4762  * @sleep_flag: CAN_SLEEP or NO_SLEEP
4763  * @type: FORCE_BIG_HAMMER or SOFT_RESET
4764  *
4765  * Returns 0 for success, non-zero for failure.
4766  */
4767 int
4768 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4769 	enum reset_type type)
4770 {
4771 	int r;
4772 	unsigned long flags;
4773 	u32 ioc_state;
4774 	u8 is_fault = 0, is_trigger = 0;
4775 
4776 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
4777 	    __func__));
4778 
4779 	if (ioc->pci_error_recovery) {
4780 		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
4781 		    ioc->name, __func__);
4782 		r = 0;
4783 		goto out_unlocked;
4784 	}
4785 
4786 	if (mpt3sas_fwfault_debug)
4787 		mpt3sas_halt_firmware(ioc);
4788 
4789 	/* TODO - What we really should be doing is pulling
4790 	 * out all the code associated with NO_SLEEP; its never used.
4791 	 * That is legacy code from mpt fusion driver, ported over.
4792 	 * I will leave this BUG_ON here for now till its been resolved.
4793 	 */
4794 	BUG_ON(sleep_flag == NO_SLEEP);
4795 
4796 	/* wait for an active reset in progress to complete */
4797 	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
4798 		do {
4799 			ssleep(1);
4800 		} while (ioc->shost_recovery == 1);
4801 		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
4802 		    __func__));
4803 		return ioc->ioc_reset_in_progress_status;
4804 	}
4805 
4806 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4807 	ioc->shost_recovery = 1;
4808 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4809 
4810 	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
4811 	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
4812 	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
4813 	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
4814 		is_trigger = 1;
4815 		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
4816 		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
4817 			is_fault = 1;
4818 	}
4819 	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
4820 	_wait_for_commands_to_complete(ioc, sleep_flag);
4821 	_base_mask_interrupts(ioc);
4822 	r = _base_make_ioc_ready(ioc, sleep_flag, type);
4823 	if (r)
4824 		goto out;
4825 	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
4826 
4827 	/* If this hard reset is called while port enable is active, then
4828 	 * there is no reason to call make_ioc_operational
4829 	 */
4830 	if (ioc->is_driver_loading && ioc->port_enable_failed) {
4831 		ioc->remove_host = 1;
4832 		r = -EFAULT;
4833 		goto out;
4834 	}
4835 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4836 	if (r)
4837 		goto out;
4838 	r = _base_make_ioc_operational(ioc, sleep_flag);
4839 	if (!r)
4840 		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
4841 
4842  out:
4843 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
4844 	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
4845 
4846 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
4847 	ioc->ioc_reset_in_progress_status = r;
4848 	ioc->shost_recovery = 0;
4849 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
4850 	ioc->ioc_reset_count++;
4851 	mutex_unlock(&ioc->reset_in_progress_mutex);
4852 
4853  out_unlocked:
4854 	if ((r == 0) && is_trigger) {
4855 		if (is_fault)
4856 			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
4857 		else
4858 			mpt3sas_trigger_master(ioc,
4859 			    MASTER_TRIGGER_ADAPTER_RESET);
4860 	}
4861 	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
4862 	    __func__));
4863 	return r;
4864 }
4865