xref: /openbmc/linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision b64845a7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2021 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 
#if defined(writeq) && defined(CONFIG_64BIT)
/* 64-bit MMIO write: use the platform's native writeq when available. */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without a native 64-bit MMIO write: emit two
 * 32-bit writes, low dword first, high dword at addr + 4.
 * NOTE(review): the pair of writes is not atomic; assumes the target
 * register tolerates split access -- confirm against the hardware spec.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif
31 
32 static inline bool
33 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
34 {
35 	u16 pi, ci, max_entries;
36 	bool is_qfull = false;
37 
38 	pi = op_req_q->pi;
39 	ci = READ_ONCE(op_req_q->ci);
40 	max_entries = op_req_q->num_requests;
41 
42 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
43 		is_qfull = true;
44 
45 	return is_qfull;
46 }
47 
48 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
49 {
50 	u16 i, max_vectors;
51 
52 	max_vectors = mrioc->intr_info_count;
53 
54 	for (i = 0; i < max_vectors; i++)
55 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
56 }
57 
/*
 * mpi3mr_ioc_disable_intr - mark interrupts disabled and wait for any
 * running ISRs to drain.  The ISRs check mrioc->intr_enabled and bail
 * out early once it is cleared.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
63 
/* mpi3mr_ioc_enable_intr - allow the ISRs to process replies again. */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
68 
69 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
70 {
71 	u16 i;
72 
73 	mpi3mr_ioc_disable_intr(mrioc);
74 
75 	if (!mrioc->intr_info)
76 		return;
77 
78 	for (i = 0; i < mrioc->intr_info_count; i++)
79 		free_irq(pci_irq_vector(mrioc->pdev, i),
80 		    (mrioc->intr_info + i));
81 
82 	kfree(mrioc->intr_info);
83 	mrioc->intr_info = NULL;
84 	mrioc->intr_info_count = 0;
85 	mrioc->is_intr_info_set = false;
86 	pci_free_irq_vectors(mrioc->pdev);
87 }
88 
89 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
90 	dma_addr_t dma_addr)
91 {
92 	struct mpi3_sge_common *sgel = paddr;
93 
94 	sgel->flags = flags;
95 	sgel->length = cpu_to_le32(length);
96 	sgel->address = cpu_to_le64(dma_addr);
97 }
98 
99 void mpi3mr_build_zero_len_sge(void *paddr)
100 {
101 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
102 
103 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
104 }
105 
106 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
107 	dma_addr_t phys_addr)
108 {
109 	if (!phys_addr)
110 		return NULL;
111 
112 	if ((phys_addr < mrioc->reply_buf_dma) ||
113 	    (phys_addr > mrioc->reply_buf_dma_max_address))
114 		return NULL;
115 
116 	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
117 }
118 
119 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
120 	dma_addr_t phys_addr)
121 {
122 	if (!phys_addr)
123 		return NULL;
124 
125 	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
126 }
127 
128 static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
129 	u64 reply_dma)
130 {
131 	u32 old_idx = 0;
132 	unsigned long flags;
133 
134 	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
135 	old_idx  =  mrioc->reply_free_queue_host_index;
136 	mrioc->reply_free_queue_host_index = (
137 	    (mrioc->reply_free_queue_host_index ==
138 	    (mrioc->reply_free_qsz - 1)) ? 0 :
139 	    (mrioc->reply_free_queue_host_index + 1));
140 	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
141 	writel(mrioc->reply_free_queue_host_index,
142 	    &mrioc->sysif_regs->reply_free_host_index);
143 	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
144 }
145 
146 void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
147 	u64 sense_buf_dma)
148 {
149 	u32 old_idx = 0;
150 	unsigned long flags;
151 
152 	spin_lock_irqsave(&mrioc->sbq_lock, flags);
153 	old_idx  =  mrioc->sbq_host_index;
154 	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
155 	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
156 	    (mrioc->sbq_host_index + 1));
157 	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
158 	writel(mrioc->sbq_host_index,
159 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
160 	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
161 }
162 
163 static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
164 	struct mpi3_event_notification_reply *event_reply)
165 {
166 	char *desc = NULL;
167 	u16 event;
168 
169 	event = event_reply->event;
170 
171 	switch (event) {
172 	case MPI3_EVENT_LOG_DATA:
173 		desc = "Log Data";
174 		break;
175 	case MPI3_EVENT_CHANGE:
176 		desc = "Event Change";
177 		break;
178 	case MPI3_EVENT_GPIO_INTERRUPT:
179 		desc = "GPIO Interrupt";
180 		break;
181 	case MPI3_EVENT_TEMP_THRESHOLD:
182 		desc = "Temperature Threshold";
183 		break;
184 	case MPI3_EVENT_CABLE_MGMT:
185 		desc = "Cable Management";
186 		break;
187 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
188 		desc = "Energy Pack Change";
189 		break;
190 	case MPI3_EVENT_DEVICE_ADDED:
191 	{
192 		struct mpi3_device_page0 *event_data =
193 		    (struct mpi3_device_page0 *)event_reply->event_data;
194 		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
195 		    event_data->dev_handle, event_data->device_form);
196 		return;
197 	}
198 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
199 	{
200 		struct mpi3_device_page0 *event_data =
201 		    (struct mpi3_device_page0 *)event_reply->event_data;
202 		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
203 		    event_data->dev_handle, event_data->device_form);
204 		return;
205 	}
206 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
207 	{
208 		struct mpi3_event_data_device_status_change *event_data =
209 		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
210 		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
211 		    event_data->dev_handle, event_data->reason_code);
212 		return;
213 	}
214 	case MPI3_EVENT_SAS_DISCOVERY:
215 	{
216 		struct mpi3_event_data_sas_discovery *event_data =
217 		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
218 		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
219 		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
220 		    "start" : "stop",
221 		    le32_to_cpu(event_data->discovery_status));
222 		return;
223 	}
224 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
225 		desc = "SAS Broadcast Primitive";
226 		break;
227 	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
228 		desc = "SAS Notify Primitive";
229 		break;
230 	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
231 		desc = "SAS Init Device Status Change";
232 		break;
233 	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
234 		desc = "SAS Init Table Overflow";
235 		break;
236 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
237 		desc = "SAS Topology Change List";
238 		break;
239 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
240 		desc = "Enclosure Device Status Change";
241 		break;
242 	case MPI3_EVENT_HARD_RESET_RECEIVED:
243 		desc = "Hard Reset Received";
244 		break;
245 	case MPI3_EVENT_SAS_PHY_COUNTER:
246 		desc = "SAS PHY Counter";
247 		break;
248 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
249 		desc = "SAS Device Discovery Error";
250 		break;
251 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
252 		desc = "PCIE Topology Change List";
253 		break;
254 	case MPI3_EVENT_PCIE_ENUMERATION:
255 	{
256 		struct mpi3_event_data_pcie_enumeration *event_data =
257 		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
258 		ioc_info(mrioc, "PCIE Enumeration: (%s)",
259 		    (event_data->reason_code ==
260 		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
261 		if (event_data->enumeration_status)
262 			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
263 			    le32_to_cpu(event_data->enumeration_status));
264 		return;
265 	}
266 	case MPI3_EVENT_PREPARE_FOR_RESET:
267 		desc = "Prepare For Reset";
268 		break;
269 	}
270 
271 	if (!desc)
272 		return;
273 
274 	ioc_info(mrioc, "%s\n", desc);
275 }
276 
/*
 * mpi3mr_handle_events - dispatch an event notification reply: cache the
 * IOC change count, log the event, then hand it to the OS-level handler.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}
287 
288 static struct mpi3mr_drv_cmd *
289 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
290 	struct mpi3_default_reply *def_reply)
291 {
292 	u16 idx;
293 
294 	switch (host_tag) {
295 	case MPI3MR_HOSTTAG_INITCMDS:
296 		return &mrioc->init_cmds;
297 	case MPI3MR_HOSTTAG_BLK_TMS:
298 		return &mrioc->host_tm_cmds;
299 	case MPI3MR_HOSTTAG_INVALID:
300 		if (def_reply && def_reply->function ==
301 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
302 			mpi3mr_handle_events(mrioc, def_reply);
303 		return NULL;
304 	default:
305 		break;
306 	}
307 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
308 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
309 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
310 		return &mrioc->dev_rmhs_cmds[idx];
311 	}
312 
313 	return NULL;
314 }
315 
/*
 * mpi3mr_process_admin_reply_desc - handle one admin reply descriptor.
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor to decode
 * @reply_dma: output; set to the reply frame's DMA address for an
 *	address reply (so the caller can repost the frame), 0 otherwise
 *
 * Decode the descriptor type, extract host tag / IOC status / log info,
 * then complete the matching internal driver command: wake a waiter or
 * invoke its callback.  A sense buffer referenced by a SCSI IO reply is
 * reposted to the firmware before returning.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status: everything needed is in the descriptor. */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame lives in host memory at *reply_dma. */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies may carry an attached sense buffer. */
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		/* Complete only commands that are still marked pending. */
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->facts.reply_sz);
			}
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* scsi_reply is valid whenever sense_buf is non-NULL (set together). */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
393 
/*
 * mpi3mr_process_admin_reply_q - drain the admin reply queue.
 * @mrioc: Adapter instance reference
 *
 * Consume admin reply descriptors whose phase bit matches the expected
 * phase; the expected phase toggles on each queue wrap, which is how
 * new entries are distinguished from stale ones.  Each descriptor is
 * processed and any address-reply frame reposted.  The updated consumer
 * index is published to the controller at the end.
 *
 * Return: number of reply descriptors processed.
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase mismatch: firmware has not posted a new entry yet. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		/* The descriptor also piggybacks the admin request queue CI. */
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}
433 
434 /**
435  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
436  *	queue's consumer index from operational reply descriptor queue.
437  * @op_reply_q: op_reply_qinfo object
438  * @reply_ci: operational reply descriptor's queue consumer index
439  *
440  * Returns reply descriptor frame address
441  */
442 static inline struct mpi3_default_reply_descriptor *
443 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
444 {
445 	void *segment_base_addr;
446 	struct segments *segments = op_reply_q->q_segments;
447 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
448 
449 	segment_base_addr =
450 	    segments[reply_ci / op_reply_q->segment_qd].segment;
451 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
452 	    (reply_ci % op_reply_q->segment_qd);
453 	return reply_desc;
454 }
455 
/*
 * mpi3mr_process_op_reply_q - drain one operational reply queue.
 * @mrioc: Adapter instance reference
 * @intr_info: interrupt vector info owning the reply queue
 *
 * Single-consumer drain of the queue, guarded by the in_use atomic so
 * that at most one context processes it at a time.  For each descriptor
 * the originating request queue's CI is updated, the reply is passed to
 * the SCSI-level completion path, and the reply frame is reposted.  The
 * loop bails out (arming IRQ polling for the threaded handler) once
 * more than max_host_ios descriptors have been handled in one call, to
 * avoid a CPU lockup in hard-IRQ context.
 *
 * Return: number of reply descriptors processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Another context is already draining this queue. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	/* Phase mismatch: no new entry posted by the firmware. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		/* Wrap-around toggles the expected phase. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			intr_info->op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
524 
525 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
526 {
527 	struct mpi3mr_intr_info *intr_info = privdata;
528 	struct mpi3mr_ioc *mrioc;
529 	u16 midx;
530 	u32 num_admin_replies = 0, num_op_reply = 0;
531 
532 	if (!intr_info)
533 		return IRQ_NONE;
534 
535 	mrioc = intr_info->mrioc;
536 
537 	if (!mrioc->intr_enabled)
538 		return IRQ_NONE;
539 
540 	midx = intr_info->msix_index;
541 
542 	if (!midx)
543 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
544 	if (intr_info->op_reply_q)
545 		num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
546 
547 	if (num_admin_replies || num_op_reply)
548 		return IRQ_HANDLED;
549 	else
550 		return IRQ_NONE;
551 }
552 
553 static irqreturn_t mpi3mr_isr(int irq, void *privdata)
554 {
555 	struct mpi3mr_intr_info *intr_info = privdata;
556 	struct mpi3mr_ioc *mrioc;
557 	u16 midx;
558 	int ret;
559 
560 	if (!intr_info)
561 		return IRQ_NONE;
562 
563 	mrioc = intr_info->mrioc;
564 	midx = intr_info->msix_index;
565 	/* Call primary ISR routine */
566 	ret = mpi3mr_isr_primary(irq, privdata);
567 
568 	/*
569 	 * If more IOs are expected, schedule IRQ polling thread.
570 	 * Otherwise exit from ISR.
571 	 */
572 	if (!intr_info->op_reply_q)
573 		return ret;
574 
575 	if (!intr_info->op_reply_q->enable_irq_poll ||
576 	    !atomic_read(&intr_info->op_reply_q->pend_ios))
577 		return ret;
578 
579 	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
580 
581 	return IRQ_WAKE_THREAD;
582 }
583 
/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		/* MSI-X 0 also owns the admin reply queue. */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc, intr_info);

		/* Sleep between polls to avoid hogging the CPU. */
		usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Disarm poll mode and unmask the vector masked in mpi3mr_isr(). */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}
628 
629 /**
630  * mpi3mr_request_irq - Request IRQ and register ISR
631  * @mrioc: Adapter instance reference
632  * @index: IRQ vector index
633  *
634  * Request threaded ISR with primary ISR and secondary
635  *
636  * Return: 0 on success and non zero on failures.
637  */
638 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
639 {
640 	struct pci_dev *pdev = mrioc->pdev;
641 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
642 	int retval = 0;
643 
644 	intr_info->mrioc = mrioc;
645 	intr_info->msix_index = index;
646 	intr_info->op_reply_q = NULL;
647 
648 	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
649 	    mrioc->driver_name, mrioc->id, index);
650 
651 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
652 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
653 	if (retval) {
654 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
655 		    intr_info->name, pci_irq_vector(pdev, index));
656 		return retval;
657 	}
658 
659 	return retval;
660 }
661 
662 /**
663  * mpi3mr_setup_isr - Setup ISR for the controller
664  * @mrioc: Adapter instance reference
665  * @setup_one: Request one IRQ or more
666  *
667  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
668  *
669  * Return: 0 on success and non zero on failures.
670  */
671 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
672 {
673 	unsigned int irq_flags = PCI_IRQ_MSIX;
674 	int max_vectors;
675 	int retval;
676 	int i;
677 	struct irq_affinity desc = { .pre_vectors =  1};
678 
679 	if (mrioc->is_intr_info_set)
680 		return 0;
681 
682 	mpi3mr_cleanup_isr(mrioc);
683 
684 	if (setup_one || reset_devices)
685 		max_vectors = 1;
686 	else {
687 		max_vectors =
688 		    min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
689 
690 		ioc_info(mrioc,
691 		    "MSI-X vectors supported: %d, no of cores: %d,",
692 		    mrioc->msix_count, mrioc->cpu_count);
693 		ioc_info(mrioc,
694 		    "MSI-x vectors requested: %d\n", max_vectors);
695 	}
696 
697 	irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
698 
699 	mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
700 	retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
701 				1, max_vectors, irq_flags, &desc);
702 	if (retval < 0) {
703 		ioc_err(mrioc, "Cannot alloc irq vectors\n");
704 		goto out_failed;
705 	}
706 	if (retval != max_vectors) {
707 		ioc_info(mrioc,
708 		    "allocated vectors (%d) are less than configured (%d)\n",
709 		    retval, max_vectors);
710 		/*
711 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
712 		 * between Admin queue and operational queue
713 		 */
714 		if (retval == 1)
715 			mrioc->op_reply_q_offset = 0;
716 
717 		max_vectors = retval;
718 	}
719 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
720 	    GFP_KERNEL);
721 	if (!mrioc->intr_info) {
722 		retval = -ENOMEM;
723 		pci_free_irq_vectors(mrioc->pdev);
724 		goto out_failed;
725 	}
726 	for (i = 0; i < max_vectors; i++) {
727 		retval = mpi3mr_request_irq(mrioc, i);
728 		if (retval) {
729 			mrioc->intr_info_count = i;
730 			goto out_failed;
731 		}
732 	}
733 	if (reset_devices || !setup_one)
734 		mrioc->is_intr_info_set = true;
735 	mrioc->intr_info_count = max_vectors;
736 	mpi3mr_ioc_enable_intr(mrioc);
737 	return 0;
738 
739 out_failed:
740 	mpi3mr_cleanup_isr(mrioc);
741 
742 	return retval;
743 }
744 
/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
756 
757 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
758 {
759 	int i;
760 	char *name = NULL;
761 
762 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
763 		if (mrioc_states[i].value == mrioc_state) {
764 			name = mrioc_states[i].name;
765 			break;
766 		}
767 	}
768 	return name;
769 }
770 
771 /* Reset reason to name mapper structure*/
772 static const struct {
773 	enum mpi3mr_reset_reason value;
774 	char *name;
775 } mpi3mr_reset_reason_codes[] = {
776 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
777 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
778 	{ MPI3MR_RESET_FROM_IOCTL, "application invocation" },
779 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
780 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
781 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
782 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
783 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
784 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
785 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
786 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
787 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
788 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
789 	{
790 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
791 		"create request queue timeout"
792 	},
793 	{
794 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
795 		"create reply queue timeout"
796 	},
797 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
798 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
799 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
800 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
801 	{
802 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
803 		"component image activation timeout"
804 	},
805 	{
806 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
807 		"get package version timeout"
808 	},
809 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
810 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
811 	{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronus reset" },
812 };
813 
814 /**
815  * mpi3mr_reset_rc_name - get reset reason code name
816  * @reason_code: reset reason code value
817  *
818  * Map reset reason to an NULL terminated ASCII string
819  *
820  * Return: name corresponding to reset reason value or NULL.
821  */
822 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
823 {
824 	int i;
825 	char *name = NULL;
826 
827 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
828 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
829 			name = mpi3mr_reset_reason_codes[i].name;
830 			break;
831 		}
832 	}
833 	return name;
834 }
835 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
844 
845 /**
846  * mpi3mr_reset_type_name - get reset type name
847  * @reset_type: reset type value
848  *
849  * Map reset type to an NULL terminated ASCII string
850  *
851  * Return: name corresponding to reset type value or NULL.
852  */
853 static const char *mpi3mr_reset_type_name(u16 reset_type)
854 {
855 	int i;
856 	char *name = NULL;
857 
858 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
859 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
860 			name = mpi3mr_reset_types[i].name;
861 			break;
862 		}
863 	}
864 	return name;
865 }
866 
867 /**
868  * mpi3mr_print_fault_info - Display fault information
869  * @mrioc: Adapter instance reference
870  *
871  * Display the controller fault information if there is a
872  * controller fault.
873  *
874  * Return: Nothing.
875  */
876 void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
877 {
878 	u32 ioc_status, code, code1, code2, code3;
879 
880 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
881 
882 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
883 		code = readl(&mrioc->sysif_regs->fault);
884 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
885 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
886 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
887 
888 		ioc_info(mrioc,
889 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
890 		    code, code1, code2, code3);
891 	}
892 }
893 
894 /**
895  * mpi3mr_get_iocstate - Get IOC State
896  * @mrioc: Adapter instance reference
897  *
898  * Return a proper IOC state enum based on the IOC status and
899  * IOC configuration and unrcoverable state of the controller.
900  *
901  * Return: Current IOC state.
902  */
903 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
904 {
905 	u32 ioc_status, ioc_config;
906 	u8 ready, enabled;
907 
908 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
909 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
910 
911 	if (mrioc->unrecoverable)
912 		return MRIOC_STATE_UNRECOVERABLE;
913 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
914 		return MRIOC_STATE_FAULT;
915 
916 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
917 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
918 
919 	if (ready && enabled)
920 		return MRIOC_STATE_READY;
921 	if ((!ready) && (!enabled))
922 		return MRIOC_STATE_RESET;
923 	if ((!ready) && (enabled))
924 		return MRIOC_STATE_BECOMING_READY;
925 
926 	return MRIOC_STATE_RESET_REQUESTED;
927 }
928 
929 /**
930  * mpi3mr_clear_reset_history - clear reset history
931  * @mrioc: Adapter instance reference
932  *
933  * Write the reset history bit in IOC status to clear the bit,
934  * if it is already set.
935  *
936  * Return: Nothing.
937  */
938 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
939 {
940 	u32 ioc_status;
941 
942 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
943 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
944 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
945 }
946 
/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad[0] for firmware/debug visibility. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing the IOC enable bit requests the message unit reset. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms until reset-history is set, a fault occurs,
	 * or the timeout expires.
	 */
	timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			break;
		}
		if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
			mpi3mr_print_fault_info(mrioc);
			break;
		}
		msleep(100);
	} while (--timeout);

	/* Success requires: no timeout, IOC neither ready nor enabled,
	 * and no fault reported.
	 */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
	      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
	      (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
		retval = 0;

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
998 
999 /**
1000  * mpi3mr_bring_ioc_ready - Bring controller to ready state
1001  * @mrioc: Adapter instance reference
1002  *
1003  * Set Enable IOC bit in IOC configuration register and wait for
1004  * the controller to become ready.
1005  *
1006  * Return: 0 on success, appropriate error on failure.
1007  */
1008 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1009 {
1010 	u32 ioc_config, ioc_status, timeout;
1011 	int retval = 0;
1012 	enum mpi3mr_iocstate ioc_state;
1013 	u64 base_info;
1014 
1015 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1016 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1017 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1018 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1019 	    ioc_status, ioc_config, base_info);
1020 
1021 	/*The timeout value is in 2sec unit, changing it to seconds*/
1022 	mrioc->ready_timeout =
1023 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1024 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1025 
1026 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1027 
1028 	ioc_state = mpi3mr_get_iocstate(mrioc);
1029 	ioc_info(mrioc, "controller is in %s state during detection\n",
1030 	    mpi3mr_iocstate_name(ioc_state));
1031 
1032 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1033 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1034 		timeout = mrioc->ready_timeout * 10;
1035 		do {
1036 			msleep(100);
1037 		} while (--timeout);
1038 
1039 		ioc_state = mpi3mr_get_iocstate(mrioc);
1040 		ioc_info(mrioc,
1041 		    "controller is in %s state after waiting to reset\n",
1042 		    mpi3mr_iocstate_name(ioc_state));
1043 	}
1044 
1045 	if (ioc_state == MRIOC_STATE_READY) {
1046 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1047 		retval = mpi3mr_issue_and_process_mur(mrioc,
1048 		    MPI3MR_RESET_FROM_BRINGUP);
1049 		ioc_state = mpi3mr_get_iocstate(mrioc);
1050 		if (retval)
1051 			ioc_err(mrioc,
1052 			    "message unit reset failed with error %d current state %s\n",
1053 			    retval, mpi3mr_iocstate_name(ioc_state));
1054 	}
1055 	if (ioc_state != MRIOC_STATE_RESET) {
1056 		mpi3mr_print_fault_info(mrioc);
1057 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1058 		retval = mpi3mr_issue_reset(mrioc,
1059 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1060 		    MPI3MR_RESET_FROM_BRINGUP);
1061 		if (retval) {
1062 			ioc_err(mrioc,
1063 			    "soft reset failed with error %d\n", retval);
1064 			goto out_failed;
1065 		}
1066 	}
1067 	ioc_state = mpi3mr_get_iocstate(mrioc);
1068 	if (ioc_state != MRIOC_STATE_RESET) {
1069 		ioc_err(mrioc,
1070 		    "cannot bring controller to reset state, current state: %s\n",
1071 		    mpi3mr_iocstate_name(ioc_state));
1072 		goto out_failed;
1073 	}
1074 	mpi3mr_clear_reset_history(mrioc);
1075 	retval = mpi3mr_setup_admin_qpair(mrioc);
1076 	if (retval) {
1077 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1078 		    retval);
1079 		goto out_failed;
1080 	}
1081 
1082 	ioc_info(mrioc, "bringing controller to ready state\n");
1083 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1084 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1085 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1086 
1087 	timeout = mrioc->ready_timeout * 10;
1088 	do {
1089 		ioc_state = mpi3mr_get_iocstate(mrioc);
1090 		if (ioc_state == MRIOC_STATE_READY) {
1091 			ioc_info(mrioc,
1092 			    "successfully transistioned to %s state\n",
1093 			    mpi3mr_iocstate_name(ioc_state));
1094 			return 0;
1095 		}
1096 		msleep(100);
1097 	} while (--timeout);
1098 
1099 out_failed:
1100 	ioc_state = mpi3mr_get_iocstate(mrioc);
1101 	ioc_err(mrioc,
1102 	    "failed to bring to ready state,  current state: %s\n",
1103 	    mpi3mr_iocstate_name(ioc_state));
1104 	return retval;
1105 }
1106 
1107 /**
1108  * mpi3mr_soft_reset_success - Check softreset is success or not
1109  * @ioc_status: IOC status register value
1110  * @ioc_config: IOC config register value
1111  *
1112  * Check whether the soft reset is successful or not based on
1113  * IOC status and IOC config register values.
1114  *
1115  * Return: True when the soft reset is success, false otherwise.
1116  */
1117 static inline bool
1118 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1119 {
1120 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1121 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1122 		return true;
1123 	return false;
1124 }
1125 
1126 /**
1127  * mpi3mr_diagfault_success - Check diag fault is success or not
1128  * @mrioc: Adapter reference
1129  * @ioc_status: IOC status register value
1130  *
1131  * Check whether the controller hit diag reset fault code.
1132  *
1133  * Return: True when there is diag fault, false otherwise.
1134  */
1135 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1136 	u32 ioc_status)
1137 {
1138 	u32 fault;
1139 
1140 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1141 		return false;
1142 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1143 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
1144 		mpi3mr_print_fault_info(mrioc);
1145 		return true;
1146 	}
1147 	return false;
1148 }
1149 
1150 /**
1151  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1152  * @mrioc: Adapter reference
1153  *
1154  * Set diag save bit in IOC configuration register to enable
1155  * snapdump.
1156  *
1157  * Return: Nothing.
1158  */
1159 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1160 {
1161 	u32 ioc_config;
1162 
1163 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1164 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1165 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1166 }
1167 
1168 /**
1169  * mpi3mr_issue_reset - Issue reset to the controller
1170  * @mrioc: Adapter reference
1171  * @reset_type: Reset type
1172  * @reset_reason: Reset reason code
1173  *
1174  * Unlock the host diagnostic registers and write the specific
1175  * reset type to that, wait for reset acknowledgment from the
1176  * controller, if the reset is not successful retry for the
1177  * predefined number of times.
1178  *
1179  * Return: 0 on success, non-zero on failure.
1180  */
1181 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1182 	u32 reset_reason)
1183 {
1184 	int retval = -1;
1185 	u8 unlock_retry_count = 0;
1186 	u32 host_diagnostic, ioc_status, ioc_config;
1187 	u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
1188 
1189 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1190 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1191 		return retval;
1192 	if (mrioc->unrecoverable)
1193 		return retval;
1194 	if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
1195 		retval = 0;
1196 		return retval;
1197 	}
1198 
1199 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1200 	    mpi3mr_reset_type_name(reset_type),
1201 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1202 
1203 	mpi3mr_clear_reset_history(mrioc);
1204 	do {
1205 		ioc_info(mrioc,
1206 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1207 		    ++unlock_retry_count);
1208 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1209 			ioc_err(mrioc,
1210 			    "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
1211 			    mpi3mr_reset_type_name(reset_type),
1212 			    host_diagnostic);
1213 			mrioc->unrecoverable = 1;
1214 			return retval;
1215 		}
1216 
1217 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1218 		    &mrioc->sysif_regs->write_sequence);
1219 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1220 		    &mrioc->sysif_regs->write_sequence);
1221 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1222 		    &mrioc->sysif_regs->write_sequence);
1223 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1224 		    &mrioc->sysif_regs->write_sequence);
1225 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1226 		    &mrioc->sysif_regs->write_sequence);
1227 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1228 		    &mrioc->sysif_regs->write_sequence);
1229 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1230 		    &mrioc->sysif_regs->write_sequence);
1231 		usleep_range(1000, 1100);
1232 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1233 		ioc_info(mrioc,
1234 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1235 		    unlock_retry_count, host_diagnostic);
1236 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1237 
1238 	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1239 	writel(host_diagnostic | reset_type,
1240 	    &mrioc->sysif_regs->host_diagnostic);
1241 	switch (reset_type) {
1242 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
1243 		do {
1244 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1245 			ioc_config =
1246 			    readl(&mrioc->sysif_regs->ioc_configuration);
1247 			if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
1248 			    && mpi3mr_soft_reset_success(ioc_status, ioc_config)
1249 			    ) {
1250 				mpi3mr_clear_reset_history(mrioc);
1251 				retval = 0;
1252 				break;
1253 			}
1254 			msleep(100);
1255 		} while (--timeout);
1256 		mpi3mr_print_fault_info(mrioc);
1257 		break;
1258 	case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
1259 		do {
1260 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1261 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1262 				retval = 0;
1263 				break;
1264 			}
1265 			msleep(100);
1266 		} while (--timeout);
1267 		break;
1268 	default:
1269 		break;
1270 	}
1271 
1272 	writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1273 	    &mrioc->sysif_regs->write_sequence);
1274 
1275 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1276 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1277 	ioc_info(mrioc,
1278 	    "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
1279 	    (!retval)?"successful":"failed", ioc_status,
1280 	    ioc_config);
1281 	if (retval)
1282 		mrioc->unrecoverable = 1;
1283 	return retval;
1284 }
1285 
1286 /**
1287  * mpi3mr_admin_request_post - Post request to admin queue
1288  * @mrioc: Adapter reference
1289  * @admin_req: MPI3 request
1290  * @admin_req_sz: Request size
1291  * @ignore_reset: Ignore reset in process
1292  *
1293  * Post the MPI3 request into admin request queue and
1294  * inform the controller, if the queue is full return
1295  * appropriate error.
1296  *
1297  * Return: 0 on success, non-zero on failure.
1298  */
1299 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1300 	u16 admin_req_sz, u8 ignore_reset)
1301 {
1302 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1303 	int retval = 0;
1304 	unsigned long flags;
1305 	u8 *areq_entry;
1306 
1307 	if (mrioc->unrecoverable) {
1308 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1309 		return -EFAULT;
1310 	}
1311 
1312 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1313 	areq_pi = mrioc->admin_req_pi;
1314 	areq_ci = mrioc->admin_req_ci;
1315 	max_entries = mrioc->num_admin_req;
1316 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1317 	    (areq_pi == (max_entries - 1)))) {
1318 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1319 		retval = -EAGAIN;
1320 		goto out;
1321 	}
1322 	if (!ignore_reset && mrioc->reset_in_progress) {
1323 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1324 		retval = -EAGAIN;
1325 		goto out;
1326 	}
1327 	areq_entry = (u8 *)mrioc->admin_req_base +
1328 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1329 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1330 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1331 
1332 	if (++areq_pi == max_entries)
1333 		areq_pi = 0;
1334 	mrioc->admin_req_pi = areq_pi;
1335 
1336 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1337 
1338 out:
1339 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1340 
1341 	return retval;
1342 }
1343 
1344 /**
1345  * mpi3mr_free_op_req_q_segments - free request memory segments
1346  * @mrioc: Adapter instance reference
1347  * @q_idx: operational request queue index
1348  *
1349  * Free memory segments allocated for operational request queue
1350  *
1351  * Return: Nothing.
1352  */
1353 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1354 {
1355 	u16 j;
1356 	int size;
1357 	struct segments *segments;
1358 
1359 	segments = mrioc->req_qinfo[q_idx].q_segments;
1360 	if (!segments)
1361 		return;
1362 
1363 	if (mrioc->enable_segqueue) {
1364 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1365 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1366 			dma_free_coherent(&mrioc->pdev->dev,
1367 			    MPI3MR_MAX_SEG_LIST_SIZE,
1368 			    mrioc->req_qinfo[q_idx].q_segment_list,
1369 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1370 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1371 		}
1372 	} else
1373 		size = mrioc->req_qinfo[q_idx].num_requests *
1374 		    mrioc->facts.op_req_sz;
1375 
1376 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1377 		if (!segments[j].segment)
1378 			continue;
1379 		dma_free_coherent(&mrioc->pdev->dev,
1380 		    size, segments[j].segment, segments[j].segment_dma);
1381 		segments[j].segment = NULL;
1382 	}
1383 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1384 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1385 	mrioc->req_qinfo[q_idx].qid = 0;
1386 }
1387 
1388 /**
1389  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1390  * @mrioc: Adapter instance reference
1391  * @q_idx: operational reply queue index
1392  *
1393  * Free memory segments allocated for operational reply queue
1394  *
1395  * Return: Nothing.
1396  */
1397 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1398 {
1399 	u16 j;
1400 	int size;
1401 	struct segments *segments;
1402 
1403 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1404 	if (!segments)
1405 		return;
1406 
1407 	if (mrioc->enable_segqueue) {
1408 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1409 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1410 			dma_free_coherent(&mrioc->pdev->dev,
1411 			    MPI3MR_MAX_SEG_LIST_SIZE,
1412 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1413 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1414 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1415 		}
1416 	} else
1417 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1418 		    mrioc->op_reply_desc_sz;
1419 
1420 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1421 		if (!segments[j].segment)
1422 			continue;
1423 		dma_free_coherent(&mrioc->pdev->dev,
1424 		    size, segments[j].segment, segments[j].segment_dma);
1425 		segments[j].segment = NULL;
1426 	}
1427 
1428 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1429 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1430 	mrioc->op_reply_qinfo[q_idx].qid = 0;
1431 }
1432 
1433 /**
1434  * mpi3mr_delete_op_reply_q - delete operational reply queue
1435  * @mrioc: Adapter instance reference
1436  * @qidx: operational reply queue index
1437  *
 * Delete operational reply queue by issuing MPI request
1439  * through admin queue.
1440  *
1441  * Return:  0 on success, non-zero on failure.
1442  */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = mrioc->op_reply_qinfo[qidx].qid;

	/* MSI-x index that services this reply queue */
	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means the queue was never created (or was already deleted) */
	if (!reply_qid)	{
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single-use command slot; serialize access to it */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* post through the admin queue, ignoring any reset in progress */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "delete reply queue timed out\n");
		/* timed out: save snapdump and fault the controller */
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* detach the queue from its interrupt vector before freeing memory */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
1509 
1510 /**
1511  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
1512  * @mrioc: Adapter instance reference
1513  * @qidx: request queue index
1514  *
1515  * Allocate segmented memory pools for operational reply
1516  * queue.
1517  *
1518  * Return: 0 on success, non-zero on failure.
1519  */
1520 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1521 {
1522 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1523 	int i, size;
1524 	u64 *q_segment_list_entry = NULL;
1525 	struct segments *segments;
1526 
1527 	if (mrioc->enable_segqueue) {
1528 		op_reply_q->segment_qd =
1529 		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1530 
1531 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1532 
1533 		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1534 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
1535 		    GFP_KERNEL);
1536 		if (!op_reply_q->q_segment_list)
1537 			return -ENOMEM;
1538 		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
1539 	} else {
1540 		op_reply_q->segment_qd = op_reply_q->num_replies;
1541 		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
1542 	}
1543 
1544 	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
1545 	    op_reply_q->segment_qd);
1546 
1547 	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
1548 	    sizeof(struct segments), GFP_KERNEL);
1549 	if (!op_reply_q->q_segments)
1550 		return -ENOMEM;
1551 
1552 	segments = op_reply_q->q_segments;
1553 	for (i = 0; i < op_reply_q->num_segments; i++) {
1554 		segments[i].segment =
1555 		    dma_alloc_coherent(&mrioc->pdev->dev,
1556 		    size, &segments[i].segment_dma, GFP_KERNEL);
1557 		if (!segments[i].segment)
1558 			return -ENOMEM;
1559 		if (mrioc->enable_segqueue)
1560 			q_segment_list_entry[i] =
1561 			    (unsigned long)segments[i].segment_dma;
1562 	}
1563 
1564 	return 0;
1565 }
1566 
1567 /**
1568  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1569  * @mrioc: Adapter instance reference
1570  * @qidx: request queue index
1571  *
1572  * Allocate segmented memory pools for operational request
1573  * queue.
1574  *
1575  * Return: 0 on success, non-zero on failure.
1576  */
1577 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1578 {
1579 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1580 	int i, size;
1581 	u64 *q_segment_list_entry = NULL;
1582 	struct segments *segments;
1583 
1584 	if (mrioc->enable_segqueue) {
1585 		op_req_q->segment_qd =
1586 		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1587 
1588 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1589 
1590 		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1591 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1592 		    GFP_KERNEL);
1593 		if (!op_req_q->q_segment_list)
1594 			return -ENOMEM;
1595 		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1596 
1597 	} else {
1598 		op_req_q->segment_qd = op_req_q->num_requests;
1599 		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1600 	}
1601 
1602 	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1603 	    op_req_q->segment_qd);
1604 
1605 	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1606 	    sizeof(struct segments), GFP_KERNEL);
1607 	if (!op_req_q->q_segments)
1608 		return -ENOMEM;
1609 
1610 	segments = op_req_q->q_segments;
1611 	for (i = 0; i < op_req_q->num_segments; i++) {
1612 		segments[i].segment =
1613 		    dma_alloc_coherent(&mrioc->pdev->dev,
1614 		    size, &segments[i].segment_dma, GFP_KERNEL);
1615 		if (!segments[i].segment)
1616 			return -ENOMEM;
1617 		if (mrioc->enable_segqueue)
1618 			q_segment_list_entry[i] =
1619 			    (unsigned long)segments[i].segment_dma;
1620 	}
1621 
1622 	return 0;
1623 }
1624 
1625 /**
1626  * mpi3mr_create_op_reply_q - create operational reply queue
1627  * @mrioc: Adapter instance reference
1628  * @qidx: operational reply queue index
1629  *
 * Create operational reply queue by issuing MPI request
1631  * through admin queue.
1632  *
1633  * Return:  0 on success, non-zero on failure.
1634  */
1635 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1636 {
1637 	struct mpi3_create_reply_queue_request create_req;
1638 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1639 	int retval = 0;
1640 	u16 reply_qid = 0, midx;
1641 
1642 	reply_qid = op_reply_q->qid;
1643 
1644 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1645 
1646 	if (reply_qid) {
1647 		retval = -1;
1648 		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1649 		    reply_qid);
1650 
1651 		return retval;
1652 	}
1653 
1654 	reply_qid = qidx + 1;
1655 	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
1656 	op_reply_q->ci = 0;
1657 	op_reply_q->ephase = 1;
1658 	atomic_set(&op_reply_q->pend_ios, 0);
1659 	atomic_set(&op_reply_q->in_use, 0);
1660 	op_reply_q->enable_irq_poll = false;
1661 
1662 	if (!op_reply_q->q_segments) {
1663 		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
1664 		if (retval) {
1665 			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1666 			goto out;
1667 		}
1668 	}
1669 
1670 	memset(&create_req, 0, sizeof(create_req));
1671 	mutex_lock(&mrioc->init_cmds.mutex);
1672 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1673 		retval = -1;
1674 		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
1675 		goto out_unlock;
1676 	}
1677 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1678 	mrioc->init_cmds.is_waiting = 1;
1679 	mrioc->init_cmds.callback = NULL;
1680 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1681 	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
1682 	create_req.queue_id = cpu_to_le16(reply_qid);
1683 	create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
1684 	create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
1685 	if (mrioc->enable_segqueue) {
1686 		create_req.flags |=
1687 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1688 		create_req.base_address = cpu_to_le64(
1689 		    op_reply_q->q_segment_list_dma);
1690 	} else
1691 		create_req.base_address = cpu_to_le64(
1692 		    op_reply_q->q_segments[0].segment_dma);
1693 
1694 	create_req.size = cpu_to_le16(op_reply_q->num_replies);
1695 
1696 	init_completion(&mrioc->init_cmds.done);
1697 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
1698 	    sizeof(create_req), 1);
1699 	if (retval) {
1700 		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
1701 		goto out_unlock;
1702 	}
1703 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1704 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1705 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1706 		ioc_err(mrioc, "create reply queue timed out\n");
1707 		mpi3mr_check_rh_fault_ioc(mrioc,
1708 		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
1709 		retval = -1;
1710 		goto out_unlock;
1711 	}
1712 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1713 	    != MPI3_IOCSTATUS_SUCCESS) {
1714 		ioc_err(mrioc,
1715 		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1716 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1717 		    mrioc->init_cmds.ioc_loginfo);
1718 		retval = -1;
1719 		goto out_unlock;
1720 	}
1721 	op_reply_q->qid = reply_qid;
1722 	if (midx < mrioc->intr_info_count)
1723 		mrioc->intr_info[midx].op_reply_q = op_reply_q;
1724 
1725 out_unlock:
1726 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1727 	mutex_unlock(&mrioc->init_cmds.mutex);
1728 out:
1729 
1730 	return retval;
1731 }
1732 
1733 /**
1734  * mpi3mr_create_op_req_q - create operational request queue
1735  * @mrioc: Adapter instance reference
1736  * @idx: operational request queue index
1737  * @reply_qid: Reply queue ID
1738  *
 * Create operational request queue by issuing MPI request
1740  * through admin queue.
1741  *
1742  * Return:  0 on success, non-zero on failure.
1743  */
1744 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
1745 	u16 reply_qid)
1746 {
1747 	struct mpi3_create_request_queue_request create_req;
1748 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
1749 	int retval = 0;
1750 	u16 req_qid = 0;
1751 
1752 	req_qid = op_req_q->qid;
1753 
1754 	if (req_qid) {
1755 		retval = -1;
1756 		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
1757 		    req_qid);
1758 
1759 		return retval;
1760 	}
1761 	req_qid = idx + 1;
1762 
1763 	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
1764 	op_req_q->ci = 0;
1765 	op_req_q->pi = 0;
1766 	op_req_q->reply_qid = reply_qid;
1767 	spin_lock_init(&op_req_q->q_lock);
1768 
1769 	if (!op_req_q->q_segments) {
1770 		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
1771 		if (retval) {
1772 			mpi3mr_free_op_req_q_segments(mrioc, idx);
1773 			goto out;
1774 		}
1775 	}
1776 
1777 	memset(&create_req, 0, sizeof(create_req));
1778 	mutex_lock(&mrioc->init_cmds.mutex);
1779 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1780 		retval = -1;
1781 		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
1782 		goto out_unlock;
1783 	}
1784 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1785 	mrioc->init_cmds.is_waiting = 1;
1786 	mrioc->init_cmds.callback = NULL;
1787 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1788 	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
1789 	create_req.queue_id = cpu_to_le16(req_qid);
1790 	if (mrioc->enable_segqueue) {
1791 		create_req.flags =
1792 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1793 		create_req.base_address = cpu_to_le64(
1794 		    op_req_q->q_segment_list_dma);
1795 	} else
1796 		create_req.base_address = cpu_to_le64(
1797 		    op_req_q->q_segments[0].segment_dma);
1798 	create_req.reply_queue_id = cpu_to_le16(reply_qid);
1799 	create_req.size = cpu_to_le16(op_req_q->num_requests);
1800 
1801 	init_completion(&mrioc->init_cmds.done);
1802 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
1803 	    sizeof(create_req), 1);
1804 	if (retval) {
1805 		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
1806 		goto out_unlock;
1807 	}
1808 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1809 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1810 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1811 		ioc_err(mrioc, "create request queue timed out\n");
1812 		mpi3mr_check_rh_fault_ioc(mrioc,
1813 		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
1814 		retval = -1;
1815 		goto out_unlock;
1816 	}
1817 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1818 	    != MPI3_IOCSTATUS_SUCCESS) {
1819 		ioc_err(mrioc,
1820 		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1821 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1822 		    mrioc->init_cmds.ioc_loginfo);
1823 		retval = -1;
1824 		goto out_unlock;
1825 	}
1826 	op_req_q->qid = req_qid;
1827 
1828 out_unlock:
1829 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1830 	mutex_unlock(&mrioc->init_cmds.mutex);
1831 out:
1832 
1833 	return retval;
1834 }
1835 
1836 /**
1837  * mpi3mr_create_op_queues - create operational queue pairs
1838  * @mrioc: Adapter instance reference
1839  *
1840  * Allocate memory for operational queue meta data and call
1841  * create request and reply queue functions.
1842  *
1843  * Return: 0 on success, non-zero on failures.
1844  */
1845 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
1846 {
1847 	int retval = 0;
1848 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
1849 
1850 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
1851 	    mrioc->facts.max_op_req_q);
1852 
1853 	msix_count_op_q =
1854 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
1855 	if (!mrioc->num_queues)
1856 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
1857 	num_queues = mrioc->num_queues;
1858 	ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
1859 	    num_queues);
1860 
1861 	if (!mrioc->req_qinfo) {
1862 		mrioc->req_qinfo = kcalloc(num_queues,
1863 		    sizeof(struct op_req_qinfo), GFP_KERNEL);
1864 		if (!mrioc->req_qinfo) {
1865 			retval = -1;
1866 			goto out_failed;
1867 		}
1868 
1869 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
1870 		    num_queues, GFP_KERNEL);
1871 		if (!mrioc->op_reply_qinfo) {
1872 			retval = -1;
1873 			goto out_failed;
1874 		}
1875 	}
1876 
1877 	if (mrioc->enable_segqueue)
1878 		ioc_info(mrioc,
1879 		    "allocating operational queues through segmented queues\n");
1880 
1881 	for (i = 0; i < num_queues; i++) {
1882 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
1883 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
1884 			break;
1885 		}
1886 		if (mpi3mr_create_op_req_q(mrioc, i,
1887 		    mrioc->op_reply_qinfo[i].qid)) {
1888 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
1889 			mpi3mr_delete_op_reply_q(mrioc, i);
1890 			break;
1891 		}
1892 	}
1893 
1894 	if (i == 0) {
1895 		/* Not even one queue is created successfully*/
1896 		retval = -1;
1897 		goto out_failed;
1898 	}
1899 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
1900 	ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
1901 	    mrioc->num_op_reply_q);
1902 
1903 	return retval;
1904 out_failed:
1905 	kfree(mrioc->req_qinfo);
1906 	mrioc->req_qinfo = NULL;
1907 
1908 	kfree(mrioc->op_reply_qinfo);
1909 	mrioc->op_reply_qinfo = NULL;
1910 
1911 	return retval;
1912 }
1913 
1914 /**
1915  * mpi3mr_op_request_post - Post request to operational queue
1916  * @mrioc: Adapter reference
1917  * @op_req_q: Operational request queue info
1918  * @req: MPI3 request
1919  *
1920  * Post the MPI3 request into operational request queue and
1921  * inform the controller, if the queue is full return
1922  * appropriate error.
1923  *
1924  * Return: 0 on success, non-zero on failure.
1925  */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;

	/* reply_qid is 1-based; derive the paired reply queue's array index */
	reply_qidx = op_req_q->reply_qid - 1;

	if (mrioc->unrecoverable)
		return -EFAULT;

	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/*
		 * Queue looks full: drain the paired reply queue once to
		 * reclaim completed slots, then re-check before giving up.
		 */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* locate the current producer slot within its memory segment */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	/* zero the full frame, then copy the caller's request into it */
	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	/* advance the producer index with wraparound */
	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

	/* mark the reply queue for irq polling once pending I/Os exceed the trigger */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;

	/* inform the controller of the new producer index */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
1985 
1986 /**
1987  * mpi3mr_check_rh_fault_ioc - check reset history and fault
1988  * controller
1989  * @mrioc: Adapter instance reference
1990  * @reason_code, reason code for the fault.
1991  *
1992  * This routine will save snapdump and fault the controller with
1993  * the given reason code if it is not already in the fault or
1994  * not asynchronosuly reset. This will be used to handle
1995  * initilaization time faults/resets/timeout as in those cases
1996  * immediate soft reset invocation is not required.
1997  *
1998  * Return:  None.
1999  */
2000 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
2001 {
2002 	u32 ioc_status, host_diagnostic, timeout;
2003 
2004 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
2005 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
2006 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
2007 		mpi3mr_print_fault_info(mrioc);
2008 		return;
2009 	}
2010 	mpi3mr_set_diagsave(mrioc);
2011 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2012 	    reason_code);
2013 	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2014 	do {
2015 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2016 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2017 			break;
2018 		msleep(100);
2019 	} while (--timeout);
2020 }
2021 
2022 /**
2023  * mpi3mr_sync_timestamp - Issue time stamp sync request
2024  * @mrioc: Adapter reference
2025  *
2026  * Issue IO unit control MPI request to synchornize firmware
2027  * timestamp with host time.
2028  *
2029  * Return: 0 on success, non-zero on failure.
2030  */
2031 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2032 {
2033 	ktime_t current_time;
2034 	struct mpi3_iounit_control_request iou_ctrl;
2035 	int retval = 0;
2036 
2037 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2038 	mutex_lock(&mrioc->init_cmds.mutex);
2039 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2040 		retval = -1;
2041 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2042 		mutex_unlock(&mrioc->init_cmds.mutex);
2043 		goto out;
2044 	}
2045 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2046 	mrioc->init_cmds.is_waiting = 1;
2047 	mrioc->init_cmds.callback = NULL;
2048 	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2049 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2050 	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
2051 	current_time = ktime_get_real();
2052 	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2053 
2054 	init_completion(&mrioc->init_cmds.done);
2055 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2056 	    sizeof(iou_ctrl), 0);
2057 	if (retval) {
2058 		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2059 		goto out_unlock;
2060 	}
2061 
2062 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2063 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2064 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2065 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2066 		mrioc->init_cmds.is_waiting = 0;
2067 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2068 			mpi3mr_soft_reset_handler(mrioc,
2069 			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
2070 		retval = -1;
2071 		goto out_unlock;
2072 	}
2073 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2074 	    != MPI3_IOCSTATUS_SUCCESS) {
2075 		ioc_err(mrioc,
2076 		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2077 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2078 		    mrioc->init_cmds.ioc_loginfo);
2079 		retval = -1;
2080 		goto out_unlock;
2081 	}
2082 
2083 out_unlock:
2084 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2085 	mutex_unlock(&mrioc->init_cmds.mutex);
2086 
2087 out:
2088 	return retval;
2089 }
2090 
2091 /**
2092  * mpi3mr_print_pkg_ver - display controller fw package version
2093  * @mrioc: Adapter reference
2094  *
2095  * Retrieve firmware package version from the component image
2096  * header of the controller flash and display it.
2097  *
2098  * Return: 0 on success and non-zero on failure.
2099  */
2100 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2101 {
2102 	struct mpi3_ci_upload_request ci_upload;
2103 	int retval = -1;
2104 	void *data = NULL;
2105 	dma_addr_t data_dma;
2106 	struct mpi3_ci_manifest_mpi *manifest;
2107 	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2108 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2109 
2110 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2111 	    GFP_KERNEL);
2112 	if (!data)
2113 		return -ENOMEM;
2114 
2115 	memset(&ci_upload, 0, sizeof(ci_upload));
2116 	mutex_lock(&mrioc->init_cmds.mutex);
2117 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2118 		ioc_err(mrioc, "sending get package version failed due to command in use\n");
2119 		mutex_unlock(&mrioc->init_cmds.mutex);
2120 		goto out;
2121 	}
2122 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2123 	mrioc->init_cmds.is_waiting = 1;
2124 	mrioc->init_cmds.callback = NULL;
2125 	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2126 	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2127 	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2128 	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2129 	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2130 	ci_upload.segment_size = cpu_to_le32(data_len);
2131 
2132 	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2133 	    data_dma);
2134 	init_completion(&mrioc->init_cmds.done);
2135 	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2136 	    sizeof(ci_upload), 1);
2137 	if (retval) {
2138 		ioc_err(mrioc, "posting get package version failed\n");
2139 		goto out_unlock;
2140 	}
2141 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2142 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2143 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2144 		ioc_err(mrioc, "get package version timed out\n");
2145 		mpi3mr_check_rh_fault_ioc(mrioc,
2146 		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2147 		retval = -1;
2148 		goto out_unlock;
2149 	}
2150 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2151 	    == MPI3_IOCSTATUS_SUCCESS) {
2152 		manifest = (struct mpi3_ci_manifest_mpi *) data;
2153 		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2154 			ioc_info(mrioc,
2155 			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2156 			    manifest->package_version.gen_major,
2157 			    manifest->package_version.gen_minor,
2158 			    manifest->package_version.phase_major,
2159 			    manifest->package_version.phase_minor,
2160 			    manifest->package_version.customer_id,
2161 			    manifest->package_version.build_num);
2162 		}
2163 	}
2164 	retval = 0;
2165 out_unlock:
2166 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2167 	mutex_unlock(&mrioc->init_cmds.mutex);
2168 
2169 out:
2170 	if (data)
2171 		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2172 		    data_dma);
2173 	return retval;
2174 }
2175 
2176 /**
2177  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2178  * @work: work struct
2179  *
2180  * Watch dog work periodically executed (1 second interval) to
2181  * monitor firmware fault and to issue periodic timer sync to
2182  * the firmware.
2183  *
2184  * Return: Nothing.
2185  */
2186 static void mpi3mr_watchdog_work(struct work_struct *work)
2187 {
2188 	struct mpi3mr_ioc *mrioc =
2189 	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2190 	unsigned long flags;
2191 	enum mpi3mr_iocstate ioc_state;
2192 	u32 fault, host_diagnostic;
2193 
2194 	if (mrioc->reset_in_progress || mrioc->unrecoverable)
2195 		return;
2196 
2197 	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
2198 		mrioc->ts_update_counter = 0;
2199 		mpi3mr_sync_timestamp(mrioc);
2200 	}
2201 
2202 	/*Check for fault state every one second and issue Soft reset*/
2203 	ioc_state = mpi3mr_get_iocstate(mrioc);
2204 	if (ioc_state == MRIOC_STATE_FAULT) {
2205 		fault = readl(&mrioc->sysif_regs->fault) &
2206 		    MPI3_SYSIF_FAULT_CODE_MASK;
2207 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2208 		if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2209 			if (!mrioc->diagsave_timeout) {
2210 				mpi3mr_print_fault_info(mrioc);
2211 				ioc_warn(mrioc, "Diag save in progress\n");
2212 			}
2213 			if ((mrioc->diagsave_timeout++) <=
2214 			    MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2215 				goto schedule_work;
2216 		} else
2217 			mpi3mr_print_fault_info(mrioc);
2218 		mrioc->diagsave_timeout = 0;
2219 
2220 		if (fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) {
2221 			ioc_info(mrioc,
2222 			    "Factory Reset fault occurred marking controller as unrecoverable"
2223 			    );
2224 			mrioc->unrecoverable = 1;
2225 			goto out;
2226 		}
2227 
2228 		if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
2229 		    (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
2230 		    (mrioc->reset_in_progress))
2231 			goto out;
2232 		if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
2233 			mpi3mr_soft_reset_handler(mrioc,
2234 			    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
2235 		else
2236 			mpi3mr_soft_reset_handler(mrioc,
2237 			    MPI3MR_RESET_FROM_FAULT_WATCH, 0);
2238 	}
2239 
2240 schedule_work:
2241 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2242 	if (mrioc->watchdog_work_q)
2243 		queue_delayed_work(mrioc->watchdog_work_q,
2244 		    &mrioc->watchdog_work,
2245 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2246 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2247 out:
2248 	return;
2249 }
2250 
2251 /**
2252  * mpi3mr_start_watchdog - Start watchdog
2253  * @mrioc: Adapter instance reference
2254  *
2255  * Create and start the watchdog thread to monitor controller
2256  * faults.
2257  *
2258  * Return: Nothing.
2259  */
2260 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2261 {
2262 	if (mrioc->watchdog_work_q)
2263 		return;
2264 
2265 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2266 	snprintf(mrioc->watchdog_work_q_name,
2267 	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2268 	    mrioc->id);
2269 	mrioc->watchdog_work_q =
2270 	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2271 	if (!mrioc->watchdog_work_q) {
2272 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2273 		return;
2274 	}
2275 
2276 	if (mrioc->watchdog_work_q)
2277 		queue_delayed_work(mrioc->watchdog_work_q,
2278 		    &mrioc->watchdog_work,
2279 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2280 }
2281 
2282 /**
2283  * mpi3mr_stop_watchdog - Stop watchdog
2284  * @mrioc: Adapter instance reference
2285  *
2286  * Stop the watchdog thread created to monitor controller
2287  * faults.
2288  *
2289  * Return: Nothing.
2290  */
2291 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2292 {
2293 	unsigned long flags;
2294 	struct workqueue_struct *wq;
2295 
2296 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2297 	wq = mrioc->watchdog_work_q;
2298 	mrioc->watchdog_work_q = NULL;
2299 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2300 	if (wq) {
2301 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2302 			flush_workqueue(wq);
2303 		destroy_workqueue(wq);
2304 	}
2305 }
2306 
2307 /**
2308  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2309  * @mrioc: Adapter instance reference
2310  *
2311  * Allocate memory for admin queue pair if required and register
2312  * the admin queue with the controller.
2313  *
2314  * Return: 0 on success, non-zero on failures.
2315  */
2316 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2317 {
2318 	int retval = 0;
2319 	u32 num_admin_entries = 0;
2320 
2321 	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2322 	mrioc->num_admin_req = mrioc->admin_req_q_sz /
2323 	    MPI3MR_ADMIN_REQ_FRAME_SZ;
2324 	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2325 	mrioc->admin_req_base = NULL;
2326 
2327 	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2328 	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2329 	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
2330 	mrioc->admin_reply_ci = 0;
2331 	mrioc->admin_reply_ephase = 1;
2332 	mrioc->admin_reply_base = NULL;
2333 
2334 	if (!mrioc->admin_req_base) {
2335 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2336 		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2337 
2338 		if (!mrioc->admin_req_base) {
2339 			retval = -1;
2340 			goto out_failed;
2341 		}
2342 
2343 		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2344 		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2345 		    GFP_KERNEL);
2346 
2347 		if (!mrioc->admin_reply_base) {
2348 			retval = -1;
2349 			goto out_failed;
2350 		}
2351 	}
2352 
2353 	num_admin_entries = (mrioc->num_admin_replies << 16) |
2354 	    (mrioc->num_admin_req);
2355 	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2356 	mpi3mr_writeq(mrioc->admin_req_dma,
2357 	    &mrioc->sysif_regs->admin_request_queue_address);
2358 	mpi3mr_writeq(mrioc->admin_reply_dma,
2359 	    &mrioc->sysif_regs->admin_reply_queue_address);
2360 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2361 	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2362 	return retval;
2363 
2364 out_failed:
2365 
2366 	if (mrioc->admin_reply_base) {
2367 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2368 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
2369 		mrioc->admin_reply_base = NULL;
2370 	}
2371 	if (mrioc->admin_req_base) {
2372 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2373 		    mrioc->admin_req_base, mrioc->admin_req_dma);
2374 		mrioc->admin_req_base = NULL;
2375 	}
2376 	return retval;
2377 }
2378 
2379 /**
2380  * mpi3mr_issue_iocfacts - Send IOC Facts
2381  * @mrioc: Adapter instance reference
2382  * @facts_data: Cached IOC facts data
2383  *
2384  * Issue IOC Facts MPI request through admin queue and wait for
2385  * the completion of it or time out.
2386  *
2387  * Return: 0 on success, non-zero on failures.
2388  */
2389 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2390 	struct mpi3_ioc_facts_data *facts_data)
2391 {
2392 	struct mpi3_ioc_facts_request iocfacts_req;
2393 	void *data = NULL;
2394 	dma_addr_t data_dma;
2395 	u32 data_len = sizeof(*facts_data);
2396 	int retval = 0;
2397 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2398 
2399 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2400 	    GFP_KERNEL);
2401 
2402 	if (!data) {
2403 		retval = -1;
2404 		goto out;
2405 	}
2406 
2407 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
2408 	mutex_lock(&mrioc->init_cmds.mutex);
2409 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2410 		retval = -1;
2411 		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
2412 		mutex_unlock(&mrioc->init_cmds.mutex);
2413 		goto out;
2414 	}
2415 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2416 	mrioc->init_cmds.is_waiting = 1;
2417 	mrioc->init_cmds.callback = NULL;
2418 	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2419 	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
2420 
2421 	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
2422 	    data_dma);
2423 
2424 	init_completion(&mrioc->init_cmds.done);
2425 	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
2426 	    sizeof(iocfacts_req), 1);
2427 	if (retval) {
2428 		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
2429 		goto out_unlock;
2430 	}
2431 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2432 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2433 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2434 		ioc_err(mrioc, "ioc_facts timed out\n");
2435 		mpi3mr_check_rh_fault_ioc(mrioc,
2436 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
2437 		retval = -1;
2438 		goto out_unlock;
2439 	}
2440 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2441 	    != MPI3_IOCSTATUS_SUCCESS) {
2442 		ioc_err(mrioc,
2443 		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2444 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2445 		    mrioc->init_cmds.ioc_loginfo);
2446 		retval = -1;
2447 		goto out_unlock;
2448 	}
2449 	memcpy(facts_data, (u8 *)data, data_len);
2450 out_unlock:
2451 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2452 	mutex_unlock(&mrioc->init_cmds.mutex);
2453 
2454 out:
2455 	if (data)
2456 		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
2457 
2458 	return retval;
2459 }
2460 
2461 /**
2462  * mpi3mr_check_reset_dma_mask - Process IOC facts data
2463  * @mrioc: Adapter instance reference
2464  *
2465  * Check whether the new DMA mask requested through IOCFacts by
2466  * firmware needs to be set, if so set it .
2467  *
2468  * Return: 0 on success, non-zero on failure.
2469  */
2470 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2471 {
2472 	struct pci_dev *pdev = mrioc->pdev;
2473 	int r;
2474 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2475 
2476 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2477 		return 0;
2478 
2479 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2480 	    mrioc->dma_mask, facts_dma_mask);
2481 
2482 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2483 	if (r) {
2484 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2485 		    facts_dma_mask, r);
2486 		return r;
2487 	}
2488 	mrioc->dma_mask = facts_dma_mask;
2489 	return r;
2490 }
2491 
2492 /**
2493  * mpi3mr_process_factsdata - Process IOC facts data
2494  * @mrioc: Adapter instance reference
2495  * @facts_data: Cached IOC facts data
2496  *
2497  * Convert IOC facts data into cpu endianness and cache it in
2498  * the driver .
2499  *
2500  * Return: Nothing.
2501  */
2502 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
2503 	struct mpi3_ioc_facts_data *facts_data)
2504 {
2505 	u32 ioc_config, req_sz, facts_flags;
2506 
2507 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
2508 	    (sizeof(*facts_data) / 4)) {
2509 		ioc_warn(mrioc,
2510 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
2511 		    sizeof(*facts_data),
2512 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
2513 	}
2514 
2515 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2516 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
2517 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
2518 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
2519 		ioc_err(mrioc,
2520 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
2521 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
2522 	}
2523 
2524 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
2525 
2526 	facts_flags = le32_to_cpu(facts_data->flags);
2527 	mrioc->facts.op_req_sz = req_sz;
2528 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
2529 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
2530 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
2531 
2532 	mrioc->facts.ioc_num = facts_data->ioc_number;
2533 	mrioc->facts.who_init = facts_data->who_init;
2534 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
2535 	mrioc->facts.personality = (facts_flags &
2536 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
2537 	mrioc->facts.dma_mask = (facts_flags &
2538 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
2539 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
2540 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
2541 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
2542 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
2543 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
2544 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
2545 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
2546 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
2547 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
2548 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
2549 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
2550 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
2551 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
2552 	mrioc->facts.max_pcie_switches =
2553 	    le16_to_cpu(facts_data->max_pcie_switches);
2554 	mrioc->facts.max_sasexpanders =
2555 	    le16_to_cpu(facts_data->max_sas_expanders);
2556 	mrioc->facts.max_sasinitiators =
2557 	    le16_to_cpu(facts_data->max_sas_initiators);
2558 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
2559 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
2560 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
2561 	mrioc->facts.max_op_req_q =
2562 	    le16_to_cpu(facts_data->max_operational_request_queues);
2563 	mrioc->facts.max_op_reply_q =
2564 	    le16_to_cpu(facts_data->max_operational_reply_queues);
2565 	mrioc->facts.ioc_capabilities =
2566 	    le32_to_cpu(facts_data->ioc_capabilities);
2567 	mrioc->facts.fw_ver.build_num =
2568 	    le16_to_cpu(facts_data->fw_version.build_num);
2569 	mrioc->facts.fw_ver.cust_id =
2570 	    le16_to_cpu(facts_data->fw_version.customer_id);
2571 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
2572 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
2573 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
2574 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
2575 	mrioc->msix_count = min_t(int, mrioc->msix_count,
2576 	    mrioc->facts.max_msix_vectors);
2577 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
2578 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
2579 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
2580 	mrioc->facts.shutdown_timeout =
2581 	    le16_to_cpu(facts_data->shutdown_timeout);
2582 
2583 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
2584 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
2585 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
2586 	ioc_info(mrioc,
2587 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
2588 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
2589 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
2590 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
2591 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
2592 	    mrioc->facts.sge_mod_shift);
2593 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
2594 	    mrioc->facts.dma_mask, (facts_flags &
2595 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
2596 
2597 	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
2598 
2599 	if (reset_devices)
2600 		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
2601 		    MPI3MR_HOST_IOS_KDUMP);
2602 }
2603 
2604 /**
2605  * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
2606  * @mrioc: Adapter instance reference
2607  *
2608  * Allocate and initialize the reply free buffers, sense
2609  * buffers, reply free queue and sense buffer queue.
2610  *
2611  * Return: 0 on success, non-zero on failures.
2612  */
2613 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
2614 {
2615 	int retval = 0;
2616 	u32 sz, i;
2617 
2618 	if (mrioc->init_cmds.reply)
2619 		return retval;
2620 
2621 	mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
2622 	if (!mrioc->init_cmds.reply)
2623 		goto out_failed;
2624 
2625 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2626 		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
2627 		    GFP_KERNEL);
2628 		if (!mrioc->dev_rmhs_cmds[i].reply)
2629 			goto out_failed;
2630 	}
2631 
2632 	mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
2633 	if (!mrioc->host_tm_cmds.reply)
2634 		goto out_failed;
2635 
2636 	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
2637 	if (mrioc->facts.max_devhandle % 8)
2638 		mrioc->dev_handle_bitmap_sz++;
2639 	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
2640 	    GFP_KERNEL);
2641 	if (!mrioc->removepend_bitmap)
2642 		goto out_failed;
2643 
2644 	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
2645 	if (MPI3MR_NUM_DEVRMCMD % 8)
2646 		mrioc->devrem_bitmap_sz++;
2647 	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
2648 	    GFP_KERNEL);
2649 	if (!mrioc->devrem_bitmap)
2650 		goto out_failed;
2651 
2652 	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
2653 	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
2654 	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
2655 	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
2656 
2657 	/* reply buffer pool, 16 byte align */
2658 	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
2659 	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
2660 	    &mrioc->pdev->dev, sz, 16, 0);
2661 	if (!mrioc->reply_buf_pool) {
2662 		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
2663 		goto out_failed;
2664 	}
2665 
2666 	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
2667 	    &mrioc->reply_buf_dma);
2668 	if (!mrioc->reply_buf)
2669 		goto out_failed;
2670 
2671 	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
2672 
2673 	/* reply free queue, 8 byte align */
2674 	sz = mrioc->reply_free_qsz * 8;
2675 	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
2676 	    &mrioc->pdev->dev, sz, 8, 0);
2677 	if (!mrioc->reply_free_q_pool) {
2678 		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
2679 		goto out_failed;
2680 	}
2681 	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
2682 	    GFP_KERNEL, &mrioc->reply_free_q_dma);
2683 	if (!mrioc->reply_free_q)
2684 		goto out_failed;
2685 
2686 	/* sense buffer pool,  4 byte align */
2687 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2688 	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
2689 	    &mrioc->pdev->dev, sz, 4, 0);
2690 	if (!mrioc->sense_buf_pool) {
2691 		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
2692 		goto out_failed;
2693 	}
2694 	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
2695 	    &mrioc->sense_buf_dma);
2696 	if (!mrioc->sense_buf)
2697 		goto out_failed;
2698 
2699 	/* sense buffer queue, 8 byte align */
2700 	sz = mrioc->sense_buf_q_sz * 8;
2701 	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
2702 	    &mrioc->pdev->dev, sz, 8, 0);
2703 	if (!mrioc->sense_buf_q_pool) {
2704 		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
2705 		goto out_failed;
2706 	}
2707 	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
2708 	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
2709 	if (!mrioc->sense_buf_q)
2710 		goto out_failed;
2711 
2712 	return retval;
2713 
2714 out_failed:
2715 	retval = -1;
2716 	return retval;
2717 }
2718 
2719 /**
2720  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
2721  * buffers
2722  * @mrioc: Adapter instance reference
2723  *
2724  * Helper function to initialize reply and sense buffers along
2725  * with some debug prints.
2726  *
2727  * Return:  None.
2728  */
2729 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
2730 {
2731 	u32 sz, i;
2732 	dma_addr_t phy_addr;
2733 
2734 	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
2735 	ioc_info(mrioc,
2736 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2737 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
2738 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
2739 	sz = mrioc->reply_free_qsz * 8;
2740 	ioc_info(mrioc,
2741 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2742 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
2743 	    (unsigned long long)mrioc->reply_free_q_dma);
2744 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2745 	ioc_info(mrioc,
2746 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
2747 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
2748 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
2749 	sz = mrioc->sense_buf_q_sz * 8;
2750 	ioc_info(mrioc,
2751 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
2752 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
2753 	    (unsigned long long)mrioc->sense_buf_q_dma);
2754 
2755 	/* initialize Reply buffer Queue */
2756 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
2757 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
2758 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
2759 	mrioc->reply_free_q[i] = cpu_to_le64(0);
2760 
2761 	/* initialize Sense Buffer Queue */
2762 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
2763 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
2764 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
2765 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
2766 }
2767 
2768 /**
2769  * mpi3mr_issue_iocinit - Send IOC Init
2770  * @mrioc: Adapter instance reference
2771  *
2772  * Issue IOC Init MPI request through admin queue and wait for
2773  * the completion of it or time out.
2774  *
2775  * Return: 0 on success, non-zero on failures.
2776  */
2777 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
2778 {
2779 	struct mpi3_ioc_init_request iocinit_req;
2780 	struct mpi3_driver_info_layout *drv_info;
2781 	dma_addr_t data_dma;
2782 	u32 data_len = sizeof(*drv_info);
2783 	int retval = 0;
2784 	ktime_t current_time;
2785 
2786 	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2787 	    GFP_KERNEL);
2788 	if (!drv_info) {
2789 		retval = -1;
2790 		goto out;
2791 	}
2792 	mpimr_initialize_reply_sbuf_queues(mrioc);
2793 
2794 	drv_info->information_length = cpu_to_le32(data_len);
2795 	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
2796 	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
2797 	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
2798 	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
2799 	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
2800 	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
2801 	    sizeof(drv_info->driver_release_date));
2802 	drv_info->driver_capabilities = 0;
2803 	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
2804 	    sizeof(mrioc->driver_info));
2805 
2806 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2807 	mutex_lock(&mrioc->init_cmds.mutex);
2808 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2809 		retval = -1;
2810 		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
2811 		mutex_unlock(&mrioc->init_cmds.mutex);
2812 		goto out;
2813 	}
2814 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2815 	mrioc->init_cmds.is_waiting = 1;
2816 	mrioc->init_cmds.callback = NULL;
2817 	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2818 	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
2819 	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
2820 	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
2821 	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
2822 	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
2823 	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
2824 	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
2825 	iocinit_req.reply_free_queue_address =
2826 	    cpu_to_le64(mrioc->reply_free_q_dma);
2827 	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
2828 	iocinit_req.sense_buffer_free_queue_depth =
2829 	    cpu_to_le16(mrioc->sense_buf_q_sz);
2830 	iocinit_req.sense_buffer_free_queue_address =
2831 	    cpu_to_le64(mrioc->sense_buf_q_dma);
2832 	iocinit_req.driver_information_address = cpu_to_le64(data_dma);
2833 
2834 	current_time = ktime_get_real();
2835 	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
2836 
2837 	init_completion(&mrioc->init_cmds.done);
2838 	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
2839 	    sizeof(iocinit_req), 1);
2840 	if (retval) {
2841 		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
2842 		goto out_unlock;
2843 	}
2844 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2845 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2846 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2847 		mpi3mr_check_rh_fault_ioc(mrioc,
2848 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2849 		ioc_err(mrioc, "ioc_init timed out\n");
2850 		retval = -1;
2851 		goto out_unlock;
2852 	}
2853 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2854 	    != MPI3_IOCSTATUS_SUCCESS) {
2855 		ioc_err(mrioc,
2856 		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2857 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2858 		    mrioc->init_cmds.ioc_loginfo);
2859 		retval = -1;
2860 		goto out_unlock;
2861 	}
2862 
2863 	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
2864 	writel(mrioc->reply_free_queue_host_index,
2865 	    &mrioc->sysif_regs->reply_free_host_index);
2866 
2867 	mrioc->sbq_host_index = mrioc->num_sense_bufs;
2868 	writel(mrioc->sbq_host_index,
2869 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
2870 out_unlock:
2871 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2872 	mutex_unlock(&mrioc->init_cmds.mutex);
2873 
2874 out:
2875 	if (drv_info)
2876 		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
2877 		    data_dma);
2878 
2879 	return retval;
2880 }
2881 
2882 /**
2883  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2884  * @mrioc: Adapter instance reference
2885  * @event: MPI event ID
2886  *
2887  * Un mask the specific event by resetting the event_mask
2888  * bitmap.
2889  *
2890  * Return: 0 on success, non-zero on failures.
2891  */
2892 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
2893 {
2894 	u32 desired_event;
2895 	u8 word;
2896 
2897 	if (event >= 128)
2898 		return;
2899 
2900 	desired_event = (1 << (event % 32));
2901 	word = event / 32;
2902 
2903 	mrioc->event_masks[word] &= ~desired_event;
2904 }
2905 
2906 /**
2907  * mpi3mr_issue_event_notification - Send event notification
2908  * @mrioc: Adapter instance reference
2909  *
2910  * Issue event notification MPI request through admin queue and
2911  * wait for the completion of it or time out.
2912  *
2913  * Return: 0 on success, non-zero on failures.
2914  */
2915 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
2916 {
2917 	struct mpi3_event_notification_request evtnotify_req;
2918 	int retval = 0;
2919 	u8 i;
2920 
2921 	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
2922 	mutex_lock(&mrioc->init_cmds.mutex);
2923 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2924 		retval = -1;
2925 		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
2926 		mutex_unlock(&mrioc->init_cmds.mutex);
2927 		goto out;
2928 	}
2929 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2930 	mrioc->init_cmds.is_waiting = 1;
2931 	mrioc->init_cmds.callback = NULL;
2932 	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2933 	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
2934 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2935 		evtnotify_req.event_masks[i] =
2936 		    cpu_to_le32(mrioc->event_masks[i]);
2937 	init_completion(&mrioc->init_cmds.done);
2938 	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
2939 	    sizeof(evtnotify_req), 1);
2940 	if (retval) {
2941 		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
2942 		goto out_unlock;
2943 	}
2944 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2945 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2946 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2947 		ioc_err(mrioc, "event notification timed out\n");
2948 		mpi3mr_check_rh_fault_ioc(mrioc,
2949 		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
2950 		retval = -1;
2951 		goto out_unlock;
2952 	}
2953 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2954 	    != MPI3_IOCSTATUS_SUCCESS) {
2955 		ioc_err(mrioc,
2956 		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2957 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2958 		    mrioc->init_cmds.ioc_loginfo);
2959 		retval = -1;
2960 		goto out_unlock;
2961 	}
2962 
2963 out_unlock:
2964 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2965 	mutex_unlock(&mrioc->init_cmds.mutex);
2966 out:
2967 	return retval;
2968 }
2969 
2970 /**
2971  * mpi3mr_send_event_ack - Send event acknowledgment
2972  * @mrioc: Adapter instance reference
2973  * @event: MPI3 event ID
2974  * @event_ctx: Event context
2975  *
2976  * Send event acknowledgment through admin queue and wait for
2977  * it to complete.
2978  *
2979  * Return: 0 on success, non-zero on failures.
2980  */
2981 int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2982 	u32 event_ctx)
2983 {
2984 	struct mpi3_event_ack_request evtack_req;
2985 	int retval = 0;
2986 
2987 	memset(&evtack_req, 0, sizeof(evtack_req));
2988 	mutex_lock(&mrioc->init_cmds.mutex);
2989 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2990 		retval = -1;
2991 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
2992 		mutex_unlock(&mrioc->init_cmds.mutex);
2993 		goto out;
2994 	}
2995 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2996 	mrioc->init_cmds.is_waiting = 1;
2997 	mrioc->init_cmds.callback = NULL;
2998 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2999 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3000 	evtack_req.event = event;
3001 	evtack_req.event_context = cpu_to_le32(event_ctx);
3002 
3003 	init_completion(&mrioc->init_cmds.done);
3004 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3005 	    sizeof(evtack_req), 1);
3006 	if (retval) {
3007 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3008 		goto out_unlock;
3009 	}
3010 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3011 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3012 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3013 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3014 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3015 			mpi3mr_soft_reset_handler(mrioc,
3016 			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
3017 		retval = -1;
3018 		goto out_unlock;
3019 	}
3020 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3021 	    != MPI3_IOCSTATUS_SUCCESS) {
3022 		ioc_err(mrioc,
3023 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3024 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3025 		    mrioc->init_cmds.ioc_loginfo);
3026 		retval = -1;
3027 		goto out_unlock;
3028 	}
3029 
3030 out_unlock:
3031 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3032 	mutex_unlock(&mrioc->init_cmds.mutex);
3033 out:
3034 	return retval;
3035 }
3036 
3037 /**
3038  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3039  * @mrioc: Adapter instance reference
3040  *
3041  * Allocate chain buffers and set a bitmap to indicate free
3042  * chain buffers. Chain buffers are used to pass the SGE
3043  * information along with MPI3 SCSI IO requests for host I/O.
3044  *
3045  * Return: 0 on success, non-zero on failure
3046  */
3047 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3048 {
3049 	int retval = 0;
3050 	u32 sz, i;
3051 	u16 num_chains;
3052 
3053 	if (mrioc->chain_sgl_list)
3054 		return retval;
3055 
3056 	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3057 
3058 	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3059 	    | SHOST_DIX_TYPE1_PROTECTION
3060 	    | SHOST_DIX_TYPE2_PROTECTION
3061 	    | SHOST_DIX_TYPE3_PROTECTION))
3062 		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3063 
3064 	mrioc->chain_buf_count = num_chains;
3065 	sz = sizeof(struct chain_element) * num_chains;
3066 	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3067 	if (!mrioc->chain_sgl_list)
3068 		goto out_failed;
3069 
3070 	sz = MPI3MR_PAGE_SIZE_4K;
3071 	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3072 	    &mrioc->pdev->dev, sz, 16, 0);
3073 	if (!mrioc->chain_buf_pool) {
3074 		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3075 		goto out_failed;
3076 	}
3077 
3078 	for (i = 0; i < num_chains; i++) {
3079 		mrioc->chain_sgl_list[i].addr =
3080 		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3081 		    &mrioc->chain_sgl_list[i].dma_addr);
3082 
3083 		if (!mrioc->chain_sgl_list[i].addr)
3084 			goto out_failed;
3085 	}
3086 	mrioc->chain_bitmap_sz = num_chains / 8;
3087 	if (num_chains % 8)
3088 		mrioc->chain_bitmap_sz++;
3089 	mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
3090 	if (!mrioc->chain_bitmap)
3091 		goto out_failed;
3092 	return retval;
3093 out_failed:
3094 	retval = -1;
3095 	return retval;
3096 }
3097 
3098 /**
3099  * mpi3mr_port_enable_complete - Mark port enable complete
3100  * @mrioc: Adapter instance reference
3101  * @drv_cmd: Internal command tracker
3102  *
3103  * Call back for asynchronous port enable request sets the
3104  * driver command to indicate port enable request is complete.
3105  *
3106  * Return: Nothing
3107  */
3108 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3109 	struct mpi3mr_drv_cmd *drv_cmd)
3110 {
3111 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3112 	drv_cmd->callback = NULL;
3113 	mrioc->scan_failed = drv_cmd->ioc_status;
3114 	mrioc->scan_started = 0;
3115 }
3116 
3117 /**
3118  * mpi3mr_issue_port_enable - Issue Port Enable
3119  * @mrioc: Adapter instance reference
3120  * @async: Flag to wait for completion or not
3121  *
3122  * Issue Port Enable MPI request through admin queue and if the
3123  * async flag is not set wait for the completion of the port
3124  * enable or time out.
3125  *
3126  * Return: 0 on success, non-zero on failures.
3127  */
3128 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
3129 {
3130 	struct mpi3_port_enable_request pe_req;
3131 	int retval = 0;
3132 	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3133 
3134 	memset(&pe_req, 0, sizeof(pe_req));
3135 	mutex_lock(&mrioc->init_cmds.mutex);
3136 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3137 		retval = -1;
3138 		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
3139 		mutex_unlock(&mrioc->init_cmds.mutex);
3140 		goto out;
3141 	}
3142 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3143 	if (async) {
3144 		mrioc->init_cmds.is_waiting = 0;
3145 		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
3146 	} else {
3147 		mrioc->init_cmds.is_waiting = 1;
3148 		mrioc->init_cmds.callback = NULL;
3149 		init_completion(&mrioc->init_cmds.done);
3150 	}
3151 	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3152 	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
3153 
3154 	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
3155 	if (retval) {
3156 		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
3157 		goto out_unlock;
3158 	}
3159 	if (async) {
3160 		mutex_unlock(&mrioc->init_cmds.mutex);
3161 		goto out;
3162 	}
3163 
3164 	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
3165 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3166 		ioc_err(mrioc, "port enable timed out\n");
3167 		retval = -1;
3168 		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3169 		goto out_unlock;
3170 	}
3171 	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
3172 
3173 out_unlock:
3174 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3175 	mutex_unlock(&mrioc->init_cmds.mutex);
3176 out:
3177 	return retval;
3178 }
3179 
3180 /* Protocol type to name mapper structure */
3181 static const struct {
3182 	u8 protocol;
3183 	char *name;
3184 } mpi3mr_protocols[] = {
3185 	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
3186 	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
3187 	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
3188 };
3189 
3190 /* Capability to name mapper structure*/
3191 static const struct {
3192 	u32 capability;
3193 	char *name;
3194 } mpi3mr_capabilities[] = {
3195 	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
3196 };
3197 
3198 /**
3199  * mpi3mr_print_ioc_info - Display controller information
3200  * @mrioc: Adapter instance reference
3201  *
3202  * Display controller personalit, capability, supported
3203  * protocols etc.
3204  *
3205  * Return: Nothing
3206  */
3207 static void
3208 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3209 {
3210 	int i = 0, bytes_written = 0;
3211 	char personality[16];
3212 	char protocol[50] = {0};
3213 	char capabilities[100] = {0};
3214 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3215 
3216 	switch (mrioc->facts.personality) {
3217 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3218 		strncpy(personality, "Enhanced HBA", sizeof(personality));
3219 		break;
3220 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3221 		strncpy(personality, "RAID", sizeof(personality));
3222 		break;
3223 	default:
3224 		strncpy(personality, "Unknown", sizeof(personality));
3225 		break;
3226 	}
3227 
3228 	ioc_info(mrioc, "Running in %s Personality", personality);
3229 
3230 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3231 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3232 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3233 
3234 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3235 		if (mrioc->facts.protocol_flags &
3236 		    mpi3mr_protocols[i].protocol) {
3237 			bytes_written += scnprintf(protocol + bytes_written,
3238 				    sizeof(protocol) - bytes_written, "%s%s",
3239 				    bytes_written ? "," : "",
3240 				    mpi3mr_protocols[i].name);
3241 		}
3242 	}
3243 
3244 	bytes_written = 0;
3245 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3246 		if (mrioc->facts.protocol_flags &
3247 		    mpi3mr_capabilities[i].capability) {
3248 			bytes_written += scnprintf(capabilities + bytes_written,
3249 				    sizeof(capabilities) - bytes_written, "%s%s",
3250 				    bytes_written ? "," : "",
3251 				    mpi3mr_capabilities[i].name);
3252 		}
3253 	}
3254 
3255 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3256 		 protocol, capabilities);
3257 }
3258 
3259 /**
3260  * mpi3mr_cleanup_resources - Free PCI resources
3261  * @mrioc: Adapter instance reference
3262  *
3263  * Unmap PCI device memory and disable PCI device.
3264  *
3265  * Return: 0 on success and non-zero on failure.
3266  */
3267 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3268 {
3269 	struct pci_dev *pdev = mrioc->pdev;
3270 
3271 	mpi3mr_cleanup_isr(mrioc);
3272 
3273 	if (mrioc->sysif_regs) {
3274 		iounmap((void __iomem *)mrioc->sysif_regs);
3275 		mrioc->sysif_regs = NULL;
3276 	}
3277 
3278 	if (pci_is_enabled(pdev)) {
3279 		if (mrioc->bars)
3280 			pci_release_selected_regions(pdev, mrioc->bars);
3281 		pci_disable_device(pdev);
3282 	}
3283 }
3284 
3285 /**
3286  * mpi3mr_setup_resources - Enable PCI resources
3287  * @mrioc: Adapter instance reference
3288  *
3289  * Enable PCI device memory, MSI-x registers and set DMA mask.
3290  *
3291  * Return: 0 on success and non-zero on failure.
3292  */
3293 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3294 {
3295 	struct pci_dev *pdev = mrioc->pdev;
3296 	u32 memap_sz = 0;
3297 	int i, retval = 0, capb = 0;
3298 	u16 message_control;
3299 	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3300 	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
3301 	    (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3302 
3303 	if (pci_enable_device_mem(pdev)) {
3304 		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3305 		retval = -ENODEV;
3306 		goto out_failed;
3307 	}
3308 
3309 	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3310 	if (!capb) {
3311 		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3312 		retval = -ENODEV;
3313 		goto out_failed;
3314 	}
3315 	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3316 
3317 	if (pci_request_selected_regions(pdev, mrioc->bars,
3318 	    mrioc->driver_name)) {
3319 		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3320 		retval = -ENODEV;
3321 		goto out_failed;
3322 	}
3323 
3324 	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3325 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3326 			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3327 			memap_sz = pci_resource_len(pdev, i);
3328 			mrioc->sysif_regs =
3329 			    ioremap(mrioc->sysif_regs_phys, memap_sz);
3330 			break;
3331 		}
3332 	}
3333 
3334 	pci_set_master(pdev);
3335 
3336 	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3337 	if (retval) {
3338 		if (dma_mask != DMA_BIT_MASK(32)) {
3339 			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3340 			dma_mask = DMA_BIT_MASK(32);
3341 			retval = dma_set_mask_and_coherent(&pdev->dev,
3342 			    dma_mask);
3343 		}
3344 		if (retval) {
3345 			mrioc->dma_mask = 0;
3346 			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3347 			goto out_failed;
3348 		}
3349 	}
3350 	mrioc->dma_mask = dma_mask;
3351 
3352 	if (!mrioc->sysif_regs) {
3353 		ioc_err(mrioc,
3354 		    "Unable to map adapter memory or resource not found\n");
3355 		retval = -EINVAL;
3356 		goto out_failed;
3357 	}
3358 
3359 	pci_read_config_word(pdev, capb + 2, &message_control);
3360 	mrioc->msix_count = (message_control & 0x3FF) + 1;
3361 
3362 	pci_save_state(pdev);
3363 
3364 	pci_set_drvdata(pdev, mrioc->shost);
3365 
3366 	mpi3mr_ioc_disable_intr(mrioc);
3367 
3368 	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3369 	    (unsigned long long)mrioc->sysif_regs_phys,
3370 	    mrioc->sysif_regs, memap_sz);
3371 	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3372 	    mrioc->msix_count);
3373 	return retval;
3374 
3375 out_failed:
3376 	mpi3mr_cleanup_resources(mrioc);
3377 	return retval;
3378 }
3379 
3380 /**
3381  * mpi3mr_enable_events - Enable required events
3382  * @mrioc: Adapter instance reference
3383  *
3384  * This routine unmasks the events required by the driver by
3385  * sennding appropriate event mask bitmapt through an event
3386  * notification request.
3387  *
3388  * Return: 0 on success and non-zero on failure.
3389  */
3390 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
3391 {
3392 	int retval = 0;
3393 	u32  i;
3394 
3395 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3396 		mrioc->event_masks[i] = -1;
3397 
3398 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3399 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3400 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3401 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3402 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3403 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3404 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3405 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3406 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3407 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3408 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3409 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3410 
3411 	retval = mpi3mr_issue_event_notification(mrioc);
3412 	if (retval)
3413 		ioc_err(mrioc, "failed to issue event notification %d\n",
3414 		    retval);
3415 	return retval;
3416 }
3417 
3418 /**
3419  * mpi3mr_init_ioc - Initialize the controller
3420  * @mrioc: Adapter instance reference
3421  * @init_type: Flag to indicate is the init_type
3422  *
3423  * This the controller initialization routine, executed either
3424  * after soft reset or from pci probe callback.
3425  * Setup the required resources, memory map the controller
3426  * registers, create admin and operational reply queue pairs,
3427  * allocate required memory for reply pool, sense buffer pool,
3428  * issue IOC init request to the firmware, unmask the events and
3429  * issue port enable to discover SAS/SATA/NVMe devies and RAID
3430  * volumes.
3431  *
3432  * Return: 0 on success and non-zero on failure.
3433  */
3434 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
3435 {
3436 	int retval = 0;
3437 	u8 retry = 0;
3438 	struct mpi3_ioc_facts_data facts_data;
3439 
3440 retry_init:
3441 	retval = mpi3mr_bring_ioc_ready(mrioc);
3442 	if (retval) {
3443 		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3444 		    retval);
3445 		goto out_failed_noretry;
3446 	}
3447 
3448 	retval = mpi3mr_setup_isr(mrioc, 1);
3449 	if (retval) {
3450 		ioc_err(mrioc, "Failed to setup ISR error %d\n",
3451 		    retval);
3452 		goto out_failed_noretry;
3453 	}
3454 
3455 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3456 	if (retval) {
3457 		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3458 		    retval);
3459 		goto out_failed;
3460 	}
3461 
3462 	mpi3mr_process_factsdata(mrioc, &facts_data);
3463 
3464 	retval = mpi3mr_check_reset_dma_mask(mrioc);
3465 	if (retval) {
3466 		ioc_err(mrioc, "Resetting dma mask failed %d\n",
3467 		    retval);
3468 		goto out_failed_noretry;
3469 	}
3470 
3471 	mpi3mr_print_ioc_info(mrioc);
3472 
3473 	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3474 	if (retval) {
3475 		ioc_err(mrioc,
3476 		    "%s :Failed to allocated reply sense buffers %d\n",
3477 		    __func__, retval);
3478 		goto out_failed_noretry;
3479 	}
3480 
3481 	retval = mpi3mr_alloc_chain_bufs(mrioc);
3482 	if (retval) {
3483 		ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
3484 		    retval);
3485 		goto out_failed_noretry;
3486 	}
3487 
3488 	retval = mpi3mr_issue_iocinit(mrioc);
3489 	if (retval) {
3490 		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
3491 		    retval);
3492 		goto out_failed;
3493 	}
3494 
3495 	retval = mpi3mr_print_pkg_ver(mrioc);
3496 	if (retval) {
3497 		ioc_err(mrioc, "failed to get package version\n");
3498 		goto out_failed;
3499 	}
3500 
3501 	retval = mpi3mr_setup_isr(mrioc, 0);
3502 	if (retval) {
3503 		ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
3504 		    retval);
3505 		goto out_failed_noretry;
3506 	}
3507 
3508 	retval = mpi3mr_create_op_queues(mrioc);
3509 	if (retval) {
3510 		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
3511 		    retval);
3512 		goto out_failed;
3513 	}
3514 
3515 	retval = mpi3mr_enable_events(mrioc);
3516 	if (retval) {
3517 		ioc_err(mrioc, "failed to enable events %d\n",
3518 		    retval);
3519 		goto out_failed;
3520 	}
3521 
3522 	ioc_info(mrioc, "controller initialization completed successfully\n");
3523 	return retval;
3524 out_failed:
3525 	if (retry < 2) {
3526 		retry++;
3527 		ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
3528 		    retry);
3529 		mpi3mr_memset_buffers(mrioc);
3530 		goto retry_init;
3531 	}
3532 out_failed_noretry:
3533 	ioc_err(mrioc, "controller initialization failed\n");
3534 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3535 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
3536 	mrioc->unrecoverable = 1;
3537 	return retval;
3538 }
3539 
3540 /**
3541  * mpi3mr_reinit_ioc - Re-Initialize the controller
3542  * @mrioc: Adapter instance reference
3543  * @is_resume: Called from resume or reset path
3544  *
3545  * This the controller re-initialization routine, executed from
3546  * the soft reset handler or resume callback. Creates
3547  * operational reply queue pairs, allocate required memory for
3548  * reply pool, sense buffer pool, issue IOC init request to the
3549  * firmware, unmask the events and issue port enable to discover
3550  * SAS/SATA/NVMe devices and RAID volumes.
3551  *
3552  * Return: 0 on success and non-zero on failure.
3553  */
3554 int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
3555 {
3556 	int retval = 0;
3557 	u8 retry = 0;
3558 	struct mpi3_ioc_facts_data facts_data;
3559 
3560 retry_init:
3561 	dprint_reset(mrioc, "bringing up the controller to ready state\n");
3562 	retval = mpi3mr_bring_ioc_ready(mrioc);
3563 	if (retval) {
3564 		ioc_err(mrioc, "failed to bring to ready state\n");
3565 		goto out_failed_noretry;
3566 	}
3567 
3568 	if (is_resume) {
3569 		dprint_reset(mrioc, "setting up single ISR\n");
3570 		retval = mpi3mr_setup_isr(mrioc, 1);
3571 		if (retval) {
3572 			ioc_err(mrioc, "failed to setup ISR\n");
3573 			goto out_failed_noretry;
3574 		}
3575 	} else
3576 		mpi3mr_ioc_enable_intr(mrioc);
3577 
3578 	dprint_reset(mrioc, "getting ioc_facts\n");
3579 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3580 	if (retval) {
3581 		ioc_err(mrioc, "failed to get ioc_facts\n");
3582 		goto out_failed;
3583 	}
3584 
3585 	mpi3mr_process_factsdata(mrioc, &facts_data);
3586 
3587 	mpi3mr_print_ioc_info(mrioc);
3588 
3589 	dprint_reset(mrioc, "sending ioc_init\n");
3590 	retval = mpi3mr_issue_iocinit(mrioc);
3591 	if (retval) {
3592 		ioc_err(mrioc, "failed to send ioc_init\n");
3593 		goto out_failed;
3594 	}
3595 
3596 	dprint_reset(mrioc, "getting package version\n");
3597 	retval = mpi3mr_print_pkg_ver(mrioc);
3598 	if (retval) {
3599 		ioc_err(mrioc, "failed to get package version\n");
3600 		goto out_failed;
3601 	}
3602 
3603 	if (is_resume) {
3604 		dprint_reset(mrioc, "setting up multiple ISR\n");
3605 		retval = mpi3mr_setup_isr(mrioc, 0);
3606 		if (retval) {
3607 			ioc_err(mrioc, "failed to re-setup ISR\n");
3608 			goto out_failed_noretry;
3609 		}
3610 	}
3611 
3612 	dprint_reset(mrioc, "creating operational queue pairs\n");
3613 	retval = mpi3mr_create_op_queues(mrioc);
3614 	if (retval) {
3615 		ioc_err(mrioc, "failed to create operational queue pairs\n");
3616 		goto out_failed;
3617 	}
3618 
3619 	if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
3620 		ioc_err(mrioc,
3621 		    "cannot create minimum number of operatioanl queues expected:%d created:%d\n",
3622 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
3623 		goto out_failed_noretry;
3624 	}
3625 
3626 	dprint_reset(mrioc, "enabling events\n");
3627 	retval = mpi3mr_enable_events(mrioc);
3628 	if (retval) {
3629 		ioc_err(mrioc, "failed to enable events\n");
3630 		goto out_failed;
3631 	}
3632 
3633 	ioc_info(mrioc, "sending port enable\n");
3634 	retval = mpi3mr_issue_port_enable(mrioc, 0);
3635 	if (retval) {
3636 		ioc_err(mrioc, "failed to issue port enable\n");
3637 		goto out_failed;
3638 	}
3639 
3640 	ioc_info(mrioc, "controller %s completed successfully\n",
3641 	    (is_resume)?"resume":"re-initialization");
3642 	return retval;
3643 out_failed:
3644 	if (retry < 2) {
3645 		retry++;
3646 		ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
3647 		    (is_resume)?"resume":"re-initialization", retry);
3648 		mpi3mr_memset_buffers(mrioc);
3649 		goto retry_init;
3650 	}
3651 out_failed_noretry:
3652 	ioc_err(mrioc, "controller %s is failed\n",
3653 	    (is_resume)?"resume":"re-initialization");
3654 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
3655 	    MPI3MR_RESET_FROM_CTLR_CLEANUP);
3656 	mrioc->unrecoverable = 1;
3657 	return retval;
3658 }
3659 
3660 /**
3661  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
3662  *					segments
3663  * @mrioc: Adapter instance reference
3664  * @qidx: Operational reply queue index
3665  *
3666  * Return: Nothing.
3667  */
3668 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3669 {
3670 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
3671 	struct segments *segments;
3672 	int i, size;
3673 
3674 	if (!op_reply_q->q_segments)
3675 		return;
3676 
3677 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
3678 	segments = op_reply_q->q_segments;
3679 	for (i = 0; i < op_reply_q->num_segments; i++)
3680 		memset(segments[i].segment, 0, size);
3681 }
3682 
3683 /**
3684  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
3685  *					segments
3686  * @mrioc: Adapter instance reference
3687  * @qidx: Operational request queue index
3688  *
3689  * Return: Nothing.
3690  */
3691 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3692 {
3693 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
3694 	struct segments *segments;
3695 	int i, size;
3696 
3697 	if (!op_req_q->q_segments)
3698 		return;
3699 
3700 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
3701 	segments = op_req_q->q_segments;
3702 	for (i = 0; i < op_req_q->num_segments; i++)
3703 		memset(segments[i].segment, 0, size);
3704 }
3705 
3706 /**
3707  * mpi3mr_memset_buffers - memset memory for a controller
3708  * @mrioc: Adapter instance reference
3709  *
3710  * clear all the memory allocated for a controller, typically
3711  * called post reset to reuse the memory allocated during the
3712  * controller init.
3713  *
3714  * Return: Nothing.
3715  */
3716 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
3717 {
3718 	u16 i;
3719 
3720 	mrioc->change_count = 0;
3721 	if (mrioc->admin_req_base)
3722 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
3723 	if (mrioc->admin_reply_base)
3724 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
3725 
3726 	if (mrioc->init_cmds.reply) {
3727 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
3728 		memset(mrioc->host_tm_cmds.reply, 0,
3729 		    sizeof(*mrioc->host_tm_cmds.reply));
3730 		for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
3731 			memset(mrioc->dev_rmhs_cmds[i].reply, 0,
3732 			    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
3733 		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
3734 		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
3735 	}
3736 
3737 	for (i = 0; i < mrioc->num_queues; i++) {
3738 		mrioc->op_reply_qinfo[i].qid = 0;
3739 		mrioc->op_reply_qinfo[i].ci = 0;
3740 		mrioc->op_reply_qinfo[i].num_replies = 0;
3741 		mrioc->op_reply_qinfo[i].ephase = 0;
3742 		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
3743 		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
3744 		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
3745 
3746 		mrioc->req_qinfo[i].ci = 0;
3747 		mrioc->req_qinfo[i].pi = 0;
3748 		mrioc->req_qinfo[i].num_requests = 0;
3749 		mrioc->req_qinfo[i].qid = 0;
3750 		mrioc->req_qinfo[i].reply_qid = 0;
3751 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
3752 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
3753 	}
3754 }
3755 
3756 /**
3757  * mpi3mr_free_mem - Free memory allocated for a controller
3758  * @mrioc: Adapter instance reference
3759  *
3760  * Free all the memory allocated for a controller.
3761  *
3762  * Return: Nothing.
3763  */
3764 void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
3765 {
3766 	u16 i;
3767 	struct mpi3mr_intr_info *intr_info;
3768 
3769 	if (mrioc->sense_buf_pool) {
3770 		if (mrioc->sense_buf)
3771 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
3772 			    mrioc->sense_buf_dma);
3773 		dma_pool_destroy(mrioc->sense_buf_pool);
3774 		mrioc->sense_buf = NULL;
3775 		mrioc->sense_buf_pool = NULL;
3776 	}
3777 	if (mrioc->sense_buf_q_pool) {
3778 		if (mrioc->sense_buf_q)
3779 			dma_pool_free(mrioc->sense_buf_q_pool,
3780 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
3781 		dma_pool_destroy(mrioc->sense_buf_q_pool);
3782 		mrioc->sense_buf_q = NULL;
3783 		mrioc->sense_buf_q_pool = NULL;
3784 	}
3785 
3786 	if (mrioc->reply_buf_pool) {
3787 		if (mrioc->reply_buf)
3788 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
3789 			    mrioc->reply_buf_dma);
3790 		dma_pool_destroy(mrioc->reply_buf_pool);
3791 		mrioc->reply_buf = NULL;
3792 		mrioc->reply_buf_pool = NULL;
3793 	}
3794 	if (mrioc->reply_free_q_pool) {
3795 		if (mrioc->reply_free_q)
3796 			dma_pool_free(mrioc->reply_free_q_pool,
3797 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
3798 		dma_pool_destroy(mrioc->reply_free_q_pool);
3799 		mrioc->reply_free_q = NULL;
3800 		mrioc->reply_free_q_pool = NULL;
3801 	}
3802 
3803 	for (i = 0; i < mrioc->num_op_req_q; i++)
3804 		mpi3mr_free_op_req_q_segments(mrioc, i);
3805 
3806 	for (i = 0; i < mrioc->num_op_reply_q; i++)
3807 		mpi3mr_free_op_reply_q_segments(mrioc, i);
3808 
3809 	for (i = 0; i < mrioc->intr_info_count; i++) {
3810 		intr_info = mrioc->intr_info + i;
3811 		intr_info->op_reply_q = NULL;
3812 	}
3813 
3814 	kfree(mrioc->req_qinfo);
3815 	mrioc->req_qinfo = NULL;
3816 	mrioc->num_op_req_q = 0;
3817 
3818 	kfree(mrioc->op_reply_qinfo);
3819 	mrioc->op_reply_qinfo = NULL;
3820 	mrioc->num_op_reply_q = 0;
3821 
3822 	kfree(mrioc->init_cmds.reply);
3823 	mrioc->init_cmds.reply = NULL;
3824 
3825 	kfree(mrioc->host_tm_cmds.reply);
3826 	mrioc->host_tm_cmds.reply = NULL;
3827 
3828 	kfree(mrioc->removepend_bitmap);
3829 	mrioc->removepend_bitmap = NULL;
3830 
3831 	kfree(mrioc->devrem_bitmap);
3832 	mrioc->devrem_bitmap = NULL;
3833 
3834 	kfree(mrioc->chain_bitmap);
3835 	mrioc->chain_bitmap = NULL;
3836 
3837 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3838 		kfree(mrioc->dev_rmhs_cmds[i].reply);
3839 		mrioc->dev_rmhs_cmds[i].reply = NULL;
3840 	}
3841 
3842 	if (mrioc->chain_buf_pool) {
3843 		for (i = 0; i < mrioc->chain_buf_count; i++) {
3844 			if (mrioc->chain_sgl_list[i].addr) {
3845 				dma_pool_free(mrioc->chain_buf_pool,
3846 				    mrioc->chain_sgl_list[i].addr,
3847 				    mrioc->chain_sgl_list[i].dma_addr);
3848 				mrioc->chain_sgl_list[i].addr = NULL;
3849 			}
3850 		}
3851 		dma_pool_destroy(mrioc->chain_buf_pool);
3852 		mrioc->chain_buf_pool = NULL;
3853 	}
3854 
3855 	kfree(mrioc->chain_sgl_list);
3856 	mrioc->chain_sgl_list = NULL;
3857 
3858 	if (mrioc->admin_reply_base) {
3859 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
3860 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
3861 		mrioc->admin_reply_base = NULL;
3862 	}
3863 	if (mrioc->admin_req_base) {
3864 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
3865 		    mrioc->admin_req_base, mrioc->admin_req_dma);
3866 		mrioc->admin_req_base = NULL;
3867 	}
3868 }
3869 
3870 /**
3871  * mpi3mr_issue_ioc_shutdown - shutdown controller
3872  * @mrioc: Adapter instance reference
3873  *
3874  * Send shutodwn notification to the controller and wait for the
3875  * shutdown_timeout for it to be completed.
3876  *
3877  * Return: Nothing.
3878  */
3879 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
3880 {
3881 	u32 ioc_config, ioc_status;
3882 	u8 retval = 1;
3883 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
3884 
3885 	ioc_info(mrioc, "Issuing shutdown Notification\n");
3886 	if (mrioc->unrecoverable) {
3887 		ioc_warn(mrioc,
3888 		    "IOC is unrecoverable shutdown is not issued\n");
3889 		return;
3890 	}
3891 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3892 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3893 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
3894 		ioc_info(mrioc, "shutdown already in progress\n");
3895 		return;
3896 	}
3897 
3898 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3899 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
3900 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
3901 
3902 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
3903 
3904 	if (mrioc->facts.shutdown_timeout)
3905 		timeout = mrioc->facts.shutdown_timeout * 10;
3906 
3907 	do {
3908 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3909 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3910 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
3911 			retval = 0;
3912 			break;
3913 		}
3914 		msleep(100);
3915 	} while (--timeout);
3916 
3917 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3918 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3919 
3920 	if (retval) {
3921 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3922 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
3923 			ioc_warn(mrioc,
3924 			    "shutdown still in progress after timeout\n");
3925 	}
3926 
3927 	ioc_info(mrioc,
3928 	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
3929 	    (!retval) ? "successful" : "failed", ioc_status,
3930 	    ioc_config);
3931 }
3932 
3933 /**
3934  * mpi3mr_cleanup_ioc - Cleanup controller
3935  * @mrioc: Adapter instance reference
3936 
3937  * controller cleanup handler, Message unit reset or soft reset
3938  * and shutdown notification is issued to the controller.
3939  *
3940  * Return: Nothing.
3941  */
3942 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
3943 {
3944 	enum mpi3mr_iocstate ioc_state;
3945 
3946 	dprint_exit(mrioc, "cleaning up the controller\n");
3947 	mpi3mr_ioc_disable_intr(mrioc);
3948 
3949 	ioc_state = mpi3mr_get_iocstate(mrioc);
3950 
3951 	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
3952 	    (ioc_state == MRIOC_STATE_READY)) {
3953 		if (mpi3mr_issue_and_process_mur(mrioc,
3954 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
3955 			mpi3mr_issue_reset(mrioc,
3956 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
3957 			    MPI3MR_RESET_FROM_MUR_FAILURE);
3958 		mpi3mr_issue_ioc_shutdown(mrioc);
3959 	}
3960 	dprint_exit(mrioc, "controller cleanup completed\n");
3961 }
3962 
3963 /**
3964  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
3965  * @mrioc: Adapter instance reference
3966  * @cmdptr: Internal command tracker
3967  *
3968  * Complete an internal driver commands with state indicating it
3969  * is completed due to reset.
3970  *
3971  * Return: Nothing.
3972  */
3973 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
3974 	struct mpi3mr_drv_cmd *cmdptr)
3975 {
3976 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
3977 		cmdptr->state |= MPI3MR_CMD_RESET;
3978 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
3979 		if (cmdptr->is_waiting) {
3980 			complete(&cmdptr->done);
3981 			cmdptr->is_waiting = 0;
3982 		} else if (cmdptr->callback)
3983 			cmdptr->callback(mrioc, cmdptr);
3984 	}
3985 }
3986 
3987 /**
3988  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
3989  * @mrioc: Adapter instance reference
3990  *
3991  * Flush all internal driver commands post reset
3992  *
3993  * Return: Nothing.
3994  */
3995 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
3996 {
3997 	struct mpi3mr_drv_cmd *cmdptr;
3998 	u8 i;
3999 
4000 	cmdptr = &mrioc->init_cmds;
4001 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4002 	cmdptr = &mrioc->host_tm_cmds;
4003 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4004 
4005 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
4006 		cmdptr = &mrioc->dev_rmhs_cmds[i];
4007 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
4008 	}
4009 }
4010 
4011 /**
4012  * mpi3mr_soft_reset_handler - Reset the controller
4013  * @mrioc: Adapter instance reference
4014  * @reset_reason: Reset reason code
4015  * @snapdump: Flag to generate snapdump in firmware or not
4016  *
4017  * This is an handler for recovering controller by issuing soft
4018  * reset are diag fault reset.  This is a blocking function and
4019  * when one reset is executed if any other resets they will be
4020  * blocked. All IOCTLs/IO will be blocked during the reset. If
4021  * controller reset is successful then the controller will be
4022  * reinitalized, otherwise the controller will be marked as not
4023  * recoverable
4024  *
4025  * In snapdump bit is set, the controller is issued with diag
4026  * fault reset so that the firmware can create a snap dump and
4027  * post that the firmware will result in F000 fault and the
4028  * driver will issue soft reset to recover from that.
4029  *
4030  * Return: 0 on success, non-zero on failure.
4031  */
4032 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
4033 	u32 reset_reason, u8 snapdump)
4034 {
4035 	int retval = 0, i;
4036 	unsigned long flags;
4037 	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
4038 
4039 	/* Block the reset handler until diag save in progress*/
4040 	dprint_reset(mrioc,
4041 	    "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
4042 	    mrioc->diagsave_timeout);
4043 	while (mrioc->diagsave_timeout)
4044 		ssleep(1);
4045 	/*
4046 	 * Block new resets until the currently executing one is finished and
4047 	 * return the status of the existing reset for all blocked resets
4048 	 */
4049 	dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
4050 	if (!mutex_trylock(&mrioc->reset_mutex)) {
4051 		ioc_info(mrioc,
4052 		    "controller reset triggered by %s is blocked due to another reset in progress\n",
4053 		    mpi3mr_reset_rc_name(reset_reason));
4054 		do {
4055 			ssleep(1);
4056 		} while (mrioc->reset_in_progress == 1);
4057 		ioc_info(mrioc,
4058 		    "returning previous reset result(%d) for the reset triggered by %s\n",
4059 		    mrioc->prev_reset_result,
4060 		    mpi3mr_reset_rc_name(reset_reason));
4061 		return mrioc->prev_reset_result;
4062 	}
4063 	ioc_info(mrioc, "controller reset is triggered by %s\n",
4064 	    mpi3mr_reset_rc_name(reset_reason));
4065 
4066 	mrioc->reset_in_progress = 1;
4067 	mrioc->prev_reset_result = -1;
4068 
4069 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
4070 	    (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
4071 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
4072 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4073 			mrioc->event_masks[i] = -1;
4074 
4075 		dprint_reset(mrioc, "soft_reset_handler: masking events\n");
4076 		mpi3mr_issue_event_notification(mrioc);
4077 	}
4078 
4079 	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
4080 
4081 	mpi3mr_ioc_disable_intr(mrioc);
4082 
4083 	if (snapdump) {
4084 		mpi3mr_set_diagsave(mrioc);
4085 		retval = mpi3mr_issue_reset(mrioc,
4086 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4087 		if (!retval) {
4088 			do {
4089 				host_diagnostic =
4090 				    readl(&mrioc->sysif_regs->host_diagnostic);
4091 				if (!(host_diagnostic &
4092 				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
4093 					break;
4094 				msleep(100);
4095 			} while (--timeout);
4096 		}
4097 	}
4098 
4099 	retval = mpi3mr_issue_reset(mrioc,
4100 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
4101 	if (retval) {
4102 		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
4103 		goto out;
4104 	}
4105 
4106 	mpi3mr_flush_delayed_rmhs_list(mrioc);
4107 	mpi3mr_flush_drv_cmds(mrioc);
4108 	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
4109 	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
4110 	mpi3mr_cleanup_fwevt_list(mrioc);
4111 	mpi3mr_flush_host_io(mrioc);
4112 	mpi3mr_invalidate_devhandles(mrioc);
4113 	mpi3mr_memset_buffers(mrioc);
4114 	retval = mpi3mr_reinit_ioc(mrioc, 0);
4115 	if (retval) {
4116 		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
4117 		    mrioc->name, reset_reason);
4118 		goto out;
4119 	}
4120 	ssleep(10);
4121 
4122 out:
4123 	if (!retval) {
4124 		mrioc->diagsave_timeout = 0;
4125 		mrioc->reset_in_progress = 0;
4126 		mpi3mr_rfresh_tgtdevs(mrioc);
4127 		mrioc->ts_update_counter = 0;
4128 		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
4129 		if (mrioc->watchdog_work_q)
4130 			queue_delayed_work(mrioc->watchdog_work_q,
4131 			    &mrioc->watchdog_work,
4132 			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
4133 		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
4134 	} else {
4135 		mpi3mr_issue_reset(mrioc,
4136 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4137 		mrioc->unrecoverable = 1;
4138 		mrioc->reset_in_progress = 0;
4139 		retval = -1;
4140 	}
4141 	mrioc->prev_reset_result = retval;
4142 	mutex_unlock(&mrioc->reset_mutex);
4143 	ioc_info(mrioc, "controller reset is %s\n",
4144 	    ((retval == 0) ? "successful" : "failed"));
4145 	return retval;
4146 }
4147