xref: /openbmc/linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 59bd9cfe)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2021 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
13 static int
14 mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
15 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
16 
#if defined(writeq) && defined(CONFIG_64BIT)
/* 64-bit MMIO write: use the platform's native writeq when available. */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * Fallback for platforms without writeq: emit the 64-bit value as two
 * 32-bit MMIO writes, low dword first.
 * NOTE(review): the two writes are not atomic with respect to the device;
 * assumed acceptable for the registers written through this helper.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif
31 
32 static inline bool
33 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
34 {
35 	u16 pi, ci, max_entries;
36 	bool is_qfull = false;
37 
38 	pi = op_req_q->pi;
39 	ci = READ_ONCE(op_req_q->ci);
40 	max_entries = op_req_q->num_requests;
41 
42 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
43 		is_qfull = true;
44 
45 	return is_qfull;
46 }
47 
/* Wait for any in-flight handlers on all of this IOC's IRQ vectors to finish. */
static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
{
	u16 i, max_vectors;

	max_vectors = mrioc->intr_info_count;

	for (i = 0; i < max_vectors; i++)
		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
}
57 
/*
 * Logically disable interrupt handling: clear the enable flag so the ISR
 * bails out early (it checks intr_enabled), then wait for handlers that
 * are already running to complete.
 */
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 0;
	mpi3mr_sync_irqs(mrioc);
}
63 
/* Re-enable interrupt handling by setting the flag the ISR checks. */
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
{
	mrioc->intr_enabled = 1;
}
68 
/*
 * Tear down all registered IRQs and free the interrupt bookkeeping.
 * Safe to call when interrupts were never set up (intr_info == NULL).
 */
static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
{
	u16 i;

	mpi3mr_ioc_disable_intr(mrioc);

	if (!mrioc->intr_info)
		return;

	/* Free each IRQ with the same cookie it was registered with. */
	for (i = 0; i < mrioc->intr_info_count; i++)
		free_irq(pci_irq_vector(mrioc->pdev, i),
		    (mrioc->intr_info + i));

	kfree(mrioc->intr_info);
	mrioc->intr_info = NULL;
	mrioc->intr_info_count = 0;
	pci_free_irq_vectors(mrioc->pdev);
}
87 
/*
 * Fill one simple SGE at @paddr with the given flags, length and DMA
 * address, converting the multi-byte fields to little-endian as the
 * MPI3 wire format requires.
 */
void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
	dma_addr_t dma_addr)
{
	struct mpi3_sge_common *sgel = paddr;

	sgel->flags = flags;
	sgel->length = cpu_to_le32(length);
	sgel->address = cpu_to_le64(dma_addr);
}
97 
/*
 * Build a zero-length end-of-list SGE at @paddr, used for requests that
 * carry no data. The -1 address cast yields an all-ones DMA address.
 */
void mpi3mr_build_zero_len_sge(void *paddr)
{
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
}
104 
105 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
106 	dma_addr_t phys_addr)
107 {
108 	if (!phys_addr)
109 		return NULL;
110 
111 	if ((phys_addr < mrioc->reply_buf_dma) ||
112 	    (phys_addr > mrioc->reply_buf_dma_max_address))
113 		return NULL;
114 
115 	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
116 }
117 
118 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
119 	dma_addr_t phys_addr)
120 {
121 	if (!phys_addr)
122 		return NULL;
123 
124 	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
125 }
126 
/*
 * Return a consumed reply frame (identified by its DMA address) to the
 * reply free queue and notify the controller by updating the host index
 * register. The lock serializes index advancement across CPUs.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
	old_idx  =  mrioc->reply_free_queue_host_index;
	/* Advance the host index with wrap-around at queue size. */
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	/* Publish the freed frame before telling the IOC via MMIO. */
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
144 
/*
 * Return a consumed sense buffer (identified by its DMA address) to the
 * sense buffer free queue and notify the controller through the sense
 * buffer host index register. Mirrors mpi3mr_repost_reply_buf().
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->sbq_lock, flags);
	old_idx  =  mrioc->sbq_host_index;
	/* Advance host index with wrap-around at queue size. */
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
161 
/*
 * Log a human-readable description of an incoming MPI3 event. Events with
 * interesting payloads (device add/change, SAS discovery, PCIe enumeration)
 * are formatted with their event data and return directly; all others fall
 * through to a one-line description. Unknown events are silently ignored
 * (desc stays NULL).
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	/* Unknown/unlisted event: nothing to print. */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
275 
/*
 * Top-level handler for an event notification reply: record the IOC's
 * change count, log the event, then hand it to the OS-specific layer.
 */
static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply *def_reply)
{
	struct mpi3_event_notification_reply *event_reply =
	    (struct mpi3_event_notification_reply *)def_reply;

	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
	mpi3mr_print_event_data(mrioc, event_reply);
	mpi3mr_os_handle_events(mrioc, event_reply);
}
286 
287 static struct mpi3mr_drv_cmd *
288 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
289 	struct mpi3_default_reply *def_reply)
290 {
291 	u16 idx;
292 
293 	switch (host_tag) {
294 	case MPI3MR_HOSTTAG_INITCMDS:
295 		return &mrioc->init_cmds;
296 	case MPI3MR_HOSTTAG_BLK_TMS:
297 		return &mrioc->host_tm_cmds;
298 	case MPI3MR_HOSTTAG_INVALID:
299 		if (def_reply && def_reply->function ==
300 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
301 			mpi3mr_handle_events(mrioc, def_reply);
302 		return NULL;
303 	default:
304 		break;
305 	}
306 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
307 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
308 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
309 		return &mrioc->dev_rmhs_cmds[idx];
310 	}
311 
312 	return NULL;
313 }
314 
/*
 * Decode one admin reply descriptor: extract the host tag, IOC status and
 * loginfo according to the descriptor type, then complete the matching
 * internal driver command, if any.
 *
 * @reply_dma is an output: set to the reply frame's DMA address for
 * address-type descriptors (0 otherwise) so the caller can repost the
 * frame to the reply free queue.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Status carried inline in the descriptor itself. */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Descriptor points at a full reply frame in host memory. */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Successful completion: only the host tag is reported. */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->facts.reply_sz);
			}
			/* Wake a sleeping issuer or invoke its async callback. */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	/* sense_buf is only set for SCSI IO replies; return the buffer. */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
392 
/*
 * Drain the admin reply queue: process descriptors until one whose phase
 * bit does not match the expected phase (i.e. not yet written by the IOC).
 * The expected phase toggles each time the consumer index wraps. Returns
 * the number of replies processed; the final CI is published via MMIO.
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Nothing new posted by the IOC. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Phase flips on every wrap of the consumer index. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}
432 
433 /**
434  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
435  *	queue's consumer index from operational reply descriptor queue.
436  * @op_reply_q: op_reply_qinfo object
437  * @reply_ci: operational reply descriptor's queue consumer index
438  *
439  * Returns reply descriptor frame address
440  */
441 static inline struct mpi3_default_reply_descriptor *
442 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
443 {
444 	void *segment_base_addr;
445 	struct segments *segments = op_reply_q->q_segments;
446 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
447 
448 	segment_base_addr =
449 	    segments[reply_ci / op_reply_q->segment_qd].segment;
450 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
451 	    (reply_ci % op_reply_q->segment_qd);
452 	return reply_desc;
453 }
454 
/*
 * Drain one operational reply queue using the same phase-bit protocol as
 * the admin queue. The in_use atomic guarantees a single drainer per
 * queue; the loop bails out after max_host_ios completions and arms IRQ
 * polling so the threaded ISR finishes the rest (avoids CPU lockup).
 * Returns the number of replies processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Only one context may drain this queue at a time. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		/* Propagate the IOC's request-queue CI to the submit path. */
		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		/* Phase flips on every wrap of the consumer index. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			intr_info->op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
523 
/*
 * Primary (hard) interrupt handler: services the admin reply queue on
 * MSI-x vector 0 and the vector's operational reply queue, if any.
 * Returns IRQ_HANDLED only when at least one reply was processed.
 */
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_admin_replies = 0, num_op_reply = 0;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;

	/* Driver has logically disabled interrupts (e.g. during reset). */
	if (!mrioc->intr_enabled)
		return IRQ_NONE;

	midx = intr_info->msix_index;

	/* Only vector 0 owns the admin reply queue. */
	if (!midx)
		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
	if (intr_info->op_reply_q)
		num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);

	if (num_admin_replies || num_op_reply)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
551 
/*
 * Hard IRQ entry point registered via request_threaded_irq(). Runs the
 * primary handler, and when the queue has switched to polling mode with
 * I/Os still pending, masks the vector and wakes the threaded handler
 * (mpi3mr_isr_poll) to continue completion processing.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Mask this vector until the poll thread re-enables it. */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}
582 
583 /**
584  * mpi3mr_isr_poll - Reply queue polling routine
585  * @irq: IRQ
586  * @privdata: Interrupt info
587  *
588  * poll for pending I/O completions in a loop until pending I/Os
589  * present or controller queue depth I/Os are processed.
590  *
591  * Return: IRQ_NONE or IRQ_HANDLED
592  */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		/* Stop polling the moment the driver disables interrupts. */
		if (!mrioc->intr_enabled)
			break;

		/* Vector 0 also services the admin reply queue. */
		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc, intr_info);

		/* Brief sleep between polls to yield the CPU. */
		usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Leave polling mode and unmask the vector masked by mpi3mr_isr(). */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}
627 
628 /**
629  * mpi3mr_request_irq - Request IRQ and register ISR
630  * @mrioc: Adapter instance reference
631  * @index: IRQ vector index
632  *
633  * Request threaded ISR with primary ISR and secondary
634  *
635  * Return: 0 on success and non zero on failures.
636  */
637 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
638 {
639 	struct pci_dev *pdev = mrioc->pdev;
640 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
641 	int retval = 0;
642 
643 	intr_info->mrioc = mrioc;
644 	intr_info->msix_index = index;
645 	intr_info->op_reply_q = NULL;
646 
647 	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
648 	    mrioc->driver_name, mrioc->id, index);
649 
650 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
651 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
652 	if (retval) {
653 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
654 		    intr_info->name, pci_irq_vector(pdev, index));
655 		return retval;
656 	}
657 
658 	return retval;
659 }
660 
661 /**
662  * mpi3mr_setup_isr - Setup ISR for the controller
663  * @mrioc: Adapter instance reference
664  * @setup_one: Request one IRQ or more
665  *
666  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
667  *
668  * Return: 0 on success and non zero on failures.
669  */
670 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
671 {
672 	unsigned int irq_flags = PCI_IRQ_MSIX;
673 	int max_vectors;
674 	int retval;
675 	int i;
676 	struct irq_affinity desc = { .pre_vectors =  1};
677 
678 	mpi3mr_cleanup_isr(mrioc);
679 
680 	if (setup_one || reset_devices)
681 		max_vectors = 1;
682 	else {
683 		max_vectors =
684 		    min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
685 
686 		ioc_info(mrioc,
687 		    "MSI-X vectors supported: %d, no of cores: %d,",
688 		    mrioc->msix_count, mrioc->cpu_count);
689 		ioc_info(mrioc,
690 		    "MSI-x vectors requested: %d\n", max_vectors);
691 	}
692 
693 	irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
694 
695 	mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
696 	retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
697 				1, max_vectors, irq_flags, &desc);
698 	if (retval < 0) {
699 		ioc_err(mrioc, "Cannot alloc irq vectors\n");
700 		goto out_failed;
701 	}
702 	if (retval != max_vectors) {
703 		ioc_info(mrioc,
704 		    "allocated vectors (%d) are less than configured (%d)\n",
705 		    retval, max_vectors);
706 		/*
707 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
708 		 * between Admin queue and operational queue
709 		 */
710 		if (retval == 1)
711 			mrioc->op_reply_q_offset = 0;
712 
713 		max_vectors = retval;
714 	}
715 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
716 	    GFP_KERNEL);
717 	if (!mrioc->intr_info) {
718 		retval = -ENOMEM;
719 		pci_free_irq_vectors(mrioc->pdev);
720 		goto out_failed;
721 	}
722 	for (i = 0; i < max_vectors; i++) {
723 		retval = mpi3mr_request_irq(mrioc, i);
724 		if (retval) {
725 			mrioc->intr_info_count = i;
726 			goto out_failed;
727 		}
728 	}
729 	mrioc->intr_info_count = max_vectors;
730 	mpi3mr_ioc_enable_intr(mrioc);
731 	return 0;
732 
733 out_failed:
734 	mpi3mr_cleanup_isr(mrioc);
735 
736 	return retval;
737 }
738 
/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
750 
751 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
752 {
753 	int i;
754 	char *name = NULL;
755 
756 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
757 		if (mrioc_states[i].value == mrioc_state) {
758 			name = mrioc_states[i].name;
759 			break;
760 		}
761 	}
762 	return name;
763 }
764 
765 /* Reset reason to name mapper structure*/
766 static const struct {
767 	enum mpi3mr_reset_reason value;
768 	char *name;
769 } mpi3mr_reset_reason_codes[] = {
770 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
771 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
772 	{ MPI3MR_RESET_FROM_IOCTL, "application invocation" },
773 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
774 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
775 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
776 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
777 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
778 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
779 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
780 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
781 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
782 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
783 	{
784 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
785 		"create request queue timeout"
786 	},
787 	{
788 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
789 		"create reply queue timeout"
790 	},
791 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
792 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
793 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
794 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
795 	{
796 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
797 		"component image activation timeout"
798 	},
799 	{
800 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
801 		"get package version timeout"
802 	},
803 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
804 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
805 };
806 
807 /**
808  * mpi3mr_reset_rc_name - get reset reason code name
809  * @reason_code: reset reason code value
810  *
811  * Map reset reason to an NULL terminated ASCII string
812  *
813  * Return: name corresponding to reset reason value or NULL.
814  */
815 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
816 {
817 	int i;
818 	char *name = NULL;
819 
820 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
821 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
822 			name = mpi3mr_reset_reason_codes[i].name;
823 			break;
824 		}
825 	}
826 	return name;
827 }
828 
/* Reset type to name mapper structure */
static const struct {
	u16 reset_type;
	char *name;
} mpi3mr_reset_types[] = {
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
};
837 
838 /**
839  * mpi3mr_reset_type_name - get reset type name
840  * @reset_type: reset type value
841  *
842  * Map reset type to an NULL terminated ASCII string
843  *
844  * Return: name corresponding to reset type value or NULL.
845  */
846 static const char *mpi3mr_reset_type_name(u16 reset_type)
847 {
848 	int i;
849 	char *name = NULL;
850 
851 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
852 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
853 			name = mpi3mr_reset_types[i].name;
854 			break;
855 		}
856 	}
857 	return name;
858 }
859 
860 /**
861  * mpi3mr_print_fault_info - Display fault information
862  * @mrioc: Adapter instance reference
863  *
864  * Display the controller fault information if there is a
865  * controller fault.
866  *
867  * Return: Nothing.
868  */
static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status, code, code1, code2, code3;

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Only log when the controller actually reports a fault. */
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
		/* Fault code plus the three additional fault-info registers. */
		code = readl(&mrioc->sysif_regs->fault);
		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
		code3 = readl(&mrioc->sysif_regs->fault_info[2]);

		ioc_info(mrioc,
		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
		    code, code1, code2, code3);
	}
}
886 
887 /**
888  * mpi3mr_get_iocstate - Get IOC State
889  * @mrioc: Adapter instance reference
890  *
891  * Return a proper IOC state enum based on the IOC status and
892  * IOC configuration and unrcoverable state of the controller.
893  *
894  * Return: Current IOC state.
895  */
896 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
897 {
898 	u32 ioc_status, ioc_config;
899 	u8 ready, enabled;
900 
901 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
902 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
903 
904 	if (mrioc->unrecoverable)
905 		return MRIOC_STATE_UNRECOVERABLE;
906 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
907 		return MRIOC_STATE_FAULT;
908 
909 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
910 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
911 
912 	if (ready && enabled)
913 		return MRIOC_STATE_READY;
914 	if ((!ready) && (!enabled))
915 		return MRIOC_STATE_RESET;
916 	if ((!ready) && (enabled))
917 		return MRIOC_STATE_BECOMING_READY;
918 
919 	return MRIOC_STATE_RESET_REQUESTED;
920 }
921 
922 /**
923  * mpi3mr_clear_reset_history - clear reset history
924  * @mrioc: Adapter instance reference
925  *
926  * Write the reset history bit in IOC status to clear the bit,
927  * if it is already set.
928  *
929  * Return: Nothing.
930  */
static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_status;

	/*
	 * The reset-history bit is write-1-to-clear: writing back the value
	 * just read (with the bit set) clears it.
	 */
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
}
939 
940 /**
941  * mpi3mr_issue_and_process_mur - Message unit Reset handler
942  * @mrioc: Adapter instance reference
943  * @reset_reason: Reset reason code
944  *
945  * Issue Message unit Reset to the controller and wait for it to
946  * be complete.
947  *
948  * Return: 0 on success, -1 on failure.
949  */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record why we reset in scratchpad 0 for post-mortem debugging. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing ENABLE_IOC triggers the message unit reset. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* ready_timeout is in seconds; poll every 100ms. */
	timeout = mrioc->ready_timeout * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			/* Success: not ready, not faulted, not enabled. */
			if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
			      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
			    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
				retval = 0;
				break;
			}
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
991 
992 /**
993  * mpi3mr_bring_ioc_ready - Bring controller to ready state
994  * @mrioc: Adapter instance reference
995  *
996  * Set Enable IOC bit in IOC configuration register and wait for
997  * the controller to become ready.
998  *
999  * Return: 0 on success, appropriate error on failure.
1000  */
1001 static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
1002 {
1003 	u32 ioc_config, ioc_status, timeout;
1004 	int retval = 0;
1005 	enum mpi3mr_iocstate ioc_state;
1006 	u64 base_info;
1007 
1008 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1009 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1010 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
1011 	ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
1012 	    ioc_status, ioc_config, base_info);
1013 
1014 	/*The timeout value is in 2sec unit, changing it to seconds*/
1015 	mrioc->ready_timeout =
1016 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
1017 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
1018 
1019 	ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
1020 
1021 	ioc_state = mpi3mr_get_iocstate(mrioc);
1022 	ioc_info(mrioc, "controller is in %s state during detection\n",
1023 	    mpi3mr_iocstate_name(ioc_state));
1024 
1025 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
1026 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
1027 		timeout = mrioc->ready_timeout * 10;
1028 		do {
1029 			msleep(100);
1030 		} while (--timeout);
1031 
1032 		ioc_state = mpi3mr_get_iocstate(mrioc);
1033 		ioc_info(mrioc,
1034 		    "controller is in %s state after waiting to reset\n",
1035 		    mpi3mr_iocstate_name(ioc_state));
1036 	}
1037 
1038 	if (ioc_state == MRIOC_STATE_READY) {
1039 		ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
1040 		retval = mpi3mr_issue_and_process_mur(mrioc,
1041 		    MPI3MR_RESET_FROM_BRINGUP);
1042 		ioc_state = mpi3mr_get_iocstate(mrioc);
1043 		if (retval)
1044 			ioc_err(mrioc,
1045 			    "message unit reset failed with error %d current state %s\n",
1046 			    retval, mpi3mr_iocstate_name(ioc_state));
1047 	}
1048 	if (ioc_state != MRIOC_STATE_RESET) {
1049 		mpi3mr_print_fault_info(mrioc);
1050 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
1051 		retval = mpi3mr_issue_reset(mrioc,
1052 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
1053 		    MPI3MR_RESET_FROM_BRINGUP);
1054 		if (retval) {
1055 			ioc_err(mrioc,
1056 			    "soft reset failed with error %d\n", retval);
1057 			goto out_failed;
1058 		}
1059 	}
1060 	ioc_state = mpi3mr_get_iocstate(mrioc);
1061 	if (ioc_state != MRIOC_STATE_RESET) {
1062 		ioc_err(mrioc,
1063 		    "cannot bring controller to reset state, current state: %s\n",
1064 		    mpi3mr_iocstate_name(ioc_state));
1065 		goto out_failed;
1066 	}
1067 	mpi3mr_clear_reset_history(mrioc);
1068 	retval = mpi3mr_setup_admin_qpair(mrioc);
1069 	if (retval) {
1070 		ioc_err(mrioc, "failed to setup admin queues: error %d\n",
1071 		    retval);
1072 		goto out_failed;
1073 	}
1074 
1075 	ioc_info(mrioc, "bringing controller to ready state\n");
1076 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1077 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
1078 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1079 
1080 	timeout = mrioc->ready_timeout * 10;
1081 	do {
1082 		ioc_state = mpi3mr_get_iocstate(mrioc);
1083 		if (ioc_state == MRIOC_STATE_READY) {
1084 			ioc_info(mrioc,
1085 			    "successfully transistioned to %s state\n",
1086 			    mpi3mr_iocstate_name(ioc_state));
1087 			return 0;
1088 		}
1089 		msleep(100);
1090 	} while (--timeout);
1091 
1092 out_failed:
1093 	ioc_state = mpi3mr_get_iocstate(mrioc);
1094 	ioc_err(mrioc,
1095 	    "failed to bring to ready state,  current state: %s\n",
1096 	    mpi3mr_iocstate_name(ioc_state));
1097 	return retval;
1098 }
1099 
1100 /**
1101  * mpi3mr_soft_reset_success - Check softreset is success or not
1102  * @ioc_status: IOC status register value
1103  * @ioc_config: IOC config register value
1104  *
1105  * Check whether the soft reset is successful or not based on
1106  * IOC status and IOC config register values.
1107  *
1108  * Return: True when the soft reset is success, false otherwise.
1109  */
1110 static inline bool
1111 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1112 {
1113 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1114 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1115 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1116 		return true;
1117 	return false;
1118 }
1119 
1120 /**
1121  * mpi3mr_diagfault_success - Check diag fault is success or not
1122  * @mrioc: Adapter reference
1123  * @ioc_status: IOC status register value
1124  *
1125  * Check whether the controller hit diag reset fault code.
1126  *
1127  * Return: True when there is diag fault, false otherwise.
1128  */
1129 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1130 	u32 ioc_status)
1131 {
1132 	u32 fault;
1133 
1134 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1135 		return false;
1136 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1137 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
1138 		return true;
1139 	return false;
1140 }
1141 
1142 /**
1143  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1144  * @mrioc: Adapter reference
1145  *
1146  * Set diag save bit in IOC configuration register to enable
1147  * snapdump.
1148  *
1149  * Return: Nothing.
1150  */
1151 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1152 {
1153 	u32 ioc_config;
1154 
1155 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1156 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1157 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1158 }
1159 
/**
 * mpi3mr_issue_reset - Issue reset to the controller
 * @mrioc: Adapter reference
 * @reset_type: Reset type
 * @reset_reason: Reset reason code
 *
 * Unlock the host diagnostic registers and write the specific
 * reset type to that, wait for reset acknowledgment from the
 * controller, if the reset is not successful retry for the
 * predefined number of times.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
	u32 reset_reason)
{
	int retval = -1;
	u8 unlock_retry_count, reset_retry_count = 0;
	u32 host_diagnostic, timeout, ioc_status, ioc_config;

	/* Block user space PCI config access for the duration of the reset */
	pci_cfg_access_lock(mrioc->pdev);
	/* Only soft reset and diag fault reset are handled by this routine */
	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
		goto out;
	if (mrioc->unrecoverable)
		goto out;
retry_reset:
	unlock_retry_count = 0;
	mpi3mr_clear_reset_history(mrioc);
	do {
		ioc_info(mrioc,
		    "Write magic sequence to unlock host diag register (retry=%d)\n",
		    ++unlock_retry_count);
		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
			/* Diag register refused to unlock - give up on the IOC */
			writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
			mrioc->unrecoverable = 1;
			goto out;
		}

		/* Magic key sequence that unlocks host diagnostic writes */
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
		    &mrioc->sysif_regs->write_sequence);
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
		    &mrioc->sysif_regs->write_sequence);
		usleep_range(1000, 1100);
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		ioc_info(mrioc,
		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
		    unlock_retry_count, host_diagnostic);
	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));

	/* Record the reset reason where firmware diagnostics can find it */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
	    mpi3mr_reset_type_name(reset_type),
	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
	/* Trigger the reset by writing the action into host_diagnostic */
	writel(host_diagnostic | reset_type,
	    &mrioc->sysif_regs->host_diagnostic);
	timeout = mrioc->ready_timeout * 10;
	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
		/* Poll for reset history plus the soft-reset success pattern */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			if (ioc_status &
			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
				mpi3mr_clear_reset_history(mrioc);
				ioc_config =
				    readl(&mrioc->sysif_regs->ioc_configuration);
				if (mpi3mr_soft_reset_success(ioc_status,
				    ioc_config)) {
					retval = 0;
					break;
				}
			}
			msleep(100);
		} while (--timeout);
		/* Re-lock the host diagnostic register */
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
		    &mrioc->sysif_regs->write_sequence);
	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
		/* Poll for the diag-fault-reset fault code to appear */
		do {
			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
				retval = 0;
				break;
			}
			msleep(100);
		} while (--timeout);
		mpi3mr_clear_reset_history(mrioc);
		/* Re-lock the host diagnostic register */
		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
		    &mrioc->sysif_regs->write_sequence);
	}
	/* Reset failed: retry the whole sequence a bounded number of times */
	if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
		ioc_info(mrioc,
		    "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
		    reset_retry_count, ioc_status, ioc_config);
		goto retry_reset;
	}

out:
	pci_cfg_access_unlock(mrioc->pdev);
	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc,
	    "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status,
	    ioc_config);
	return retval;
}
1278 
/**
 * mpi3mr_admin_request_post - Post request to admin queue
 * @mrioc: Adapter reference
 * @admin_req: MPI3 request
 * @admin_req_sz: Request size
 * @ignore_reset: Ignore reset in process
 *
 * Post the MPI3 request into admin request queue and
 * inform the controller, if the queue is full return
 * appropriate error.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
	u16 admin_req_sz, u8 ignore_reset)
{
	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
	int retval = 0;
	unsigned long flags;
	u8 *areq_entry;

	if (mrioc->unrecoverable) {
		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
		return -EFAULT;
	}

	/* Serialize producer index updates for the single admin queue */
	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
	areq_pi = mrioc->admin_req_pi;
	areq_ci = mrioc->admin_req_ci;
	max_entries = mrioc->num_admin_req;
	/* Circular queue is full when PI sits one entry behind CI */
	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
	    (areq_pi == (max_entries - 1)))) {
		ioc_err(mrioc, "AdminReqQ full condition detected\n");
		retval = -EAGAIN;
		goto out;
	}
	/* Reset-time internal commands pass ignore_reset to bypass this gate */
	if (!ignore_reset && mrioc->reset_in_progress) {
		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}
	/* Copy the request into the fixed-size frame slot at PI */
	areq_entry = (u8 *)mrioc->admin_req_base +
	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);

	if (++areq_pi == max_entries)
		areq_pi = 0;
	mrioc->admin_req_pi = areq_pi;

	/* Ring the doorbell: publish the new PI to the controller */
	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);

out:
	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);

	return retval;
}
1336 
1337 /**
1338  * mpi3mr_free_op_req_q_segments - free request memory segments
1339  * @mrioc: Adapter instance reference
1340  * @q_idx: operational request queue index
1341  *
1342  * Free memory segments allocated for operational request queue
1343  *
1344  * Return: Nothing.
1345  */
1346 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1347 {
1348 	u16 j;
1349 	int size;
1350 	struct segments *segments;
1351 
1352 	segments = mrioc->req_qinfo[q_idx].q_segments;
1353 	if (!segments)
1354 		return;
1355 
1356 	if (mrioc->enable_segqueue) {
1357 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1358 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1359 			dma_free_coherent(&mrioc->pdev->dev,
1360 			    MPI3MR_MAX_SEG_LIST_SIZE,
1361 			    mrioc->req_qinfo[q_idx].q_segment_list,
1362 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1363 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1364 		}
1365 	} else
1366 		size = mrioc->req_qinfo[q_idx].num_requests *
1367 		    mrioc->facts.op_req_sz;
1368 
1369 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1370 		if (!segments[j].segment)
1371 			continue;
1372 		dma_free_coherent(&mrioc->pdev->dev,
1373 		    size, segments[j].segment, segments[j].segment_dma);
1374 		segments[j].segment = NULL;
1375 	}
1376 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1377 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1378 	mrioc->req_qinfo[q_idx].qid = 0;
1379 }
1380 
1381 /**
1382  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1383  * @mrioc: Adapter instance reference
1384  * @q_idx: operational reply queue index
1385  *
1386  * Free memory segments allocated for operational reply queue
1387  *
1388  * Return: Nothing.
1389  */
1390 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1391 {
1392 	u16 j;
1393 	int size;
1394 	struct segments *segments;
1395 
1396 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1397 	if (!segments)
1398 		return;
1399 
1400 	if (mrioc->enable_segqueue) {
1401 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1402 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1403 			dma_free_coherent(&mrioc->pdev->dev,
1404 			    MPI3MR_MAX_SEG_LIST_SIZE,
1405 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1406 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1407 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1408 		}
1409 	} else
1410 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1411 		    mrioc->op_reply_desc_sz;
1412 
1413 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1414 		if (!segments[j].segment)
1415 			continue;
1416 		dma_free_coherent(&mrioc->pdev->dev,
1417 		    size, segments[j].segment, segments[j].segment_dma);
1418 		segments[j].segment = NULL;
1419 	}
1420 
1421 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1422 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1423 	mrioc->op_reply_qinfo[q_idx].qid = 0;
1424 }
1425 
/**
 * mpi3mr_delete_op_reply_q - delete operational reply queue
 * @mrioc: Adapter instance reference
 * @qidx: operational reply queue index
 *
 * Delete operational reply queue by issuing MPI request
 * through admin queue.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct mpi3_delete_reply_queue_request delq_req;
	int retval = 0;
	u16 reply_qid = 0, midx;

	reply_qid = mrioc->op_reply_qinfo[qidx].qid;

	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);

	/* qid 0 means this reply queue was never created */
	if (!reply_qid)	{
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
		goto out;
	}

	memset(&delq_req, 0, sizeof(delq_req));
	/* init_cmds is a single-use command slot; serialize via its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
	delq_req.queue_id = cpu_to_le16(reply_qid);

	init_completion(&mrioc->init_cmds.done);
	/* ignore_reset=1: queue deletion must be allowed during reset */
	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
	    1);
	if (retval) {
		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* No reply within timeout - fault the IOC to capture snapdump */
		ioc_err(mrioc, "delete reply queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Detach the queue from its interrupt vector and free its memory */
	mrioc->intr_info[midx].op_reply_q = NULL;

	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
1502 
/**
 * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational reply
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Each fixed-size segment holds a whole number of replies */
		op_reply_q->segment_qd =
		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;

		size = MPI3MR_OP_REP_Q_SEG_SIZE;

		/* Segment list holds the DMA address of each segment */
		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_reply_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
	} else {
		/* Non-segmented: a single buffer covers the whole queue */
		op_reply_q->segment_qd = op_reply_q->num_replies;
		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
	}

	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
	    op_reply_q->segment_qd);

	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_reply_q->q_segments)
		return -ENOMEM;

	/*
	 * On failure below, partially allocated segments are released by
	 * the caller via mpi3mr_free_op_reply_q_segments().
	 */
	segments = op_reply_q->q_segments;
	for (i = 0; i < op_reply_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			/*
			 * NOTE(review): the (unsigned long) cast would truncate
			 * a 64-bit dma_addr_t on 32-bit hosts - confirm such
			 * configurations are not supported by this adapter.
			 */
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}
1559 
/**
 * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
 * @mrioc: Adapter instance reference
 * @qidx: request queue index
 *
 * Allocate segmented memory pools for operational request
 * queue.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
{
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
	int i, size;
	u64 *q_segment_list_entry = NULL;
	struct segments *segments;

	if (mrioc->enable_segqueue) {
		/* Each fixed-size segment holds a whole number of requests */
		op_req_q->segment_qd =
		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;

		size = MPI3MR_OP_REQ_Q_SEG_SIZE;

		/* Segment list holds the DMA address of each segment */
		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
		    GFP_KERNEL);
		if (!op_req_q->q_segment_list)
			return -ENOMEM;
		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;

	} else {
		/* Non-segmented: a single buffer covers the whole queue */
		op_req_q->segment_qd = op_req_q->num_requests;
		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
	}

	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
	    op_req_q->segment_qd);

	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
	    sizeof(struct segments), GFP_KERNEL);
	if (!op_req_q->q_segments)
		return -ENOMEM;

	/*
	 * On failure below, partially allocated segments are released by
	 * the caller via mpi3mr_free_op_req_q_segments().
	 */
	segments = op_req_q->q_segments;
	for (i = 0; i < op_req_q->num_segments; i++) {
		segments[i].segment =
		    dma_alloc_coherent(&mrioc->pdev->dev,
		    size, &segments[i].segment_dma, GFP_KERNEL);
		if (!segments[i].segment)
			return -ENOMEM;
		if (mrioc->enable_segqueue)
			/*
			 * NOTE(review): the (unsigned long) cast would truncate
			 * a 64-bit dma_addr_t on 32-bit hosts - confirm such
			 * configurations are not supported by this adapter.
			 */
			q_segment_list_entry[i] =
			    (unsigned long)segments[i].segment_dma;
	}

	return 0;
}
1617 
1618 /**
1619  * mpi3mr_create_op_reply_q - create operational reply queue
1620  * @mrioc: Adapter instance reference
1621  * @qidx: operational reply queue index
1622  *
1623  * Create operatinal reply queue by issuing MPI request
1624  * through admin queue.
1625  *
1626  * Return:  0 on success, non-zero on failure.
1627  */
1628 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1629 {
1630 	struct mpi3_create_reply_queue_request create_req;
1631 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1632 	int retval = 0;
1633 	u16 reply_qid = 0, midx;
1634 
1635 	reply_qid = op_reply_q->qid;
1636 
1637 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1638 
1639 	if (reply_qid) {
1640 		retval = -1;
1641 		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1642 		    reply_qid);
1643 
1644 		return retval;
1645 	}
1646 
1647 	reply_qid = qidx + 1;
1648 	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
1649 	op_reply_q->ci = 0;
1650 	op_reply_q->ephase = 1;
1651 	atomic_set(&op_reply_q->pend_ios, 0);
1652 	atomic_set(&op_reply_q->in_use, 0);
1653 	op_reply_q->enable_irq_poll = false;
1654 
1655 	if (!op_reply_q->q_segments) {
1656 		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
1657 		if (retval) {
1658 			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1659 			goto out;
1660 		}
1661 	}
1662 
1663 	memset(&create_req, 0, sizeof(create_req));
1664 	mutex_lock(&mrioc->init_cmds.mutex);
1665 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1666 		retval = -1;
1667 		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
1668 		goto out_unlock;
1669 	}
1670 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1671 	mrioc->init_cmds.is_waiting = 1;
1672 	mrioc->init_cmds.callback = NULL;
1673 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1674 	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
1675 	create_req.queue_id = cpu_to_le16(reply_qid);
1676 	create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
1677 	create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
1678 	if (mrioc->enable_segqueue) {
1679 		create_req.flags |=
1680 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1681 		create_req.base_address = cpu_to_le64(
1682 		    op_reply_q->q_segment_list_dma);
1683 	} else
1684 		create_req.base_address = cpu_to_le64(
1685 		    op_reply_q->q_segments[0].segment_dma);
1686 
1687 	create_req.size = cpu_to_le16(op_reply_q->num_replies);
1688 
1689 	init_completion(&mrioc->init_cmds.done);
1690 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
1691 	    sizeof(create_req), 1);
1692 	if (retval) {
1693 		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
1694 		goto out_unlock;
1695 	}
1696 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1697 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1698 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1699 		ioc_err(mrioc, "create reply queue timed out\n");
1700 		mpi3mr_check_rh_fault_ioc(mrioc,
1701 		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
1702 		retval = -1;
1703 		goto out_unlock;
1704 	}
1705 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1706 	    != MPI3_IOCSTATUS_SUCCESS) {
1707 		ioc_err(mrioc,
1708 		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1709 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1710 		    mrioc->init_cmds.ioc_loginfo);
1711 		retval = -1;
1712 		goto out_unlock;
1713 	}
1714 	op_reply_q->qid = reply_qid;
1715 	mrioc->intr_info[midx].op_reply_q = op_reply_q;
1716 
1717 out_unlock:
1718 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1719 	mutex_unlock(&mrioc->init_cmds.mutex);
1720 out:
1721 
1722 	return retval;
1723 }
1724 
/**
 * mpi3mr_create_op_req_q - create operational request queue
 * @mrioc: Adapter instance reference
 * @idx: operational request queue index
 * @reply_qid: Reply queue ID
 *
 * Create operational request queue by issuing MPI request
 * through admin queue.
 *
 * Return:  0 on success, non-zero on failure.
 */
static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
	u16 reply_qid)
{
	struct mpi3_create_request_queue_request create_req;
	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
	int retval = 0;
	u16 req_qid = 0;

	req_qid = op_req_q->qid;

	/* A non-zero qid means this queue has already been created */
	if (req_qid) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
		    req_qid);

		return retval;
	}
	/* Controller queue IDs are 1-based; qid 0 means "not created" */
	req_qid = idx + 1;

	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
	op_req_q->ci = 0;
	op_req_q->pi = 0;
	op_req_q->reply_qid = reply_qid;
	spin_lock_init(&op_req_q->q_lock);

	if (!op_req_q->q_segments) {
		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
		if (retval) {
			/* Release any partially allocated segments */
			mpi3mr_free_op_req_q_segments(mrioc, idx);
			goto out;
		}
	}

	memset(&create_req, 0, sizeof(create_req));
	/* init_cmds is a single-use command slot; serialize via its mutex */
	mutex_lock(&mrioc->init_cmds.mutex);
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
		goto out_unlock;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
	create_req.queue_id = cpu_to_le16(req_qid);
	if (mrioc->enable_segqueue) {
		create_req.flags =
		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segment_list_dma);
	} else
		create_req.base_address = cpu_to_le64(
		    op_req_q->q_segments[0].segment_dma);
	create_req.reply_queue_id = cpu_to_le16(reply_qid);
	create_req.size = cpu_to_le16(op_req_q->num_requests);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &create_req,
	    sizeof(create_req), 1);
	if (retval) {
		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/* No reply within timeout - fault the IOC to capture snapdump */
		ioc_err(mrioc, "create request queue timed out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	op_req_q->qid = req_qid;

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:

	return retval;
}
1827 
1828 /**
1829  * mpi3mr_create_op_queues - create operational queue pairs
1830  * @mrioc: Adapter instance reference
1831  *
1832  * Allocate memory for operational queue meta data and call
1833  * create request and reply queue functions.
1834  *
1835  * Return: 0 on success, non-zero on failures.
1836  */
1837 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
1838 {
1839 	int retval = 0;
1840 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
1841 
1842 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
1843 	    mrioc->facts.max_op_req_q);
1844 
1845 	msix_count_op_q =
1846 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
1847 	if (!mrioc->num_queues)
1848 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
1849 	num_queues = mrioc->num_queues;
1850 	ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
1851 	    num_queues);
1852 
1853 	if (!mrioc->req_qinfo) {
1854 		mrioc->req_qinfo = kcalloc(num_queues,
1855 		    sizeof(struct op_req_qinfo), GFP_KERNEL);
1856 		if (!mrioc->req_qinfo) {
1857 			retval = -1;
1858 			goto out_failed;
1859 		}
1860 
1861 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
1862 		    num_queues, GFP_KERNEL);
1863 		if (!mrioc->op_reply_qinfo) {
1864 			retval = -1;
1865 			goto out_failed;
1866 		}
1867 	}
1868 
1869 	if (mrioc->enable_segqueue)
1870 		ioc_info(mrioc,
1871 		    "allocating operational queues through segmented queues\n");
1872 
1873 	for (i = 0; i < num_queues; i++) {
1874 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
1875 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
1876 			break;
1877 		}
1878 		if (mpi3mr_create_op_req_q(mrioc, i,
1879 		    mrioc->op_reply_qinfo[i].qid)) {
1880 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
1881 			mpi3mr_delete_op_reply_q(mrioc, i);
1882 			break;
1883 		}
1884 	}
1885 
1886 	if (i == 0) {
1887 		/* Not even one queue is created successfully*/
1888 		retval = -1;
1889 		goto out_failed;
1890 	}
1891 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
1892 	ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
1893 	    mrioc->num_op_reply_q);
1894 
1895 	return retval;
1896 out_failed:
1897 	kfree(mrioc->req_qinfo);
1898 	mrioc->req_qinfo = NULL;
1899 
1900 	kfree(mrioc->op_reply_qinfo);
1901 	mrioc->op_reply_qinfo = NULL;
1902 
1903 	return retval;
1904 }
1905 
/**
 * mpi3mr_op_request_post - Post request to operational queue
 * @mrioc: Adapter reference
 * @op_req_q: Operational request queue info
 * @req: MPI3 request
 *
 * Post the MPI3 request into operational request queue and
 * inform the controller, if the queue is full return
 * appropriate error.
 *
 * Return: 0 on success, non-zero on failure.
 */
int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
	struct op_req_qinfo *op_req_q, u8 *req)
{
	u16 pi = 0, max_entries, reply_qidx = 0, midx;
	int retval = 0;
	unsigned long flags;
	u8 *req_entry;
	void *segment_base_addr;
	u16 req_sz = mrioc->facts.op_req_sz;
	struct segments *segments = op_req_q->q_segments;

	/* Queue IDs are 1-based; derive the 0-based reply queue index */
	reply_qidx = op_req_q->reply_qid - 1;

	if (mrioc->unrecoverable)
		return -EFAULT;

	spin_lock_irqsave(&op_req_q->q_lock, flags);
	pi = op_req_q->pi;
	max_entries = op_req_q->num_requests;

	if (mpi3mr_check_req_qfull(op_req_q)) {
		/*
		 * Queue looks full: drain the paired reply queue to advance
		 * the consumer index, then re-check before giving up.
		 */
		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
		    reply_qidx, mrioc->op_reply_q_offset);
		mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);

		if (mpi3mr_check_req_qfull(op_req_q)) {
			retval = -EAGAIN;
			goto out;
		}
	}

	if (mrioc->reset_in_progress) {
		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
		retval = -EAGAIN;
		goto out;
	}

	/* Locate the request slot within the (possibly segmented) queue */
	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
	req_entry = (u8 *)segment_base_addr +
	    ((pi % op_req_q->segment_qd) * req_sz);

	memset(req_entry, 0, req_sz);
	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);

	if (++pi == max_entries)
		pi = 0;
	op_req_q->pi = pi;

	/* Enable reply-queue irq polling once outstanding I/O is high */
	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;

	/* Ring the doorbell: publish the new producer index */
	writel(op_req_q->pi,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);

out:
	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
	return retval;
}
1977 
1978 /**
1979  * mpi3mr_check_rh_fault_ioc - check reset history and fault
1980  * controller
1981  * @mrioc: Adapter instance reference
1982  * @reason_code, reason code for the fault.
1983  *
1984  * This routine will save snapdump and fault the controller with
1985  * the given reason code if it is not already in the fault or
1986  * not asynchronosuly reset. This will be used to handle
1987  * initilaization time faults/resets/timeout as in those cases
1988  * immediate soft reset invocation is not required.
1989  *
1990  * Return:  None.
1991  */
1992 void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
1993 {
1994 	u32 ioc_status, host_diagnostic, timeout;
1995 
1996 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1997 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
1998 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
1999 		mpi3mr_print_fault_info(mrioc);
2000 		return;
2001 	}
2002 	mpi3mr_set_diagsave(mrioc);
2003 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2004 	    reason_code);
2005 	timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
2006 	do {
2007 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2008 		if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
2009 			break;
2010 		msleep(100);
2011 	} while (--timeout);
2012 }
2013 
2014 /**
2015  * mpi3mr_sync_timestamp - Issue time stamp sync request
2016  * @mrioc: Adapter reference
2017  *
2018  * Issue IO unit control MPI request to synchornize firmware
2019  * timestamp with host time.
2020  *
2021  * Return: 0 on success, non-zero on failure.
2022  */
2023 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
2024 {
2025 	ktime_t current_time;
2026 	struct mpi3_iounit_control_request iou_ctrl;
2027 	int retval = 0;
2028 
2029 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2030 	mutex_lock(&mrioc->init_cmds.mutex);
2031 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2032 		retval = -1;
2033 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
2034 		mutex_unlock(&mrioc->init_cmds.mutex);
2035 		goto out;
2036 	}
2037 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2038 	mrioc->init_cmds.is_waiting = 1;
2039 	mrioc->init_cmds.callback = NULL;
2040 	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2041 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2042 	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
2043 	current_time = ktime_get_real();
2044 	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
2045 
2046 	init_completion(&mrioc->init_cmds.done);
2047 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
2048 	    sizeof(iou_ctrl), 0);
2049 	if (retval) {
2050 		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
2051 		goto out_unlock;
2052 	}
2053 
2054 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2055 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2056 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2057 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
2058 		mrioc->init_cmds.is_waiting = 0;
2059 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
2060 			mpi3mr_soft_reset_handler(mrioc,
2061 			    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
2062 		retval = -1;
2063 		goto out_unlock;
2064 	}
2065 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2066 	    != MPI3_IOCSTATUS_SUCCESS) {
2067 		ioc_err(mrioc,
2068 		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2069 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2070 		    mrioc->init_cmds.ioc_loginfo);
2071 		retval = -1;
2072 		goto out_unlock;
2073 	}
2074 
2075 out_unlock:
2076 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2077 	mutex_unlock(&mrioc->init_cmds.mutex);
2078 
2079 out:
2080 	return retval;
2081 }
2082 
2083 /**
2084  * mpi3mr_print_pkg_ver - display controller fw package version
2085  * @mrioc: Adapter reference
2086  *
2087  * Retrieve firmware package version from the component image
2088  * header of the controller flash and display it.
2089  *
2090  * Return: 0 on success and non-zero on failure.
2091  */
2092 static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
2093 {
2094 	struct mpi3_ci_upload_request ci_upload;
2095 	int retval = -1;
2096 	void *data = NULL;
2097 	dma_addr_t data_dma;
2098 	struct mpi3_ci_manifest_mpi *manifest;
2099 	u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
2100 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2101 
2102 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2103 	    GFP_KERNEL);
2104 	if (!data)
2105 		return -ENOMEM;
2106 
2107 	memset(&ci_upload, 0, sizeof(ci_upload));
2108 	mutex_lock(&mrioc->init_cmds.mutex);
2109 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2110 		ioc_err(mrioc, "sending get package version failed due to command in use\n");
2111 		mutex_unlock(&mrioc->init_cmds.mutex);
2112 		goto out;
2113 	}
2114 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2115 	mrioc->init_cmds.is_waiting = 1;
2116 	mrioc->init_cmds.callback = NULL;
2117 	ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2118 	ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
2119 	ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
2120 	ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
2121 	ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
2122 	ci_upload.segment_size = cpu_to_le32(data_len);
2123 
2124 	mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
2125 	    data_dma);
2126 	init_completion(&mrioc->init_cmds.done);
2127 	retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
2128 	    sizeof(ci_upload), 1);
2129 	if (retval) {
2130 		ioc_err(mrioc, "posting get package version failed\n");
2131 		goto out_unlock;
2132 	}
2133 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2134 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2135 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2136 		ioc_err(mrioc, "get package version timed out\n");
2137 		mpi3mr_check_rh_fault_ioc(mrioc,
2138 		    MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
2139 		retval = -1;
2140 		goto out_unlock;
2141 	}
2142 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2143 	    == MPI3_IOCSTATUS_SUCCESS) {
2144 		manifest = (struct mpi3_ci_manifest_mpi *) data;
2145 		if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
2146 			ioc_info(mrioc,
2147 			    "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
2148 			    manifest->package_version.gen_major,
2149 			    manifest->package_version.gen_minor,
2150 			    manifest->package_version.phase_major,
2151 			    manifest->package_version.phase_minor,
2152 			    manifest->package_version.customer_id,
2153 			    manifest->package_version.build_num);
2154 		}
2155 	}
2156 	retval = 0;
2157 out_unlock:
2158 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2159 	mutex_unlock(&mrioc->init_cmds.mutex);
2160 
2161 out:
2162 	if (data)
2163 		dma_free_coherent(&mrioc->pdev->dev, data_len, data,
2164 		    data_dma);
2165 	return retval;
2166 }
2167 
2168 /**
2169  * mpi3mr_watchdog_work - watchdog thread to monitor faults
2170  * @work: work struct
2171  *
2172  * Watch dog work periodically executed (1 second interval) to
2173  * monitor firmware fault and to issue periodic timer sync to
2174  * the firmware.
2175  *
2176  * Return: Nothing.
2177  */
2178 static void mpi3mr_watchdog_work(struct work_struct *work)
2179 {
2180 	struct mpi3mr_ioc *mrioc =
2181 	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
2182 	unsigned long flags;
2183 	enum mpi3mr_iocstate ioc_state;
2184 	u32 fault, host_diagnostic;
2185 
2186 	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
2187 		mrioc->ts_update_counter = 0;
2188 		mpi3mr_sync_timestamp(mrioc);
2189 	}
2190 
2191 	/*Check for fault state every one second and issue Soft reset*/
2192 	ioc_state = mpi3mr_get_iocstate(mrioc);
2193 	if (ioc_state == MRIOC_STATE_FAULT) {
2194 		fault = readl(&mrioc->sysif_regs->fault) &
2195 		    MPI3_SYSIF_FAULT_CODE_MASK;
2196 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
2197 		if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
2198 			if (!mrioc->diagsave_timeout) {
2199 				mpi3mr_print_fault_info(mrioc);
2200 				ioc_warn(mrioc, "Diag save in progress\n");
2201 			}
2202 			if ((mrioc->diagsave_timeout++) <=
2203 			    MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
2204 				goto schedule_work;
2205 		} else
2206 			mpi3mr_print_fault_info(mrioc);
2207 		mrioc->diagsave_timeout = 0;
2208 
2209 		if (fault == MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED) {
2210 			ioc_info(mrioc,
2211 			    "Factory Reset fault occurred marking controller as unrecoverable"
2212 			    );
2213 			mrioc->unrecoverable = 1;
2214 			goto out;
2215 		}
2216 
2217 		if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
2218 		    (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
2219 		    (mrioc->reset_in_progress))
2220 			goto out;
2221 		if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
2222 			mpi3mr_soft_reset_handler(mrioc,
2223 			    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
2224 		else
2225 			mpi3mr_soft_reset_handler(mrioc,
2226 			    MPI3MR_RESET_FROM_FAULT_WATCH, 0);
2227 	}
2228 
2229 schedule_work:
2230 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2231 	if (mrioc->watchdog_work_q)
2232 		queue_delayed_work(mrioc->watchdog_work_q,
2233 		    &mrioc->watchdog_work,
2234 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2235 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2236 out:
2237 	return;
2238 }
2239 
2240 /**
2241  * mpi3mr_start_watchdog - Start watchdog
2242  * @mrioc: Adapter instance reference
2243  *
2244  * Create and start the watchdog thread to monitor controller
2245  * faults.
2246  *
2247  * Return: Nothing.
2248  */
2249 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2250 {
2251 	if (mrioc->watchdog_work_q)
2252 		return;
2253 
2254 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2255 	snprintf(mrioc->watchdog_work_q_name,
2256 	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2257 	    mrioc->id);
2258 	mrioc->watchdog_work_q =
2259 	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2260 	if (!mrioc->watchdog_work_q) {
2261 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2262 		return;
2263 	}
2264 
2265 	if (mrioc->watchdog_work_q)
2266 		queue_delayed_work(mrioc->watchdog_work_q,
2267 		    &mrioc->watchdog_work,
2268 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2269 }
2270 
2271 /**
2272  * mpi3mr_stop_watchdog - Stop watchdog
2273  * @mrioc: Adapter instance reference
2274  *
2275  * Stop the watchdog thread created to monitor controller
2276  * faults.
2277  *
2278  * Return: Nothing.
2279  */
2280 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2281 {
2282 	unsigned long flags;
2283 	struct workqueue_struct *wq;
2284 
2285 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2286 	wq = mrioc->watchdog_work_q;
2287 	mrioc->watchdog_work_q = NULL;
2288 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2289 	if (wq) {
2290 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2291 			flush_workqueue(wq);
2292 		destroy_workqueue(wq);
2293 	}
2294 }
2295 
2296 /**
2297  * mpi3mr_kill_ioc - Kill the controller
2298  * @mrioc: Adapter instance reference
2299  * @reason: reason for the failure.
2300  *
2301  * If fault debug is enabled, display the fault info else issue
2302  * diag fault and freeze the system for controller debug
2303  * purpose.
2304  *
2305  * Return: Nothing.
2306  */
2307 static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
2308 {
2309 	enum mpi3mr_iocstate ioc_state;
2310 
2311 	if (!mrioc->fault_dbg)
2312 		return;
2313 
2314 	dump_stack();
2315 
2316 	ioc_state = mpi3mr_get_iocstate(mrioc);
2317 	if (ioc_state == MRIOC_STATE_FAULT)
2318 		mpi3mr_print_fault_info(mrioc);
2319 	else {
2320 		ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
2321 		    reason);
2322 		mpi3mr_diagfault_reset_handler(mrioc, reason);
2323 	}
2324 	if (mrioc->fault_dbg == 2)
2325 		for (;;)
2326 			;
2327 	else
2328 		panic("panic in %s\n", __func__);
2329 }
2330 
2331 /**
2332  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2333  * @mrioc: Adapter instance reference
2334  *
2335  * Allocate memory for admin queue pair if required and register
2336  * the admin queue with the controller.
2337  *
2338  * Return: 0 on success, non-zero on failures.
2339  */
2340 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2341 {
2342 	int retval = 0;
2343 	u32 num_admin_entries = 0;
2344 
2345 	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2346 	mrioc->num_admin_req = mrioc->admin_req_q_sz /
2347 	    MPI3MR_ADMIN_REQ_FRAME_SZ;
2348 	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2349 	mrioc->admin_req_base = NULL;
2350 
2351 	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2352 	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2353 	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
2354 	mrioc->admin_reply_ci = 0;
2355 	mrioc->admin_reply_ephase = 1;
2356 	mrioc->admin_reply_base = NULL;
2357 
2358 	if (!mrioc->admin_req_base) {
2359 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2360 		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2361 
2362 		if (!mrioc->admin_req_base) {
2363 			retval = -1;
2364 			goto out_failed;
2365 		}
2366 
2367 		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2368 		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2369 		    GFP_KERNEL);
2370 
2371 		if (!mrioc->admin_reply_base) {
2372 			retval = -1;
2373 			goto out_failed;
2374 		}
2375 	}
2376 
2377 	num_admin_entries = (mrioc->num_admin_replies << 16) |
2378 	    (mrioc->num_admin_req);
2379 	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2380 	mpi3mr_writeq(mrioc->admin_req_dma,
2381 	    &mrioc->sysif_regs->admin_request_queue_address);
2382 	mpi3mr_writeq(mrioc->admin_reply_dma,
2383 	    &mrioc->sysif_regs->admin_reply_queue_address);
2384 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2385 	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2386 	return retval;
2387 
2388 out_failed:
2389 
2390 	if (mrioc->admin_reply_base) {
2391 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2392 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
2393 		mrioc->admin_reply_base = NULL;
2394 	}
2395 	if (mrioc->admin_req_base) {
2396 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2397 		    mrioc->admin_req_base, mrioc->admin_req_dma);
2398 		mrioc->admin_req_base = NULL;
2399 	}
2400 	return retval;
2401 }
2402 
2403 /**
2404  * mpi3mr_issue_iocfacts - Send IOC Facts
2405  * @mrioc: Adapter instance reference
2406  * @facts_data: Cached IOC facts data
2407  *
2408  * Issue IOC Facts MPI request through admin queue and wait for
2409  * the completion of it or time out.
2410  *
2411  * Return: 0 on success, non-zero on failures.
2412  */
2413 static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
2414 	struct mpi3_ioc_facts_data *facts_data)
2415 {
2416 	struct mpi3_ioc_facts_request iocfacts_req;
2417 	void *data = NULL;
2418 	dma_addr_t data_dma;
2419 	u32 data_len = sizeof(*facts_data);
2420 	int retval = 0;
2421 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
2422 
2423 	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2424 	    GFP_KERNEL);
2425 
2426 	if (!data) {
2427 		retval = -1;
2428 		goto out;
2429 	}
2430 
2431 	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
2432 	mutex_lock(&mrioc->init_cmds.mutex);
2433 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2434 		retval = -1;
2435 		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
2436 		mutex_unlock(&mrioc->init_cmds.mutex);
2437 		goto out;
2438 	}
2439 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2440 	mrioc->init_cmds.is_waiting = 1;
2441 	mrioc->init_cmds.callback = NULL;
2442 	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2443 	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;
2444 
2445 	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
2446 	    data_dma);
2447 
2448 	init_completion(&mrioc->init_cmds.done);
2449 	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
2450 	    sizeof(iocfacts_req), 1);
2451 	if (retval) {
2452 		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
2453 		goto out_unlock;
2454 	}
2455 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2456 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2457 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2458 		ioc_err(mrioc, "ioc_facts timed out\n");
2459 		mpi3mr_check_rh_fault_ioc(mrioc,
2460 		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
2461 		retval = -1;
2462 		goto out_unlock;
2463 	}
2464 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2465 	    != MPI3_IOCSTATUS_SUCCESS) {
2466 		ioc_err(mrioc,
2467 		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2468 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2469 		    mrioc->init_cmds.ioc_loginfo);
2470 		retval = -1;
2471 		goto out_unlock;
2472 	}
2473 	memcpy(facts_data, (u8 *)data, data_len);
2474 out_unlock:
2475 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2476 	mutex_unlock(&mrioc->init_cmds.mutex);
2477 
2478 out:
2479 	if (data)
2480 		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);
2481 
2482 	return retval;
2483 }
2484 
2485 /**
2486  * mpi3mr_check_reset_dma_mask - Process IOC facts data
2487  * @mrioc: Adapter instance reference
2488  *
2489  * Check whether the new DMA mask requested through IOCFacts by
2490  * firmware needs to be set, if so set it .
2491  *
2492  * Return: 0 on success, non-zero on failure.
2493  */
2494 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2495 {
2496 	struct pci_dev *pdev = mrioc->pdev;
2497 	int r;
2498 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2499 
2500 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2501 		return 0;
2502 
2503 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2504 	    mrioc->dma_mask, facts_dma_mask);
2505 
2506 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2507 	if (r) {
2508 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2509 		    facts_dma_mask, r);
2510 		return r;
2511 	}
2512 	mrioc->dma_mask = facts_dma_mask;
2513 	return r;
2514 }
2515 
2516 /**
2517  * mpi3mr_process_factsdata - Process IOC facts data
2518  * @mrioc: Adapter instance reference
2519  * @facts_data: Cached IOC facts data
2520  *
2521  * Convert IOC facts data into cpu endianness and cache it in
2522  * the driver .
2523  *
2524  * Return: Nothing.
2525  */
2526 static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
2527 	struct mpi3_ioc_facts_data *facts_data)
2528 {
2529 	u32 ioc_config, req_sz, facts_flags;
2530 
2531 	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
2532 	    (sizeof(*facts_data) / 4)) {
2533 		ioc_warn(mrioc,
2534 		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
2535 		    sizeof(*facts_data),
2536 		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
2537 	}
2538 
2539 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
2540 	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
2541 	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
2542 	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
2543 		ioc_err(mrioc,
2544 		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
2545 		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
2546 	}
2547 
2548 	memset(&mrioc->facts, 0, sizeof(mrioc->facts));
2549 
2550 	facts_flags = le32_to_cpu(facts_data->flags);
2551 	mrioc->facts.op_req_sz = req_sz;
2552 	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
2553 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
2554 	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);
2555 
2556 	mrioc->facts.ioc_num = facts_data->ioc_number;
2557 	mrioc->facts.who_init = facts_data->who_init;
2558 	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
2559 	mrioc->facts.personality = (facts_flags &
2560 	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
2561 	mrioc->facts.dma_mask = (facts_flags &
2562 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
2563 	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
2564 	mrioc->facts.protocol_flags = facts_data->protocol_flags;
2565 	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
2566 	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
2567 	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
2568 	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
2569 	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
2570 	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
2571 	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
2572 	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
2573 	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
2574 	mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
2575 	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
2576 	mrioc->facts.max_pcie_switches =
2577 	    le16_to_cpu(facts_data->max_pcie_switches);
2578 	mrioc->facts.max_sasexpanders =
2579 	    le16_to_cpu(facts_data->max_sas_expanders);
2580 	mrioc->facts.max_sasinitiators =
2581 	    le16_to_cpu(facts_data->max_sas_initiators);
2582 	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
2583 	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
2584 	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
2585 	mrioc->facts.max_op_req_q =
2586 	    le16_to_cpu(facts_data->max_operational_request_queues);
2587 	mrioc->facts.max_op_reply_q =
2588 	    le16_to_cpu(facts_data->max_operational_reply_queues);
2589 	mrioc->facts.ioc_capabilities =
2590 	    le32_to_cpu(facts_data->ioc_capabilities);
2591 	mrioc->facts.fw_ver.build_num =
2592 	    le16_to_cpu(facts_data->fw_version.build_num);
2593 	mrioc->facts.fw_ver.cust_id =
2594 	    le16_to_cpu(facts_data->fw_version.customer_id);
2595 	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
2596 	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
2597 	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
2598 	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
2599 	mrioc->msix_count = min_t(int, mrioc->msix_count,
2600 	    mrioc->facts.max_msix_vectors);
2601 	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
2602 	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
2603 	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
2604 	mrioc->facts.shutdown_timeout =
2605 	    le16_to_cpu(facts_data->shutdown_timeout);
2606 
2607 	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
2608 	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
2609 	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
2610 	ioc_info(mrioc,
2611 	    "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
2612 	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
2613 	    mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
2614 	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
2615 	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
2616 	    mrioc->facts.sge_mod_shift);
2617 	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
2618 	    mrioc->facts.dma_mask, (facts_flags &
2619 	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
2620 
2621 	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
2622 
2623 	if (reset_devices)
2624 		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
2625 		    MPI3MR_HOST_IOS_KDUMP);
2626 }
2627 
2628 /**
2629  * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
2630  * @mrioc: Adapter instance reference
2631  *
2632  * Allocate and initialize the reply free buffers, sense
2633  * buffers, reply free queue and sense buffer queue.
2634  *
2635  * Return: 0 on success, non-zero on failures.
2636  */
2637 static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
2638 {
2639 	int retval = 0;
2640 	u32 sz, i;
2641 
2642 	if (mrioc->init_cmds.reply)
2643 		return retval;
2644 
2645 	mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
2646 	if (!mrioc->init_cmds.reply)
2647 		goto out_failed;
2648 
2649 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
2650 		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
2651 		    GFP_KERNEL);
2652 		if (!mrioc->dev_rmhs_cmds[i].reply)
2653 			goto out_failed;
2654 	}
2655 
2656 	mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
2657 	if (!mrioc->host_tm_cmds.reply)
2658 		goto out_failed;
2659 
2660 	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
2661 	if (mrioc->facts.max_devhandle % 8)
2662 		mrioc->dev_handle_bitmap_sz++;
2663 	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
2664 	    GFP_KERNEL);
2665 	if (!mrioc->removepend_bitmap)
2666 		goto out_failed;
2667 
2668 	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
2669 	if (MPI3MR_NUM_DEVRMCMD % 8)
2670 		mrioc->devrem_bitmap_sz++;
2671 	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
2672 	    GFP_KERNEL);
2673 	if (!mrioc->devrem_bitmap)
2674 		goto out_failed;
2675 
2676 	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
2677 	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
2678 	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
2679 	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
2680 
2681 	/* reply buffer pool, 16 byte align */
2682 	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
2683 	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
2684 	    &mrioc->pdev->dev, sz, 16, 0);
2685 	if (!mrioc->reply_buf_pool) {
2686 		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
2687 		goto out_failed;
2688 	}
2689 
2690 	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
2691 	    &mrioc->reply_buf_dma);
2692 	if (!mrioc->reply_buf)
2693 		goto out_failed;
2694 
2695 	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;
2696 
2697 	/* reply free queue, 8 byte align */
2698 	sz = mrioc->reply_free_qsz * 8;
2699 	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
2700 	    &mrioc->pdev->dev, sz, 8, 0);
2701 	if (!mrioc->reply_free_q_pool) {
2702 		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
2703 		goto out_failed;
2704 	}
2705 	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
2706 	    GFP_KERNEL, &mrioc->reply_free_q_dma);
2707 	if (!mrioc->reply_free_q)
2708 		goto out_failed;
2709 
2710 	/* sense buffer pool,  4 byte align */
2711 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2712 	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
2713 	    &mrioc->pdev->dev, sz, 4, 0);
2714 	if (!mrioc->sense_buf_pool) {
2715 		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
2716 		goto out_failed;
2717 	}
2718 	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
2719 	    &mrioc->sense_buf_dma);
2720 	if (!mrioc->sense_buf)
2721 		goto out_failed;
2722 
2723 	/* sense buffer queue, 8 byte align */
2724 	sz = mrioc->sense_buf_q_sz * 8;
2725 	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
2726 	    &mrioc->pdev->dev, sz, 8, 0);
2727 	if (!mrioc->sense_buf_q_pool) {
2728 		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
2729 		goto out_failed;
2730 	}
2731 	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
2732 	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
2733 	if (!mrioc->sense_buf_q)
2734 		goto out_failed;
2735 
2736 	return retval;
2737 
2738 out_failed:
2739 	retval = -1;
2740 	return retval;
2741 }
2742 
2743 /**
2744  * mpimr_initialize_reply_sbuf_queues - initialize reply sense
2745  * buffers
2746  * @mrioc: Adapter instance reference
2747  *
2748  * Helper function to initialize reply and sense buffers along
2749  * with some debug prints.
2750  *
2751  * Return:  None.
2752  */
2753 static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
2754 {
2755 	u32 sz, i;
2756 	dma_addr_t phy_addr;
2757 
2758 	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
2759 	ioc_info(mrioc,
2760 	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2761 	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
2762 	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
2763 	sz = mrioc->reply_free_qsz * 8;
2764 	ioc_info(mrioc,
2765 	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
2766 	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
2767 	    (unsigned long long)mrioc->reply_free_q_dma);
2768 	sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
2769 	ioc_info(mrioc,
2770 	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
2771 	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
2772 	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
2773 	sz = mrioc->sense_buf_q_sz * 8;
2774 	ioc_info(mrioc,
2775 	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
2776 	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
2777 	    (unsigned long long)mrioc->sense_buf_q_dma);
2778 
2779 	/* initialize Reply buffer Queue */
2780 	for (i = 0, phy_addr = mrioc->reply_buf_dma;
2781 	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
2782 		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
2783 	mrioc->reply_free_q[i] = cpu_to_le64(0);
2784 
2785 	/* initialize Sense Buffer Queue */
2786 	for (i = 0, phy_addr = mrioc->sense_buf_dma;
2787 	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
2788 		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
2789 	mrioc->sense_buf_q[i] = cpu_to_le64(0);
2790 }
2791 
2792 /**
2793  * mpi3mr_issue_iocinit - Send IOC Init
2794  * @mrioc: Adapter instance reference
2795  *
2796  * Issue IOC Init MPI request through admin queue and wait for
2797  * the completion of it or time out.
2798  *
2799  * Return: 0 on success, non-zero on failures.
2800  */
2801 static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
2802 {
2803 	struct mpi3_ioc_init_request iocinit_req;
2804 	struct mpi3_driver_info_layout *drv_info;
2805 	dma_addr_t data_dma;
2806 	u32 data_len = sizeof(*drv_info);
2807 	int retval = 0;
2808 	ktime_t current_time;
2809 
2810 	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
2811 	    GFP_KERNEL);
2812 	if (!drv_info) {
2813 		retval = -1;
2814 		goto out;
2815 	}
2816 	mpimr_initialize_reply_sbuf_queues(mrioc);
2817 
2818 	drv_info->information_length = cpu_to_le32(data_len);
2819 	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
2820 	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
2821 	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
2822 	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
2823 	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
2824 	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
2825 	    sizeof(drv_info->driver_release_date));
2826 	drv_info->driver_capabilities = 0;
2827 	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
2828 	    sizeof(mrioc->driver_info));
2829 
2830 	memset(&iocinit_req, 0, sizeof(iocinit_req));
2831 	mutex_lock(&mrioc->init_cmds.mutex);
2832 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2833 		retval = -1;
2834 		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
2835 		mutex_unlock(&mrioc->init_cmds.mutex);
2836 		goto out;
2837 	}
2838 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2839 	mrioc->init_cmds.is_waiting = 1;
2840 	mrioc->init_cmds.callback = NULL;
2841 	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2842 	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
2843 	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
2844 	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
2845 	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
2846 	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
2847 	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
2848 	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
2849 	iocinit_req.reply_free_queue_address =
2850 	    cpu_to_le64(mrioc->reply_free_q_dma);
2851 	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
2852 	iocinit_req.sense_buffer_free_queue_depth =
2853 	    cpu_to_le16(mrioc->sense_buf_q_sz);
2854 	iocinit_req.sense_buffer_free_queue_address =
2855 	    cpu_to_le64(mrioc->sense_buf_q_dma);
2856 	iocinit_req.driver_information_address = cpu_to_le64(data_dma);
2857 
2858 	current_time = ktime_get_real();
2859 	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));
2860 
2861 	init_completion(&mrioc->init_cmds.done);
2862 	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
2863 	    sizeof(iocinit_req), 1);
2864 	if (retval) {
2865 		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
2866 		goto out_unlock;
2867 	}
2868 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2869 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2870 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2871 		mpi3mr_check_rh_fault_ioc(mrioc,
2872 		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
2873 		ioc_err(mrioc, "ioc_init timed out\n");
2874 		retval = -1;
2875 		goto out_unlock;
2876 	}
2877 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2878 	    != MPI3_IOCSTATUS_SUCCESS) {
2879 		ioc_err(mrioc,
2880 		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2881 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2882 		    mrioc->init_cmds.ioc_loginfo);
2883 		retval = -1;
2884 		goto out_unlock;
2885 	}
2886 
2887 	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
2888 	writel(mrioc->reply_free_queue_host_index,
2889 	    &mrioc->sysif_regs->reply_free_host_index);
2890 
2891 	mrioc->sbq_host_index = mrioc->num_sense_bufs;
2892 	writel(mrioc->sbq_host_index,
2893 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
2894 out_unlock:
2895 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2896 	mutex_unlock(&mrioc->init_cmds.mutex);
2897 
2898 out:
2899 	if (drv_info)
2900 		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
2901 		    data_dma);
2902 
2903 	return retval;
2904 }
2905 
2906 /**
2907  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2908  * @mrioc: Adapter instance reference
2909  * @event: MPI event ID
2910  *
2911  * Un mask the specific event by resetting the event_mask
2912  * bitmap.
2913  *
2914  * Return: 0 on success, non-zero on failures.
2915  */
2916 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
2917 {
2918 	u32 desired_event;
2919 	u8 word;
2920 
2921 	if (event >= 128)
2922 		return;
2923 
2924 	desired_event = (1 << (event % 32));
2925 	word = event / 32;
2926 
2927 	mrioc->event_masks[word] &= ~desired_event;
2928 }
2929 
2930 /**
2931  * mpi3mr_issue_event_notification - Send event notification
2932  * @mrioc: Adapter instance reference
2933  *
2934  * Issue event notification MPI request through admin queue and
2935  * wait for the completion of it or time out.
2936  *
2937  * Return: 0 on success, non-zero on failures.
2938  */
2939 static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
2940 {
2941 	struct mpi3_event_notification_request evtnotify_req;
2942 	int retval = 0;
2943 	u8 i;
2944 
2945 	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
2946 	mutex_lock(&mrioc->init_cmds.mutex);
2947 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2948 		retval = -1;
2949 		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
2950 		mutex_unlock(&mrioc->init_cmds.mutex);
2951 		goto out;
2952 	}
2953 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2954 	mrioc->init_cmds.is_waiting = 1;
2955 	mrioc->init_cmds.callback = NULL;
2956 	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2957 	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
2958 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2959 		evtnotify_req.event_masks[i] =
2960 		    cpu_to_le32(mrioc->event_masks[i]);
2961 	init_completion(&mrioc->init_cmds.done);
2962 	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
2963 	    sizeof(evtnotify_req), 1);
2964 	if (retval) {
2965 		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
2966 		goto out_unlock;
2967 	}
2968 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2969 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2970 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2971 		ioc_err(mrioc, "event notification timed out\n");
2972 		mpi3mr_check_rh_fault_ioc(mrioc,
2973 		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
2974 		retval = -1;
2975 		goto out_unlock;
2976 	}
2977 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2978 	    != MPI3_IOCSTATUS_SUCCESS) {
2979 		ioc_err(mrioc,
2980 		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2981 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2982 		    mrioc->init_cmds.ioc_loginfo);
2983 		retval = -1;
2984 		goto out_unlock;
2985 	}
2986 
2987 out_unlock:
2988 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2989 	mutex_unlock(&mrioc->init_cmds.mutex);
2990 out:
2991 	return retval;
2992 }
2993 
2994 /**
2995  * mpi3mr_send_event_ack - Send event acknowledgment
2996  * @mrioc: Adapter instance reference
2997  * @event: MPI3 event ID
2998  * @event_ctx: Event context
2999  *
3000  * Send event acknowledgment through admin queue and wait for
3001  * it to complete.
3002  *
3003  * Return: 0 on success, non-zero on failures.
3004  */
3005 int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
3006 	u32 event_ctx)
3007 {
3008 	struct mpi3_event_ack_request evtack_req;
3009 	int retval = 0;
3010 
3011 	memset(&evtack_req, 0, sizeof(evtack_req));
3012 	mutex_lock(&mrioc->init_cmds.mutex);
3013 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3014 		retval = -1;
3015 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
3016 		mutex_unlock(&mrioc->init_cmds.mutex);
3017 		goto out;
3018 	}
3019 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3020 	mrioc->init_cmds.is_waiting = 1;
3021 	mrioc->init_cmds.callback = NULL;
3022 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3023 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
3024 	evtack_req.event = event;
3025 	evtack_req.event_context = cpu_to_le32(event_ctx);
3026 
3027 	init_completion(&mrioc->init_cmds.done);
3028 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
3029 	    sizeof(evtack_req), 1);
3030 	if (retval) {
3031 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
3032 		goto out_unlock;
3033 	}
3034 	wait_for_completion_timeout(&mrioc->init_cmds.done,
3035 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
3036 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3037 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
3038 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
3039 			mpi3mr_soft_reset_handler(mrioc,
3040 			    MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
3041 		retval = -1;
3042 		goto out_unlock;
3043 	}
3044 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
3045 	    != MPI3_IOCSTATUS_SUCCESS) {
3046 		ioc_err(mrioc,
3047 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
3048 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
3049 		    mrioc->init_cmds.ioc_loginfo);
3050 		retval = -1;
3051 		goto out_unlock;
3052 	}
3053 
3054 out_unlock:
3055 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3056 	mutex_unlock(&mrioc->init_cmds.mutex);
3057 out:
3058 	return retval;
3059 }
3060 
3061 /**
3062  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
3063  * @mrioc: Adapter instance reference
3064  *
3065  * Allocate chain buffers and set a bitmap to indicate free
3066  * chain buffers. Chain buffers are used to pass the SGE
3067  * information along with MPI3 SCSI IO requests for host I/O.
3068  *
3069  * Return: 0 on success, non-zero on failure
3070  */
3071 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
3072 {
3073 	int retval = 0;
3074 	u32 sz, i;
3075 	u16 num_chains;
3076 
3077 	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
3078 
3079 	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
3080 	    | SHOST_DIX_TYPE1_PROTECTION
3081 	    | SHOST_DIX_TYPE2_PROTECTION
3082 	    | SHOST_DIX_TYPE3_PROTECTION))
3083 		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
3084 
3085 	mrioc->chain_buf_count = num_chains;
3086 	sz = sizeof(struct chain_element) * num_chains;
3087 	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
3088 	if (!mrioc->chain_sgl_list)
3089 		goto out_failed;
3090 
3091 	sz = MPI3MR_PAGE_SIZE_4K;
3092 	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
3093 	    &mrioc->pdev->dev, sz, 16, 0);
3094 	if (!mrioc->chain_buf_pool) {
3095 		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
3096 		goto out_failed;
3097 	}
3098 
3099 	for (i = 0; i < num_chains; i++) {
3100 		mrioc->chain_sgl_list[i].addr =
3101 		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
3102 		    &mrioc->chain_sgl_list[i].dma_addr);
3103 
3104 		if (!mrioc->chain_sgl_list[i].addr)
3105 			goto out_failed;
3106 	}
3107 	mrioc->chain_bitmap_sz = num_chains / 8;
3108 	if (num_chains % 8)
3109 		mrioc->chain_bitmap_sz++;
3110 	mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
3111 	if (!mrioc->chain_bitmap)
3112 		goto out_failed;
3113 	return retval;
3114 out_failed:
3115 	retval = -1;
3116 	return retval;
3117 }
3118 
3119 /**
3120  * mpi3mr_port_enable_complete - Mark port enable complete
3121  * @mrioc: Adapter instance reference
3122  * @drv_cmd: Internal command tracker
3123  *
3124  * Call back for asynchronous port enable request sets the
3125  * driver command to indicate port enable request is complete.
3126  *
3127  * Return: Nothing
3128  */
3129 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
3130 	struct mpi3mr_drv_cmd *drv_cmd)
3131 {
3132 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3133 	drv_cmd->callback = NULL;
3134 	mrioc->scan_failed = drv_cmd->ioc_status;
3135 	mrioc->scan_started = 0;
3136 }
3137 
3138 /**
3139  * mpi3mr_issue_port_enable - Issue Port Enable
3140  * @mrioc: Adapter instance reference
3141  * @async: Flag to wait for completion or not
3142  *
3143  * Issue Port Enable MPI request through admin queue and if the
3144  * async flag is not set wait for the completion of the port
3145  * enable or time out.
3146  *
3147  * Return: 0 on success, non-zero on failures.
3148  */
3149 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
3150 {
3151 	struct mpi3_port_enable_request pe_req;
3152 	int retval = 0;
3153 	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3154 
3155 	memset(&pe_req, 0, sizeof(pe_req));
3156 	mutex_lock(&mrioc->init_cmds.mutex);
3157 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
3158 		retval = -1;
3159 		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
3160 		mutex_unlock(&mrioc->init_cmds.mutex);
3161 		goto out;
3162 	}
3163 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
3164 	if (async) {
3165 		mrioc->init_cmds.is_waiting = 0;
3166 		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
3167 	} else {
3168 		mrioc->init_cmds.is_waiting = 1;
3169 		mrioc->init_cmds.callback = NULL;
3170 		init_completion(&mrioc->init_cmds.done);
3171 	}
3172 	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
3173 	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
3174 
3175 	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
3176 	if (retval) {
3177 		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
3178 		goto out_unlock;
3179 	}
3180 	if (async) {
3181 		mutex_unlock(&mrioc->init_cmds.mutex);
3182 		goto out;
3183 	}
3184 
3185 	wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
3186 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
3187 		ioc_err(mrioc, "port enable timed out\n");
3188 		retval = -1;
3189 		mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
3190 		goto out_unlock;
3191 	}
3192 	mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
3193 
3194 out_unlock:
3195 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3196 	mutex_unlock(&mrioc->init_cmds.mutex);
3197 out:
3198 	return retval;
3199 }
3200 
/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Capability to name mapper structure */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
};
3218 
3219 /**
3220  * mpi3mr_print_ioc_info - Display controller information
3221  * @mrioc: Adapter instance reference
3222  *
3223  * Display controller personalit, capability, supported
3224  * protocols etc.
3225  *
3226  * Return: Nothing
3227  */
3228 static void
3229 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3230 {
3231 	int i = 0, bytes_written = 0;
3232 	char personality[16];
3233 	char protocol[50] = {0};
3234 	char capabilities[100] = {0};
3235 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3236 
3237 	switch (mrioc->facts.personality) {
3238 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3239 		strncpy(personality, "Enhanced HBA", sizeof(personality));
3240 		break;
3241 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3242 		strncpy(personality, "RAID", sizeof(personality));
3243 		break;
3244 	default:
3245 		strncpy(personality, "Unknown", sizeof(personality));
3246 		break;
3247 	}
3248 
3249 	ioc_info(mrioc, "Running in %s Personality", personality);
3250 
3251 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3252 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3253 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3254 
3255 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3256 		if (mrioc->facts.protocol_flags &
3257 		    mpi3mr_protocols[i].protocol) {
3258 			bytes_written += scnprintf(protocol + bytes_written,
3259 				    sizeof(protocol) - bytes_written, "%s%s",
3260 				    bytes_written ? "," : "",
3261 				    mpi3mr_protocols[i].name);
3262 		}
3263 	}
3264 
3265 	bytes_written = 0;
3266 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3267 		if (mrioc->facts.protocol_flags &
3268 		    mpi3mr_capabilities[i].capability) {
3269 			bytes_written += scnprintf(capabilities + bytes_written,
3270 				    sizeof(capabilities) - bytes_written, "%s%s",
3271 				    bytes_written ? "," : "",
3272 				    mpi3mr_capabilities[i].name);
3273 		}
3274 	}
3275 
3276 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3277 		 protocol, capabilities);
3278 }
3279 
3280 /**
3281  * mpi3mr_cleanup_resources - Free PCI resources
3282  * @mrioc: Adapter instance reference
3283  *
3284  * Unmap PCI device memory and disable PCI device.
3285  *
3286  * Return: 0 on success and non-zero on failure.
3287  */
3288 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3289 {
3290 	struct pci_dev *pdev = mrioc->pdev;
3291 
3292 	mpi3mr_cleanup_isr(mrioc);
3293 
3294 	if (mrioc->sysif_regs) {
3295 		iounmap((void __iomem *)mrioc->sysif_regs);
3296 		mrioc->sysif_regs = NULL;
3297 	}
3298 
3299 	if (pci_is_enabled(pdev)) {
3300 		if (mrioc->bars)
3301 			pci_release_selected_regions(pdev, mrioc->bars);
3302 		pci_disable_device(pdev);
3303 	}
3304 }
3305 
3306 /**
3307  * mpi3mr_setup_resources - Enable PCI resources
3308  * @mrioc: Adapter instance reference
3309  *
3310  * Enable PCI device memory, MSI-x registers and set DMA mask.
3311  *
3312  * Return: 0 on success and non-zero on failure.
3313  */
3314 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3315 {
3316 	struct pci_dev *pdev = mrioc->pdev;
3317 	u32 memap_sz = 0;
3318 	int i, retval = 0, capb = 0;
3319 	u16 message_control;
3320 	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3321 	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
3322 	    (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3323 
3324 	if (pci_enable_device_mem(pdev)) {
3325 		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3326 		retval = -ENODEV;
3327 		goto out_failed;
3328 	}
3329 
3330 	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3331 	if (!capb) {
3332 		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3333 		retval = -ENODEV;
3334 		goto out_failed;
3335 	}
3336 	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3337 
3338 	if (pci_request_selected_regions(pdev, mrioc->bars,
3339 	    mrioc->driver_name)) {
3340 		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3341 		retval = -ENODEV;
3342 		goto out_failed;
3343 	}
3344 
3345 	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3346 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3347 			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3348 			memap_sz = pci_resource_len(pdev, i);
3349 			mrioc->sysif_regs =
3350 			    ioremap(mrioc->sysif_regs_phys, memap_sz);
3351 			break;
3352 		}
3353 	}
3354 
3355 	pci_set_master(pdev);
3356 
3357 	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3358 	if (retval) {
3359 		if (dma_mask != DMA_BIT_MASK(32)) {
3360 			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3361 			dma_mask = DMA_BIT_MASK(32);
3362 			retval = dma_set_mask_and_coherent(&pdev->dev,
3363 			    dma_mask);
3364 		}
3365 		if (retval) {
3366 			mrioc->dma_mask = 0;
3367 			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3368 			goto out_failed;
3369 		}
3370 	}
3371 	mrioc->dma_mask = dma_mask;
3372 
3373 	if (!mrioc->sysif_regs) {
3374 		ioc_err(mrioc,
3375 		    "Unable to map adapter memory or resource not found\n");
3376 		retval = -EINVAL;
3377 		goto out_failed;
3378 	}
3379 
3380 	pci_read_config_word(pdev, capb + 2, &message_control);
3381 	mrioc->msix_count = (message_control & 0x3FF) + 1;
3382 
3383 	pci_save_state(pdev);
3384 
3385 	pci_set_drvdata(pdev, mrioc->shost);
3386 
3387 	mpi3mr_ioc_disable_intr(mrioc);
3388 
3389 	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3390 	    (unsigned long long)mrioc->sysif_regs_phys,
3391 	    mrioc->sysif_regs, memap_sz);
3392 	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3393 	    mrioc->msix_count);
3394 	return retval;
3395 
3396 out_failed:
3397 	mpi3mr_cleanup_resources(mrioc);
3398 	return retval;
3399 }
3400 
3401 /**
3402  * mpi3mr_enable_events - Enable required events
3403  * @mrioc: Adapter instance reference
3404  *
3405  * This routine unmasks the events required by the driver by
3406  * sennding appropriate event mask bitmapt through an event
3407  * notification request.
3408  *
3409  * Return: 0 on success and non-zero on failure.
3410  */
3411 static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
3412 {
3413 	int retval = 0;
3414 	u32  i;
3415 
3416 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3417 		mrioc->event_masks[i] = -1;
3418 
3419 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3420 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3421 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3422 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3423 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3424 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3425 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3426 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3427 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3428 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3429 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3430 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3431 
3432 	retval = mpi3mr_issue_event_notification(mrioc);
3433 	if (retval)
3434 		ioc_err(mrioc, "failed to issue event notification %d\n",
3435 		    retval);
3436 	return retval;
3437 }
3438 
3439 /**
3440  * mpi3mr_init_ioc - Initialize the controller
3441  * @mrioc: Adapter instance reference
3442  * @init_type: Flag to indicate is the init_type
3443  *
3444  * This the controller initialization routine, executed either
3445  * after soft reset or from pci probe callback.
3446  * Setup the required resources, memory map the controller
3447  * registers, create admin and operational reply queue pairs,
3448  * allocate required memory for reply pool, sense buffer pool,
3449  * issue IOC init request to the firmware, unmask the events and
3450  * issue port enable to discover SAS/SATA/NVMe devies and RAID
3451  * volumes.
3452  *
3453  * Return: 0 on success and non-zero on failure.
3454  */
3455 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
3456 {
3457 	int retval = 0;
3458 	struct mpi3_ioc_facts_data facts_data;
3459 
3460 	mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
3461 	mrioc->change_count = 0;
3462 	if (init_type == MPI3MR_IT_INIT) {
3463 		mrioc->cpu_count = num_online_cpus();
3464 		retval = mpi3mr_setup_resources(mrioc);
3465 		if (retval) {
3466 			ioc_err(mrioc, "Failed to setup resources:error %d\n",
3467 			    retval);
3468 			goto out_nocleanup;
3469 		}
3470 	}
3471 
3472 	retval = mpi3mr_bring_ioc_ready(mrioc);
3473 	if (retval) {
3474 		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3475 		    retval);
3476 		goto out_failed;
3477 	}
3478 
3479 	if (init_type != MPI3MR_IT_RESET) {
3480 		retval = mpi3mr_setup_isr(mrioc, 1);
3481 		if (retval) {
3482 			ioc_err(mrioc, "Failed to setup ISR error %d\n",
3483 			    retval);
3484 			goto out_failed;
3485 		}
3486 	} else
3487 		mpi3mr_ioc_enable_intr(mrioc);
3488 
3489 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3490 	if (retval) {
3491 		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3492 		    retval);
3493 		goto out_failed;
3494 	}
3495 
3496 	mpi3mr_process_factsdata(mrioc, &facts_data);
3497 	if (init_type == MPI3MR_IT_INIT) {
3498 		retval = mpi3mr_check_reset_dma_mask(mrioc);
3499 		if (retval) {
3500 			ioc_err(mrioc, "Resetting dma mask failed %d\n",
3501 			    retval);
3502 			goto out_failed;
3503 		}
3504 	}
3505 
3506 	mpi3mr_print_ioc_info(mrioc);
3507 
3508 	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3509 	if (retval) {
3510 		ioc_err(mrioc,
3511 		    "%s :Failed to allocated reply sense buffers %d\n",
3512 		    __func__, retval);
3513 		goto out_failed;
3514 	}
3515 
3516 	if (init_type == MPI3MR_IT_INIT) {
3517 		retval = mpi3mr_alloc_chain_bufs(mrioc);
3518 		if (retval) {
3519 			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
3520 			    retval);
3521 			goto out_failed;
3522 		}
3523 	}
3524 
3525 	retval = mpi3mr_issue_iocinit(mrioc);
3526 	if (retval) {
3527 		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
3528 		    retval);
3529 		goto out_failed;
3530 	}
3531 
3532 	retval = mpi3mr_print_pkg_ver(mrioc);
3533 	if (retval) {
3534 		ioc_err(mrioc, "failed to get package version\n");
3535 		goto out_failed;
3536 	}
3537 
3538 	if (init_type != MPI3MR_IT_RESET) {
3539 		retval = mpi3mr_setup_isr(mrioc, 0);
3540 		if (retval) {
3541 			ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
3542 			    retval);
3543 			goto out_failed;
3544 		}
3545 	}
3546 
3547 	retval = mpi3mr_create_op_queues(mrioc);
3548 	if (retval) {
3549 		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
3550 		    retval);
3551 		goto out_failed;
3552 	}
3553 
3554 	if ((init_type != MPI3MR_IT_INIT) &&
3555 	    (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
3556 		retval = -1;
3557 		ioc_err(mrioc,
3558 		    "Cannot create minimum number of OpQueues expected:%d created:%d\n",
3559 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
3560 		goto out_failed;
3561 	}
3562 
3563 	retval = mpi3mr_enable_events(mrioc);
3564 	if (retval) {
3565 		ioc_err(mrioc, "failed to enable events %d\n",
3566 		    retval);
3567 		goto out_failed;
3568 	}
3569 
3570 	if (init_type != MPI3MR_IT_INIT) {
3571 		ioc_info(mrioc, "Issuing Port Enable\n");
3572 		retval = mpi3mr_issue_port_enable(mrioc, 0);
3573 		if (retval) {
3574 			ioc_err(mrioc, "Failed to issue port enable %d\n",
3575 			    retval);
3576 			goto out_failed;
3577 		}
3578 	}
3579 	return retval;
3580 
3581 out_failed:
3582 	if (init_type == MPI3MR_IT_INIT)
3583 		mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
3584 	else
3585 		mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
3586 out_nocleanup:
3587 	return retval;
3588 }
3589 
3590 /**
3591  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
3592  *					segments
3593  * @mrioc: Adapter instance reference
3594  * @qidx: Operational reply queue index
3595  *
3596  * Return: Nothing.
3597  */
3598 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3599 {
3600 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
3601 	struct segments *segments;
3602 	int i, size;
3603 
3604 	if (!op_reply_q->q_segments)
3605 		return;
3606 
3607 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
3608 	segments = op_reply_q->q_segments;
3609 	for (i = 0; i < op_reply_q->num_segments; i++)
3610 		memset(segments[i].segment, 0, size);
3611 }
3612 
3613 /**
3614  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
3615  *					segments
3616  * @mrioc: Adapter instance reference
3617  * @qidx: Operational request queue index
3618  *
3619  * Return: Nothing.
3620  */
3621 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3622 {
3623 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
3624 	struct segments *segments;
3625 	int i, size;
3626 
3627 	if (!op_req_q->q_segments)
3628 		return;
3629 
3630 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
3631 	segments = op_req_q->q_segments;
3632 	for (i = 0; i < op_req_q->num_segments; i++)
3633 		memset(segments[i].segment, 0, size);
3634 }
3635 
3636 /**
3637  * mpi3mr_memset_buffers - memset memory for a controller
3638  * @mrioc: Adapter instance reference
3639  *
3640  * clear all the memory allocated for a controller, typically
3641  * called post reset to reuse the memory allocated during the
3642  * controller init.
3643  *
3644  * Return: Nothing.
3645  */
3646 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
3647 {
3648 	u16 i;
3649 
3650 	memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
3651 	memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
3652 
3653 	memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
3654 	memset(mrioc->host_tm_cmds.reply, 0,
3655 	    sizeof(*mrioc->host_tm_cmds.reply));
3656 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
3657 		memset(mrioc->dev_rmhs_cmds[i].reply, 0,
3658 		    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
3659 	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
3660 	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
3661 
3662 	for (i = 0; i < mrioc->num_queues; i++) {
3663 		mrioc->op_reply_qinfo[i].qid = 0;
3664 		mrioc->op_reply_qinfo[i].ci = 0;
3665 		mrioc->op_reply_qinfo[i].num_replies = 0;
3666 		mrioc->op_reply_qinfo[i].ephase = 0;
3667 		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
3668 		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
3669 		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
3670 
3671 		mrioc->req_qinfo[i].ci = 0;
3672 		mrioc->req_qinfo[i].pi = 0;
3673 		mrioc->req_qinfo[i].num_requests = 0;
3674 		mrioc->req_qinfo[i].qid = 0;
3675 		mrioc->req_qinfo[i].reply_qid = 0;
3676 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
3677 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
3678 	}
3679 }
3680 
3681 /**
3682  * mpi3mr_free_mem - Free memory allocated for a controller
3683  * @mrioc: Adapter instance reference
3684  *
3685  * Free all the memory allocated for a controller.
3686  *
3687  * Return: Nothing.
3688  */
3689 static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
3690 {
3691 	u16 i;
3692 	struct mpi3mr_intr_info *intr_info;
3693 
3694 	if (mrioc->sense_buf_pool) {
3695 		if (mrioc->sense_buf)
3696 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
3697 			    mrioc->sense_buf_dma);
3698 		dma_pool_destroy(mrioc->sense_buf_pool);
3699 		mrioc->sense_buf = NULL;
3700 		mrioc->sense_buf_pool = NULL;
3701 	}
3702 	if (mrioc->sense_buf_q_pool) {
3703 		if (mrioc->sense_buf_q)
3704 			dma_pool_free(mrioc->sense_buf_q_pool,
3705 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
3706 		dma_pool_destroy(mrioc->sense_buf_q_pool);
3707 		mrioc->sense_buf_q = NULL;
3708 		mrioc->sense_buf_q_pool = NULL;
3709 	}
3710 
3711 	if (mrioc->reply_buf_pool) {
3712 		if (mrioc->reply_buf)
3713 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
3714 			    mrioc->reply_buf_dma);
3715 		dma_pool_destroy(mrioc->reply_buf_pool);
3716 		mrioc->reply_buf = NULL;
3717 		mrioc->reply_buf_pool = NULL;
3718 	}
3719 	if (mrioc->reply_free_q_pool) {
3720 		if (mrioc->reply_free_q)
3721 			dma_pool_free(mrioc->reply_free_q_pool,
3722 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
3723 		dma_pool_destroy(mrioc->reply_free_q_pool);
3724 		mrioc->reply_free_q = NULL;
3725 		mrioc->reply_free_q_pool = NULL;
3726 	}
3727 
3728 	for (i = 0; i < mrioc->num_op_req_q; i++)
3729 		mpi3mr_free_op_req_q_segments(mrioc, i);
3730 
3731 	for (i = 0; i < mrioc->num_op_reply_q; i++)
3732 		mpi3mr_free_op_reply_q_segments(mrioc, i);
3733 
3734 	for (i = 0; i < mrioc->intr_info_count; i++) {
3735 		intr_info = mrioc->intr_info + i;
3736 		intr_info->op_reply_q = NULL;
3737 	}
3738 
3739 	kfree(mrioc->req_qinfo);
3740 	mrioc->req_qinfo = NULL;
3741 	mrioc->num_op_req_q = 0;
3742 
3743 	kfree(mrioc->op_reply_qinfo);
3744 	mrioc->op_reply_qinfo = NULL;
3745 	mrioc->num_op_reply_q = 0;
3746 
3747 	kfree(mrioc->init_cmds.reply);
3748 	mrioc->init_cmds.reply = NULL;
3749 
3750 	kfree(mrioc->host_tm_cmds.reply);
3751 	mrioc->host_tm_cmds.reply = NULL;
3752 
3753 	kfree(mrioc->removepend_bitmap);
3754 	mrioc->removepend_bitmap = NULL;
3755 
3756 	kfree(mrioc->devrem_bitmap);
3757 	mrioc->devrem_bitmap = NULL;
3758 
3759 	kfree(mrioc->chain_bitmap);
3760 	mrioc->chain_bitmap = NULL;
3761 
3762 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3763 		kfree(mrioc->dev_rmhs_cmds[i].reply);
3764 		mrioc->dev_rmhs_cmds[i].reply = NULL;
3765 	}
3766 
3767 	if (mrioc->chain_buf_pool) {
3768 		for (i = 0; i < mrioc->chain_buf_count; i++) {
3769 			if (mrioc->chain_sgl_list[i].addr) {
3770 				dma_pool_free(mrioc->chain_buf_pool,
3771 				    mrioc->chain_sgl_list[i].addr,
3772 				    mrioc->chain_sgl_list[i].dma_addr);
3773 				mrioc->chain_sgl_list[i].addr = NULL;
3774 			}
3775 		}
3776 		dma_pool_destroy(mrioc->chain_buf_pool);
3777 		mrioc->chain_buf_pool = NULL;
3778 	}
3779 
3780 	kfree(mrioc->chain_sgl_list);
3781 	mrioc->chain_sgl_list = NULL;
3782 
3783 	if (mrioc->admin_reply_base) {
3784 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
3785 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
3786 		mrioc->admin_reply_base = NULL;
3787 	}
3788 	if (mrioc->admin_req_base) {
3789 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
3790 		    mrioc->admin_req_base, mrioc->admin_req_dma);
3791 		mrioc->admin_req_base = NULL;
3792 	}
3793 }
3794 
3795 /**
3796  * mpi3mr_issue_ioc_shutdown - shutdown controller
3797  * @mrioc: Adapter instance reference
3798  *
3799  * Send shutodwn notification to the controller and wait for the
3800  * shutdown_timeout for it to be completed.
3801  *
3802  * Return: Nothing.
3803  */
3804 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
3805 {
3806 	u32 ioc_config, ioc_status;
3807 	u8 retval = 1;
3808 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
3809 
3810 	ioc_info(mrioc, "Issuing shutdown Notification\n");
3811 	if (mrioc->unrecoverable) {
3812 		ioc_warn(mrioc,
3813 		    "IOC is unrecoverable shutdown is not issued\n");
3814 		return;
3815 	}
3816 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3817 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3818 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
3819 		ioc_info(mrioc, "shutdown already in progress\n");
3820 		return;
3821 	}
3822 
3823 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3824 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
3825 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
3826 
3827 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
3828 
3829 	if (mrioc->facts.shutdown_timeout)
3830 		timeout = mrioc->facts.shutdown_timeout * 10;
3831 
3832 	do {
3833 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3834 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3835 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
3836 			retval = 0;
3837 			break;
3838 		}
3839 		msleep(100);
3840 	} while (--timeout);
3841 
3842 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3843 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3844 
3845 	if (retval) {
3846 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3847 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
3848 			ioc_warn(mrioc,
3849 			    "shutdown still in progress after timeout\n");
3850 	}
3851 
3852 	ioc_info(mrioc,
3853 	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
3854 	    (!retval) ? "successful" : "failed", ioc_status,
3855 	    ioc_config);
3856 }
3857 
3858 /**
3859  * mpi3mr_cleanup_ioc - Cleanup controller
3860  * @mrioc: Adapter instance reference
3861  * @reason: Cleanup reason
3862  *
3863  * controller cleanup handler, Message unit reset or soft reset
3864  * and shutdown notification is issued to the controller and the
3865  * associated memory resources are freed.
3866  *
3867  * Return: Nothing.
3868  */
3869 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
3870 {
3871 	enum mpi3mr_iocstate ioc_state;
3872 
3873 	if (reason == MPI3MR_COMPLETE_CLEANUP)
3874 		mpi3mr_stop_watchdog(mrioc);
3875 
3876 	mpi3mr_ioc_disable_intr(mrioc);
3877 
3878 	ioc_state = mpi3mr_get_iocstate(mrioc);
3879 
3880 	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
3881 	    (ioc_state == MRIOC_STATE_READY)) {
3882 		if (mpi3mr_issue_and_process_mur(mrioc,
3883 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
3884 			mpi3mr_issue_reset(mrioc,
3885 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
3886 			    MPI3MR_RESET_FROM_MUR_FAILURE);
3887 
3888 		if (reason != MPI3MR_REINIT_FAILURE)
3889 			mpi3mr_issue_ioc_shutdown(mrioc);
3890 	}
3891 
3892 	if (reason == MPI3MR_COMPLETE_CLEANUP) {
3893 		mpi3mr_free_mem(mrioc);
3894 		mpi3mr_cleanup_resources(mrioc);
3895 	}
3896 }
3897 
3898 /**
3899  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
3900  * @mrioc: Adapter instance reference
3901  * @cmdptr: Internal command tracker
3902  *
3903  * Complete an internal driver commands with state indicating it
3904  * is completed due to reset.
3905  *
3906  * Return: Nothing.
3907  */
3908 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
3909 	struct mpi3mr_drv_cmd *cmdptr)
3910 {
3911 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
3912 		cmdptr->state |= MPI3MR_CMD_RESET;
3913 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
3914 		if (cmdptr->is_waiting) {
3915 			complete(&cmdptr->done);
3916 			cmdptr->is_waiting = 0;
3917 		} else if (cmdptr->callback)
3918 			cmdptr->callback(mrioc, cmdptr);
3919 	}
3920 }
3921 
3922 /**
3923  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
3924  * @mrioc: Adapter instance reference
3925  *
3926  * Flush all internal driver commands post reset
3927  *
3928  * Return: Nothing.
3929  */
3930 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
3931 {
3932 	struct mpi3mr_drv_cmd *cmdptr;
3933 	u8 i;
3934 
3935 	cmdptr = &mrioc->init_cmds;
3936 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
3937 	cmdptr = &mrioc->host_tm_cmds;
3938 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
3939 
3940 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3941 		cmdptr = &mrioc->dev_rmhs_cmds[i];
3942 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
3943 	}
3944 }
3945 
3946 /**
3947  * mpi3mr_diagfault_reset_handler - Diag fault reset handler
3948  * @mrioc: Adapter instance reference
3949  * @reset_reason: Reset reason code
3950  *
3951  * This is an handler for issuing diag fault reset from the
3952  * applications through IOCTL path to stop the execution of the
3953  * controller
3954  *
3955  * Return: 0 on success, non-zero on failure.
3956  */
3957 int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
3958 	u32 reset_reason)
3959 {
3960 	int retval = 0;
3961 
3962 	ioc_info(mrioc, "Entry: reason code: %s\n",
3963 	    mpi3mr_reset_rc_name(reset_reason));
3964 	mrioc->reset_in_progress = 1;
3965 
3966 	mpi3mr_ioc_disable_intr(mrioc);
3967 
3968 	retval = mpi3mr_issue_reset(mrioc,
3969 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
3970 
3971 	if (retval) {
3972 		ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
3973 		    reset_reason);
3974 		mpi3mr_ioc_enable_intr(mrioc);
3975 	}
3976 	ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
3977 	mrioc->reset_in_progress = 0;
3978 	return retval;
3979 }
3980 
3981 /**
3982  * mpi3mr_soft_reset_handler - Reset the controller
3983  * @mrioc: Adapter instance reference
3984  * @reset_reason: Reset reason code
3985  * @snapdump: Flag to generate snapdump in firmware or not
3986  *
3987  * This is an handler for recovering controller by issuing soft
3988  * reset are diag fault reset.  This is a blocking function and
3989  * when one reset is executed if any other resets they will be
3990  * blocked. All IOCTLs/IO will be blocked during the reset. If
3991  * controller reset is successful then the controller will be
3992  * reinitalized, otherwise the controller will be marked as not
3993  * recoverable
3994  *
3995  * In snapdump bit is set, the controller is issued with diag
3996  * fault reset so that the firmware can create a snap dump and
3997  * post that the firmware will result in F000 fault and the
3998  * driver will issue soft reset to recover from that.
3999  *
4000  * Return: 0 on success, non-zero on failure.
4001  */
4002 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
4003 	u32 reset_reason, u8 snapdump)
4004 {
4005 	int retval = 0, i;
4006 	unsigned long flags;
4007 	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
4008 
4009 	if (mrioc->fault_dbg) {
4010 		if (snapdump)
4011 			mpi3mr_set_diagsave(mrioc);
4012 		mpi3mr_kill_ioc(mrioc, reset_reason);
4013 	}
4014 
4015 	/*
4016 	 * Block new resets until the currently executing one is finished and
4017 	 * return the status of the existing reset for all blocked resets
4018 	 */
4019 	if (!mutex_trylock(&mrioc->reset_mutex)) {
4020 		ioc_info(mrioc, "Another reset in progress\n");
4021 		return -1;
4022 	}
4023 	mrioc->reset_in_progress = 1;
4024 
4025 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
4026 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
4027 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
4028 			mrioc->event_masks[i] = -1;
4029 
4030 		retval = mpi3mr_issue_event_notification(mrioc);
4031 
4032 		if (retval) {
4033 			ioc_err(mrioc,
4034 			    "Failed to turn off events prior to reset %d\n",
4035 			    retval);
4036 		}
4037 	}
4038 
4039 	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
4040 
4041 	mpi3mr_ioc_disable_intr(mrioc);
4042 
4043 	if (snapdump) {
4044 		mpi3mr_set_diagsave(mrioc);
4045 		retval = mpi3mr_issue_reset(mrioc,
4046 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4047 		if (!retval) {
4048 			do {
4049 				host_diagnostic =
4050 				    readl(&mrioc->sysif_regs->host_diagnostic);
4051 				if (!(host_diagnostic &
4052 				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
4053 					break;
4054 				msleep(100);
4055 			} while (--timeout);
4056 		}
4057 	}
4058 
4059 	retval = mpi3mr_issue_reset(mrioc,
4060 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
4061 	if (retval) {
4062 		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
4063 		goto out;
4064 	}
4065 
4066 	mpi3mr_flush_delayed_rmhs_list(mrioc);
4067 	mpi3mr_flush_drv_cmds(mrioc);
4068 	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
4069 	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
4070 	mpi3mr_cleanup_fwevt_list(mrioc);
4071 	mpi3mr_flush_host_io(mrioc);
4072 	mpi3mr_invalidate_devhandles(mrioc);
4073 	mpi3mr_memset_buffers(mrioc);
4074 	retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
4075 	if (retval) {
4076 		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
4077 		    mrioc->name, reset_reason);
4078 		goto out;
4079 	}
4080 	ssleep(10);
4081 
4082 out:
4083 	if (!retval) {
4084 		mrioc->reset_in_progress = 0;
4085 		scsi_unblock_requests(mrioc->shost);
4086 		mpi3mr_rfresh_tgtdevs(mrioc);
4087 		mrioc->ts_update_counter = 0;
4088 		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
4089 		if (mrioc->watchdog_work_q)
4090 			queue_delayed_work(mrioc->watchdog_work_q,
4091 			    &mrioc->watchdog_work,
4092 			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
4093 		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
4094 	} else {
4095 		mpi3mr_issue_reset(mrioc,
4096 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
4097 		mrioc->unrecoverable = 1;
4098 		mrioc->reset_in_progress = 0;
4099 		retval = -1;
4100 	}
4101 
4102 	mutex_unlock(&mrioc->reset_mutex);
4103 	ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
4104 	return retval;
4105 }
4106