xref: /openbmc/linux/drivers/scsi/mpi3mr/mpi3mr_fw.c (revision 63705da3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2021 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 #include <linux/io-64-nonatomic-lo-hi.h>
12 
#if defined(writeq) && defined(CONFIG_64BIT)
/* Native 64-bit MMIO write is available; use it directly. */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	writeq(b, addr);
}
#else
/*
 * No native writeq: emulate a 64-bit MMIO write with two 32-bit writes,
 * low dword first (lo-hi order, consistent with io-64-nonatomic-lo-hi.h).
 * NOTE(review): the two writes are not atomic with respect to the device.
 */
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
	__u64 data_out = b;

	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
}
#endif
27 
28 static inline bool
29 mpi3mr_check_req_qfull(struct op_req_qinfo *op_req_q)
30 {
31 	u16 pi, ci, max_entries;
32 	bool is_qfull = false;
33 
34 	pi = op_req_q->pi;
35 	ci = READ_ONCE(op_req_q->ci);
36 	max_entries = op_req_q->num_requests;
37 
38 	if ((ci == (pi + 1)) || ((!ci) && (pi == (max_entries - 1))))
39 		is_qfull = true;
40 
41 	return is_qfull;
42 }
43 
44 static void mpi3mr_sync_irqs(struct mpi3mr_ioc *mrioc)
45 {
46 	u16 i, max_vectors;
47 
48 	max_vectors = mrioc->intr_info_count;
49 
50 	for (i = 0; i < max_vectors; i++)
51 		synchronize_irq(pci_irq_vector(mrioc->pdev, i));
52 }
53 
54 void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc)
55 {
56 	mrioc->intr_enabled = 0;
57 	mpi3mr_sync_irqs(mrioc);
58 }
59 
60 void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc)
61 {
62 	mrioc->intr_enabled = 1;
63 }
64 
65 static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
66 {
67 	u16 i;
68 
69 	mpi3mr_ioc_disable_intr(mrioc);
70 
71 	if (!mrioc->intr_info)
72 		return;
73 
74 	for (i = 0; i < mrioc->intr_info_count; i++)
75 		free_irq(pci_irq_vector(mrioc->pdev, i),
76 		    (mrioc->intr_info + i));
77 
78 	kfree(mrioc->intr_info);
79 	mrioc->intr_info = NULL;
80 	mrioc->intr_info_count = 0;
81 	pci_free_irq_vectors(mrioc->pdev);
82 }
83 
84 void mpi3mr_add_sg_single(void *paddr, u8 flags, u32 length,
85 	dma_addr_t dma_addr)
86 {
87 	struct mpi3_sge_common *sgel = paddr;
88 
89 	sgel->flags = flags;
90 	sgel->length = cpu_to_le32(length);
91 	sgel->address = cpu_to_le64(dma_addr);
92 }
93 
94 void mpi3mr_build_zero_len_sge(void *paddr)
95 {
96 	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
97 
98 	mpi3mr_add_sg_single(paddr, sgl_flags, 0, -1);
99 }
100 
101 void *mpi3mr_get_reply_virt_addr(struct mpi3mr_ioc *mrioc,
102 	dma_addr_t phys_addr)
103 {
104 	if (!phys_addr)
105 		return NULL;
106 
107 	if ((phys_addr < mrioc->reply_buf_dma) ||
108 	    (phys_addr > mrioc->reply_buf_dma_max_address))
109 		return NULL;
110 
111 	return mrioc->reply_buf + (phys_addr - mrioc->reply_buf_dma);
112 }
113 
114 void *mpi3mr_get_sensebuf_virt_addr(struct mpi3mr_ioc *mrioc,
115 	dma_addr_t phys_addr)
116 {
117 	if (!phys_addr)
118 		return NULL;
119 
120 	return mrioc->sense_buf + (phys_addr - mrioc->sense_buf_dma);
121 }
122 
/*
 * mpi3mr_repost_reply_buf - return a consumed reply frame to the
 * reply free queue: store its DMA address at the current host index,
 * advance the index with wrap-around, and publish the new index to
 * the controller. Serialized by reply_free_queue_lock.
 */
static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
	u64 reply_dma)
{
	u32 old_idx = 0;

	spin_lock(&mrioc->reply_free_queue_lock);
	old_idx  =  mrioc->reply_free_queue_host_index;
	/* Advance the host index, wrapping at the queue size. */
	mrioc->reply_free_queue_host_index = (
	    (mrioc->reply_free_queue_host_index ==
	    (mrioc->reply_free_qsz - 1)) ? 0 :
	    (mrioc->reply_free_queue_host_index + 1));
	mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
	/* Tell the controller a new free entry is available. */
	writel(mrioc->reply_free_queue_host_index,
	    &mrioc->sysif_regs->reply_free_host_index);
	spin_unlock(&mrioc->reply_free_queue_lock);
}
139 
/*
 * mpi3mr_repost_sense_buf - return a consumed sense buffer to the
 * sense buffer free queue: store its DMA address at the current host
 * index, advance the index with wrap-around, and publish the new
 * index to the controller. Serialized by sbq_lock.
 */
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
	u64 sense_buf_dma)
{
	u32 old_idx = 0;

	spin_lock(&mrioc->sbq_lock);
	old_idx  =  mrioc->sbq_host_index;
	/* Advance the host index, wrapping at the queue size. */
	mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
	    (mrioc->sense_buf_q_sz - 1)) ? 0 :
	    (mrioc->sbq_host_index + 1));
	mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
	/* Tell the controller a new free sense buffer is available. */
	writel(mrioc->sbq_host_index,
	    &mrioc->sysif_regs->sense_buffer_free_host_index);
	spin_unlock(&mrioc->sbq_lock);
}
155 
/**
 * mpi3mr_print_event_data - Print event description
 * @mrioc: Adapter instance reference
 * @event_reply: event notification reply frame
 *
 * Log a human readable description of the received MPI3 event.
 * Events that carry extra payload (device add/info/status change,
 * SAS discovery, PCIe enumeration) are decoded and logged inline and
 * return early; simple events share the common print at the end.
 *
 * Return: Nothing.
 */
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	char *desc = NULL;
	u16 event;

	event = event_reply->event;

	switch (event) {
	case MPI3_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI3_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI3_EVENT_GPIO_INTERRUPT:
		desc = "GPIO Interrupt";
		break;
	case MPI3_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI3_EVENT_CABLE_MGMT:
		desc = "Cable Management";
		break;
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
		desc = "Energy Pack Change";
		break;
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Added: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		struct mpi3_device_page0 *event_data =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		ioc_info(mrioc, "Device Info Changed: dev=0x%04x Form=0x%x\n",
		    event_data->dev_handle, event_data->device_form);
		return;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		struct mpi3_event_data_device_status_change *event_data =
		    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
		ioc_info(mrioc, "Device status Change: dev=0x%04x RC=0x%x\n",
		    event_data->dev_handle, event_data->reason_code);
		return;
	}
	case MPI3_EVENT_SAS_DISCOVERY:
	{
		struct mpi3_event_data_sas_discovery *event_data =
		    (struct mpi3_event_data_sas_discovery *)event_reply->event_data;
		ioc_info(mrioc, "SAS Discovery: (%s) status (0x%08x)\n",
		    (event_data->reason_code == MPI3_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop",
		    le32_to_cpu(event_data->discovery_status));
		return;
	}
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI3_EVENT_SAS_NOTIFY_PRIMITIVE:
		desc = "SAS Notify Primitive";
		break;
	case MPI3_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI3_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
		desc = "Enclosure Device Status Change";
		break;
	case MPI3_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI3_EVENT_SAS_PHY_COUNTER:
		desc = "SAS PHY Counter";
		break;
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	case MPI3_EVENT_PCIE_ENUMERATION:
	{
		struct mpi3_event_data_pcie_enumeration *event_data =
		    (struct mpi3_event_data_pcie_enumeration *)event_reply->event_data;
		ioc_info(mrioc, "PCIE Enumeration: (%s)",
		    (event_data->reason_code ==
		    MPI3_EVENT_PCIE_ENUM_RC_STARTED) ? "start" : "stop");
		if (event_data->enumeration_status)
			ioc_info(mrioc, "enumeration_status(0x%08x)\n",
			    le32_to_cpu(event_data->enumeration_status));
		return;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
		desc = "Prepare For Reset";
		break;
	}

	/* Events without a mapped description are silently skipped. */
	if (!desc)
		return;

	ioc_info(mrioc, "%s\n", desc);
}
269 
270 static void mpi3mr_handle_events(struct mpi3mr_ioc *mrioc,
271 	struct mpi3_default_reply *def_reply)
272 {
273 	struct mpi3_event_notification_reply *event_reply =
274 	    (struct mpi3_event_notification_reply *)def_reply;
275 
276 	mrioc->change_count = le16_to_cpu(event_reply->ioc_change_count);
277 	mpi3mr_print_event_data(mrioc, event_reply);
278 	mpi3mr_os_handle_events(mrioc, event_reply);
279 }
280 
281 static struct mpi3mr_drv_cmd *
282 mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
283 	struct mpi3_default_reply *def_reply)
284 {
285 	u16 idx;
286 
287 	switch (host_tag) {
288 	case MPI3MR_HOSTTAG_INITCMDS:
289 		return &mrioc->init_cmds;
290 	case MPI3MR_HOSTTAG_BLK_TMS:
291 		return &mrioc->host_tm_cmds;
292 	case MPI3MR_HOSTTAG_INVALID:
293 		if (def_reply && def_reply->function ==
294 		    MPI3_FUNCTION_EVENT_NOTIFICATION)
295 			mpi3mr_handle_events(mrioc, def_reply);
296 		return NULL;
297 	default:
298 		break;
299 	}
300 	if (host_tag >= MPI3MR_HOSTTAG_DEVRMCMD_MIN &&
301 	    host_tag <= MPI3MR_HOSTTAG_DEVRMCMD_MAX) {
302 		idx = host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
303 		return &mrioc->dev_rmhs_cmds[idx];
304 	}
305 
306 	return NULL;
307 }
308 
/**
 * mpi3mr_process_admin_reply_desc - Process one admin reply descriptor
 * @mrioc: Adapter instance reference
 * @reply_desc: reply descriptor frame to process
 * @reply_dma: output; set to the reply frame's DMA address for address
 *	replies (caller reposts it), 0 otherwise
 *
 * Decode the descriptor by type (status / address / success), extract
 * the host tag, IOC status and log info, complete the matching internal
 * driver command and repost any consumed sense buffer.
 */
static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_address_reply_descriptor *addr_desc;
	struct mpi3_success_reply_descriptor *success_desc;
	struct mpi3_default_reply *def_reply = NULL;
	struct mpi3mr_drv_cmd *cmdptr = NULL;
	struct mpi3_scsi_io_reply *scsi_reply;
	u8 *sense_buf = NULL;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Inline status: tag and status come from the descriptor. */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame lives in host memory at the DMA address. */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		def_reply = mpi3mr_get_reply_virt_addr(mrioc, *reply_dma);
		if (!def_reply)
			goto out;
		host_tag = le16_to_cpu(def_reply->host_tag);
		ioc_status = le16_to_cpu(def_reply->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(def_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		/* SCSI IO replies may reference a sense buffer to repost. */
		if (def_reply->function == MPI3_FUNCTION_SCSI_IO) {
			scsi_reply = (struct mpi3_scsi_io_reply *)def_reply;
			sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
			    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		}
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptor: defaults already indicate success. */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}

	/* Complete the driver command tracked under this host tag, if any. */
	cmdptr = mpi3mr_get_drv_cmd(mrioc, host_tag, def_reply);
	if (cmdptr) {
		if (cmdptr->state & MPI3MR_CMD_PENDING) {
			cmdptr->state |= MPI3MR_CMD_COMPLETE;
			cmdptr->ioc_loginfo = ioc_loginfo;
			cmdptr->ioc_status = ioc_status;
			cmdptr->state &= ~MPI3MR_CMD_PENDING;
			if (def_reply) {
				cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
				memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
				    mrioc->facts.reply_sz);
			}
			/* Wake a synchronous waiter or run the async callback. */
			if (cmdptr->is_waiting) {
				complete(&cmdptr->done);
				cmdptr->is_waiting = 0;
			} else if (cmdptr->callback)
				cmdptr->callback(mrioc, cmdptr);
		}
	}
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
386 
/**
 * mpi3mr_process_admin_reply_q - Process the admin reply queue
 * @mrioc: Adapter instance reference
 *
 * Consume every admin reply descriptor whose phase bit matches the
 * expected phase, reposting consumed reply buffers, then publish the
 * new consumer index to the controller. The phase bit flips each time
 * the consumer index wraps, distinguishing new entries from stale ones.
 *
 * Return: Number of admin replies processed.
 */
static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
{
	u32 exp_phase = mrioc->admin_reply_ephase;
	u32 admin_reply_ci = mrioc->admin_reply_ci;
	u32 num_admin_replies = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;

	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
	    admin_reply_ci;

	/* Phase mismatch means the controller has posted nothing new. */
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
		return 0;

	do {
		mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
		mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_admin_replies++;
		/* Wrap the consumer index and flip the expected phase. */
		if (++admin_reply_ci == mrioc->num_admin_replies) {
			admin_reply_ci = 0;
			exp_phase ^= 1;
		}
		reply_desc =
		    (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
		    admin_reply_ci;
		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
	} while (1);

	/* Publish the consumer index so the controller can reuse entries. */
	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
	mrioc->admin_reply_ci = admin_reply_ci;
	mrioc->admin_reply_ephase = exp_phase;

	return num_admin_replies;
}
426 
427 /**
428  * mpi3mr_get_reply_desc - get reply descriptor frame corresponding to
429  *	queue's consumer index from operational reply descriptor queue.
430  * @op_reply_q: op_reply_qinfo object
431  * @reply_ci: operational reply descriptor's queue consumer index
432  *
433  * Returns reply descriptor frame address
434  */
435 static inline struct mpi3_default_reply_descriptor *
436 mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
437 {
438 	void *segment_base_addr;
439 	struct segments *segments = op_reply_q->q_segments;
440 	struct mpi3_default_reply_descriptor *reply_desc = NULL;
441 
442 	segment_base_addr =
443 	    segments[reply_ci / op_reply_q->segment_qd].segment;
444 	reply_desc = (struct mpi3_default_reply_descriptor *)segment_base_addr +
445 	    (reply_ci % op_reply_q->segment_qd);
446 	return reply_desc;
447 }
448 
/**
 * mpi3mr_process_op_reply_q - Process an operational reply queue
 * @mrioc: Adapter instance reference
 * @intr_info: Interrupt info of the vector servicing this queue
 *
 * Consume reply descriptors whose phase bit matches the expected
 * phase, reposting consumed reply buffers and updating the matching
 * request queue's consumer index. The in_use atomic guarantees only
 * one context drains the queue at a time. Processing bails out after
 * max_host_ios completions to avoid a CPU lockup; the remainder is
 * handled by the threaded ISR (irq poll mode).
 *
 * Return: Number of operational replies processed.
 */
static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_intr_info *intr_info)
{
	struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
	struct op_req_qinfo *op_req_q;
	u32 exp_phase;
	u32 reply_ci;
	u32 num_op_reply = 0;
	u64 reply_dma = 0;
	struct mpi3_default_reply_descriptor *reply_desc;
	u16 req_q_idx = 0, reply_qidx;

	reply_qidx = op_reply_q->qid - 1;

	/* Another context already owns this queue; let it drain. */
	if (!atomic_add_unless(&op_reply_q->in_use, 1, 1))
		return 0;

	exp_phase = op_reply_q->ephase;
	reply_ci = op_reply_q->ci;

	reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);
	if ((le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
		atomic_dec(&op_reply_q->in_use);
		return 0;
	}

	do {
		/* Credit the request queue this reply belongs to. */
		req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
		op_req_q = &mrioc->req_qinfo[req_q_idx];

		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
		    reply_qidx);
		atomic_dec(&op_reply_q->pend_ios);
		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
		num_op_reply++;

		/* Wrap the consumer index and flip the expected phase. */
		if (++reply_ci == op_reply_q->num_replies) {
			reply_ci = 0;
			exp_phase ^= 1;
		}

		reply_desc = mpi3mr_get_reply_desc(op_reply_q, reply_ci);

		if ((le16_to_cpu(reply_desc->reply_flags) &
		    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
			break;
		/*
		 * Exit completion loop to avoid CPU lockup
		 * Ensure remaining completion happens from threaded ISR.
		 */
		if (num_op_reply > mrioc->max_host_ios) {
			intr_info->op_reply_q->enable_irq_poll = true;
			break;
		}

	} while (1);

	/* Publish the consumer index so the controller can reuse entries. */
	writel(reply_ci,
	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].consumer_index);
	op_reply_q->ci = reply_ci;
	op_reply_q->ephase = exp_phase;

	atomic_dec(&op_reply_q->in_use);
	return num_op_reply;
}
517 
518 static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
519 {
520 	struct mpi3mr_intr_info *intr_info = privdata;
521 	struct mpi3mr_ioc *mrioc;
522 	u16 midx;
523 	u32 num_admin_replies = 0, num_op_reply = 0;
524 
525 	if (!intr_info)
526 		return IRQ_NONE;
527 
528 	mrioc = intr_info->mrioc;
529 
530 	if (!mrioc->intr_enabled)
531 		return IRQ_NONE;
532 
533 	midx = intr_info->msix_index;
534 
535 	if (!midx)
536 		num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
537 	if (intr_info->op_reply_q)
538 		num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
539 
540 	if (num_admin_replies || num_op_reply)
541 		return IRQ_HANDLED;
542 	else
543 		return IRQ_NONE;
544 }
545 
/*
 * mpi3mr_isr - top-level hard IRQ handler. Runs the primary handler
 * and, when irq-poll mode is enabled and I/Os are still pending,
 * masks the vector and wakes the threaded handler (mpi3mr_isr_poll)
 * to continue completion processing.
 */
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	int ret;

	if (!intr_info)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;
	/* Call primary ISR routine */
	ret = mpi3mr_isr_primary(irq, privdata);

	/*
	 * If more IOs are expected, schedule IRQ polling thread.
	 * Otherwise exit from ISR.
	 */
	if (!intr_info->op_reply_q)
		return ret;

	if (!intr_info->op_reply_q->enable_irq_poll ||
	    !atomic_read(&intr_info->op_reply_q->pend_ios))
		return ret;

	/* Mask this vector; mpi3mr_isr_poll re-enables it when done. */
	disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_WAKE_THREAD;
}
576 
/**
 * mpi3mr_isr_poll - Reply queue polling routine
 * @irq: IRQ
 * @privdata: Interrupt info
 *
 * poll for pending I/O completions in a loop until pending I/Os
 * present or controller queue depth I/Os are processed.
 *
 * Return: IRQ_NONE or IRQ_HANDLED
 */
static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
{
	struct mpi3mr_intr_info *intr_info = privdata;
	struct mpi3mr_ioc *mrioc;
	u16 midx;
	u32 num_op_reply = 0;

	if (!intr_info || !intr_info->op_reply_q)
		return IRQ_NONE;

	mrioc = intr_info->mrioc;
	midx = intr_info->msix_index;

	/* Poll for pending IOs completions */
	do {
		if (!mrioc->intr_enabled)
			break;

		if (!midx)
			mpi3mr_process_admin_reply_q(mrioc);
		if (intr_info->op_reply_q)
			num_op_reply +=
			    mpi3mr_process_op_reply_q(mrioc, intr_info);

		/* Back off briefly between polling passes. */
		usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);

	} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
	    (num_op_reply < mrioc->max_host_ios));

	/* Leave poll mode and unmask the vector masked by mpi3mr_isr(). */
	intr_info->op_reply_q->enable_irq_poll = false;
	enable_irq(pci_irq_vector(mrioc->pdev, midx));

	return IRQ_HANDLED;
}
621 
622 /**
623  * mpi3mr_request_irq - Request IRQ and register ISR
624  * @mrioc: Adapter instance reference
625  * @index: IRQ vector index
626  *
627  * Request threaded ISR with primary ISR and secondary
628  *
629  * Return: 0 on success and non zero on failures.
630  */
631 static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
632 {
633 	struct pci_dev *pdev = mrioc->pdev;
634 	struct mpi3mr_intr_info *intr_info = mrioc->intr_info + index;
635 	int retval = 0;
636 
637 	intr_info->mrioc = mrioc;
638 	intr_info->msix_index = index;
639 	intr_info->op_reply_q = NULL;
640 
641 	snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
642 	    mrioc->driver_name, mrioc->id, index);
643 
644 	retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
645 	    mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
646 	if (retval) {
647 		ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
648 		    intr_info->name, pci_irq_vector(pdev, index));
649 		return retval;
650 	}
651 
652 	return retval;
653 }
654 
655 /**
656  * mpi3mr_setup_isr - Setup ISR for the controller
657  * @mrioc: Adapter instance reference
658  * @setup_one: Request one IRQ or more
659  *
660  * Allocate IRQ vectors and call mpi3mr_request_irq to setup ISR
661  *
662  * Return: 0 on success and non zero on failures.
663  */
664 static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
665 {
666 	unsigned int irq_flags = PCI_IRQ_MSIX;
667 	int max_vectors;
668 	int retval;
669 	int i;
670 	struct irq_affinity desc = { .pre_vectors =  1};
671 
672 	mpi3mr_cleanup_isr(mrioc);
673 
674 	if (setup_one || reset_devices)
675 		max_vectors = 1;
676 	else {
677 		max_vectors =
678 		    min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
679 
680 		ioc_info(mrioc,
681 		    "MSI-X vectors supported: %d, no of cores: %d,",
682 		    mrioc->msix_count, mrioc->cpu_count);
683 		ioc_info(mrioc,
684 		    "MSI-x vectors requested: %d\n", max_vectors);
685 	}
686 
687 	irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
688 
689 	mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
690 	retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
691 				1, max_vectors, irq_flags, &desc);
692 	if (retval < 0) {
693 		ioc_err(mrioc, "Cannot alloc irq vectors\n");
694 		goto out_failed;
695 	}
696 	if (retval != max_vectors) {
697 		ioc_info(mrioc,
698 		    "allocated vectors (%d) are less than configured (%d)\n",
699 		    retval, max_vectors);
700 		/*
701 		 * If only one MSI-x is allocated, then MSI-x 0 will be shared
702 		 * between Admin queue and operational queue
703 		 */
704 		if (retval == 1)
705 			mrioc->op_reply_q_offset = 0;
706 
707 		max_vectors = retval;
708 	}
709 	mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
710 	    GFP_KERNEL);
711 	if (!mrioc->intr_info) {
712 		retval = -ENOMEM;
713 		pci_free_irq_vectors(mrioc->pdev);
714 		goto out_failed;
715 	}
716 	for (i = 0; i < max_vectors; i++) {
717 		retval = mpi3mr_request_irq(mrioc, i);
718 		if (retval) {
719 			mrioc->intr_info_count = i;
720 			goto out_failed;
721 		}
722 	}
723 	mrioc->intr_info_count = max_vectors;
724 	mpi3mr_ioc_enable_intr(mrioc);
725 	return 0;
726 
727 out_failed:
728 	mpi3mr_cleanup_isr(mrioc);
729 
730 	return retval;
731 }
732 
/* IOC state to name mapper structure */
static const struct {
	enum mpi3mr_iocstate value;
	char *name;
} mrioc_states[] = {
	{ MRIOC_STATE_READY, "ready" },
	{ MRIOC_STATE_FAULT, "fault" },
	{ MRIOC_STATE_RESET, "reset" },
	{ MRIOC_STATE_BECOMING_READY, "becoming ready" },
	{ MRIOC_STATE_RESET_REQUESTED, "reset requested" },
	{ MRIOC_STATE_UNRECOVERABLE, "unrecoverable error" },
};
744 
745 static const char *mpi3mr_iocstate_name(enum mpi3mr_iocstate mrioc_state)
746 {
747 	int i;
748 	char *name = NULL;
749 
750 	for (i = 0; i < ARRAY_SIZE(mrioc_states); i++) {
751 		if (mrioc_states[i].value == mrioc_state) {
752 			name = mrioc_states[i].name;
753 			break;
754 		}
755 	}
756 	return name;
757 }
758 
759 /* Reset reason to name mapper structure*/
760 static const struct {
761 	enum mpi3mr_reset_reason value;
762 	char *name;
763 } mpi3mr_reset_reason_codes[] = {
764 	{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
765 	{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
766 	{ MPI3MR_RESET_FROM_IOCTL, "application invocation" },
767 	{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
768 	{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
769 	{ MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
770 	{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
771 	{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
772 	{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
773 	{ MPI3MR_RESET_FROM_PE_TIMEOUT, "port enable timeout" },
774 	{ MPI3MR_RESET_FROM_TSU_TIMEOUT, "time stamp update timeout" },
775 	{ MPI3MR_RESET_FROM_DELREQQ_TIMEOUT, "delete request queue timeout" },
776 	{ MPI3MR_RESET_FROM_DELREPQ_TIMEOUT, "delete reply queue timeout" },
777 	{
778 		MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT,
779 		"create request queue timeout"
780 	},
781 	{
782 		MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT,
783 		"create reply queue timeout"
784 	},
785 	{ MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT, "IOC facts timeout" },
786 	{ MPI3MR_RESET_FROM_IOCINIT_TIMEOUT, "IOC init timeout" },
787 	{ MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT, "event notify timeout" },
788 	{ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, "event acknowledgment timeout" },
789 	{
790 		MPI3MR_RESET_FROM_CIACTVRST_TIMER,
791 		"component image activation timeout"
792 	},
793 	{
794 		MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT,
795 		"get package version timeout"
796 	},
797 	{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
798 	{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
799 };
800 
801 /**
802  * mpi3mr_reset_rc_name - get reset reason code name
803  * @reason_code: reset reason code value
804  *
805  * Map reset reason to an NULL terminated ASCII string
806  *
807  * Return: name corresponding to reset reason value or NULL.
808  */
809 static const char *mpi3mr_reset_rc_name(enum mpi3mr_reset_reason reason_code)
810 {
811 	int i;
812 	char *name = NULL;
813 
814 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_reason_codes); i++) {
815 		if (mpi3mr_reset_reason_codes[i].value == reason_code) {
816 			name = mpi3mr_reset_reason_codes[i].name;
817 			break;
818 		}
819 	}
820 	return name;
821 }
822 
823 /* Reset type to name mapper structure*/
824 static const struct {
825 	u16 reset_type;
826 	char *name;
827 } mpi3mr_reset_types[] = {
828 	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, "soft" },
829 	{ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, "diag fault" },
830 };
831 
832 /**
833  * mpi3mr_reset_type_name - get reset type name
834  * @reset_type: reset type value
835  *
836  * Map reset type to an NULL terminated ASCII string
837  *
838  * Return: name corresponding to reset type value or NULL.
839  */
840 static const char *mpi3mr_reset_type_name(u16 reset_type)
841 {
842 	int i;
843 	char *name = NULL;
844 
845 	for (i = 0; i < ARRAY_SIZE(mpi3mr_reset_types); i++) {
846 		if (mpi3mr_reset_types[i].reset_type == reset_type) {
847 			name = mpi3mr_reset_types[i].name;
848 			break;
849 		}
850 	}
851 	return name;
852 }
853 
854 /**
855  * mpi3mr_print_fault_info - Display fault information
856  * @mrioc: Adapter instance reference
857  *
858  * Display the controller fault information if there is a
859  * controller fault.
860  *
861  * Return: Nothing.
862  */
863 static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
864 {
865 	u32 ioc_status, code, code1, code2, code3;
866 
867 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
868 
869 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
870 		code = readl(&mrioc->sysif_regs->fault);
871 		code1 = readl(&mrioc->sysif_regs->fault_info[0]);
872 		code2 = readl(&mrioc->sysif_regs->fault_info[1]);
873 		code3 = readl(&mrioc->sysif_regs->fault_info[2]);
874 
875 		ioc_info(mrioc,
876 		    "fault code(0x%08X): Additional code: (0x%08X:0x%08X:0x%08X)\n",
877 		    code, code1, code2, code3);
878 	}
879 }
880 
881 /**
882  * mpi3mr_get_iocstate - Get IOC State
883  * @mrioc: Adapter instance reference
884  *
885  * Return a proper IOC state enum based on the IOC status and
886  * IOC configuration and unrcoverable state of the controller.
887  *
888  * Return: Current IOC state.
889  */
890 enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc)
891 {
892 	u32 ioc_status, ioc_config;
893 	u8 ready, enabled;
894 
895 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
896 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
897 
898 	if (mrioc->unrecoverable)
899 		return MRIOC_STATE_UNRECOVERABLE;
900 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)
901 		return MRIOC_STATE_FAULT;
902 
903 	ready = (ioc_status & MPI3_SYSIF_IOC_STATUS_READY);
904 	enabled = (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC);
905 
906 	if (ready && enabled)
907 		return MRIOC_STATE_READY;
908 	if ((!ready) && (!enabled))
909 		return MRIOC_STATE_RESET;
910 	if ((!ready) && (enabled))
911 		return MRIOC_STATE_BECOMING_READY;
912 
913 	return MRIOC_STATE_RESET_REQUESTED;
914 }
915 
916 /**
917  * mpi3mr_clear_reset_history - clear reset history
918  * @mrioc: Adapter instance reference
919  *
920  * Write the reset history bit in IOC status to clear the bit,
921  * if it is already set.
922  *
923  * Return: Nothing.
924  */
925 static inline void mpi3mr_clear_reset_history(struct mpi3mr_ioc *mrioc)
926 {
927 	u32 ioc_status;
928 
929 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
930 	if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
931 		writel(ioc_status, &mrioc->sysif_regs->ioc_status);
932 }
933 
/**
 * mpi3mr_issue_and_process_mur - Message unit Reset handler
 * @mrioc: Adapter instance reference
 * @reset_reason: Reset reason code
 *
 * Issue Message unit Reset to the controller and wait for it to
 * be complete.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
	u32 reset_reason)
{
	u32 ioc_config, timeout, ioc_status;
	int retval = -1;

	ioc_info(mrioc, "Issuing Message unit Reset(MUR)\n");
	if (mrioc->unrecoverable) {
		ioc_info(mrioc, "IOC is unrecoverable MUR not issued\n");
		return retval;
	}
	mpi3mr_clear_reset_history(mrioc);
	/* Record the reason in scratchpad 0 for post-mortem analysis. */
	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
	/* Clearing ENABLE_IOC triggers the message unit reset. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms, up to ready_timeout seconds in total. */
	timeout = mrioc->ready_timeout * 10;
	do {
		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
			mpi3mr_clear_reset_history(mrioc);
			ioc_config =
			    readl(&mrioc->sysif_regs->ioc_configuration);
			/*
			 * Success: reset history seen and the controller is
			 * neither ready, faulted, nor enabled.
			 */
			if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
			      (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
			    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
				retval = 0;
				break;
			}
		}
		msleep(100);
	} while (--timeout);

	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);

	ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
	    (!retval) ? "successful" : "failed", ioc_status, ioc_config);
	return retval;
}
985 
/**
 * mpi3mr_bring_ioc_ready - Bring controller to ready state
 * @mrioc: Adapter instance reference
 *
 * Set Enable IOC bit in IOC configuration register and wait for
 * the controller to become ready.
 *
 * Return: 0 on success, -1 on failure.
 */
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
	u32 ioc_config, timeout;
	enum mpi3mr_iocstate current_state;

	/* Request controller bring-up by setting ENABLE_IOC. */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);

	/* Poll every 100ms, up to ready_timeout seconds in total. */
	timeout = mrioc->ready_timeout * 10;
	do {
		current_state = mpi3mr_get_iocstate(mrioc);
		if (current_state == MRIOC_STATE_READY)
			return 0;
		msleep(100);
	} while (--timeout);

	return -1;
}
1014 
1015 /**
1016  * mpi3mr_soft_reset_success - Check softreset is success or not
1017  * @ioc_status: IOC status register value
1018  * @ioc_config: IOC config register value
1019  *
1020  * Check whether the soft reset is successful or not based on
1021  * IOC status and IOC config register values.
1022  *
1023  * Return: True when the soft reset is success, false otherwise.
1024  */
1025 static inline bool
1026 mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
1027 {
1028 	if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
1029 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
1030 	    (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
1031 		return true;
1032 	return false;
1033 }
1034 
1035 /**
1036  * mpi3mr_diagfault_success - Check diag fault is success or not
1037  * @mrioc: Adapter reference
1038  * @ioc_status: IOC status register value
1039  *
1040  * Check whether the controller hit diag reset fault code.
1041  *
1042  * Return: True when there is diag fault, false otherwise.
1043  */
1044 static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
1045 	u32 ioc_status)
1046 {
1047 	u32 fault;
1048 
1049 	if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
1050 		return false;
1051 	fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
1052 	if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
1053 		return true;
1054 	return false;
1055 }
1056 
1057 /**
1058  * mpi3mr_set_diagsave - Set diag save bit for snapdump
1059  * @mrioc: Adapter reference
1060  *
1061  * Set diag save bit in IOC configuration register to enable
1062  * snapdump.
1063  *
1064  * Return: Nothing.
1065  */
1066 static inline void mpi3mr_set_diagsave(struct mpi3mr_ioc *mrioc)
1067 {
1068 	u32 ioc_config;
1069 
1070 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1071 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE;
1072 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
1073 }
1074 
1075 /**
1076  * mpi3mr_issue_reset - Issue reset to the controller
1077  * @mrioc: Adapter reference
1078  * @reset_type: Reset type
1079  * @reset_reason: Reset reason code
1080  *
1081  * Unlock the host diagnostic registers and write the specific
1082  * reset type to that, wait for reset acknowledgment from the
1083  * controller, if the reset is not successful retry for the
1084  * predefined number of times.
1085  *
1086  * Return: 0 on success, non-zero on failure.
1087  */
1088 static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
1089 	u32 reset_reason)
1090 {
1091 	int retval = -1;
1092 	u8 unlock_retry_count, reset_retry_count = 0;
1093 	u32 host_diagnostic, timeout, ioc_status, ioc_config;
1094 
1095 	pci_cfg_access_lock(mrioc->pdev);
1096 	if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
1097 	    (reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
1098 		goto out;
1099 	if (mrioc->unrecoverable)
1100 		goto out;
1101 retry_reset:
1102 	unlock_retry_count = 0;
1103 	mpi3mr_clear_reset_history(mrioc);
1104 	do {
1105 		ioc_info(mrioc,
1106 		    "Write magic sequence to unlock host diag register (retry=%d)\n",
1107 		    ++unlock_retry_count);
1108 		if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
1109 			writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1110 			mrioc->unrecoverable = 1;
1111 			goto out;
1112 		}
1113 
1114 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
1115 		    &mrioc->sysif_regs->write_sequence);
1116 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST,
1117 		    &mrioc->sysif_regs->write_sequence);
1118 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1119 		    &mrioc->sysif_regs->write_sequence);
1120 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_3RD,
1121 		    &mrioc->sysif_regs->write_sequence);
1122 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_4TH,
1123 		    &mrioc->sysif_regs->write_sequence);
1124 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_5TH,
1125 		    &mrioc->sysif_regs->write_sequence);
1126 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH,
1127 		    &mrioc->sysif_regs->write_sequence);
1128 		usleep_range(1000, 1100);
1129 		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
1130 		ioc_info(mrioc,
1131 		    "wrote magic sequence: retry_count(%d), host_diagnostic(0x%08x)\n",
1132 		    unlock_retry_count, host_diagnostic);
1133 	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
1134 
1135 	writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
1136 	ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
1137 	    mpi3mr_reset_type_name(reset_type),
1138 	    mpi3mr_reset_rc_name(reset_reason), reset_reason);
1139 	writel(host_diagnostic | reset_type,
1140 	    &mrioc->sysif_regs->host_diagnostic);
1141 	timeout = mrioc->ready_timeout * 10;
1142 	if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
1143 		do {
1144 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1145 			if (ioc_status &
1146 			    MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
1147 				mpi3mr_clear_reset_history(mrioc);
1148 				ioc_config =
1149 				    readl(&mrioc->sysif_regs->ioc_configuration);
1150 				if (mpi3mr_soft_reset_success(ioc_status,
1151 				    ioc_config)) {
1152 					retval = 0;
1153 					break;
1154 				}
1155 			}
1156 			msleep(100);
1157 		} while (--timeout);
1158 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1159 		    &mrioc->sysif_regs->write_sequence);
1160 	} else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
1161 		do {
1162 			ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1163 			if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
1164 				retval = 0;
1165 				break;
1166 			}
1167 			msleep(100);
1168 		} while (--timeout);
1169 		mpi3mr_clear_reset_history(mrioc);
1170 		writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
1171 		    &mrioc->sysif_regs->write_sequence);
1172 	}
1173 	if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
1174 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1175 		ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1176 		ioc_info(mrioc,
1177 		    "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
1178 		    reset_retry_count, ioc_status, ioc_config);
1179 		goto retry_reset;
1180 	}
1181 
1182 out:
1183 	pci_cfg_access_unlock(mrioc->pdev);
1184 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
1185 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
1186 
1187 	ioc_info(mrioc,
1188 	    "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
1189 	    (!retval) ? "successful" : "failed", ioc_status,
1190 	    ioc_config);
1191 	return retval;
1192 }
1193 
1194 /**
1195  * mpi3mr_admin_request_post - Post request to admin queue
1196  * @mrioc: Adapter reference
1197  * @admin_req: MPI3 request
1198  * @admin_req_sz: Request size
1199  * @ignore_reset: Ignore reset in process
1200  *
1201  * Post the MPI3 request into admin request queue and
1202  * inform the controller, if the queue is full return
1203  * appropriate error.
1204  *
1205  * Return: 0 on success, non-zero on failure.
1206  */
1207 int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
1208 	u16 admin_req_sz, u8 ignore_reset)
1209 {
1210 	u16 areq_pi = 0, areq_ci = 0, max_entries = 0;
1211 	int retval = 0;
1212 	unsigned long flags;
1213 	u8 *areq_entry;
1214 
1215 	if (mrioc->unrecoverable) {
1216 		ioc_err(mrioc, "%s : Unrecoverable controller\n", __func__);
1217 		return -EFAULT;
1218 	}
1219 
1220 	spin_lock_irqsave(&mrioc->admin_req_lock, flags);
1221 	areq_pi = mrioc->admin_req_pi;
1222 	areq_ci = mrioc->admin_req_ci;
1223 	max_entries = mrioc->num_admin_req;
1224 	if ((areq_ci == (areq_pi + 1)) || ((!areq_ci) &&
1225 	    (areq_pi == (max_entries - 1)))) {
1226 		ioc_err(mrioc, "AdminReqQ full condition detected\n");
1227 		retval = -EAGAIN;
1228 		goto out;
1229 	}
1230 	if (!ignore_reset && mrioc->reset_in_progress) {
1231 		ioc_err(mrioc, "AdminReqQ submit reset in progress\n");
1232 		retval = -EAGAIN;
1233 		goto out;
1234 	}
1235 	areq_entry = (u8 *)mrioc->admin_req_base +
1236 	    (areq_pi * MPI3MR_ADMIN_REQ_FRAME_SZ);
1237 	memset(areq_entry, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
1238 	memcpy(areq_entry, (u8 *)admin_req, admin_req_sz);
1239 
1240 	if (++areq_pi == max_entries)
1241 		areq_pi = 0;
1242 	mrioc->admin_req_pi = areq_pi;
1243 
1244 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
1245 
1246 out:
1247 	spin_unlock_irqrestore(&mrioc->admin_req_lock, flags);
1248 
1249 	return retval;
1250 }
1251 
1252 /**
1253  * mpi3mr_free_op_req_q_segments - free request memory segments
1254  * @mrioc: Adapter instance reference
1255  * @q_idx: operational request queue index
1256  *
1257  * Free memory segments allocated for operational request queue
1258  *
1259  * Return: Nothing.
1260  */
1261 static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1262 {
1263 	u16 j;
1264 	int size;
1265 	struct segments *segments;
1266 
1267 	segments = mrioc->req_qinfo[q_idx].q_segments;
1268 	if (!segments)
1269 		return;
1270 
1271 	if (mrioc->enable_segqueue) {
1272 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1273 		if (mrioc->req_qinfo[q_idx].q_segment_list) {
1274 			dma_free_coherent(&mrioc->pdev->dev,
1275 			    MPI3MR_MAX_SEG_LIST_SIZE,
1276 			    mrioc->req_qinfo[q_idx].q_segment_list,
1277 			    mrioc->req_qinfo[q_idx].q_segment_list_dma);
1278 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1279 		}
1280 	} else
1281 		size = mrioc->req_qinfo[q_idx].num_requests *
1282 		    mrioc->facts.op_req_sz;
1283 
1284 	for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
1285 		if (!segments[j].segment)
1286 			continue;
1287 		dma_free_coherent(&mrioc->pdev->dev,
1288 		    size, segments[j].segment, segments[j].segment_dma);
1289 		segments[j].segment = NULL;
1290 	}
1291 	kfree(mrioc->req_qinfo[q_idx].q_segments);
1292 	mrioc->req_qinfo[q_idx].q_segments = NULL;
1293 	mrioc->req_qinfo[q_idx].qid = 0;
1294 }
1295 
1296 /**
1297  * mpi3mr_free_op_reply_q_segments - free reply memory segments
1298  * @mrioc: Adapter instance reference
1299  * @q_idx: operational reply queue index
1300  *
1301  * Free memory segments allocated for operational reply queue
1302  *
1303  * Return: Nothing.
1304  */
1305 static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
1306 {
1307 	u16 j;
1308 	int size;
1309 	struct segments *segments;
1310 
1311 	segments = mrioc->op_reply_qinfo[q_idx].q_segments;
1312 	if (!segments)
1313 		return;
1314 
1315 	if (mrioc->enable_segqueue) {
1316 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1317 		if (mrioc->op_reply_qinfo[q_idx].q_segment_list) {
1318 			dma_free_coherent(&mrioc->pdev->dev,
1319 			    MPI3MR_MAX_SEG_LIST_SIZE,
1320 			    mrioc->op_reply_qinfo[q_idx].q_segment_list,
1321 			    mrioc->op_reply_qinfo[q_idx].q_segment_list_dma);
1322 			mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
1323 		}
1324 	} else
1325 		size = mrioc->op_reply_qinfo[q_idx].segment_qd *
1326 		    mrioc->op_reply_desc_sz;
1327 
1328 	for (j = 0; j < mrioc->op_reply_qinfo[q_idx].num_segments; j++) {
1329 		if (!segments[j].segment)
1330 			continue;
1331 		dma_free_coherent(&mrioc->pdev->dev,
1332 		    size, segments[j].segment, segments[j].segment_dma);
1333 		segments[j].segment = NULL;
1334 	}
1335 
1336 	kfree(mrioc->op_reply_qinfo[q_idx].q_segments);
1337 	mrioc->op_reply_qinfo[q_idx].q_segments = NULL;
1338 	mrioc->op_reply_qinfo[q_idx].qid = 0;
1339 }
1340 
1341 /**
1342  * mpi3mr_delete_op_reply_q - delete operational reply queue
1343  * @mrioc: Adapter instance reference
1344  * @qidx: operational reply queue index
1345  *
1346  * Delete operatinal reply queue by issuing MPI request
1347  * through admin queue.
1348  *
1349  * Return:  0 on success, non-zero on failure.
1350  */
1351 static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1352 {
1353 	struct mpi3_delete_reply_queue_request delq_req;
1354 	int retval = 0;
1355 	u16 reply_qid = 0, midx;
1356 
1357 	reply_qid = mrioc->op_reply_qinfo[qidx].qid;
1358 
1359 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1360 
1361 	if (!reply_qid)	{
1362 		retval = -1;
1363 		ioc_err(mrioc, "Issue DelRepQ: called with invalid ReqQID\n");
1364 		goto out;
1365 	}
1366 
1367 	memset(&delq_req, 0, sizeof(delq_req));
1368 	mutex_lock(&mrioc->init_cmds.mutex);
1369 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1370 		retval = -1;
1371 		ioc_err(mrioc, "Issue DelRepQ: Init command is in use\n");
1372 		mutex_unlock(&mrioc->init_cmds.mutex);
1373 		goto out;
1374 	}
1375 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1376 	mrioc->init_cmds.is_waiting = 1;
1377 	mrioc->init_cmds.callback = NULL;
1378 	delq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1379 	delq_req.function = MPI3_FUNCTION_DELETE_REPLY_QUEUE;
1380 	delq_req.queue_id = cpu_to_le16(reply_qid);
1381 
1382 	init_completion(&mrioc->init_cmds.done);
1383 	retval = mpi3mr_admin_request_post(mrioc, &delq_req, sizeof(delq_req),
1384 	    1);
1385 	if (retval) {
1386 		ioc_err(mrioc, "Issue DelRepQ: Admin Post failed\n");
1387 		goto out_unlock;
1388 	}
1389 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1390 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1391 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1392 		ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
1393 		mpi3mr_set_diagsave(mrioc);
1394 		mpi3mr_issue_reset(mrioc,
1395 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
1396 		    MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
1397 		mrioc->unrecoverable = 1;
1398 
1399 		retval = -1;
1400 		goto out_unlock;
1401 	}
1402 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1403 	    != MPI3_IOCSTATUS_SUCCESS) {
1404 		ioc_err(mrioc,
1405 		    "Issue DelRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1406 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1407 		    mrioc->init_cmds.ioc_loginfo);
1408 		retval = -1;
1409 		goto out_unlock;
1410 	}
1411 	mrioc->intr_info[midx].op_reply_q = NULL;
1412 
1413 	mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1414 out_unlock:
1415 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1416 	mutex_unlock(&mrioc->init_cmds.mutex);
1417 out:
1418 
1419 	return retval;
1420 }
1421 
1422 /**
1423  * mpi3mr_alloc_op_reply_q_segments -Alloc segmented reply pool
1424  * @mrioc: Adapter instance reference
1425  * @qidx: request queue index
1426  *
1427  * Allocate segmented memory pools for operational reply
1428  * queue.
1429  *
1430  * Return: 0 on success, non-zero on failure.
1431  */
1432 static int mpi3mr_alloc_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1433 {
1434 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1435 	int i, size;
1436 	u64 *q_segment_list_entry = NULL;
1437 	struct segments *segments;
1438 
1439 	if (mrioc->enable_segqueue) {
1440 		op_reply_q->segment_qd =
1441 		    MPI3MR_OP_REP_Q_SEG_SIZE / mrioc->op_reply_desc_sz;
1442 
1443 		size = MPI3MR_OP_REP_Q_SEG_SIZE;
1444 
1445 		op_reply_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1446 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_reply_q->q_segment_list_dma,
1447 		    GFP_KERNEL);
1448 		if (!op_reply_q->q_segment_list)
1449 			return -ENOMEM;
1450 		q_segment_list_entry = (u64 *)op_reply_q->q_segment_list;
1451 	} else {
1452 		op_reply_q->segment_qd = op_reply_q->num_replies;
1453 		size = op_reply_q->num_replies * mrioc->op_reply_desc_sz;
1454 	}
1455 
1456 	op_reply_q->num_segments = DIV_ROUND_UP(op_reply_q->num_replies,
1457 	    op_reply_q->segment_qd);
1458 
1459 	op_reply_q->q_segments = kcalloc(op_reply_q->num_segments,
1460 	    sizeof(struct segments), GFP_KERNEL);
1461 	if (!op_reply_q->q_segments)
1462 		return -ENOMEM;
1463 
1464 	segments = op_reply_q->q_segments;
1465 	for (i = 0; i < op_reply_q->num_segments; i++) {
1466 		segments[i].segment =
1467 		    dma_alloc_coherent(&mrioc->pdev->dev,
1468 		    size, &segments[i].segment_dma, GFP_KERNEL);
1469 		if (!segments[i].segment)
1470 			return -ENOMEM;
1471 		if (mrioc->enable_segqueue)
1472 			q_segment_list_entry[i] =
1473 			    (unsigned long)segments[i].segment_dma;
1474 	}
1475 
1476 	return 0;
1477 }
1478 
1479 /**
1480  * mpi3mr_alloc_op_req_q_segments - Alloc segmented req pool.
1481  * @mrioc: Adapter instance reference
1482  * @qidx: request queue index
1483  *
1484  * Allocate segmented memory pools for operational request
1485  * queue.
1486  *
1487  * Return: 0 on success, non-zero on failure.
1488  */
1489 static int mpi3mr_alloc_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 qidx)
1490 {
1491 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
1492 	int i, size;
1493 	u64 *q_segment_list_entry = NULL;
1494 	struct segments *segments;
1495 
1496 	if (mrioc->enable_segqueue) {
1497 		op_req_q->segment_qd =
1498 		    MPI3MR_OP_REQ_Q_SEG_SIZE / mrioc->facts.op_req_sz;
1499 
1500 		size = MPI3MR_OP_REQ_Q_SEG_SIZE;
1501 
1502 		op_req_q->q_segment_list = dma_alloc_coherent(&mrioc->pdev->dev,
1503 		    MPI3MR_MAX_SEG_LIST_SIZE, &op_req_q->q_segment_list_dma,
1504 		    GFP_KERNEL);
1505 		if (!op_req_q->q_segment_list)
1506 			return -ENOMEM;
1507 		q_segment_list_entry = (u64 *)op_req_q->q_segment_list;
1508 
1509 	} else {
1510 		op_req_q->segment_qd = op_req_q->num_requests;
1511 		size = op_req_q->num_requests * mrioc->facts.op_req_sz;
1512 	}
1513 
1514 	op_req_q->num_segments = DIV_ROUND_UP(op_req_q->num_requests,
1515 	    op_req_q->segment_qd);
1516 
1517 	op_req_q->q_segments = kcalloc(op_req_q->num_segments,
1518 	    sizeof(struct segments), GFP_KERNEL);
1519 	if (!op_req_q->q_segments)
1520 		return -ENOMEM;
1521 
1522 	segments = op_req_q->q_segments;
1523 	for (i = 0; i < op_req_q->num_segments; i++) {
1524 		segments[i].segment =
1525 		    dma_alloc_coherent(&mrioc->pdev->dev,
1526 		    size, &segments[i].segment_dma, GFP_KERNEL);
1527 		if (!segments[i].segment)
1528 			return -ENOMEM;
1529 		if (mrioc->enable_segqueue)
1530 			q_segment_list_entry[i] =
1531 			    (unsigned long)segments[i].segment_dma;
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 /**
1538  * mpi3mr_create_op_reply_q - create operational reply queue
1539  * @mrioc: Adapter instance reference
1540  * @qidx: operational reply queue index
1541  *
1542  * Create operatinal reply queue by issuing MPI request
1543  * through admin queue.
1544  *
1545  * Return:  0 on success, non-zero on failure.
1546  */
1547 static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
1548 {
1549 	struct mpi3_create_reply_queue_request create_req;
1550 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
1551 	int retval = 0;
1552 	u16 reply_qid = 0, midx;
1553 
1554 	reply_qid = op_reply_q->qid;
1555 
1556 	midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
1557 
1558 	if (reply_qid) {
1559 		retval = -1;
1560 		ioc_err(mrioc, "CreateRepQ: called for duplicate qid %d\n",
1561 		    reply_qid);
1562 
1563 		return retval;
1564 	}
1565 
1566 	reply_qid = qidx + 1;
1567 	op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
1568 	op_reply_q->ci = 0;
1569 	op_reply_q->ephase = 1;
1570 	atomic_set(&op_reply_q->pend_ios, 0);
1571 	atomic_set(&op_reply_q->in_use, 0);
1572 	op_reply_q->enable_irq_poll = false;
1573 
1574 	if (!op_reply_q->q_segments) {
1575 		retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
1576 		if (retval) {
1577 			mpi3mr_free_op_reply_q_segments(mrioc, qidx);
1578 			goto out;
1579 		}
1580 	}
1581 
1582 	memset(&create_req, 0, sizeof(create_req));
1583 	mutex_lock(&mrioc->init_cmds.mutex);
1584 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1585 		retval = -1;
1586 		ioc_err(mrioc, "CreateRepQ: Init command is in use\n");
1587 		goto out_unlock;
1588 	}
1589 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1590 	mrioc->init_cmds.is_waiting = 1;
1591 	mrioc->init_cmds.callback = NULL;
1592 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1593 	create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
1594 	create_req.queue_id = cpu_to_le16(reply_qid);
1595 	create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
1596 	create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
1597 	if (mrioc->enable_segqueue) {
1598 		create_req.flags |=
1599 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1600 		create_req.base_address = cpu_to_le64(
1601 		    op_reply_q->q_segment_list_dma);
1602 	} else
1603 		create_req.base_address = cpu_to_le64(
1604 		    op_reply_q->q_segments[0].segment_dma);
1605 
1606 	create_req.size = cpu_to_le16(op_reply_q->num_replies);
1607 
1608 	init_completion(&mrioc->init_cmds.done);
1609 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
1610 	    sizeof(create_req), 1);
1611 	if (retval) {
1612 		ioc_err(mrioc, "CreateRepQ: Admin Post failed\n");
1613 		goto out_unlock;
1614 	}
1615 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1616 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1617 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1618 		ioc_err(mrioc, "CreateRepQ: command timed out\n");
1619 		mpi3mr_set_diagsave(mrioc);
1620 		mpi3mr_issue_reset(mrioc,
1621 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
1622 		    MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
1623 		mrioc->unrecoverable = 1;
1624 		retval = -1;
1625 		goto out_unlock;
1626 	}
1627 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1628 	    != MPI3_IOCSTATUS_SUCCESS) {
1629 		ioc_err(mrioc,
1630 		    "CreateRepQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1631 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1632 		    mrioc->init_cmds.ioc_loginfo);
1633 		retval = -1;
1634 		goto out_unlock;
1635 	}
1636 	op_reply_q->qid = reply_qid;
1637 	mrioc->intr_info[midx].op_reply_q = op_reply_q;
1638 
1639 out_unlock:
1640 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1641 	mutex_unlock(&mrioc->init_cmds.mutex);
1642 out:
1643 
1644 	return retval;
1645 }
1646 
1647 /**
1648  * mpi3mr_create_op_req_q - create operational request queue
1649  * @mrioc: Adapter instance reference
1650  * @idx: operational request queue index
1651  * @reply_qid: Reply queue ID
1652  *
1653  * Create operatinal request queue by issuing MPI request
1654  * through admin queue.
1655  *
1656  * Return:  0 on success, non-zero on failure.
1657  */
1658 static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
1659 	u16 reply_qid)
1660 {
1661 	struct mpi3_create_request_queue_request create_req;
1662 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + idx;
1663 	int retval = 0;
1664 	u16 req_qid = 0;
1665 
1666 	req_qid = op_req_q->qid;
1667 
1668 	if (req_qid) {
1669 		retval = -1;
1670 		ioc_err(mrioc, "CreateReqQ: called for duplicate qid %d\n",
1671 		    req_qid);
1672 
1673 		return retval;
1674 	}
1675 	req_qid = idx + 1;
1676 
1677 	op_req_q->num_requests = MPI3MR_OP_REQ_Q_QD;
1678 	op_req_q->ci = 0;
1679 	op_req_q->pi = 0;
1680 	op_req_q->reply_qid = reply_qid;
1681 	spin_lock_init(&op_req_q->q_lock);
1682 
1683 	if (!op_req_q->q_segments) {
1684 		retval = mpi3mr_alloc_op_req_q_segments(mrioc, idx);
1685 		if (retval) {
1686 			mpi3mr_free_op_req_q_segments(mrioc, idx);
1687 			goto out;
1688 		}
1689 	}
1690 
1691 	memset(&create_req, 0, sizeof(create_req));
1692 	mutex_lock(&mrioc->init_cmds.mutex);
1693 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1694 		retval = -1;
1695 		ioc_err(mrioc, "CreateReqQ: Init command is in use\n");
1696 		goto out_unlock;
1697 	}
1698 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1699 	mrioc->init_cmds.is_waiting = 1;
1700 	mrioc->init_cmds.callback = NULL;
1701 	create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1702 	create_req.function = MPI3_FUNCTION_CREATE_REQUEST_QUEUE;
1703 	create_req.queue_id = cpu_to_le16(req_qid);
1704 	if (mrioc->enable_segqueue) {
1705 		create_req.flags =
1706 		    MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
1707 		create_req.base_address = cpu_to_le64(
1708 		    op_req_q->q_segment_list_dma);
1709 	} else
1710 		create_req.base_address = cpu_to_le64(
1711 		    op_req_q->q_segments[0].segment_dma);
1712 	create_req.reply_queue_id = cpu_to_le16(reply_qid);
1713 	create_req.size = cpu_to_le16(op_req_q->num_requests);
1714 
1715 	init_completion(&mrioc->init_cmds.done);
1716 	retval = mpi3mr_admin_request_post(mrioc, &create_req,
1717 	    sizeof(create_req), 1);
1718 	if (retval) {
1719 		ioc_err(mrioc, "CreateReqQ: Admin Post failed\n");
1720 		goto out_unlock;
1721 	}
1722 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1723 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1724 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1725 		ioc_err(mrioc, "CreateReqQ: command timed out\n");
1726 		mpi3mr_set_diagsave(mrioc);
1727 		if (mpi3mr_issue_reset(mrioc,
1728 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
1729 		    MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT))
1730 			mrioc->unrecoverable = 1;
1731 		retval = -1;
1732 		goto out_unlock;
1733 	}
1734 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1735 	    != MPI3_IOCSTATUS_SUCCESS) {
1736 		ioc_err(mrioc,
1737 		    "CreateReqQ: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1738 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1739 		    mrioc->init_cmds.ioc_loginfo);
1740 		retval = -1;
1741 		goto out_unlock;
1742 	}
1743 	op_req_q->qid = req_qid;
1744 
1745 out_unlock:
1746 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1747 	mutex_unlock(&mrioc->init_cmds.mutex);
1748 out:
1749 
1750 	return retval;
1751 }
1752 
1753 /**
1754  * mpi3mr_create_op_queues - create operational queue pairs
1755  * @mrioc: Adapter instance reference
1756  *
1757  * Allocate memory for operational queue meta data and call
1758  * create request and reply queue functions.
1759  *
1760  * Return: 0 on success, non-zero on failures.
1761  */
1762 static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
1763 {
1764 	int retval = 0;
1765 	u16 num_queues = 0, i = 0, msix_count_op_q = 1;
1766 
1767 	num_queues = min_t(int, mrioc->facts.max_op_reply_q,
1768 	    mrioc->facts.max_op_req_q);
1769 
1770 	msix_count_op_q =
1771 	    mrioc->intr_info_count - mrioc->op_reply_q_offset;
1772 	if (!mrioc->num_queues)
1773 		mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
1774 	num_queues = mrioc->num_queues;
1775 	ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
1776 	    num_queues);
1777 
1778 	if (!mrioc->req_qinfo) {
1779 		mrioc->req_qinfo = kcalloc(num_queues,
1780 		    sizeof(struct op_req_qinfo), GFP_KERNEL);
1781 		if (!mrioc->req_qinfo) {
1782 			retval = -1;
1783 			goto out_failed;
1784 		}
1785 
1786 		mrioc->op_reply_qinfo = kzalloc(sizeof(struct op_reply_qinfo) *
1787 		    num_queues, GFP_KERNEL);
1788 		if (!mrioc->op_reply_qinfo) {
1789 			retval = -1;
1790 			goto out_failed;
1791 		}
1792 	}
1793 
1794 	if (mrioc->enable_segqueue)
1795 		ioc_info(mrioc,
1796 		    "allocating operational queues through segmented queues\n");
1797 
1798 	for (i = 0; i < num_queues; i++) {
1799 		if (mpi3mr_create_op_reply_q(mrioc, i)) {
1800 			ioc_err(mrioc, "Cannot create OP RepQ %d\n", i);
1801 			break;
1802 		}
1803 		if (mpi3mr_create_op_req_q(mrioc, i,
1804 		    mrioc->op_reply_qinfo[i].qid)) {
1805 			ioc_err(mrioc, "Cannot create OP ReqQ %d\n", i);
1806 			mpi3mr_delete_op_reply_q(mrioc, i);
1807 			break;
1808 		}
1809 	}
1810 
1811 	if (i == 0) {
1812 		/* Not even one queue is created successfully*/
1813 		retval = -1;
1814 		goto out_failed;
1815 	}
1816 	mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
1817 	ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
1818 	    mrioc->num_op_reply_q);
1819 
1820 	return retval;
1821 out_failed:
1822 	kfree(mrioc->req_qinfo);
1823 	mrioc->req_qinfo = NULL;
1824 
1825 	kfree(mrioc->op_reply_qinfo);
1826 	mrioc->op_reply_qinfo = NULL;
1827 
1828 	return retval;
1829 }
1830 
1831 /**
1832  * mpi3mr_op_request_post - Post request to operational queue
1833  * @mrioc: Adapter reference
1834  * @op_req_q: Operational request queue info
1835  * @req: MPI3 request
1836  *
1837  * Post the MPI3 request into operational request queue and
1838  * inform the controller, if the queue is full return
1839  * appropriate error.
1840  *
1841  * Return: 0 on success, non-zero on failure.
1842  */
1843 int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
1844 	struct op_req_qinfo *op_req_q, u8 *req)
1845 {
1846 	u16 pi = 0, max_entries, reply_qidx = 0, midx;
1847 	int retval = 0;
1848 	unsigned long flags;
1849 	u8 *req_entry;
1850 	void *segment_base_addr;
1851 	u16 req_sz = mrioc->facts.op_req_sz;
1852 	struct segments *segments = op_req_q->q_segments;
1853 
1854 	reply_qidx = op_req_q->reply_qid - 1;
1855 
1856 	if (mrioc->unrecoverable)
1857 		return -EFAULT;
1858 
1859 	spin_lock_irqsave(&op_req_q->q_lock, flags);
1860 	pi = op_req_q->pi;
1861 	max_entries = op_req_q->num_requests;
1862 
1863 	if (mpi3mr_check_req_qfull(op_req_q)) {
1864 		midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
1865 		    reply_qidx, mrioc->op_reply_q_offset);
1866 		mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);
1867 
1868 		if (mpi3mr_check_req_qfull(op_req_q)) {
1869 			retval = -EAGAIN;
1870 			goto out;
1871 		}
1872 	}
1873 
1874 	if (mrioc->reset_in_progress) {
1875 		ioc_err(mrioc, "OpReqQ submit reset in progress\n");
1876 		retval = -EAGAIN;
1877 		goto out;
1878 	}
1879 
1880 	segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
1881 	req_entry = (u8 *)segment_base_addr +
1882 	    ((pi % op_req_q->segment_qd) * req_sz);
1883 
1884 	memset(req_entry, 0, req_sz);
1885 	memcpy(req_entry, req, MPI3MR_ADMIN_REQ_FRAME_SZ);
1886 
1887 	if (++pi == max_entries)
1888 		pi = 0;
1889 	op_req_q->pi = pi;
1890 
1891 	if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
1892 	    > MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
1893 		mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
1894 
1895 	writel(op_req_q->pi,
1896 	    &mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
1897 
1898 out:
1899 	spin_unlock_irqrestore(&op_req_q->q_lock, flags);
1900 	return retval;
1901 }
1902 
1903 /**
1904  * mpi3mr_sync_timestamp - Issue time stamp sync request
1905  * @mrioc: Adapter reference
1906  *
1907  * Issue IO unit control MPI request to synchornize firmware
1908  * timestamp with host time.
1909  *
1910  * Return: 0 on success, non-zero on failure.
1911  */
1912 static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
1913 {
1914 	ktime_t current_time;
1915 	struct mpi3_iounit_control_request iou_ctrl;
1916 	int retval = 0;
1917 
1918 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
1919 	mutex_lock(&mrioc->init_cmds.mutex);
1920 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
1921 		retval = -1;
1922 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command is in use\n");
1923 		mutex_unlock(&mrioc->init_cmds.mutex);
1924 		goto out;
1925 	}
1926 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
1927 	mrioc->init_cmds.is_waiting = 1;
1928 	mrioc->init_cmds.callback = NULL;
1929 	iou_ctrl.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
1930 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
1931 	iou_ctrl.operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
1932 	current_time = ktime_get_real();
1933 	iou_ctrl.param64[0] = cpu_to_le64(ktime_to_ms(current_time));
1934 
1935 	init_completion(&mrioc->init_cmds.done);
1936 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl,
1937 	    sizeof(iou_ctrl), 0);
1938 	if (retval) {
1939 		ioc_err(mrioc, "Issue IOUCTL time_stamp: Admin Post failed\n");
1940 		goto out_unlock;
1941 	}
1942 
1943 	wait_for_completion_timeout(&mrioc->init_cmds.done,
1944 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
1945 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
1946 		ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
1947 		mrioc->init_cmds.is_waiting = 0;
1948 		mpi3mr_soft_reset_handler(mrioc,
1949 		    MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
1950 		retval = -1;
1951 		goto out_unlock;
1952 	}
1953 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
1954 	    != MPI3_IOCSTATUS_SUCCESS) {
1955 		ioc_err(mrioc,
1956 		    "Issue IOUCTL time_stamp: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
1957 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1958 		    mrioc->init_cmds.ioc_loginfo);
1959 		retval = -1;
1960 		goto out_unlock;
1961 	}
1962 
1963 out_unlock:
1964 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
1965 	mutex_unlock(&mrioc->init_cmds.mutex);
1966 
1967 out:
1968 	return retval;
1969 }
1970 
1971 /**
1972  * mpi3mr_watchdog_work - watchdog thread to monitor faults
1973  * @work: work struct
1974  *
1975  * Watch dog work periodically executed (1 second interval) to
1976  * monitor firmware fault and to issue periodic timer sync to
1977  * the firmware.
1978  *
1979  * Return: Nothing.
1980  */
static void mpi3mr_watchdog_work(struct work_struct *work)
{
	struct mpi3mr_ioc *mrioc =
	    container_of(work, struct mpi3mr_ioc, watchdog_work.work);
	unsigned long flags;
	enum mpi3mr_iocstate ioc_state;
	u32 fault, host_diagnostic;

	/* Periodically push the host timestamp to firmware */
	if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
		mrioc->ts_update_counter = 0;
		mpi3mr_sync_timestamp(mrioc);
	}

	/*Check for fault state every one second and issue Soft reset*/
	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_FAULT) {
		fault = readl(&mrioc->sysif_regs->fault) &
		    MPI3_SYSIF_FAULT_CODE_MASK;
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
		if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
			/*
			 * Firmware is still saving diagnostic data; keep
			 * polling (up to MPI3_SYSIF_DIAG_SAVE_TIMEOUT ticks)
			 * before acting on the fault.
			 */
			if (!mrioc->diagsave_timeout) {
				mpi3mr_print_fault_info(mrioc);
				ioc_warn(mrioc, "Diag save in progress\n");
			}
			if ((mrioc->diagsave_timeout++) <=
			    MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
				goto schedule_work;
		} else
			mpi3mr_print_fault_info(mrioc);
		mrioc->diagsave_timeout = 0;

		/*
		 * A factory-reset fault is terminal: mark the controller
		 * unrecoverable and stop re-arming the watchdog.
		 */
		if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
			ioc_info(mrioc,
			    "Factory Reset fault occurred marking controller as unrecoverable"
			    );
			mrioc->unrecoverable = 1;
			goto out;
		}

		/* A reset is already underway; do not start another one */
		if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
		    (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
		    (mrioc->reset_in_progress))
			goto out;
		if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
		else
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_FAULT_WATCH, 0);
	}

schedule_work:
	/* Re-arm only if the watchdog has not been stopped concurrently */
	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
	if (mrioc->watchdog_work_q)
		queue_delayed_work(mrioc->watchdog_work_q,
		    &mrioc->watchdog_work,
		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
out:
	return;
}
2042 
2043 /**
2044  * mpi3mr_start_watchdog - Start watchdog
2045  * @mrioc: Adapter instance reference
2046  *
2047  * Create and start the watchdog thread to monitor controller
2048  * faults.
2049  *
2050  * Return: Nothing.
2051  */
2052 void mpi3mr_start_watchdog(struct mpi3mr_ioc *mrioc)
2053 {
2054 	if (mrioc->watchdog_work_q)
2055 		return;
2056 
2057 	INIT_DELAYED_WORK(&mrioc->watchdog_work, mpi3mr_watchdog_work);
2058 	snprintf(mrioc->watchdog_work_q_name,
2059 	    sizeof(mrioc->watchdog_work_q_name), "watchdog_%s%d", mrioc->name,
2060 	    mrioc->id);
2061 	mrioc->watchdog_work_q =
2062 	    create_singlethread_workqueue(mrioc->watchdog_work_q_name);
2063 	if (!mrioc->watchdog_work_q) {
2064 		ioc_err(mrioc, "%s: failed (line=%d)\n", __func__, __LINE__);
2065 		return;
2066 	}
2067 
2068 	if (mrioc->watchdog_work_q)
2069 		queue_delayed_work(mrioc->watchdog_work_q,
2070 		    &mrioc->watchdog_work,
2071 		    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
2072 }
2073 
2074 /**
2075  * mpi3mr_stop_watchdog - Stop watchdog
2076  * @mrioc: Adapter instance reference
2077  *
2078  * Stop the watchdog thread created to monitor controller
2079  * faults.
2080  *
2081  * Return: Nothing.
2082  */
2083 void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
2084 {
2085 	unsigned long flags;
2086 	struct workqueue_struct *wq;
2087 
2088 	spin_lock_irqsave(&mrioc->watchdog_lock, flags);
2089 	wq = mrioc->watchdog_work_q;
2090 	mrioc->watchdog_work_q = NULL;
2091 	spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
2092 	if (wq) {
2093 		if (!cancel_delayed_work_sync(&mrioc->watchdog_work))
2094 			flush_workqueue(wq);
2095 		destroy_workqueue(wq);
2096 	}
2097 }
2098 
2099 /**
2100  * mpi3mr_kill_ioc - Kill the controller
2101  * @mrioc: Adapter instance reference
2102  * @reason: reason for the failure.
2103  *
2104  * If fault debug is enabled, display the fault info else issue
2105  * diag fault and freeze the system for controller debug
2106  * purpose.
2107  *
2108  * Return: Nothing.
2109  */
static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
{
	enum mpi3mr_iocstate ioc_state;

	/* Without fault debug enabled there is nothing to do here */
	if (!mrioc->fault_dbg)
		return;

	dump_stack();

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_FAULT)
		mpi3mr_print_fault_info(mrioc);
	else {
		ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
		    reason);
		/* Force the controller into diag-fault state for debug */
		mpi3mr_diagfault_reset_handler(mrioc, reason);
	}
	/*
	 * fault_dbg == 2 deliberately spins this CPU forever so that the
	 * controller and host state can be inspected in place; any other
	 * non-zero value panics the system instead.
	 */
	if (mrioc->fault_dbg == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
2133 
2134 /**
2135  * mpi3mr_setup_admin_qpair - Setup admin queue pair
2136  * @mrioc: Adapter instance reference
2137  *
2138  * Allocate memory for admin queue pair if required and register
2139  * the admin queue with the controller.
2140  *
2141  * Return: 0 on success, non-zero on failures.
2142  */
2143 static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
2144 {
2145 	int retval = 0;
2146 	u32 num_admin_entries = 0;
2147 
2148 	mrioc->admin_req_q_sz = MPI3MR_ADMIN_REQ_Q_SIZE;
2149 	mrioc->num_admin_req = mrioc->admin_req_q_sz /
2150 	    MPI3MR_ADMIN_REQ_FRAME_SZ;
2151 	mrioc->admin_req_ci = mrioc->admin_req_pi = 0;
2152 	mrioc->admin_req_base = NULL;
2153 
2154 	mrioc->admin_reply_q_sz = MPI3MR_ADMIN_REPLY_Q_SIZE;
2155 	mrioc->num_admin_replies = mrioc->admin_reply_q_sz /
2156 	    MPI3MR_ADMIN_REPLY_FRAME_SZ;
2157 	mrioc->admin_reply_ci = 0;
2158 	mrioc->admin_reply_ephase = 1;
2159 	mrioc->admin_reply_base = NULL;
2160 
2161 	if (!mrioc->admin_req_base) {
2162 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
2163 		    mrioc->admin_req_q_sz, &mrioc->admin_req_dma, GFP_KERNEL);
2164 
2165 		if (!mrioc->admin_req_base) {
2166 			retval = -1;
2167 			goto out_failed;
2168 		}
2169 
2170 		mrioc->admin_reply_base = dma_alloc_coherent(&mrioc->pdev->dev,
2171 		    mrioc->admin_reply_q_sz, &mrioc->admin_reply_dma,
2172 		    GFP_KERNEL);
2173 
2174 		if (!mrioc->admin_reply_base) {
2175 			retval = -1;
2176 			goto out_failed;
2177 		}
2178 	}
2179 
2180 	num_admin_entries = (mrioc->num_admin_replies << 16) |
2181 	    (mrioc->num_admin_req);
2182 	writel(num_admin_entries, &mrioc->sysif_regs->admin_queue_num_entries);
2183 	mpi3mr_writeq(mrioc->admin_req_dma,
2184 	    &mrioc->sysif_regs->admin_request_queue_address);
2185 	mpi3mr_writeq(mrioc->admin_reply_dma,
2186 	    &mrioc->sysif_regs->admin_reply_queue_address);
2187 	writel(mrioc->admin_req_pi, &mrioc->sysif_regs->admin_request_queue_pi);
2188 	writel(mrioc->admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
2189 	return retval;
2190 
2191 out_failed:
2192 
2193 	if (mrioc->admin_reply_base) {
2194 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
2195 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
2196 		mrioc->admin_reply_base = NULL;
2197 	}
2198 	if (mrioc->admin_req_base) {
2199 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
2200 		    mrioc->admin_req_base, mrioc->admin_req_dma);
2201 		mrioc->admin_req_base = NULL;
2202 	}
2203 	return retval;
2204 }
2205 
2206 /**
2207  * mpi3mr_issue_iocfacts - Send IOC Facts
2208  * @mrioc: Adapter instance reference
2209  * @facts_data: Cached IOC facts data
2210  *
2211  * Issue IOC Facts MPI request through admin queue and wait for
2212  * the completion of it or time out.
2213  *
2214  * Return: 0 on success, non-zero on failures.
2215  */
static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	struct mpi3_ioc_facts_request iocfacts_req;
	void *data = NULL;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*facts_data);
	int retval = 0;
	u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;

	/* DMA-able buffer the firmware writes the facts data into */
	data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);

	if (!data) {
		retval = -1;
		goto out;
	}

	memset(&iocfacts_req, 0, sizeof(iocfacts_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single-slot tracker for internal commands */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCFacts: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocfacts_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocfacts_req.function = MPI3_FUNCTION_IOC_FACTS;

	/* Single simple SGE pointing at the facts buffer */
	mpi3mr_add_sg_single(&iocfacts_req.sgl, sgl_flags, data_len,
	    data_dma);

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocfacts_req,
	    sizeof(iocfacts_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCFacts: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
		/*
		 * IOCFacts failing this early is fatal: capture a diag-fault
		 * snapshot and mark the controller unrecoverable.
		 */
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
		mrioc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCFacts: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	/* Hand the raw (little-endian) facts data back to the caller */
	memcpy(facts_data, (u8 *)data, data_len);
out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (data)
		dma_free_coherent(&mrioc->pdev->dev, data_len, data, data_dma);

	return retval;
}
2290 
2291 /**
2292  * mpi3mr_check_reset_dma_mask - Process IOC facts data
2293  * @mrioc: Adapter instance reference
2294  *
2295  * Check whether the new DMA mask requested through IOCFacts by
2296  * firmware needs to be set, if so set it .
2297  *
2298  * Return: 0 on success, non-zero on failure.
2299  */
2300 static inline int mpi3mr_check_reset_dma_mask(struct mpi3mr_ioc *mrioc)
2301 {
2302 	struct pci_dev *pdev = mrioc->pdev;
2303 	int r;
2304 	u64 facts_dma_mask = DMA_BIT_MASK(mrioc->facts.dma_mask);
2305 
2306 	if (!mrioc->facts.dma_mask || (mrioc->dma_mask <= facts_dma_mask))
2307 		return 0;
2308 
2309 	ioc_info(mrioc, "Changing DMA mask from 0x%016llx to 0x%016llx\n",
2310 	    mrioc->dma_mask, facts_dma_mask);
2311 
2312 	r = dma_set_mask_and_coherent(&pdev->dev, facts_dma_mask);
2313 	if (r) {
2314 		ioc_err(mrioc, "Setting DMA mask to 0x%016llx failed: %d\n",
2315 		    facts_dma_mask, r);
2316 		return r;
2317 	}
2318 	mrioc->dma_mask = facts_dma_mask;
2319 	return r;
2320 }
2321 
2322 /**
2323  * mpi3mr_process_factsdata - Process IOC facts data
2324  * @mrioc: Adapter instance reference
2325  * @facts_data: Cached IOC facts data
2326  *
2327  * Convert IOC facts data into cpu endianness and cache it in
2328  * the driver .
2329  *
2330  * Return: Nothing.
2331  */
static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
	struct mpi3_ioc_facts_data *facts_data)
{
	u32 ioc_config, req_sz, facts_flags;

	/* Warn (but continue) if firmware reports a different facts size;
	 * the length field is in units of 4-byte words.
	 */
	if ((le16_to_cpu(facts_data->ioc_facts_data_length)) !=
	    (sizeof(*facts_data) / 4)) {
		ioc_warn(mrioc,
		    "IOCFactsdata length mismatch driver_sz(%zu) firmware_sz(%d)\n",
		    sizeof(*facts_data),
		    le16_to_cpu(facts_data->ioc_facts_data_length) * 4);
	}

	/* Operational request entry size as programmed in IOC configuration */
	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
	req_sz = 1 << ((ioc_config & MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT);
	if (le16_to_cpu(facts_data->ioc_request_frame_size) != (req_sz / 4)) {
		ioc_err(mrioc,
		    "IOCFacts data reqFrameSize mismatch hw_size(%d) firmware_sz(%d)\n",
		    req_sz / 4, le16_to_cpu(facts_data->ioc_request_frame_size));
	}

	memset(&mrioc->facts, 0, sizeof(mrioc->facts));

	/* Convert and cache the firmware-reported facts, field by field */
	facts_flags = le32_to_cpu(facts_data->flags);
	mrioc->facts.op_req_sz = req_sz;
	mrioc->op_reply_desc_sz = 1 << ((ioc_config &
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ) >>
	    MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT);

	mrioc->facts.ioc_num = facts_data->ioc_number;
	mrioc->facts.who_init = facts_data->who_init;
	mrioc->facts.max_msix_vectors = le16_to_cpu(facts_data->max_msix_vectors);
	mrioc->facts.personality = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK);
	mrioc->facts.dma_mask = (facts_flags &
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
	    MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
	mrioc->facts.protocol_flags = facts_data->protocol_flags;
	mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
	mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
	mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
	/* reply_frame_size is reported in 4-byte words */
	mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
	mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
	mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
	mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
	mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
	mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
	mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
	mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
	mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
	mrioc->facts.max_pcie_switches =
	    le16_to_cpu(facts_data->max_pc_ie_switches);
	mrioc->facts.max_sasexpanders =
	    le16_to_cpu(facts_data->max_sas_expanders);
	mrioc->facts.max_sasinitiators =
	    le16_to_cpu(facts_data->max_sas_initiators);
	mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
	mrioc->facts.min_devhandle = le16_to_cpu(facts_data->min_dev_handle);
	mrioc->facts.max_devhandle = le16_to_cpu(facts_data->max_dev_handle);
	mrioc->facts.max_op_req_q =
	    le16_to_cpu(facts_data->max_operational_request_queues);
	mrioc->facts.max_op_reply_q =
	    le16_to_cpu(facts_data->max_operational_reply_queues);
	mrioc->facts.ioc_capabilities =
	    le32_to_cpu(facts_data->ioc_capabilities);
	mrioc->facts.fw_ver.build_num =
	    le16_to_cpu(facts_data->fw_version.build_num);
	mrioc->facts.fw_ver.cust_id =
	    le16_to_cpu(facts_data->fw_version.customer_id);
	mrioc->facts.fw_ver.ph_minor = facts_data->fw_version.phase_minor;
	mrioc->facts.fw_ver.ph_major = facts_data->fw_version.phase_major;
	mrioc->facts.fw_ver.gen_minor = facts_data->fw_version.gen_minor;
	mrioc->facts.fw_ver.gen_major = facts_data->fw_version.gen_major;
	/* Clamp the driver's MSI-x usage to what the firmware supports */
	mrioc->msix_count = min_t(int, mrioc->msix_count,
	    mrioc->facts.max_msix_vectors);
	mrioc->facts.sge_mod_mask = facts_data->sge_modifier_mask;
	mrioc->facts.sge_mod_value = facts_data->sge_modifier_value;
	mrioc->facts.sge_mod_shift = facts_data->sge_modifier_shift;
	mrioc->facts.shutdown_timeout =
	    le16_to_cpu(facts_data->shutdown_timeout);

	ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
	    mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
	    mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
	ioc_info(mrioc,
	    "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
	    mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
	    mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
	    mrioc->facts.max_perids);
	ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
	    mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
	    mrioc->facts.sge_mod_shift);
	ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
	    mrioc->facts.dma_mask, (facts_flags &
	    MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));

	/* Reserve request slots for the driver's internal commands */
	mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;

	/* Keep the footprint small when booting a kdump kernel */
	if (reset_devices)
		mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
		    MPI3MR_HOST_IOS_KDUMP);
}
2435 
2436 /**
2437  * mpi3mr_alloc_reply_sense_bufs - Send IOC Init
2438  * @mrioc: Adapter instance reference
2439  *
2440  * Allocate and initialize the reply free buffers, sense
2441  * buffers, reply free queue and sense buffer queue.
2442  *
2443  * Return: 0 on success, non-zero on failures.
2444  */
static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
	int retval = 0;
	u32 sz, i;
	dma_addr_t phy_addr;

	/* Buffers persist across re-initialization; only (re)post them */
	if (mrioc->init_cmds.reply)
		goto post_reply_sbuf;

	mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->init_cmds.reply)
		goto out_failed;

	/* One reply buffer per device-removal handshake command slot */
	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
		mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
		    GFP_KERNEL);
		if (!mrioc->dev_rmhs_cmds[i].reply)
			goto out_failed;
	}

	mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
	if (!mrioc->host_tm_cmds.reply)
		goto out_failed;

	/* Bitmap sized to one bit per device handle, rounded up to bytes */
	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
	if (mrioc->facts.max_devhandle % 8)
		mrioc->dev_handle_bitmap_sz++;
	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->removepend_bitmap)
		goto out_failed;

	/* Bitmap tracking which device-removal command slots are free */
	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
	if (MPI3MR_NUM_DEVRMCMD % 8)
		mrioc->devrem_bitmap_sz++;
	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
	    GFP_KERNEL);
	if (!mrioc->devrem_bitmap)
		goto out_failed;

	/* Queue depths: one extra entry keeps the circular queues from
	 * ever appearing empty when completely posted.
	 */
	mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
	mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
	mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
	mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;

	/* reply buffer pool, 16 byte align */
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
	    &mrioc->pdev->dev, sz, 16, 0);
	if (!mrioc->reply_buf_pool) {
		ioc_err(mrioc, "reply buf pool: dma_pool_create failed\n");
		goto out_failed;
	}

	mrioc->reply_buf = dma_pool_zalloc(mrioc->reply_buf_pool, GFP_KERNEL,
	    &mrioc->reply_buf_dma);
	if (!mrioc->reply_buf)
		goto out_failed;

	mrioc->reply_buf_dma_max_address = mrioc->reply_buf_dma + sz;

	/* reply free queue, 8 byte align */
	sz = mrioc->reply_free_qsz * 8;
	mrioc->reply_free_q_pool = dma_pool_create("reply_free_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->reply_free_q_pool) {
		ioc_err(mrioc, "reply_free_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->reply_free_q = dma_pool_zalloc(mrioc->reply_free_q_pool,
	    GFP_KERNEL, &mrioc->reply_free_q_dma);
	if (!mrioc->reply_free_q)
		goto out_failed;

	/* sense buffer pool,  4 byte align */
	sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
	mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
	    &mrioc->pdev->dev, sz, 4, 0);
	if (!mrioc->sense_buf_pool) {
		ioc_err(mrioc, "sense_buf pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf = dma_pool_zalloc(mrioc->sense_buf_pool, GFP_KERNEL,
	    &mrioc->sense_buf_dma);
	if (!mrioc->sense_buf)
		goto out_failed;

	/* sense buffer queue, 8 byte align */
	sz = mrioc->sense_buf_q_sz * 8;
	mrioc->sense_buf_q_pool = dma_pool_create("sense_buf_q pool",
	    &mrioc->pdev->dev, sz, 8, 0);
	if (!mrioc->sense_buf_q_pool) {
		ioc_err(mrioc, "sense_buf_q pool: dma_pool_create failed\n");
		goto out_failed;
	}
	mrioc->sense_buf_q = dma_pool_zalloc(mrioc->sense_buf_q_pool,
	    GFP_KERNEL, &mrioc->sense_buf_q_dma);
	if (!mrioc->sense_buf_q)
		goto out_failed;

post_reply_sbuf:
	sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
	ioc_info(mrioc,
	    "reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
	    (sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
	sz = mrioc->reply_free_qsz * 8;
	ioc_info(mrioc,
	    "reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
	    mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
	    (unsigned long long)mrioc->reply_free_q_dma);
	sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
	ioc_info(mrioc,
	    "sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
	    (sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
	sz = mrioc->sense_buf_q_sz * 8;
	ioc_info(mrioc,
	    "sense_buf_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
	    mrioc->sense_buf_q, mrioc->sense_buf_q_sz, 8, (sz / 1024),
	    (unsigned long long)mrioc->sense_buf_q_dma);

	/* initialize Reply buffer Queue */
	for (i = 0, phy_addr = mrioc->reply_buf_dma;
	    i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
		mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
	mrioc->reply_free_q[i] = cpu_to_le64(0);

	/* initialize Sense Buffer Queue */
	for (i = 0, phy_addr = mrioc->sense_buf_dma;
	    i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
		mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
	mrioc->sense_buf_q[i] = cpu_to_le64(0);
	return retval;

out_failed:
	/* NOTE(review): partial allocations are not freed here — presumably
	 * the caller's teardown path releases them; verify against callers.
	 */
	retval = -1;
	return retval;
}
2584 
2585 /**
2586  * mpi3mr_issue_iocinit - Send IOC Init
2587  * @mrioc: Adapter instance reference
2588  *
2589  * Issue IOC Init MPI request through admin queue and wait for
2590  * the completion of it or time out.
2591  *
2592  * Return: 0 on success, non-zero on failures.
2593  */
static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_ioc_init_request iocinit_req;
	struct mpi3_driver_info_layout *drv_info;
	dma_addr_t data_dma;
	u32 data_len = sizeof(*drv_info);
	int retval = 0;
	ktime_t current_time;

	/* Driver information page is passed to firmware by DMA address */
	drv_info = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
	    GFP_KERNEL);
	if (!drv_info) {
		retval = -1;
		goto out;
	}
	drv_info->information_length = cpu_to_le32(data_len);
	strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
	strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
	strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version));
	strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name));
	strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version));
	strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE,
	    sizeof(drv_info->driver_release_date));
	drv_info->driver_capabilities = 0;
	/* Keep a host-side copy of what was reported to firmware */
	memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info,
	    sizeof(mrioc->driver_info));

	memset(&iocinit_req, 0, sizeof(iocinit_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single-slot tracker for internal commands */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue IOCInit: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	iocinit_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	iocinit_req.function = MPI3_FUNCTION_IOC_INIT;
	iocinit_req.mpi_version.mpi3_version.dev = MPI3_VERSION_DEV;
	iocinit_req.mpi_version.mpi3_version.unit = MPI3_VERSION_UNIT;
	iocinit_req.mpi_version.mpi3_version.major = MPI3_VERSION_MAJOR;
	iocinit_req.mpi_version.mpi3_version.minor = MPI3_VERSION_MINOR;
	iocinit_req.who_init = MPI3_WHOINIT_HOST_DRIVER;
	/* Describe the previously allocated reply free / sense buffer
	 * queues so firmware can start consuming them.
	 */
	iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
	iocinit_req.reply_free_queue_address =
	    cpu_to_le64(mrioc->reply_free_q_dma);
	iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ);
	iocinit_req.sense_buffer_free_queue_depth =
	    cpu_to_le16(mrioc->sense_buf_q_sz);
	iocinit_req.sense_buffer_free_queue_address =
	    cpu_to_le64(mrioc->sense_buf_q_dma);
	iocinit_req.driver_information_address = cpu_to_le64(data_dma);

	/* Seed the firmware clock with host wall time in milliseconds */
	current_time = ktime_get_real();
	iocinit_req.time_stamp = cpu_to_le64(ktime_to_ms(current_time));

	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &iocinit_req,
	    sizeof(iocinit_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue IOCInit: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		/*
		 * IOCInit failing is fatal: capture a diag-fault snapshot
		 * and mark the controller unrecoverable.
		 */
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
		mrioc->unrecoverable = 1;
		ioc_err(mrioc, "Issue IOCInit: command timed out\n");
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue IOCInit: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);

out:
	if (drv_info)
		dma_free_coherent(&mrioc->pdev->dev, data_len, drv_info,
		    data_dma);

	return retval;
}
2692 
2693 /**
2694  * mpi3mr_unmask_events - Unmask events in event mask bitmap
2695  * @mrioc: Adapter instance reference
2696  * @event: MPI event ID
2697  *
2698  * Un mask the specific event by resetting the event_mask
2699  * bitmap.
2700  *
2701  * Return: 0 on success, non-zero on failures.
2702  */
2703 static void mpi3mr_unmask_events(struct mpi3mr_ioc *mrioc, u16 event)
2704 {
2705 	u32 desired_event;
2706 	u8 word;
2707 
2708 	if (event >= 128)
2709 		return;
2710 
2711 	desired_event = (1 << (event % 32));
2712 	word = event / 32;
2713 
2714 	mrioc->event_masks[word] &= ~desired_event;
2715 }
2716 
2717 /**
2718  * mpi3mr_issue_event_notification - Send event notification
2719  * @mrioc: Adapter instance reference
2720  *
2721  * Issue event notification MPI request through admin queue and
2722  * wait for the completion of it or time out.
2723  *
2724  * Return: 0 on success, non-zero on failures.
2725  */
static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_event_notification_request evtnotify_req;
	int retval = 0;
	u8 i;

	memset(&evtnotify_req, 0, sizeof(evtnotify_req));
	mutex_lock(&mrioc->init_cmds.mutex);
	/* init_cmds is a single-slot tracker for internal commands */
	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "Issue EvtNotify: Init command is in use\n");
		mutex_unlock(&mrioc->init_cmds.mutex);
		goto out;
	}
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	evtnotify_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
	evtnotify_req.function = MPI3_FUNCTION_EVENT_NOTIFICATION;
	/* Tell firmware which events to deliver (cleared bit = unmasked) */
	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		evtnotify_req.event_masks[i] =
		    cpu_to_le32(mrioc->event_masks[i]);
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &evtnotify_req,
	    sizeof(evtnotify_req), 1);
	if (retval) {
		ioc_err(mrioc, "Issue EvtNotify: Admin Post failed\n");
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->init_cmds.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
		/*
		 * Event notification failing during init is fatal: capture
		 * a diag-fault snapshot and mark the controller
		 * unrecoverable.
		 */
		mpi3mr_set_diagsave(mrioc);
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
		mrioc->unrecoverable = 1;
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	    != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "Issue EvtNotify: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->init_cmds.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

out_unlock:
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);
out:
	return retval;
}
2783 
2784 /**
2785  * mpi3mr_send_event_ack - Send event acknowledgment
2786  * @mrioc: Adapter instance reference
2787  * @event: MPI3 event ID
2788  * @event_ctx: Event context
2789  *
2790  * Send event acknowledgment through admin queue and wait for
2791  * it to complete.
2792  *
2793  * Return: 0 on success, non-zero on failures.
2794  */
2795 int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2796 	u32 event_ctx)
2797 {
2798 	struct mpi3_event_ack_request evtack_req;
2799 	int retval = 0;
2800 
2801 	memset(&evtack_req, 0, sizeof(evtack_req));
2802 	mutex_lock(&mrioc->init_cmds.mutex);
2803 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2804 		retval = -1;
2805 		ioc_err(mrioc, "Send EvtAck: Init command is in use\n");
2806 		mutex_unlock(&mrioc->init_cmds.mutex);
2807 		goto out;
2808 	}
2809 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2810 	mrioc->init_cmds.is_waiting = 1;
2811 	mrioc->init_cmds.callback = NULL;
2812 	evtack_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2813 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
2814 	evtack_req.event = event;
2815 	evtack_req.event_context = cpu_to_le32(event_ctx);
2816 
2817 	init_completion(&mrioc->init_cmds.done);
2818 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
2819 	    sizeof(evtack_req), 1);
2820 	if (retval) {
2821 		ioc_err(mrioc, "Send EvtAck: Admin Post failed\n");
2822 		goto out_unlock;
2823 	}
2824 	wait_for_completion_timeout(&mrioc->init_cmds.done,
2825 	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
2826 	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2827 		ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
2828 		mpi3mr_soft_reset_handler(mrioc,
2829 		    MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
2830 		retval = -1;
2831 		goto out_unlock;
2832 	}
2833 	if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
2834 	    != MPI3_IOCSTATUS_SUCCESS) {
2835 		ioc_err(mrioc,
2836 		    "Send EvtAck: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
2837 		    (mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2838 		    mrioc->init_cmds.ioc_loginfo);
2839 		retval = -1;
2840 		goto out_unlock;
2841 	}
2842 
2843 out_unlock:
2844 	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2845 	mutex_unlock(&mrioc->init_cmds.mutex);
2846 out:
2847 	return retval;
2848 }
2849 
2850 /**
2851  * mpi3mr_alloc_chain_bufs - Allocate chain buffers
2852  * @mrioc: Adapter instance reference
2853  *
2854  * Allocate chain buffers and set a bitmap to indicate free
2855  * chain buffers. Chain buffers are used to pass the SGE
2856  * information along with MPI3 SCSI IO requests for host I/O.
2857  *
2858  * Return: 0 on success, non-zero on failure
2859  */
2860 static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
2861 {
2862 	int retval = 0;
2863 	u32 sz, i;
2864 	u16 num_chains;
2865 
2866 	num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
2867 
2868 	if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
2869 	    | SHOST_DIX_TYPE1_PROTECTION
2870 	    | SHOST_DIX_TYPE2_PROTECTION
2871 	    | SHOST_DIX_TYPE3_PROTECTION))
2872 		num_chains += (num_chains / MPI3MR_CHAINBUFDIX_FACTOR);
2873 
2874 	mrioc->chain_buf_count = num_chains;
2875 	sz = sizeof(struct chain_element) * num_chains;
2876 	mrioc->chain_sgl_list = kzalloc(sz, GFP_KERNEL);
2877 	if (!mrioc->chain_sgl_list)
2878 		goto out_failed;
2879 
2880 	sz = MPI3MR_PAGE_SIZE_4K;
2881 	mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
2882 	    &mrioc->pdev->dev, sz, 16, 0);
2883 	if (!mrioc->chain_buf_pool) {
2884 		ioc_err(mrioc, "chain buf pool: dma_pool_create failed\n");
2885 		goto out_failed;
2886 	}
2887 
2888 	for (i = 0; i < num_chains; i++) {
2889 		mrioc->chain_sgl_list[i].addr =
2890 		    dma_pool_zalloc(mrioc->chain_buf_pool, GFP_KERNEL,
2891 		    &mrioc->chain_sgl_list[i].dma_addr);
2892 
2893 		if (!mrioc->chain_sgl_list[i].addr)
2894 			goto out_failed;
2895 	}
2896 	mrioc->chain_bitmap_sz = num_chains / 8;
2897 	if (num_chains % 8)
2898 		mrioc->chain_bitmap_sz++;
2899 	mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
2900 	if (!mrioc->chain_bitmap)
2901 		goto out_failed;
2902 	return retval;
2903 out_failed:
2904 	retval = -1;
2905 	return retval;
2906 }
2907 
2908 /**
2909  * mpi3mr_port_enable_complete - Mark port enable complete
2910  * @mrioc: Adapter instance reference
2911  * @drv_cmd: Internal command tracker
2912  *
2913  * Call back for asynchronous port enable request sets the
2914  * driver command to indicate port enable request is complete.
2915  *
2916  * Return: Nothing
2917  */
2918 static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
2919 	struct mpi3mr_drv_cmd *drv_cmd)
2920 {
2921 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2922 	drv_cmd->callback = NULL;
2923 	mrioc->scan_failed = drv_cmd->ioc_status;
2924 	mrioc->scan_started = 0;
2925 }
2926 
2927 /**
2928  * mpi3mr_issue_port_enable - Issue Port Enable
2929  * @mrioc: Adapter instance reference
2930  * @async: Flag to wait for completion or not
2931  *
2932  * Issue Port Enable MPI request through admin queue and if the
2933  * async flag is not set wait for the completion of the port
2934  * enable or time out.
2935  *
2936  * Return: 0 on success, non-zero on failures.
2937  */
2938 int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
2939 {
2940 	struct mpi3_port_enable_request pe_req;
2941 	int retval = 0;
2942 	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
2943 
2944 	memset(&pe_req, 0, sizeof(pe_req));
2945 	mutex_lock(&mrioc->init_cmds.mutex);
2946 	if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
2947 		retval = -1;
2948 		ioc_err(mrioc, "Issue PortEnable: Init command is in use\n");
2949 		mutex_unlock(&mrioc->init_cmds.mutex);
2950 		goto out;
2951 	}
2952 	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
2953 	if (async) {
2954 		mrioc->init_cmds.is_waiting = 0;
2955 		mrioc->init_cmds.callback = mpi3mr_port_enable_complete;
2956 	} else {
2957 		mrioc->init_cmds.is_waiting = 1;
2958 		mrioc->init_cmds.callback = NULL;
2959 		init_completion(&mrioc->init_cmds.done);
2960 	}
2961 	pe_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
2962 	pe_req.function = MPI3_FUNCTION_PORT_ENABLE;
2963 
2964 	retval = mpi3mr_admin_request_post(mrioc, &pe_req, sizeof(pe_req), 1);
2965 	if (retval) {
2966 		ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
2967 		goto out_unlock;
2968 	}
2969 	if (!async) {
2970 		wait_for_completion_timeout(&mrioc->init_cmds.done,
2971 		    (pe_timeout * HZ));
2972 		if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
2973 			ioc_err(mrioc, "Issue PortEnable: command timed out\n");
2974 			retval = -1;
2975 			mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
2976 			mpi3mr_set_diagsave(mrioc);
2977 			mpi3mr_issue_reset(mrioc,
2978 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
2979 			    MPI3MR_RESET_FROM_PE_TIMEOUT);
2980 			mrioc->unrecoverable = 1;
2981 			goto out_unlock;
2982 		}
2983 		mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
2984 	}
2985 out_unlock:
2986 	mutex_unlock(&mrioc->init_cmds.mutex);
2987 out:
2988 	return retval;
2989 }
2990 
/* Protocol type to name mapper structure */
static const struct {
	u8 protocol;
	char *name;
} mpi3mr_protocols[] = {
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR, "Initiator" },
	{ MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET, "Target" },
	{ MPI3_IOCFACTS_PROTOCOL_NVME, "NVMe attachment" },
};

/* Capability to name mapper structure */
static const struct {
	u32 capability;
	char *name;
} mpi3mr_capabilities[] = {
	{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
};
3008 
3009 /**
3010  * mpi3mr_print_ioc_info - Display controller information
3011  * @mrioc: Adapter instance reference
3012  *
3013  * Display controller personalit, capability, supported
3014  * protocols etc.
3015  *
3016  * Return: Nothing
3017  */
3018 static void
3019 mpi3mr_print_ioc_info(struct mpi3mr_ioc *mrioc)
3020 {
3021 	int i = 0, bytes_written = 0;
3022 	char personality[16];
3023 	char protocol[50] = {0};
3024 	char capabilities[100] = {0};
3025 	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
3026 
3027 	switch (mrioc->facts.personality) {
3028 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA:
3029 		strncpy(personality, "Enhanced HBA", sizeof(personality));
3030 		break;
3031 	case MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR:
3032 		strncpy(personality, "RAID", sizeof(personality));
3033 		break;
3034 	default:
3035 		strncpy(personality, "Unknown", sizeof(personality));
3036 		break;
3037 	}
3038 
3039 	ioc_info(mrioc, "Running in %s Personality", personality);
3040 
3041 	ioc_info(mrioc, "FW version(%d.%d.%d.%d.%d.%d)\n",
3042 	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
3043 	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
3044 
3045 	for (i = 0; i < ARRAY_SIZE(mpi3mr_protocols); i++) {
3046 		if (mrioc->facts.protocol_flags &
3047 		    mpi3mr_protocols[i].protocol) {
3048 			bytes_written += scnprintf(protocol + bytes_written,
3049 				    sizeof(protocol) - bytes_written, "%s%s",
3050 				    bytes_written ? "," : "",
3051 				    mpi3mr_protocols[i].name);
3052 		}
3053 	}
3054 
3055 	bytes_written = 0;
3056 	for (i = 0; i < ARRAY_SIZE(mpi3mr_capabilities); i++) {
3057 		if (mrioc->facts.protocol_flags &
3058 		    mpi3mr_capabilities[i].capability) {
3059 			bytes_written += scnprintf(capabilities + bytes_written,
3060 				    sizeof(capabilities) - bytes_written, "%s%s",
3061 				    bytes_written ? "," : "",
3062 				    mpi3mr_capabilities[i].name);
3063 		}
3064 	}
3065 
3066 	ioc_info(mrioc, "Protocol=(%s), Capabilities=(%s)\n",
3067 		 protocol, capabilities);
3068 }
3069 
3070 /**
3071  * mpi3mr_cleanup_resources - Free PCI resources
3072  * @mrioc: Adapter instance reference
3073  *
3074  * Unmap PCI device memory and disable PCI device.
3075  *
3076  * Return: 0 on success and non-zero on failure.
3077  */
3078 void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc)
3079 {
3080 	struct pci_dev *pdev = mrioc->pdev;
3081 
3082 	mpi3mr_cleanup_isr(mrioc);
3083 
3084 	if (mrioc->sysif_regs) {
3085 		iounmap((void __iomem *)mrioc->sysif_regs);
3086 		mrioc->sysif_regs = NULL;
3087 	}
3088 
3089 	if (pci_is_enabled(pdev)) {
3090 		if (mrioc->bars)
3091 			pci_release_selected_regions(pdev, mrioc->bars);
3092 		pci_disable_device(pdev);
3093 	}
3094 }
3095 
3096 /**
3097  * mpi3mr_setup_resources - Enable PCI resources
3098  * @mrioc: Adapter instance reference
3099  *
3100  * Enable PCI device memory, MSI-x registers and set DMA mask.
3101  *
3102  * Return: 0 on success and non-zero on failure.
3103  */
3104 int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
3105 {
3106 	struct pci_dev *pdev = mrioc->pdev;
3107 	u32 memap_sz = 0;
3108 	int i, retval = 0, capb = 0;
3109 	u16 message_control;
3110 	u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
3111 	    (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
3112 	    (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
3113 
3114 	if (pci_enable_device_mem(pdev)) {
3115 		ioc_err(mrioc, "pci_enable_device_mem: failed\n");
3116 		retval = -ENODEV;
3117 		goto out_failed;
3118 	}
3119 
3120 	capb = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3121 	if (!capb) {
3122 		ioc_err(mrioc, "Unable to find MSI-X Capabilities\n");
3123 		retval = -ENODEV;
3124 		goto out_failed;
3125 	}
3126 	mrioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3127 
3128 	if (pci_request_selected_regions(pdev, mrioc->bars,
3129 	    mrioc->driver_name)) {
3130 		ioc_err(mrioc, "pci_request_selected_regions: failed\n");
3131 		retval = -ENODEV;
3132 		goto out_failed;
3133 	}
3134 
3135 	for (i = 0; (i < DEVICE_COUNT_RESOURCE); i++) {
3136 		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3137 			mrioc->sysif_regs_phys = pci_resource_start(pdev, i);
3138 			memap_sz = pci_resource_len(pdev, i);
3139 			mrioc->sysif_regs =
3140 			    ioremap(mrioc->sysif_regs_phys, memap_sz);
3141 			break;
3142 		}
3143 	}
3144 
3145 	pci_set_master(pdev);
3146 
3147 	retval = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
3148 	if (retval) {
3149 		if (dma_mask != DMA_BIT_MASK(32)) {
3150 			ioc_warn(mrioc, "Setting 64 bit DMA mask failed\n");
3151 			dma_mask = DMA_BIT_MASK(32);
3152 			retval = dma_set_mask_and_coherent(&pdev->dev,
3153 			    dma_mask);
3154 		}
3155 		if (retval) {
3156 			mrioc->dma_mask = 0;
3157 			ioc_err(mrioc, "Setting 32 bit DMA mask also failed\n");
3158 			goto out_failed;
3159 		}
3160 	}
3161 	mrioc->dma_mask = dma_mask;
3162 
3163 	if (!mrioc->sysif_regs) {
3164 		ioc_err(mrioc,
3165 		    "Unable to map adapter memory or resource not found\n");
3166 		retval = -EINVAL;
3167 		goto out_failed;
3168 	}
3169 
3170 	pci_read_config_word(pdev, capb + 2, &message_control);
3171 	mrioc->msix_count = (message_control & 0x3FF) + 1;
3172 
3173 	pci_save_state(pdev);
3174 
3175 	pci_set_drvdata(pdev, mrioc->shost);
3176 
3177 	mpi3mr_ioc_disable_intr(mrioc);
3178 
3179 	ioc_info(mrioc, "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
3180 	    (unsigned long long)mrioc->sysif_regs_phys,
3181 	    mrioc->sysif_regs, memap_sz);
3182 	ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
3183 	    mrioc->msix_count);
3184 	return retval;
3185 
3186 out_failed:
3187 	mpi3mr_cleanup_resources(mrioc);
3188 	return retval;
3189 }
3190 
3191 /**
3192  * mpi3mr_init_ioc - Initialize the controller
3193  * @mrioc: Adapter instance reference
3194  * @init_type: Flag to indicate is the init_type
3195  *
3196  * This the controller initialization routine, executed either
3197  * after soft reset or from pci probe callback.
3198  * Setup the required resources, memory map the controller
3199  * registers, create admin and operational reply queue pairs,
3200  * allocate required memory for reply pool, sense buffer pool,
3201  * issue IOC init request to the firmware, unmask the events and
3202  * issue port enable to discover SAS/SATA/NVMe devies and RAID
3203  * volumes.
3204  *
3205  * Return: 0 on success and non-zero on failure.
3206  */
3207 int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
3208 {
3209 	int retval = 0;
3210 	enum mpi3mr_iocstate ioc_state;
3211 	u64 base_info;
3212 	u32 timeout;
3213 	u32 ioc_status, ioc_config, i;
3214 	struct mpi3_ioc_facts_data facts_data;
3215 
3216 	mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
3217 	mrioc->change_count = 0;
3218 	if (init_type == MPI3MR_IT_INIT) {
3219 		mrioc->cpu_count = num_online_cpus();
3220 		retval = mpi3mr_setup_resources(mrioc);
3221 		if (retval) {
3222 			ioc_err(mrioc, "Failed to setup resources:error %d\n",
3223 			    retval);
3224 			goto out_nocleanup;
3225 		}
3226 	}
3227 
3228 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3229 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3230 
3231 	ioc_info(mrioc, "SOD status %x configuration %x\n",
3232 	    ioc_status, ioc_config);
3233 
3234 	base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
3235 	ioc_info(mrioc, "SOD base_info %llx\n",	base_info);
3236 
3237 	/*The timeout value is in 2sec unit, changing it to seconds*/
3238 	mrioc->ready_timeout =
3239 	    ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
3240 	    MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
3241 
3242 	ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);
3243 
3244 	ioc_state = mpi3mr_get_iocstate(mrioc);
3245 	ioc_info(mrioc, "IOC in %s state during detection\n",
3246 	    mpi3mr_iocstate_name(ioc_state));
3247 
3248 	if (ioc_state == MRIOC_STATE_BECOMING_READY ||
3249 	    ioc_state == MRIOC_STATE_RESET_REQUESTED) {
3250 		timeout = mrioc->ready_timeout * 10;
3251 		do {
3252 			msleep(100);
3253 		} while (--timeout);
3254 
3255 		ioc_state = mpi3mr_get_iocstate(mrioc);
3256 		ioc_info(mrioc,
3257 		    "IOC in %s state after waiting for reset time\n",
3258 		    mpi3mr_iocstate_name(ioc_state));
3259 	}
3260 
3261 	if (ioc_state == MRIOC_STATE_READY) {
3262 		retval = mpi3mr_issue_and_process_mur(mrioc,
3263 		    MPI3MR_RESET_FROM_BRINGUP);
3264 		if (retval) {
3265 			ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
3266 			    retval);
3267 		}
3268 		ioc_state = mpi3mr_get_iocstate(mrioc);
3269 	}
3270 	if (ioc_state != MRIOC_STATE_RESET) {
3271 		mpi3mr_print_fault_info(mrioc);
3272 		retval = mpi3mr_issue_reset(mrioc,
3273 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
3274 		    MPI3MR_RESET_FROM_BRINGUP);
3275 		if (retval) {
3276 			ioc_err(mrioc,
3277 			    "%s :Failed to soft reset IOC error %d\n",
3278 			    __func__, retval);
3279 			goto out_failed;
3280 		}
3281 	}
3282 	ioc_state = mpi3mr_get_iocstate(mrioc);
3283 	if (ioc_state != MRIOC_STATE_RESET) {
3284 		retval = -1;
3285 		ioc_err(mrioc, "Cannot bring IOC to reset state\n");
3286 		goto out_failed;
3287 	}
3288 
3289 	retval = mpi3mr_setup_admin_qpair(mrioc);
3290 	if (retval) {
3291 		ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
3292 		    retval);
3293 		goto out_failed;
3294 	}
3295 
3296 	retval = mpi3mr_bring_ioc_ready(mrioc);
3297 	if (retval) {
3298 		ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
3299 		    retval);
3300 		goto out_failed;
3301 	}
3302 
3303 	if (init_type != MPI3MR_IT_RESET) {
3304 		retval = mpi3mr_setup_isr(mrioc, 1);
3305 		if (retval) {
3306 			ioc_err(mrioc, "Failed to setup ISR error %d\n",
3307 			    retval);
3308 			goto out_failed;
3309 		}
3310 	} else
3311 		mpi3mr_ioc_enable_intr(mrioc);
3312 
3313 	retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
3314 	if (retval) {
3315 		ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
3316 		    retval);
3317 		goto out_failed;
3318 	}
3319 
3320 	mpi3mr_process_factsdata(mrioc, &facts_data);
3321 	if (init_type == MPI3MR_IT_INIT) {
3322 		retval = mpi3mr_check_reset_dma_mask(mrioc);
3323 		if (retval) {
3324 			ioc_err(mrioc, "Resetting dma mask failed %d\n",
3325 			    retval);
3326 			goto out_failed;
3327 		}
3328 	}
3329 
3330 	mpi3mr_print_ioc_info(mrioc);
3331 
3332 	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
3333 	if (retval) {
3334 		ioc_err(mrioc,
3335 		    "%s :Failed to allocated reply sense buffers %d\n",
3336 		    __func__, retval);
3337 		goto out_failed;
3338 	}
3339 
3340 	if (init_type == MPI3MR_IT_INIT) {
3341 		retval = mpi3mr_alloc_chain_bufs(mrioc);
3342 		if (retval) {
3343 			ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
3344 			    retval);
3345 			goto out_failed;
3346 		}
3347 	}
3348 
3349 	retval = mpi3mr_issue_iocinit(mrioc);
3350 	if (retval) {
3351 		ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
3352 		    retval);
3353 		goto out_failed;
3354 	}
3355 	mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
3356 	writel(mrioc->reply_free_queue_host_index,
3357 	    &mrioc->sysif_regs->reply_free_host_index);
3358 
3359 	mrioc->sbq_host_index = mrioc->num_sense_bufs;
3360 	writel(mrioc->sbq_host_index,
3361 	    &mrioc->sysif_regs->sense_buffer_free_host_index);
3362 
3363 	if (init_type != MPI3MR_IT_RESET) {
3364 		retval = mpi3mr_setup_isr(mrioc, 0);
3365 		if (retval) {
3366 			ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
3367 			    retval);
3368 			goto out_failed;
3369 		}
3370 	}
3371 
3372 	retval = mpi3mr_create_op_queues(mrioc);
3373 	if (retval) {
3374 		ioc_err(mrioc, "Failed to create OpQueues error %d\n",
3375 		    retval);
3376 		goto out_failed;
3377 	}
3378 
3379 	if ((init_type != MPI3MR_IT_INIT) &&
3380 	    (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
3381 		retval = -1;
3382 		ioc_err(mrioc,
3383 		    "Cannot create minimum number of OpQueues expected:%d created:%d\n",
3384 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
3385 		goto out_failed;
3386 	}
3387 
3388 	for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3389 		mrioc->event_masks[i] = -1;
3390 
3391 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
3392 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
3393 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
3394 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
3395 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3396 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
3397 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
3398 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
3399 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
3400 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
3401 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
3402 	mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
3403 
3404 	retval = mpi3mr_issue_event_notification(mrioc);
3405 	if (retval) {
3406 		ioc_err(mrioc, "Failed to issue event notification %d\n",
3407 		    retval);
3408 		goto out_failed;
3409 	}
3410 
3411 	if (init_type != MPI3MR_IT_INIT) {
3412 		ioc_info(mrioc, "Issuing Port Enable\n");
3413 		retval = mpi3mr_issue_port_enable(mrioc, 0);
3414 		if (retval) {
3415 			ioc_err(mrioc, "Failed to issue port enable %d\n",
3416 			    retval);
3417 			goto out_failed;
3418 		}
3419 	}
3420 	return retval;
3421 
3422 out_failed:
3423 	if (init_type == MPI3MR_IT_INIT)
3424 		mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
3425 	else
3426 		mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
3427 out_nocleanup:
3428 	return retval;
3429 }
3430 
3431 /**
3432  * mpi3mr_memset_op_reply_q_buffers - memset the operational reply queue's
3433  *					segments
3434  * @mrioc: Adapter instance reference
3435  * @qidx: Operational reply queue index
3436  *
3437  * Return: Nothing.
3438  */
3439 static void mpi3mr_memset_op_reply_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3440 {
3441 	struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
3442 	struct segments *segments;
3443 	int i, size;
3444 
3445 	if (!op_reply_q->q_segments)
3446 		return;
3447 
3448 	size = op_reply_q->segment_qd * mrioc->op_reply_desc_sz;
3449 	segments = op_reply_q->q_segments;
3450 	for (i = 0; i < op_reply_q->num_segments; i++)
3451 		memset(segments[i].segment, 0, size);
3452 }
3453 
3454 /**
3455  * mpi3mr_memset_op_req_q_buffers - memset the operational request queue's
3456  *					segments
3457  * @mrioc: Adapter instance reference
3458  * @qidx: Operational request queue index
3459  *
3460  * Return: Nothing.
3461  */
3462 static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
3463 {
3464 	struct op_req_qinfo *op_req_q = mrioc->req_qinfo + qidx;
3465 	struct segments *segments;
3466 	int i, size;
3467 
3468 	if (!op_req_q->q_segments)
3469 		return;
3470 
3471 	size = op_req_q->segment_qd * mrioc->facts.op_req_sz;
3472 	segments = op_req_q->q_segments;
3473 	for (i = 0; i < op_req_q->num_segments; i++)
3474 		memset(segments[i].segment, 0, size);
3475 }
3476 
3477 /**
3478  * mpi3mr_memset_buffers - memset memory for a controller
3479  * @mrioc: Adapter instance reference
3480  *
3481  * clear all the memory allocated for a controller, typically
3482  * called post reset to reuse the memory allocated during the
3483  * controller init.
3484  *
3485  * Return: Nothing.
3486  */
3487 void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
3488 {
3489 	u16 i;
3490 
3491 	memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
3492 	memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
3493 
3494 	memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
3495 	memset(mrioc->host_tm_cmds.reply, 0,
3496 	    sizeof(*mrioc->host_tm_cmds.reply));
3497 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
3498 		memset(mrioc->dev_rmhs_cmds[i].reply, 0,
3499 		    sizeof(*mrioc->dev_rmhs_cmds[i].reply));
3500 	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
3501 	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
3502 
3503 	for (i = 0; i < mrioc->num_queues; i++) {
3504 		mrioc->op_reply_qinfo[i].qid = 0;
3505 		mrioc->op_reply_qinfo[i].ci = 0;
3506 		mrioc->op_reply_qinfo[i].num_replies = 0;
3507 		mrioc->op_reply_qinfo[i].ephase = 0;
3508 		atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
3509 		atomic_set(&mrioc->op_reply_qinfo[i].in_use, 0);
3510 		mpi3mr_memset_op_reply_q_buffers(mrioc, i);
3511 
3512 		mrioc->req_qinfo[i].ci = 0;
3513 		mrioc->req_qinfo[i].pi = 0;
3514 		mrioc->req_qinfo[i].num_requests = 0;
3515 		mrioc->req_qinfo[i].qid = 0;
3516 		mrioc->req_qinfo[i].reply_qid = 0;
3517 		spin_lock_init(&mrioc->req_qinfo[i].q_lock);
3518 		mpi3mr_memset_op_req_q_buffers(mrioc, i);
3519 	}
3520 }
3521 
3522 /**
3523  * mpi3mr_free_mem - Free memory allocated for a controller
3524  * @mrioc: Adapter instance reference
3525  *
3526  * Free all the memory allocated for a controller.
3527  *
3528  * Return: Nothing.
3529  */
3530 static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
3531 {
3532 	u16 i;
3533 	struct mpi3mr_intr_info *intr_info;
3534 
3535 	if (mrioc->sense_buf_pool) {
3536 		if (mrioc->sense_buf)
3537 			dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
3538 			    mrioc->sense_buf_dma);
3539 		dma_pool_destroy(mrioc->sense_buf_pool);
3540 		mrioc->sense_buf = NULL;
3541 		mrioc->sense_buf_pool = NULL;
3542 	}
3543 	if (mrioc->sense_buf_q_pool) {
3544 		if (mrioc->sense_buf_q)
3545 			dma_pool_free(mrioc->sense_buf_q_pool,
3546 			    mrioc->sense_buf_q, mrioc->sense_buf_q_dma);
3547 		dma_pool_destroy(mrioc->sense_buf_q_pool);
3548 		mrioc->sense_buf_q = NULL;
3549 		mrioc->sense_buf_q_pool = NULL;
3550 	}
3551 
3552 	if (mrioc->reply_buf_pool) {
3553 		if (mrioc->reply_buf)
3554 			dma_pool_free(mrioc->reply_buf_pool, mrioc->reply_buf,
3555 			    mrioc->reply_buf_dma);
3556 		dma_pool_destroy(mrioc->reply_buf_pool);
3557 		mrioc->reply_buf = NULL;
3558 		mrioc->reply_buf_pool = NULL;
3559 	}
3560 	if (mrioc->reply_free_q_pool) {
3561 		if (mrioc->reply_free_q)
3562 			dma_pool_free(mrioc->reply_free_q_pool,
3563 			    mrioc->reply_free_q, mrioc->reply_free_q_dma);
3564 		dma_pool_destroy(mrioc->reply_free_q_pool);
3565 		mrioc->reply_free_q = NULL;
3566 		mrioc->reply_free_q_pool = NULL;
3567 	}
3568 
3569 	for (i = 0; i < mrioc->num_op_req_q; i++)
3570 		mpi3mr_free_op_req_q_segments(mrioc, i);
3571 
3572 	for (i = 0; i < mrioc->num_op_reply_q; i++)
3573 		mpi3mr_free_op_reply_q_segments(mrioc, i);
3574 
3575 	for (i = 0; i < mrioc->intr_info_count; i++) {
3576 		intr_info = mrioc->intr_info + i;
3577 		intr_info->op_reply_q = NULL;
3578 	}
3579 
3580 	kfree(mrioc->req_qinfo);
3581 	mrioc->req_qinfo = NULL;
3582 	mrioc->num_op_req_q = 0;
3583 
3584 	kfree(mrioc->op_reply_qinfo);
3585 	mrioc->op_reply_qinfo = NULL;
3586 	mrioc->num_op_reply_q = 0;
3587 
3588 	kfree(mrioc->init_cmds.reply);
3589 	mrioc->init_cmds.reply = NULL;
3590 
3591 	kfree(mrioc->host_tm_cmds.reply);
3592 	mrioc->host_tm_cmds.reply = NULL;
3593 
3594 	kfree(mrioc->removepend_bitmap);
3595 	mrioc->removepend_bitmap = NULL;
3596 
3597 	kfree(mrioc->devrem_bitmap);
3598 	mrioc->devrem_bitmap = NULL;
3599 
3600 	kfree(mrioc->chain_bitmap);
3601 	mrioc->chain_bitmap = NULL;
3602 
3603 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3604 		kfree(mrioc->dev_rmhs_cmds[i].reply);
3605 		mrioc->dev_rmhs_cmds[i].reply = NULL;
3606 	}
3607 
3608 	if (mrioc->chain_buf_pool) {
3609 		for (i = 0; i < mrioc->chain_buf_count; i++) {
3610 			if (mrioc->chain_sgl_list[i].addr) {
3611 				dma_pool_free(mrioc->chain_buf_pool,
3612 				    mrioc->chain_sgl_list[i].addr,
3613 				    mrioc->chain_sgl_list[i].dma_addr);
3614 				mrioc->chain_sgl_list[i].addr = NULL;
3615 			}
3616 		}
3617 		dma_pool_destroy(mrioc->chain_buf_pool);
3618 		mrioc->chain_buf_pool = NULL;
3619 	}
3620 
3621 	kfree(mrioc->chain_sgl_list);
3622 	mrioc->chain_sgl_list = NULL;
3623 
3624 	if (mrioc->admin_reply_base) {
3625 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_reply_q_sz,
3626 		    mrioc->admin_reply_base, mrioc->admin_reply_dma);
3627 		mrioc->admin_reply_base = NULL;
3628 	}
3629 	if (mrioc->admin_req_base) {
3630 		dma_free_coherent(&mrioc->pdev->dev, mrioc->admin_req_q_sz,
3631 		    mrioc->admin_req_base, mrioc->admin_req_dma);
3632 		mrioc->admin_req_base = NULL;
3633 	}
3634 }
3635 
3636 /**
3637  * mpi3mr_issue_ioc_shutdown - shutdown controller
3638  * @mrioc: Adapter instance reference
3639  *
3640  * Send shutodwn notification to the controller and wait for the
3641  * shutdown_timeout for it to be completed.
3642  *
3643  * Return: Nothing.
3644  */
3645 static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
3646 {
3647 	u32 ioc_config, ioc_status;
3648 	u8 retval = 1;
3649 	u32 timeout = MPI3MR_DEFAULT_SHUTDOWN_TIME * 10;
3650 
3651 	ioc_info(mrioc, "Issuing shutdown Notification\n");
3652 	if (mrioc->unrecoverable) {
3653 		ioc_warn(mrioc,
3654 		    "IOC is unrecoverable shutdown is not issued\n");
3655 		return;
3656 	}
3657 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3658 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3659 	    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS) {
3660 		ioc_info(mrioc, "shutdown already in progress\n");
3661 		return;
3662 	}
3663 
3664 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3665 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
3666 	ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
3667 
3668 	writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
3669 
3670 	if (mrioc->facts.shutdown_timeout)
3671 		timeout = mrioc->facts.shutdown_timeout * 10;
3672 
3673 	do {
3674 		ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3675 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3676 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE) {
3677 			retval = 0;
3678 			break;
3679 		}
3680 		msleep(100);
3681 	} while (--timeout);
3682 
3683 	ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3684 	ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
3685 
3686 	if (retval) {
3687 		if ((ioc_status & MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK)
3688 		    == MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS)
3689 			ioc_warn(mrioc,
3690 			    "shutdown still in progress after timeout\n");
3691 	}
3692 
3693 	ioc_info(mrioc,
3694 	    "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
3695 	    (!retval) ? "successful" : "failed", ioc_status,
3696 	    ioc_config);
3697 }
3698 
3699 /**
3700  * mpi3mr_cleanup_ioc - Cleanup controller
3701  * @mrioc: Adapter instance reference
3702  * @reason: Cleanup reason
3703  *
3704  * controller cleanup handler, Message unit reset or soft reset
3705  * and shutdown notification is issued to the controller and the
3706  * associated memory resources are freed.
3707  *
3708  * Return: Nothing.
3709  */
3710 void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
3711 {
3712 	enum mpi3mr_iocstate ioc_state;
3713 
3714 	if (reason == MPI3MR_COMPLETE_CLEANUP)
3715 		mpi3mr_stop_watchdog(mrioc);
3716 
3717 	mpi3mr_ioc_disable_intr(mrioc);
3718 
3719 	ioc_state = mpi3mr_get_iocstate(mrioc);
3720 
3721 	if ((!mrioc->unrecoverable) && (!mrioc->reset_in_progress) &&
3722 	    (ioc_state == MRIOC_STATE_READY)) {
3723 		if (mpi3mr_issue_and_process_mur(mrioc,
3724 		    MPI3MR_RESET_FROM_CTLR_CLEANUP))
3725 			mpi3mr_issue_reset(mrioc,
3726 			    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
3727 			    MPI3MR_RESET_FROM_MUR_FAILURE);
3728 
3729 		if (reason != MPI3MR_REINIT_FAILURE)
3730 			mpi3mr_issue_ioc_shutdown(mrioc);
3731 	}
3732 
3733 	if (reason == MPI3MR_COMPLETE_CLEANUP) {
3734 		mpi3mr_free_mem(mrioc);
3735 		mpi3mr_cleanup_resources(mrioc);
3736 	}
3737 }
3738 
3739 /**
3740  * mpi3mr_drv_cmd_comp_reset - Flush a internal driver command
3741  * @mrioc: Adapter instance reference
3742  * @cmdptr: Internal command tracker
3743  *
3744  * Complete an internal driver commands with state indicating it
3745  * is completed due to reset.
3746  *
3747  * Return: Nothing.
3748  */
3749 static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
3750 	struct mpi3mr_drv_cmd *cmdptr)
3751 {
3752 	if (cmdptr->state & MPI3MR_CMD_PENDING) {
3753 		cmdptr->state |= MPI3MR_CMD_RESET;
3754 		cmdptr->state &= ~MPI3MR_CMD_PENDING;
3755 		if (cmdptr->is_waiting) {
3756 			complete(&cmdptr->done);
3757 			cmdptr->is_waiting = 0;
3758 		} else if (cmdptr->callback)
3759 			cmdptr->callback(mrioc, cmdptr);
3760 	}
3761 }
3762 
3763 /**
3764  * mpi3mr_flush_drv_cmds - Flush internaldriver commands
3765  * @mrioc: Adapter instance reference
3766  *
3767  * Flush all internal driver commands post reset
3768  *
3769  * Return: Nothing.
3770  */
3771 static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
3772 {
3773 	struct mpi3mr_drv_cmd *cmdptr;
3774 	u8 i;
3775 
3776 	cmdptr = &mrioc->init_cmds;
3777 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
3778 	cmdptr = &mrioc->host_tm_cmds;
3779 	mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
3780 
3781 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
3782 		cmdptr = &mrioc->dev_rmhs_cmds[i];
3783 		mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
3784 	}
3785 }
3786 
3787 /**
3788  * mpi3mr_diagfault_reset_handler - Diag fault reset handler
3789  * @mrioc: Adapter instance reference
3790  * @reset_reason: Reset reason code
3791  *
3792  * This is an handler for issuing diag fault reset from the
3793  * applications through IOCTL path to stop the execution of the
3794  * controller
3795  *
3796  * Return: 0 on success, non-zero on failure.
3797  */
3798 int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
3799 	u32 reset_reason)
3800 {
3801 	int retval = 0;
3802 
3803 	ioc_info(mrioc, "Entry: reason code: %s\n",
3804 	    mpi3mr_reset_rc_name(reset_reason));
3805 	mrioc->reset_in_progress = 1;
3806 
3807 	mpi3mr_ioc_disable_intr(mrioc);
3808 
3809 	retval = mpi3mr_issue_reset(mrioc,
3810 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
3811 
3812 	if (retval) {
3813 		ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
3814 		    reset_reason);
3815 		mpi3mr_ioc_enable_intr(mrioc);
3816 	}
3817 	ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
3818 	mrioc->reset_in_progress = 0;
3819 	return retval;
3820 }
3821 
3822 /**
3823  * mpi3mr_soft_reset_handler - Reset the controller
3824  * @mrioc: Adapter instance reference
3825  * @reset_reason: Reset reason code
3826  * @snapdump: Flag to generate snapdump in firmware or not
3827  *
3828  * This is an handler for recovering controller by issuing soft
3829  * reset are diag fault reset.  This is a blocking function and
3830  * when one reset is executed if any other resets they will be
3831  * blocked. All IOCTLs/IO will be blocked during the reset. If
3832  * controller reset is successful then the controller will be
3833  * reinitalized, otherwise the controller will be marked as not
3834  * recoverable
3835  *
3836  * In snapdump bit is set, the controller is issued with diag
3837  * fault reset so that the firmware can create a snap dump and
3838  * post that the firmware will result in F000 fault and the
3839  * driver will issue soft reset to recover from that.
3840  *
3841  * Return: 0 on success, non-zero on failure.
3842  */
3843 int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
3844 	u32 reset_reason, u8 snapdump)
3845 {
3846 	int retval = 0, i;
3847 	unsigned long flags;
3848 	u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
3849 
3850 	if (mrioc->fault_dbg) {
3851 		if (snapdump)
3852 			mpi3mr_set_diagsave(mrioc);
3853 		mpi3mr_kill_ioc(mrioc, reset_reason);
3854 	}
3855 
3856 	/*
3857 	 * Block new resets until the currently executing one is finished and
3858 	 * return the status of the existing reset for all blocked resets
3859 	 */
3860 	if (!mutex_trylock(&mrioc->reset_mutex)) {
3861 		ioc_info(mrioc, "Another reset in progress\n");
3862 		return -1;
3863 	}
3864 	mrioc->reset_in_progress = 1;
3865 
3866 	if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
3867 	    (reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
3868 		for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3869 			mrioc->event_masks[i] = -1;
3870 
3871 		retval = mpi3mr_issue_event_notification(mrioc);
3872 
3873 		if (retval) {
3874 			ioc_err(mrioc,
3875 			    "Failed to turn off events prior to reset %d\n",
3876 			    retval);
3877 		}
3878 	}
3879 
3880 	mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
3881 
3882 	mpi3mr_ioc_disable_intr(mrioc);
3883 
3884 	if (snapdump) {
3885 		mpi3mr_set_diagsave(mrioc);
3886 		retval = mpi3mr_issue_reset(mrioc,
3887 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
3888 		if (!retval) {
3889 			do {
3890 				host_diagnostic =
3891 				    readl(&mrioc->sysif_regs->host_diagnostic);
3892 				if (!(host_diagnostic &
3893 				    MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
3894 					break;
3895 				msleep(100);
3896 			} while (--timeout);
3897 		}
3898 	}
3899 
3900 	retval = mpi3mr_issue_reset(mrioc,
3901 	    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, reset_reason);
3902 	if (retval) {
3903 		ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
3904 		goto out;
3905 	}
3906 
3907 	mpi3mr_flush_delayed_rmhs_list(mrioc);
3908 	mpi3mr_flush_drv_cmds(mrioc);
3909 	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
3910 	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
3911 	mpi3mr_cleanup_fwevt_list(mrioc);
3912 	mpi3mr_flush_host_io(mrioc);
3913 	mpi3mr_invalidate_devhandles(mrioc);
3914 	mpi3mr_memset_buffers(mrioc);
3915 	retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
3916 	if (retval) {
3917 		pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
3918 		    mrioc->name, reset_reason);
3919 		goto out;
3920 	}
3921 	ssleep(10);
3922 
3923 out:
3924 	if (!retval) {
3925 		mrioc->reset_in_progress = 0;
3926 		scsi_unblock_requests(mrioc->shost);
3927 		mpi3mr_rfresh_tgtdevs(mrioc);
3928 		mrioc->ts_update_counter = 0;
3929 		spin_lock_irqsave(&mrioc->watchdog_lock, flags);
3930 		if (mrioc->watchdog_work_q)
3931 			queue_delayed_work(mrioc->watchdog_work_q,
3932 			    &mrioc->watchdog_work,
3933 			    msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
3934 		spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
3935 	} else {
3936 		mpi3mr_issue_reset(mrioc,
3937 		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
3938 		mrioc->unrecoverable = 1;
3939 		mrioc->reset_in_progress = 0;
3940 		retval = -1;
3941 	}
3942 
3943 	mutex_unlock(&mrioc->reset_mutex);
3944 	ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
3945 	return retval;
3946 }
3947