xref: /openbmc/linux/drivers/scsi/mpi3mr/mpi3mr_os.c (revision d9adb81e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2023 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 
12 /* global driver scope variables */
13 LIST_HEAD(mrioc_list);
14 DEFINE_SPINLOCK(mrioc_list_lock);
15 static int mrioc_ids;
16 static int warn_non_secure_ctlr;
17 atomic64_t event_counter;
18 
19 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
20 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
21 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
22 MODULE_VERSION(MPI3MR_DRIVER_VERSION);
23 
24 /* Module parameters*/
25 int prot_mask = -1;
26 module_param(prot_mask, int, 0);
27 MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
28 
29 static int prot_guard_mask = 3;
30 module_param(prot_guard_mask, int, 0);
31 MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
32 static int logging_level;
33 module_param(logging_level, int, 0);
34 MODULE_PARM_DESC(logging_level,
35 	" bits for enabling additional logging info (default=0)");
36 static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
37 module_param(max_sgl_entries, int, 0444);
38 MODULE_PARM_DESC(max_sgl_entries,
39 	"Preferred max number of SG entries to be used for a single I/O\n"
40 	"The actual value will be determined by the driver\n"
41 	"(Minimum=256, Maximum=2048, default=256)");
42 
43 /* Forward declarations*/
44 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
45 	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
46 
47 #define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)
48 
49 #define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)
50 
51 /**
52  * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
53  * @mrioc: Adapter instance reference
54  * @scmd: SCSI command reference
55  *
56  * Calculate the host tag based on block tag for a given scmd.
57  *
58  * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
59  */
60 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
61 	struct scsi_cmnd *scmd)
62 {
63 	struct scmd_priv *priv = NULL;
64 	u32 unique_tag;
65 	u16 host_tag, hw_queue;
66 
67 	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
68 
69 	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
70 	if (hw_queue >= mrioc->num_op_reply_q)
71 		return MPI3MR_HOSTTAG_INVALID;
72 	host_tag = blk_mq_unique_tag_to_tag(unique_tag);
73 
74 	if (WARN_ON(host_tag >= mrioc->max_host_ios))
75 		return MPI3MR_HOSTTAG_INVALID;
76 
77 	priv = scsi_cmd_priv(scmd);
78 	/* host_tag 0 is invalid, hence incrementing by 1 */
79 	priv->host_tag = host_tag + 1;
80 	priv->scmd = scmd;
81 	priv->in_lld_scope = 1;
82 	priv->req_q_idx = hw_queue;
83 	priv->meta_chain_idx = -1;
84 	priv->chain_idx = -1;
85 	priv->meta_sg_valid = 0;
86 	return priv->host_tag;
87 }
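
/*
 * Illustration (not part of the driver): the host tag scheme relies on the
 * block layer packing the hardware queue index into the upper 16 bits of
 * the unique tag. A minimal round-trip sketch, assuming a dispatched scmd:
 *
 *	u32 utag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
 *	u16 hwq  = blk_mq_unique_tag_to_hwq(utag); // utag >> BLK_MQ_UNIQUE_TAG_BITS
 *	u16 tag  = blk_mq_unique_tag_to_tag(utag); // utag & BLK_MQ_UNIQUE_TAG_MASK
 *
 * The stored host_tag is tag + 1 so that tag 0 never collides with the
 * invalid host tag; mpi3mr_scmd_from_host_tag() below undoes both steps.
 */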
88 
89 /**
90  * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
91  * @mrioc: Adapter instance reference
92  * @host_tag: Host tag
93  * @qidx: Operational queue index
94  *
95  * Identify the block tag from the host tag and queue index and
96  * retrieve the associated SCSI command using scsi_host_find_tag().
97  *
98  * Return: SCSI command reference or NULL.
99  */
100 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
101 	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
102 {
103 	struct scsi_cmnd *scmd = NULL;
104 	struct scmd_priv *priv = NULL;
105 	u32 unique_tag = host_tag - 1;
106 
107 	if (WARN_ON(host_tag > mrioc->max_host_ios))
108 		goto out;
109 
110 	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
111 
112 	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
113 	if (scmd) {
114 		priv = scsi_cmd_priv(scmd);
115 		if (!priv->in_lld_scope)
116 			scmd = NULL;
117 	}
118 out:
119 	return scmd;
120 }
121 
122 /**
123  * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
124  * @mrioc: Adapter instance reference
125  * @scmd: SCSI command reference
126  *
127  * Invalidate the SCSI command private data to mark that the
128  * command is no longer in LLD scope.
129  *
130  * Return: Nothing.
131  */
132 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
133 	struct scsi_cmnd *scmd)
134 {
135 	struct scmd_priv *priv = NULL;
136 
137 	priv = scsi_cmd_priv(scmd);
138 
139 	if (WARN_ON(priv->in_lld_scope == 0))
140 		return;
141 	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
142 	priv->req_q_idx = 0xFFFF;
143 	priv->scmd = NULL;
144 	priv->in_lld_scope = 0;
145 	priv->meta_sg_valid = 0;
146 	if (priv->chain_idx >= 0) {
147 		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
148 		priv->chain_idx = -1;
149 	}
150 	if (priv->meta_chain_idx >= 0) {
151 		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
152 		priv->meta_chain_idx = -1;
153 	}
154 }
155 
156 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
157 	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
158 static void mpi3mr_fwevt_worker(struct work_struct *work);
159 
160 /**
161  * mpi3mr_fwevt_free - firmware event memory deallocator
162  * @r: kref pointer of the firmware event
163  *
164  * Free the firmware event memory once no references remain.
165  */
166 static void mpi3mr_fwevt_free(struct kref *r)
167 {
168 	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
169 }
170 
171 /**
172  * mpi3mr_fwevt_get - kref incrementer
173  * @fwevt: Firmware event reference
174  *
175  * Increment firmware event reference count.
176  */
177 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
178 {
179 	kref_get(&fwevt->ref_count);
180 }
181 
182 /**
183  * mpi3mr_fwevt_put - kref decrementer
184  * @fwevt: Firmware event reference
185  *
186  * Decrement firmware event reference count.
187  */
188 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
189 {
190 	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
191 }
192 
193 /**
194  * mpi3mr_alloc_fwevt - Allocate firmware event
195  * @len: length of firmware event data to allocate
196  *
197  * Allocate firmware event with required length and initialize
198  * the reference counter.
199  *
200  * Return: firmware event reference.
201  */
202 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
203 {
204 	struct mpi3mr_fwevt *fwevt;
205 
206 	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
207 	if (!fwevt)
208 		return NULL;
209 
210 	kref_init(&fwevt->ref_count);
211 	return fwevt;
212 }
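
/*
 * Illustration (not part of the driver): typical fwevt lifecycle, sketched
 * with a hypothetical payload buffer data of length sz. kref_init() above
 * gives the allocation its first reference; queueing adds two more (list +
 * work), which are dropped again by list removal and by worker completion
 * (or by mpi3mr_cancel_work() on the cancel path).
 *
 *	struct mpi3mr_fwevt *fwevt = mpi3mr_alloc_fwevt(sz);
 *
 *	if (fwevt) {
 *		fwevt->mrioc = mrioc;
 *		memcpy(fwevt->event_data, data, sz);
 *		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
 *	}
 */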
213 
214 /**
215  * mpi3mr_fwevt_add_to_list - Add firmware event to the list
216  * @mrioc: Adapter instance reference
217  * @fwevt: Firmware event reference
218  *
219  * Add the given firmware event to the firmware event list.
220  *
221  * Return: Nothing.
222  */
223 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
224 	struct mpi3mr_fwevt *fwevt)
225 {
226 	unsigned long flags;
227 
228 	if (!mrioc->fwevt_worker_thread)
229 		return;
230 
231 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
232 	/* get fwevt reference count while adding it to fwevt_list */
233 	mpi3mr_fwevt_get(fwevt);
234 	INIT_LIST_HEAD(&fwevt->list);
235 	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
236 	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
237 	/* get fwevt reference count while enqueueing it to worker queue */
238 	mpi3mr_fwevt_get(fwevt);
239 	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
240 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
241 }
242 
243 /**
244  * mpi3mr_fwevt_del_from_list - Delete firmware event from list
245  * @mrioc: Adapter instance reference
246  * @fwevt: Firmware event reference
247  *
248  * Delete the given firmware event from the firmware event list.
249  *
250  * Return: Nothing.
251  */
252 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
253 	struct mpi3mr_fwevt *fwevt)
254 {
255 	unsigned long flags;
256 
257 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
258 	if (!list_empty(&fwevt->list)) {
259 		list_del_init(&fwevt->list);
260 		/*
261 		 * Put fwevt reference count after
262 		 * removing it from fwevt_list
263 		 */
264 		mpi3mr_fwevt_put(fwevt);
265 	}
266 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
267 }
268 
269 /**
270  * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
271  * @mrioc: Adapter instance reference
272  *
273  * Dequeue a firmware event from the firmware event list.
274  *
275  * Return: firmware event.
276  */
277 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
278 	struct mpi3mr_ioc *mrioc)
279 {
280 	unsigned long flags;
281 	struct mpi3mr_fwevt *fwevt = NULL;
282 
283 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
284 	if (!list_empty(&mrioc->fwevt_list)) {
285 		fwevt = list_first_entry(&mrioc->fwevt_list,
286 		    struct mpi3mr_fwevt, list);
287 		list_del_init(&fwevt->list);
288 		/*
289 		 * Put fwevt reference count after
290 		 * removing it from fwevt_list
291 		 */
292 		mpi3mr_fwevt_put(fwevt);
293 	}
294 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
295 
296 	return fwevt;
297 }
298 
299 /**
300  * mpi3mr_cancel_work - cancel firmware event
301  * @fwevt: fwevt object which needs to be canceled
302  *
303  * Return: Nothing.
304  */
305 static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
306 {
307 	/*
308 	 * Wait on the fwevt to complete. If this returns 1, then
309 	 * the event was never executed.
310 	 *
311 	 * If it did execute, we wait for it to finish, and the put will
312 	 * happen from mpi3mr_process_fwevt()
313 	 */
314 	if (cancel_work_sync(&fwevt->work)) {
315 		/*
316 		 * Put fwevt reference count after
317 		 * dequeuing it from worker queue
318 		 */
319 		mpi3mr_fwevt_put(fwevt);
320 		/*
321 		 * Put fwevt reference count to neutralize
322 		 * kref_init increment
323 		 */
324 		mpi3mr_fwevt_put(fwevt);
325 	}
326 }
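
/*
 * Illustration (not part of the driver): reference accounting on the cancel
 * path. A queued fwevt carries three references (kref_init at allocation,
 * the list addition and the work queueing); the dequeue drops the list
 * reference, and the two puts above drop the work and allocation
 * references, so a successfully cancelled event is freed here.
 */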
327 
328 /**
329  * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
330  * @mrioc: Adapter instance reference
331  *
332  * Flush all pending firmware events from the firmware event
333  * list.
334  *
335  * Return: Nothing.
336  */
337 void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
338 {
339 	struct mpi3mr_fwevt *fwevt = NULL;
340 
341 	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
342 	    !mrioc->fwevt_worker_thread)
343 		return;
344 
345 	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
346 		mpi3mr_cancel_work(fwevt);
347 
348 	if (mrioc->current_event) {
349 		fwevt = mrioc->current_event;
350 		/*
351 		 * Don't call cancel_work_sync() for the fwevt
352 		 * work if the controller reset was invoked as
353 		 * part of processing that same fwevt work, or
354 		 * when the worker thread is waiting for device
355 		 * add/remove APIs to complete. Otherwise a
356 		 * deadlock will occur.
357 		 */
358 		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
359 			fwevt->discard = 1;
360 			return;
361 		}
362 
363 		mpi3mr_cancel_work(fwevt);
364 	}
365 }
366 
367 /**
368  * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
369  * @mrioc: Adapter instance reference
370  * @tg: Throttle group information pointer
371  *
372  * Accessor to queue a synthetically generated driver event to
373  * the event worker thread; the driver event will be used to
374  * reduce the QD of all VDs in the TG from the worker thread.
375  *
376  * Return: None.
377  */
378 static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
379 	struct mpi3mr_throttle_group_info *tg)
380 {
381 	struct mpi3mr_fwevt *fwevt;
382 	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
383 
384 	/*
385 	 * If the QD reduction event is already queued due to throttle and if
386 	 * the QD is not restored through device info change event
387  * then don't queue further reduction events
388 	 */
389 	if (tg->fw_qd != tg->modified_qd)
390 		return;
391 
392 	fwevt = mpi3mr_alloc_fwevt(sz);
393 	if (!fwevt) {
394 		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
395 		return;
396 	}
397 	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
398 	fwevt->mrioc = mrioc;
399 	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
400 	fwevt->send_ack = 0;
401 	fwevt->process_evt = 1;
402 	fwevt->evt_ctx = 0;
403 	fwevt->event_data_size = sz;
404 	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
405 
406 	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
407 	    tg->id);
408 	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
409 }
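
/*
 * Illustration (not part of the driver): the reduced depth is qd_reduction
 * tenths of the firmware QD, floored at 8. E.g. fw_qd = 128 and
 * qd_reduction = 3 give modified_qd = max(128 * 3 / 10, 8) = 38.
 */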
410 
411 /**
412  * mpi3mr_invalidate_devhandles - Invalidate device handles
413  * @mrioc: Adapter instance reference
414  *
415  * Invalidate the device handles in the target device structures.
416  * Called post reset, prior to reinitializing the controller.
417  *
418  * Return: Nothing.
419  */
420 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
421 {
422 	struct mpi3mr_tgt_dev *tgtdev;
423 	struct mpi3mr_stgt_priv_data *tgt_priv;
424 
425 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
426 		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
427 		if (tgtdev->starget && tgtdev->starget->hostdata) {
428 			tgt_priv = tgtdev->starget->hostdata;
429 			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
430 			tgt_priv->io_throttle_enabled = 0;
431 			tgt_priv->io_divert = 0;
432 			tgt_priv->throttle_group = NULL;
433 			if (tgtdev->host_exposed)
434 				atomic_set(&tgt_priv->block_io, 1);
435 		}
436 	}
437 }
438 
439 /**
440  * mpi3mr_print_scmd - print individual SCSI command
441  * @rq: Block request
442  * @data: Adapter instance reference
443  *
444  * Print the SCSI command details if it is in LLD scope.
445  *
446  * Return: true always.
447  */
448 static bool mpi3mr_print_scmd(struct request *rq, void *data)
449 {
450 	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
451 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
452 	struct scmd_priv *priv = NULL;
453 
454 	if (scmd) {
455 		priv = scsi_cmd_priv(scmd);
456 		if (!priv->in_lld_scope)
457 			goto out;
458 
459 		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
460 		    __func__, priv->host_tag, priv->req_q_idx + 1);
461 		scsi_print_command(scmd);
462 	}
463 
464 out:
465 	return true;
466 }
467 
468 /**
469  * mpi3mr_flush_scmd - Flush individual SCSI command
470  * @rq: Block request
471  * @data: Adapter instance reference
472  *
473  * Return the SCSI command to the upper layers if it is in LLD
474  * scope.
475  *
476  * Return: true always.
477  */
478 
479 static bool mpi3mr_flush_scmd(struct request *rq, void *data)
480 {
481 	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
482 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
483 	struct scmd_priv *priv = NULL;
484 
485 	if (scmd) {
486 		priv = scsi_cmd_priv(scmd);
487 		if (!priv->in_lld_scope)
488 			goto out;
489 
490 		if (priv->meta_sg_valid)
491 			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
492 			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
493 		mpi3mr_clear_scmd_priv(mrioc, scmd);
494 		scsi_dma_unmap(scmd);
495 		scmd->result = DID_RESET << 16;
496 		scsi_print_command(scmd);
497 		scsi_done(scmd);
498 		mrioc->flush_io_count++;
499 	}
500 
501 out:
502 	return true;
503 }
504 
505 /**
506  * mpi3mr_count_dev_pending - Count commands pending for a lun
507  * @rq: Block request
508  * @data: SCSI device reference
509  *
510  * This is an iterator function called for each SCSI command in
511  * a host and if the command is pending in the LLD for the
512  * specific device(lun) then device specific pending I/O counter
513  * is updated in the device structure.
514  *
515  * Return: true always.
516  */
517 
518 static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
519 {
520 	struct scsi_device *sdev = (struct scsi_device *)data;
521 	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
522 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
523 	struct scmd_priv *priv;
524 
525 	if (scmd) {
526 		priv = scsi_cmd_priv(scmd);
527 		if (!priv->in_lld_scope)
528 			goto out;
529 		if (scmd->device == sdev)
530 			sdev_priv_data->pend_count++;
531 	}
532 
533 out:
534 	return true;
535 }
536 
537 /**
538  * mpi3mr_count_tgt_pending - Count commands pending for target
539  * @rq: Block request
540  * @data: SCSI target reference
541  *
542  * This is an iterator function called for each SCSI command in
543  * a host and if the command is pending in the LLD for the
544  * specific target then target specific pending I/O counter is
545  * updated in the target structure.
546  *
547  * Return: true always.
548  */
549 
550 static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
551 {
552 	struct scsi_target *starget = (struct scsi_target *)data;
553 	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
554 	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
555 	struct scmd_priv *priv;
556 
557 	if (scmd) {
558 		priv = scsi_cmd_priv(scmd);
559 		if (!priv->in_lld_scope)
560 			goto out;
561 		if (scmd->device && (scsi_target(scmd->device) == starget))
562 			stgt_priv_data->pend_count++;
563 	}
564 
565 out:
566 	return true;
567 }
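
/*
 * Illustration (not part of the driver): both counters are intended to be
 * fed to blk_mq_tagset_busy_iter(), which walks every in-flight request on
 * the host. A minimal sketch, assuming an sdev with valid hostdata:
 *
 *	struct mpi3mr_sdev_priv_data *priv = sdev->hostdata;
 *
 *	priv->pend_count = 0;
 *	blk_mq_tagset_busy_iter(&sdev->host->tag_set,
 *	    mpi3mr_count_dev_pending, (void *)sdev);
 *	// priv->pend_count now holds the LLD-owned I/Os for this lun
 */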
568 
569 /**
570  * mpi3mr_flush_host_io -  Flush host I/Os
571  * @mrioc: Adapter instance reference
572  *
573  * Flush all of the pending I/Os by calling
574  * blk_mq_tagset_busy_iter() for each possible tag. This is
575  * executed post controller reset.
576  *
577  * Return: Nothing.
578  */
579 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
580 {
581 	struct Scsi_Host *shost = mrioc->shost;
582 
583 	mrioc->flush_io_count = 0;
584 	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
585 	blk_mq_tagset_busy_iter(&shost->tag_set,
586 	    mpi3mr_flush_scmd, (void *)mrioc);
587 	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
588 	    mrioc->flush_io_count);
589 }
590 
591 /**
592  * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
593  * @mrioc: Adapter instance reference
594  *
595  * This function waits for currently running IO poll threads to
596  * exit and then flushes all host I/Os and any internal pending
597  * cmds. This is executed after controller is marked as
598  * unrecoverable.
599  *
600  * Return: Nothing.
601  */
602 void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
603 {
604 	struct Scsi_Host *shost = mrioc->shost;
605 	int i;
606 
607 	if (!mrioc->unrecoverable)
608 		return;
609 
610 	if (mrioc->op_reply_qinfo) {
611 		for (i = 0; i < mrioc->num_queues; i++) {
612 			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
613 				udelay(500);
614 			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
615 		}
616 	}
617 	mrioc->flush_io_count = 0;
618 	blk_mq_tagset_busy_iter(&shost->tag_set,
619 	    mpi3mr_flush_scmd, (void *)mrioc);
620 	mpi3mr_flush_delayed_cmd_lists(mrioc);
621 	mpi3mr_flush_drv_cmds(mrioc);
622 }
623 
624 /**
625  * mpi3mr_alloc_tgtdev - target device allocator
626  *
627  * Allocate target device instance and initialize the reference
628  * count
629  *
630  * Return: target device instance.
631  */
632 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
633 {
634 	struct mpi3mr_tgt_dev *tgtdev;
635 
636 	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
637 	if (!tgtdev)
638 		return NULL;
639 	kref_init(&tgtdev->ref_count);
640 	return tgtdev;
641 }
642 
643 /**
644  * mpi3mr_tgtdev_add_to_list - Add tgtdev to the list
645  * @mrioc: Adapter instance reference
646  * @tgtdev: Target device
647  *
648  * Add the target device to the target device list
649  *
650  * Return: Nothing.
651  */
652 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
653 	struct mpi3mr_tgt_dev *tgtdev)
654 {
655 	unsigned long flags;
656 
657 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
658 	mpi3mr_tgtdev_get(tgtdev);
659 	INIT_LIST_HEAD(&tgtdev->list);
660 	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
661 	tgtdev->state = MPI3MR_DEV_CREATED;
662 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
663 }
664 
665 /**
666  * mpi3mr_tgtdev_del_from_list - Delete tgtdev from the list
667  * @mrioc: Adapter instance reference
668  * @tgtdev: Target device
669  * @must_delete: Must delete the target device from the list irrespective
670  * of the device state.
671  *
672  * Remove the target device from the target device list
673  *
674  * Return: Nothing.
675  */
676 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
677 	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
678 {
679 	unsigned long flags;
680 
681 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
682 	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
683 		if (!list_empty(&tgtdev->list)) {
684 			list_del_init(&tgtdev->list);
685 			tgtdev->state = MPI3MR_DEV_DELETED;
686 			mpi3mr_tgtdev_put(tgtdev);
687 		}
688 	}
689 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
690 }
691 
692 /**
693  * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
694  * @mrioc: Adapter instance reference
695  * @handle: Device handle
696  *
697  * Accessor to retrieve target device from the device handle.
698  * Non-lock version.
699  *
700  * Return: Target device reference.
701  */
702 static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_by_handle(
703 	struct mpi3mr_ioc *mrioc, u16 handle)
704 {
705 	struct mpi3mr_tgt_dev *tgtdev;
706 
707 	assert_spin_locked(&mrioc->tgtdev_lock);
708 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
709 		if (tgtdev->dev_handle == handle)
710 			goto found_tgtdev;
711 	return NULL;
712 
713 found_tgtdev:
714 	mpi3mr_tgtdev_get(tgtdev);
715 	return tgtdev;
716 }
717 
718 /**
719  * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
720  * @mrioc: Adapter instance reference
721  * @handle: Device handle
722  *
723  * Accessor to retrieve target device from the device handle.
724  * Lock version
725  *
726  * Return: Target device reference.
727  */
728 struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
729 	struct mpi3mr_ioc *mrioc, u16 handle)
730 {
731 	struct mpi3mr_tgt_dev *tgtdev;
732 	unsigned long flags;
733 
734 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
735 	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
736 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
737 	return tgtdev;
738 }
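
/*
 * Illustration (not part of the driver): the __-prefixed variant assumes
 * tgtdev_lock is already held, which lets a caller batch lookups under a
 * single lock acquisition. A sketch with hypothetical handles h1 and h2:
 *
 *	unsigned long flags;
 *	struct mpi3mr_tgt_dev *a, *b;
 *
 *	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
 *	a = __mpi3mr_get_tgtdev_by_handle(mrioc, h1);
 *	b = __mpi3mr_get_tgtdev_by_handle(mrioc, h2);
 *	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
 *	// non-NULL results hold a reference; drop with mpi3mr_tgtdev_put()
 */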
739 
740 /**
741  * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
742  * @mrioc: Adapter instance reference
743  * @persist_id: Persistent ID
744  *
745  * Accessor to retrieve target device from the Persistent ID.
746  * Non-lock version.
747  *
748  * Return: Target device reference.
749  */
750 static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_by_perst_id(
751 	struct mpi3mr_ioc *mrioc, u16 persist_id)
752 {
753 	struct mpi3mr_tgt_dev *tgtdev;
754 
755 	assert_spin_locked(&mrioc->tgtdev_lock);
756 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
757 		if (tgtdev->perst_id == persist_id)
758 			goto found_tgtdev;
759 	return NULL;
760 
761 found_tgtdev:
762 	mpi3mr_tgtdev_get(tgtdev);
763 	return tgtdev;
764 }
765 
766 /**
767  * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
768  * @mrioc: Adapter instance reference
769  * @persist_id: Persistent ID
770  *
771  * Accessor to retrieve target device from the Persistent ID.
772  * Lock version
773  *
774  * Return: Target device reference.
775  */
776 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
777 	struct mpi3mr_ioc *mrioc, u16 persist_id)
778 {
779 	struct mpi3mr_tgt_dev *tgtdev;
780 	unsigned long flags;
781 
782 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
783 	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
784 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
785 	return tgtdev;
786 }
787 
788 /**
789  * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
790  * @mrioc: Adapter instance reference
791  * @tgt_priv: Target private data
792  *
793  * Accessor to return target device from the target private
794  * data. Non-lock version.
795  *
796  * Return: Target device reference.
797  */
798 static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_from_tgtpriv(
799 	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
800 {
801 	struct mpi3mr_tgt_dev *tgtdev;
802 
803 	assert_spin_locked(&mrioc->tgtdev_lock);
804 	tgtdev = tgt_priv->tgt_dev;
805 	if (tgtdev)
806 		mpi3mr_tgtdev_get(tgtdev);
807 	return tgtdev;
808 }
809 
810 /**
811  * mpi3mr_set_io_divert_for_all_vd_in_tg - Set io_divert for TG VDs
812  * @mrioc: Adapter instance reference
813  * @tg: Throttle group information pointer
814  * @divert_value: 1 or 0
815  *
816  * Accessor to set io_divert flag for each device associated
817  * with the given throttle group with the given value.
818  *
819  * Return: None.
820  */
821 static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
822 	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
823 {
824 	unsigned long flags;
825 	struct mpi3mr_tgt_dev *tgtdev;
826 	struct mpi3mr_stgt_priv_data *tgt_priv;
827 
828 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
829 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
830 		if (tgtdev->starget && tgtdev->starget->hostdata) {
831 			tgt_priv = tgtdev->starget->hostdata;
832 			if (tgt_priv->throttle_group == tg)
833 				tgt_priv->io_divert = divert_value;
834 		}
835 	}
836 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
837 }
838 
839 /**
840  * mpi3mr_print_device_event_notice - print notice related to post processing of
841  *					device event after controller reset.
842  *
843  * @mrioc: Adapter instance reference
844  * @device_add: true for device add event and false for device removal event
845  *
846  * Return: None.
847  */
848 void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
849 	bool device_add)
850 {
851 	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
852 	    (device_add ? "addition" : "removal"));
853 	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
854 	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
855 }
856 
857 /**
858  * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
859  * @mrioc: Adapter instance reference
860  * @tgtdev: Target device structure
861  *
862  * Checks whether the device is exposed to upper layers and if it
863  * is, then removes the device from upper layers by calling
864  * scsi_remove_target().
865  *
866  * Return: Nothing.
867  */
868 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
869 	struct mpi3mr_tgt_dev *tgtdev)
870 {
871 	struct mpi3mr_stgt_priv_data *tgt_priv;
872 
873 	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
874 	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
875 	if (tgtdev->starget && tgtdev->starget->hostdata) {
876 		tgt_priv = tgtdev->starget->hostdata;
877 		atomic_set(&tgt_priv->block_io, 0);
878 		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
879 	}
880 
881 	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
882 	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
883 		if (tgtdev->starget) {
884 			if (mrioc->current_event)
885 				mrioc->current_event->pending_at_sml = 1;
886 			scsi_remove_target(&tgtdev->starget->dev);
887 			tgtdev->host_exposed = 0;
888 			if (mrioc->current_event) {
889 				mrioc->current_event->pending_at_sml = 0;
890 				if (mrioc->current_event->discard) {
891 					mpi3mr_print_device_event_notice(mrioc,
892 					    false);
893 					return;
894 				}
895 			}
896 		}
897 	} else
898 		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
899 
900 	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
901 	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
902 }
903 
904 /**
905  * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
906  * @mrioc: Adapter instance reference
907  * @perst_id: Persistent ID of the device
908  *
909  * Checks whether the device can be exposed to upper layers and
910  * if it is not already exposed, then exposes it to the upper
911  * layers by calling scsi_scan_target().
912  *
913  * Return: 0 on success, non zero on failure.
914  */
915 static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
916 	u16 perst_id)
917 {
918 	int retval = 0;
919 	struct mpi3mr_tgt_dev *tgtdev;
920 
921 	if (mrioc->reset_in_progress)
922 		return -1;
923 
924 	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
925 	if (!tgtdev) {
926 		retval = -1;
927 		goto out;
928 	}
929 	if (tgtdev->is_hidden || tgtdev->host_exposed) {
930 		retval = -1;
931 		goto out;
932 	}
933 	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
934 	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
935 		tgtdev->host_exposed = 1;
936 		if (mrioc->current_event)
937 			mrioc->current_event->pending_at_sml = 1;
938 		scsi_scan_target(&mrioc->shost->shost_gendev,
939 		    mrioc->scsi_device_channel, tgtdev->perst_id,
940 		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
941 		if (!tgtdev->starget)
942 			tgtdev->host_exposed = 0;
943 		if (mrioc->current_event) {
944 			mrioc->current_event->pending_at_sml = 0;
945 			if (mrioc->current_event->discard) {
946 				mpi3mr_print_device_event_notice(mrioc, true);
947 				goto out;
948 			}
949 		}
950 	} else
951 		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
952 out:
953 	if (tgtdev)
954 		mpi3mr_tgtdev_put(tgtdev);
955 
956 	return retval;
957 }
958 
959 /**
960  * mpi3mr_change_queue_depth - Change QD callback handler
961  * @sdev: SCSI device reference
962  * @q_depth: Queue depth
963  *
964  * Validate and limit QD and call scsi_change_queue_depth.
965  *
966  * Return: return value of scsi_change_queue_depth
967  */
968 static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
969 	int q_depth)
970 {
971 	struct scsi_target *starget = scsi_target(sdev);
972 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
973 	int retval = 0;
974 
975 	if (!sdev->tagged_supported)
976 		q_depth = 1;
977 	if (q_depth > shost->can_queue)
978 		q_depth = shost->can_queue;
979 	else if (!q_depth)
980 		q_depth = MPI3MR_DEFAULT_SDEV_QD;
981 	retval = scsi_change_queue_depth(sdev, q_depth);
982 	sdev->max_queue_depth = sdev->queue_depth;
983 
984 	return retval;
985 }
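
/*
 * Illustration (not part of the driver): the clamp above, traced with
 * example values and shost->can_queue assumed to be 1024:
 *
 *	untagged device    -> 1
 *	requested 0        -> MPI3MR_DEFAULT_SDEV_QD
 *	requested 4096     -> 1024 (capped at shost->can_queue)
 *	requested 64       -> 64 (passed through unchanged)
 */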
986 
987 /**
988  * mpi3mr_update_sdev - Update SCSI device information
989  * @sdev: SCSI device reference
990  * @data: target device reference
991  *
992  * This is an iterator function called for each SCSI device in a
993  * target to update the target specific information into each
994  * SCSI device.
995  *
996  * Return: Nothing.
997  */
998 static void
999 mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
1000 {
1001 	struct mpi3mr_tgt_dev *tgtdev;
1002 
1003 	tgtdev = (struct mpi3mr_tgt_dev *)data;
1004 	if (!tgtdev)
1005 		return;
1006 
1007 	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
1008 	switch (tgtdev->dev_type) {
1009 	case MPI3_DEVICE_DEVFORM_PCIE:
1010 		/* The block layer hw sector size = 512 */
1011 		if ((tgtdev->dev_spec.pcie_inf.dev_info &
1012 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
1013 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
1014 			blk_queue_max_hw_sectors(sdev->request_queue,
1015 			    tgtdev->dev_spec.pcie_inf.mdts / 512);
1016 			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
1017 				blk_queue_virt_boundary(sdev->request_queue,
1018 				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
1019 			else
1020 				blk_queue_virt_boundary(sdev->request_queue,
1021 				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
1022 		}
1023 		break;
1024 	default:
1025 		break;
1026 	}
1027 }
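
/*
 * Illustration (not part of the driver): for an NVMe target the queue
 * limits above derive from device page0. E.g. mdts = 1 MiB and pgsz = 12
 * (both assumed values) give:
 *
 *	max_hw_sectors = 1048576 / 512 = 2048 (512-byte sectors)
 *	virt_boundary  = (1 << 12) - 1 = 0xfff (4 KiB page-aligned segments)
 */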
1028 
1029 /**
1030  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
1031  * @mrioc: Adapter instance reference
1032  *
1033  * This is executed post controller reset to identify devices
1034  * that went missing during the reset and remove them from the upper
1035  * layers, or to expose any newly detected device to the upper layers.
1036  *
1037  * Return: Nothing.
1038  */
1039 
1040 void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
1041 {
1042 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
1043 
1044 	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
1045 	    list) {
1046 		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
1047 			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
1048 			    tgtdev->perst_id);
1049 			if (tgtdev->host_exposed)
1050 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1051 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
1052 			mpi3mr_tgtdev_put(tgtdev);
1053 		}
1054 	}
1055 
1056 	tgtdev = NULL;
1057 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1058 		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
1059 		    !tgtdev->is_hidden && !tgtdev->host_exposed)
1060 			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
1061 	}
1062 }
1063 
1064 /**
1065  * mpi3mr_update_tgtdev - Update cached target device information
1066  * @mrioc: Adapter instance reference
1067  * @tgtdev: Target device internal structure
1068  * @dev_pg0: New device page0
1069  * @is_added: Flag to indicate the device is just added
1070  *
1071  * Update the information from the device page0 into the driver
1072  * cached target device structure.
1073  *
1074  * Return: Nothing.
1075  */
1076 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
1077 	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
1078 	bool is_added)
1079 {
1080 	u16 flags = 0;
1081 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
1082 	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
1083 	u8 prot_mask = 0;
1084 
1085 	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
1086 	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
1087 	tgtdev->dev_type = dev_pg0->device_form;
1088 	tgtdev->io_unit_port = dev_pg0->io_unit_port;
1089 	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
1090 	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
1091 	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
1092 	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
1093 	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
1094 	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);
1095 
1096 	if (tgtdev->encl_handle)
1097 		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
1098 		    tgtdev->encl_handle);
1099 	if (enclosure_dev)
1100 		tgtdev->enclosure_logical_id = le64_to_cpu(
1101 		    enclosure_dev->pg0.enclosure_logical_id);
1102 
1103 	flags = tgtdev->devpg0_flag;
1104 
1105 	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
1106 
1107 	if (is_added == true)
1108 		tgtdev->io_throttle_enabled =
1109 		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
1110 
1111 
1112 	if (tgtdev->starget && tgtdev->starget->hostdata) {
1113 		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
1114 		    tgtdev->starget->hostdata;
1115 		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
1116 		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
1117 		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
1118 		scsi_tgt_priv_data->io_throttle_enabled =
1119 		    tgtdev->io_throttle_enabled;
1120 		if (is_added == true)
1121 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
1122 	}
1123 
1124 	switch (dev_pg0->access_status) {
1125 	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
1126 	case MPI3_DEVICE0_ASTATUS_PREPARE:
1127 	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
1128 	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
1129 		break;
1130 	default:
1131 		tgtdev->is_hidden = 1;
1132 		break;
1133 	}
1134 
1135 	switch (tgtdev->dev_type) {
1136 	case MPI3_DEVICE_DEVFORM_SAS_SATA:
1137 	{
1138 		struct mpi3_device0_sas_sata_format *sasinf =
1139 		    &dev_pg0->device_specific.sas_sata_format;
1140 		u16 dev_info = le16_to_cpu(sasinf->device_info);
1141 
1142 		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
1143 		tgtdev->dev_spec.sas_sata_inf.sas_address =
1144 		    le64_to_cpu(sasinf->sas_address);
1145 		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
1146 		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
1147 		    sasinf->attached_phy_identifier;
1148 		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
1149 		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
1150 			tgtdev->is_hidden = 1;
1151 		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
1152 		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
1153 			tgtdev->is_hidden = 1;
1154 
1155 		if (((tgtdev->devpg0_flag &
1156 		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
1157 		    && (tgtdev->devpg0_flag &
1158 		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
1159 		    (tgtdev->parent_handle == 0xFFFF))
1160 			tgtdev->non_stl = 1;
1161 		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
1162 			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
1163 			    dev_pg0->io_unit_port;
1164 		break;
1165 	}
1166 	case MPI3_DEVICE_DEVFORM_PCIE:
1167 	{
1168 		struct mpi3_device0_pcie_format *pcieinf =
1169 		    &dev_pg0->device_specific.pcie_format;
1170 		u16 dev_info = le16_to_cpu(pcieinf->device_info);
1171 
1172 		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
1173 		tgtdev->dev_spec.pcie_inf.capb =
1174 		    le32_to_cpu(pcieinf->capabilities);
1175 		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
1176 		/* 2^12 = 4096 */
1177 		tgtdev->dev_spec.pcie_inf.pgsz = 12;
1178 		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
1179 			tgtdev->dev_spec.pcie_inf.mdts =
1180 			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
1181 			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
1182 			tgtdev->dev_spec.pcie_inf.reset_to =
1183 			    max_t(u8, pcieinf->controller_reset_to,
1184 			     MPI3MR_INTADMCMD_TIMEOUT);
1185 			tgtdev->dev_spec.pcie_inf.abort_to =
1186 			    max_t(u8, pcieinf->nvme_abort_to,
1187 			    MPI3MR_INTADMCMD_TIMEOUT);
1188 		}
1189 		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
1190 			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
1191 		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
1192 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
1193 		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
1194 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
1195 			tgtdev->is_hidden = 1;
1196 		tgtdev->non_stl = 1;
1197 		if (!mrioc->shost)
1198 			break;
1199 		prot_mask = scsi_host_get_prot(mrioc->shost);
1200 		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
1201 			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
1202 			ioc_info(mrioc,
1203 			    "%s : Disabling DIX0 prot capability\n", __func__);
1204 			ioc_info(mrioc,
1205 			    "because HBA does not support DIX0 operation on NVME drives\n");
1206 		}
1207 		break;
1208 	}
1209 	case MPI3_DEVICE_DEVFORM_VD:
1210 	{
1211 		struct mpi3_device0_vd_format *vdinf =
1212 		    &dev_pg0->device_specific.vd_format;
1213 		struct mpi3mr_throttle_group_info *tg = NULL;
1214 		u16 vdinf_io_throttle_group =
1215 		    le16_to_cpu(vdinf->io_throttle_group);
1216 
1217 		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
1218 		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
1219 			tgtdev->is_hidden = 1;
1220 		tgtdev->non_stl = 1;
1221 		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
1222 		tgtdev->dev_spec.vd_inf.tg_high =
1223 		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
1224 		tgtdev->dev_spec.vd_inf.tg_low =
1225 		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
1226 		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
1227 			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
1228 			tg->id = vdinf_io_throttle_group;
1229 			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
1230 			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
1231 			tg->qd_reduction =
1232 			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
1233 			if (is_added == true)
1234 				tg->fw_qd = tgtdev->q_depth;
1235 			tg->modified_qd = tgtdev->q_depth;
1236 		}
1237 		tgtdev->dev_spec.vd_inf.tg = tg;
1238 		if (scsi_tgt_priv_data)
1239 			scsi_tgt_priv_data->throttle_group = tg;
1240 		break;
1241 	}
1242 	default:
1243 		break;
1244 	}
1245 }
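
/*
 * Illustration (not part of the driver): assuming the VD throttle limits in
 * device page0 are reported in MiB, the * 2048 scaling above stores them as
 * 512-byte sector counts (1 MiB = 2048 sectors). E.g. a reported high limit
 * of 8 yields tg_high = 16384 sectors, i.e. 8 MiB of outstanding data
 * before I/O divert kicks in.
 */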
1246 
1247 /**
1248  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
1249  * @mrioc: Adapter instance reference
1250  * @fwevt: Firmware event information.
1251  *
1252  * Process Device status Change event and based on device's new
1253  * information, either expose the device to the upper layers, or
1254  * remove the device from upper layers.
1255  *
1256  * Return: Nothing.
1257  */
1258 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
1259 	struct mpi3mr_fwevt *fwevt)
1260 {
1261 	u16 dev_handle = 0;
1262 	u8 uhide = 0, delete = 0, cleanup = 0;
1263 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1264 	struct mpi3_event_data_device_status_change *evtdata =
1265 	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;
1266 
1267 	dev_handle = le16_to_cpu(evtdata->dev_handle);
1268 	ioc_info(mrioc,
1269 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
1270 	    __func__, dev_handle, evtdata->reason_code);
1271 	switch (evtdata->reason_code) {
1272 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1273 		delete = 1;
1274 		break;
1275 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
1276 		uhide = 1;
1277 		break;
1278 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1279 		delete = 1;
1280 		cleanup = 1;
1281 		break;
1282 	default:
1283 		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
1284 		    evtdata->reason_code);
1285 		break;
1286 	}
1287 
1288 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1289 	if (!tgtdev)
1290 		goto out;
1291 	if (uhide) {
1292 		tgtdev->is_hidden = 0;
1293 		if (!tgtdev->host_exposed)
1294 			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
1295 	}
1296 
1297 	if (delete)
1298 		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1299 
1300 	if (cleanup) {
1301 		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1302 		mpi3mr_tgtdev_put(tgtdev);
1303 	}
1304 
1305 out:
1306 	if (tgtdev)
1307 		mpi3mr_tgtdev_put(tgtdev);
1308 }
1309 
1310 /**
1311  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1312  * @mrioc: Adapter instance reference
1313  * @dev_pg0: New device page0
1314  *
1315  * Process Device Info Change event and based on device's new
1316  * information, either expose the device to the upper layers, or
1317  * remove the device from upper layers or update the details of
1318  * the device.
1319  *
1320  * Return: Nothing.
1321  */
1322 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
1323 	struct mpi3_device_page0 *dev_pg0)
1324 {
1325 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1326 	u16 dev_handle = 0, perst_id = 0;
1327 
1328 	perst_id = le16_to_cpu(dev_pg0->persistent_id);
1329 	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
1330 	ioc_info(mrioc,
1331 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1332 	    __func__, dev_handle, perst_id);
1333 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1334 	if (!tgtdev)
1335 		goto out;
1336 	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
1337 	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
1338 		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1339 	if (tgtdev->is_hidden && tgtdev->host_exposed)
1340 		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1341 	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
1342 		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
1343 		    mpi3mr_update_sdev);
1344 out:
1345 	if (tgtdev)
1346 		mpi3mr_tgtdev_put(tgtdev);
1347 }
1348 
1349 /**
1350  * mpi3mr_free_enclosure_list - release enclosures
1351  * @mrioc: Adapter instance reference
1352  *
1353  * Free memory allocated during enclosure add.
1354  *
1355  * Return: Nothing.
1356  */
1357 void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
1358 {
1359 	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
1360 
1361 	list_for_each_entry_safe(enclosure_dev,
1362 	    enclosure_dev_next, &mrioc->enclosure_list, list) {
1363 		list_del(&enclosure_dev->list);
1364 		kfree(enclosure_dev);
1365 	}
1366 }
1367 
1368 /**
1369  * mpi3mr_enclosure_find_by_handle - enclosure search by handle
1370  * @mrioc: Adapter instance reference
1371  * @handle: Firmware device handle of the enclosure
1372  *
1373  * This searches for enclosure device based on handle, then returns the
1374  * enclosure object.
1375  *
1376  * Return: Enclosure object reference or NULL
1377  */
1378 struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
1379 	struct mpi3mr_ioc *mrioc, u16 handle)
1380 {
1381 	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
1382 
1383 	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
1384 		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
1385 			continue;
1386 		r = enclosure_dev;
1387 		goto out;
1388 	}
1389 out:
1390 	return r;
1391 }
1392 
1393 /**
1394  * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
1395  * @mrioc: Adapter instance reference
1396  * @encl_pg0: Enclosure page 0.
1397  * @is_added: Added event or not
1398  *
1399  * Return: Nothing.
1400  */
1401 static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
1402 	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
1403 {
1404 	char *reason_str = NULL;
1405 
1406 	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
1407 		return;
1408 
1409 	if (is_added)
1410 		reason_str = "enclosure added";
1411 	else
1412 		reason_str = "enclosure dev status changed";
1413 
1414 	ioc_info(mrioc,
1415 	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
1416 	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
1417 	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
1418 	ioc_info(mrioc,
1419 	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
1420 	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
1421 	    le16_to_cpu(encl_pg0->flags),
1422 	    ((le16_to_cpu(encl_pg0->flags) &
1423 	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
1424 }
1425 
1426 /**
1427  * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
1428  * @mrioc: Adapter instance reference
1429  * @fwevt: Firmware event reference
1430  *
1431  * Prints information about the Enclosure device status or
1432  * Enclosure add events if logging is enabled and add or remove
1433  * the enclosure from the controller's internal list of
1434  * enclosures.
1435  *
1436  * Return: Nothing.
1437  */
1438 static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
1439 	struct mpi3mr_fwevt *fwevt)
1440 {
1441 	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
1442 	struct mpi3_enclosure_page0 *encl_pg0;
1443 	u16 encl_handle;
1444 	u8 added, present;
1445 
1446 	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
1447 	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
1448 	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
1449 
1450 
1451 	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
1452 	present = ((le16_to_cpu(encl_pg0->flags) &
1453 	      MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
1454 
1455 	if (encl_handle)
1456 		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
1457 		    encl_handle);
1458 	if (!enclosure_dev && present) {
1459 		enclosure_dev =
1460 			kzalloc(sizeof(struct mpi3mr_enclosure_node),
1461 			    GFP_KERNEL);
1462 		if (!enclosure_dev)
1463 			return;
1464 		list_add_tail(&enclosure_dev->list,
1465 		    &mrioc->enclosure_list);
1466 	}
1467 	if (enclosure_dev) {
1468 		if (!present) {
1469 			list_del(&enclosure_dev->list);
1470 			kfree(enclosure_dev);
1471 		} else
1472 			memcpy(&enclosure_dev->pg0, encl_pg0,
1473 			    sizeof(enclosure_dev->pg0));
1474 
1475 	}
1476 }
1477 
1478 /**
1479  * mpi3mr_sastopochg_evt_debug - SASTopoChange details
1480  * @mrioc: Adapter instance reference
1481  * @event_data: SAS topology change list event data
1482  *
1483  * Prints information about the SAS topology change event.
1484  *
1485  * Return: Nothing.
1486  */
1487 static void
1488 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1489 	struct mpi3_event_data_sas_topology_change_list *event_data)
1490 {
1491 	int i;
1492 	u16 handle;
1493 	u8 reason_code, phy_number;
1494 	char *status_str = NULL;
1495 	u8 link_rate, prev_link_rate;
1496 
1497 	switch (event_data->exp_status) {
1498 	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1499 		status_str = "remove";
1500 		break;
1501 	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1502 		status_str = "responding";
1503 		break;
1504 	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1505 		status_str = "remove delay";
1506 		break;
1507 	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1508 		status_str = "direct attached";
1509 		break;
1510 	default:
1511 		status_str = "unknown status";
1512 		break;
1513 	}
1514 	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
1515 	    __func__, status_str);
1516 	ioc_info(mrioc,
1517 	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
1518 	    __func__, le16_to_cpu(event_data->expander_dev_handle),
1519 	    event_data->io_unit_port,
1520 	    le16_to_cpu(event_data->enclosure_handle),
1521 	    event_data->start_phy_num, event_data->num_entries);
1522 	for (i = 0; i < event_data->num_entries; i++) {
1523 		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1524 		if (!handle)
1525 			continue;
1526 		phy_number = event_data->start_phy_num + i;
1527 		reason_code = event_data->phy_entry[i].status &
1528 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1529 		switch (reason_code) {
1530 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1531 			status_str = "target remove";
1532 			break;
1533 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1534 			status_str = "delay target remove";
1535 			break;
1536 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1537 			status_str = "link status change";
1538 			break;
1539 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1540 			status_str = "link status no change";
1541 			break;
1542 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1543 			status_str = "target responding";
1544 			break;
1545 		default:
1546 			status_str = "unknown";
1547 			break;
1548 		}
1549 		link_rate = event_data->phy_entry[i].link_rate >> 4;
1550 		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1551 		ioc_info(mrioc,
1552 		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1553 		    __func__, phy_number, handle, status_str, link_rate,
1554 		    prev_link_rate);
1555 	}
1556 }
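
/*
 * Illustration (not part of the driver): each phy entry packs both rates
 * into one byte, the new rate in the upper nibble and the previous rate in
 * the lower nibble, e.g.:
 *
 *	link_rate = 0xb9  ->  new = 0xb, old = 0x9
 *
 * (the nibble values map to negotiated link rates per the MPI3 spec)
 */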
1557 
1558 /**
1559  * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
1560  * @mrioc: Adapter instance reference
1561  * @fwevt: Firmware event reference
1562  *
1563  * Prints information about the SAS topology change event and
1564  * for "not responding" event code, removes the device from the
1565  * upper layers.
1566  *
1567  * Return: Nothing.
1568  */
1569 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1570 	struct mpi3mr_fwevt *fwevt)
1571 {
1572 	struct mpi3_event_data_sas_topology_change_list *event_data =
1573 	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
1574 	int i;
1575 	u16 handle;
1576 	u8 reason_code;
1577 	u64 exp_sas_address = 0, parent_sas_address = 0;
1578 	struct mpi3mr_hba_port *hba_port = NULL;
1579 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1580 	struct mpi3mr_sas_node *sas_expander = NULL;
1581 	unsigned long flags;
1582 	u8 link_rate, prev_link_rate, parent_phy_number;
1583 
1584 	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
1585 	if (mrioc->sas_transport_enabled) {
1586 		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
1587 		    event_data->io_unit_port);
1588 		if (le16_to_cpu(event_data->expander_dev_handle)) {
1589 			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
1590 			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
1591 			    le16_to_cpu(event_data->expander_dev_handle));
1592 			if (sas_expander) {
1593 				exp_sas_address = sas_expander->sas_address;
1594 				hba_port = sas_expander->hba_port;
1595 			}
1596 			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
1597 			parent_sas_address = exp_sas_address;
1598 		} else
1599 			parent_sas_address = mrioc->sas_hba.sas_address;
1600 	}
1601 
1602 	for (i = 0; i < event_data->num_entries; i++) {
1603 		if (fwevt->discard)
1604 			return;
1605 		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1606 		if (!handle)
1607 			continue;
1608 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1609 		if (!tgtdev)
1610 			continue;
1611 
1612 		reason_code = event_data->phy_entry[i].status &
1613 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1614 
1615 		switch (reason_code) {
1616 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1617 			if (tgtdev->host_exposed)
1618 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1619 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1620 			mpi3mr_tgtdev_put(tgtdev);
1621 			break;
1622 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1623 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1624 		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1625 		{
1626 			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
1627 			    || tgtdev->is_hidden)
1628 				break;
1629 			link_rate = event_data->phy_entry[i].link_rate >> 4;
1630 			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1631 			if (link_rate == prev_link_rate)
1632 				break;
1633 			if (!parent_sas_address)
1634 				break;
1635 			parent_phy_number = event_data->start_phy_num + i;
1636 			mpi3mr_update_links(mrioc, parent_sas_address, handle,
1637 			    parent_phy_number, link_rate, hba_port);
1638 			break;
1639 		}
1640 		default:
1641 			break;
1642 		}
1643 		if (tgtdev)
1644 			mpi3mr_tgtdev_put(tgtdev);
1645 	}
1646 
1647 	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
1648 	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
1649 		if (sas_expander)
1650 			mpi3mr_expander_remove(mrioc, exp_sas_address,
1651 			    hba_port);
1652 	}
1653 }
1654 
1655 /**
1656  * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
1657  * @mrioc: Adapter instance reference
1658  * @event_data: PCIe topology change list event data
1659  *
1660  * Prints information about the PCIe topology change event.
1661  *
1662  * Return: Nothing.
1663  */
1664 static void
1665 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1666 	struct mpi3_event_data_pcie_topology_change_list *event_data)
1667 {
1668 	int i;
1669 	u16 handle;
1670 	u16 reason_code;
1671 	u8 port_number;
1672 	char *status_str = NULL;
1673 	u8 link_rate, prev_link_rate;
1674 
1675 	switch (event_data->switch_status) {
1676 	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1677 		status_str = "remove";
1678 		break;
1679 	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1680 		status_str = "responding";
1681 		break;
1682 	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1683 		status_str = "remove delay";
1684 		break;
1685 	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1686 		status_str = "direct attached";
1687 		break;
1688 	default:
1689 		status_str = "unknown status";
1690 		break;
1691 	}
1692 	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
1693 	    __func__, status_str);
1694 	ioc_info(mrioc,
1695 	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
1696 	    __func__, le16_to_cpu(event_data->switch_dev_handle),
1697 	    le16_to_cpu(event_data->enclosure_handle),
1698 	    event_data->start_port_num, event_data->num_entries);
1699 	for (i = 0; i < event_data->num_entries; i++) {
1700 		handle =
1701 		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1702 		if (!handle)
1703 			continue;
1704 		port_number = event_data->start_port_num + i;
1705 		reason_code = event_data->port_entry[i].port_status;
1706 		switch (reason_code) {
1707 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1708 			status_str = "target remove";
1709 			break;
1710 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1711 			status_str = "delay target remove";
1712 			break;
1713 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1714 			status_str = "link status change";
1715 			break;
1716 		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1717 			status_str = "link status no change";
1718 			break;
1719 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1720 			status_str = "target responding";
1721 			break;
1722 		default:
1723 			status_str = "unknown";
1724 			break;
1725 		}
1726 		link_rate = event_data->port_entry[i].current_port_info &
1727 		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1728 		prev_link_rate = event_data->port_entry[i].previous_port_info &
1729 		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1730 		ioc_info(mrioc,
1731 		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1732 		    __func__, port_number, handle, status_str, link_rate,
1733 		    prev_link_rate);
1734 	}
1735 }
1736 
1737 /**
1738  * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
1739  * @mrioc: Adapter instance reference
1740  * @fwevt: Firmware event reference
1741  *
1742  * Prints information about the PCIe topology change event and,
1743  * for the "not responding" event code, removes the device from
1744  * the upper layers.
1745  *
1746  * Return: Nothing.
1747  */
1748 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
1749 	struct mpi3mr_fwevt *fwevt)
1750 {
1751 	struct mpi3_event_data_pcie_topology_change_list *event_data =
1752 	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
1753 	int i;
1754 	u16 handle;
1755 	u8 reason_code;
1756 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1757 
1758 	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
1759 
1760 	for (i = 0; i < event_data->num_entries; i++) {
1761 		if (fwevt->discard)
1762 			return;
1763 		handle =
1764 		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1765 		if (!handle)
1766 			continue;
1767 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1768 		if (!tgtdev)
1769 			continue;
1770 
1771 		reason_code = event_data->port_entry[i].port_status;
1772 
1773 		switch (reason_code) {
1774 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1775 			if (tgtdev->host_exposed)
1776 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1777 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
1778 			mpi3mr_tgtdev_put(tgtdev);
1779 			break;
1780 		default:
1781 			break;
1782 		}
1783 		if (tgtdev)
1784 			mpi3mr_tgtdev_put(tgtdev);
1785 	}
1786 }
1787 
1788 /**
1789  * mpi3mr_logdata_evt_bh -  Log data event bottomhalf
1790  * @mrioc: Adapter instance reference
1791  * @fwevt: Firmware event reference
1792  *
1793  * Extracts the event data and calls the application interfacing
1794  * function to process the event further.
1795  *
1796  * Return: Nothing.
1797  */
1798 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
1799 	struct mpi3mr_fwevt *fwevt)
1800 {
1801 	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
1802 	    fwevt->event_data_size);
1803 }
1804 
1805 /**
1806  * mpi3mr_update_sdev_qd - Update SCSI device queue depth
1807  * @sdev: SCSI device reference
1808  * @data: Queue depth reference
1809  *
1810  * This is an iterator function called for each SCSI device in a
1811  * target to update the QD of each SCSI device.
1812  *
1813  * Return: Nothing.
1814  */
1815 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
1816 {
1817 	u16 *q_depth = (u16 *)data;
1818 
1819 	scsi_change_queue_depth(sdev, (int)*q_depth);
1820 	sdev->max_queue_depth = sdev->queue_depth;
1821 }
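
/*
 * Illustrative usage sketch (not part of the driver): this iterator is
 * driven through the SCSI midlayer helper starget_for_each_device(),
 * with the new queue depth passed via the opaque data pointer, e.g.:
 *
 *	u16 qd = 32;
 *
 *	starget_for_each_device(starget, (void *)&qd,
 *	    mpi3mr_update_sdev_qd);
 *
 * mpi3mr_set_qd_for_all_vd_in_tg() below uses exactly this pattern with
 * tg->modified_qd as the payload.
 */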
1822 
1823 /**
1824  * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
1825  * @mrioc: Adapter instance reference
1826  * @tg: Throttle group information pointer
1827  *
1828  * Accessor to reduce QD for each device associated with the
1829  * given throttle group.
1830  *
1831  * Return: None.
1832  */
1833 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
1834 	struct mpi3mr_throttle_group_info *tg)
1835 {
1836 	unsigned long flags;
1837 	struct mpi3mr_tgt_dev *tgtdev;
1838 	struct mpi3mr_stgt_priv_data *tgt_priv;
1839 
1840 
1841 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
1842 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
1843 		if (tgtdev->starget && tgtdev->starget->hostdata) {
1844 			tgt_priv = tgtdev->starget->hostdata;
1845 			if (tgt_priv->throttle_group == tg) {
1846 				dprint_event_bh(mrioc,
1847 				    "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd(%d)\n",
1848 				    tgt_priv->perst_id, tgtdev->q_depth,
1849 				    tg->modified_qd);
1850 				starget_for_each_device(tgtdev->starget,
1851 				    (void *)&tg->modified_qd,
1852 				    mpi3mr_update_sdev_qd);
1853 			}
1854 		}
1855 	}
1856 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
1857 }
1858 
1859 /**
1860  * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
1861  * @mrioc: Adapter instance reference
1862  * @fwevt: Firmware event reference
1863  *
1864  * Identifies the firmware event and calls the corresponding bottom
1865  * half handler and sends event acknowledgment if required.
1866  *
1867  * Return: Nothing.
1868  */
1869 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
1870 	struct mpi3mr_fwevt *fwevt)
1871 {
1872 	struct mpi3_device_page0 *dev_pg0 = NULL;
1873 	u16 perst_id, handle, dev_info;
1874 	struct mpi3_device0_sas_sata_format *sasinf = NULL;
1875 
1876 	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
1877 	mrioc->current_event = fwevt;
1878 
1879 	if (mrioc->stop_drv_processing)
1880 		goto out;
1881 
1882 	if (mrioc->unrecoverable) {
1883 		dprint_event_bh(mrioc,
1884 		    "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
1885 		    fwevt->event_id);
1886 		goto out;
1887 	}
1888 
1889 	if (!fwevt->process_evt)
1890 		goto evt_ack;
1891 
1892 	switch (fwevt->event_id) {
1893 	case MPI3_EVENT_DEVICE_ADDED:
1894 	{
1895 		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
1896 		perst_id = le16_to_cpu(dev_pg0->persistent_id);
1897 		handle = le16_to_cpu(dev_pg0->dev_handle);
1898 		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
1899 			mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1900 		else if (mrioc->sas_transport_enabled &&
1901 		    (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
1902 			sasinf = &dev_pg0->device_specific.sas_sata_format;
1903 			dev_info = le16_to_cpu(sasinf->device_info);
1904 			if (!mrioc->sas_hba.num_phys)
1905 				mpi3mr_sas_host_add(mrioc);
1906 			else
1907 				mpi3mr_sas_host_refresh(mrioc);
1908 
1909 			if (mpi3mr_is_expander_device(dev_info))
1910 				mpi3mr_expander_add(mrioc, handle);
1911 		}
1912 		break;
1913 	}
1914 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
1915 	{
1916 		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
1917 		perst_id = le16_to_cpu(dev_pg0->persistent_id);
1918 		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
1919 			mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
1920 		break;
1921 	}
1922 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1923 	{
1924 		mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
1925 		break;
1926 	}
1927 	case MPI3_EVENT_ENCL_DEVICE_ADDED:
1928 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
1929 	{
1930 		mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
1931 		break;
1932 	}
1933 
1934 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1935 	{
1936 		mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
1937 		break;
1938 	}
1939 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1940 	{
1941 		mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
1942 		break;
1943 	}
1944 	case MPI3_EVENT_LOG_DATA:
1945 	{
1946 		mpi3mr_logdata_evt_bh(mrioc, fwevt);
1947 		break;
1948 	}
1949 	case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
1950 	{
1951 		struct mpi3mr_throttle_group_info *tg;
1952 
1953 		tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
1954 		dprint_event_bh(mrioc,
1955 		    "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
1956 		    tg->id, tg->need_qd_reduction);
1957 		if (tg->need_qd_reduction) {
1958 			mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
1959 			tg->need_qd_reduction = 0;
1960 		}
1961 		break;
1962 	}
1963 	case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
1964 	{
1965 		while (mrioc->device_refresh_on)
1966 			msleep(500);
1967 
1968 		dprint_event_bh(mrioc,
1969 		    "scan for non responding and newly added devices after soft reset started\n");
1970 		if (mrioc->sas_transport_enabled) {
1971 			mpi3mr_refresh_sas_ports(mrioc);
1972 			mpi3mr_refresh_expanders(mrioc);
1973 		}
1974 		mpi3mr_rfresh_tgtdevs(mrioc);
1975 		ioc_info(mrioc,
1976 		    "scan for non responding and newly added devices after soft reset completed\n");
1977 		break;
1978 	}
1979 	default:
1980 		break;
1981 	}
1982 
1983 evt_ack:
1984 	if (fwevt->send_ack)
1985 		mpi3mr_process_event_ack(mrioc, fwevt->event_id,
1986 		    fwevt->evt_ctx);
1987 out:
1988 	/* Put fwevt reference count to neutralize kref_init increment */
1989 	mpi3mr_fwevt_put(fwevt);
1990 	mrioc->current_event = NULL;
1991 }
1992 
1993 /**
1994  * mpi3mr_fwevt_worker - Firmware event worker
1995  * @work: Work struct containing firmware event
1996  *
1997  * Extracts the firmware event and calls mpi3mr_fwevt_bh.
1998  *
1999  * Return: Nothing.
2000  */
2001 static void mpi3mr_fwevt_worker(struct work_struct *work)
2002 {
2003 	struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
2004 	    work);
2005 	mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
2006 	/*
2007 	 * Put fwevt reference count after
2008 	 * dequeuing it from worker queue
2009 	 */
2010 	mpi3mr_fwevt_put(fwevt);
2011 }
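
/*
 * Assumed fwevt reference-count lifecycle, pieced together from the two
 * kref puts above and the "neutralize kref_init" comment in
 * mpi3mr_fwevt_bh() (a sketch, not taken verbatim from the headers):
 *
 *	mpi3mr_alloc_fwevt()        kref_init()  -> refcount 1
 *	mpi3mr_fwevt_add_to_list()  kref_get()   -> refcount 2 (worker ref)
 *	mpi3mr_fwevt_bh()           kref_put()   -> drops the init reference
 *	mpi3mr_fwevt_worker()       kref_put()   -> drops the worker
 *	                                            reference, freeing the
 *	                                            event
 */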
2012 
2013 /**
2014  * mpi3mr_create_tgtdev - Create and add a target device
2015  * @mrioc: Adapter instance reference
2016  * @dev_pg0: Device Page 0 data
2017  *
2018  * If the device specified by the device page 0 data is not
2019  * present in the driver's internal list, allocates memory
2020  * for the device, populates the data and adds it to the list;
2021  * else updates the device data. The key is the persistent ID.
2022  *
2023  * Return: 0 on success, -ENOMEM on memory allocation failure
2024  */
2025 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
2026 	struct mpi3_device_page0 *dev_pg0)
2027 {
2028 	int retval = 0;
2029 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2030 	u16 perst_id = 0;
2031 	unsigned long flags;
2032 
2033 	perst_id = le16_to_cpu(dev_pg0->persistent_id);
2034 	if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
2035 		return retval;
2036 
2037 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2038 	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
2039 	if (tgtdev)
2040 		tgtdev->state = MPI3MR_DEV_CREATED;
2041 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2042 
2043 	if (tgtdev) {
2044 		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2045 		mpi3mr_tgtdev_put(tgtdev);
2046 	} else {
2047 		tgtdev = mpi3mr_alloc_tgtdev();
2048 		if (!tgtdev)
2049 			return -ENOMEM;
2050 		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
2051 		mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
2052 	}
2053 
2054 	return retval;
2055 }
2056 
2057 /**
2058  * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
2059  * @mrioc: Adapter instance reference
2060  *
2061  * Flushes pending commands in the delayed lists as a cleanup
2062  * during a controller reset or driver removal.
2063  *
2064  * Return: Nothing
2065  */
2066 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
2067 {
2068 	struct delayed_dev_rmhs_node *_rmhs_node;
2069 	struct delayed_evt_ack_node *_evtack_node;
2070 
2071 	dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
2072 	while (!list_empty(&mrioc->delayed_rmhs_list)) {
2073 		_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
2074 		    struct delayed_dev_rmhs_node, list);
2075 		list_del(&_rmhs_node->list);
2076 		kfree(_rmhs_node);
2077 	}
2078 	dprint_reset(mrioc, "flushing delayed event ack commands\n");
2079 	while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2080 		_evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
2081 		    struct delayed_evt_ack_node, list);
2082 		list_del(&_evtack_node->list);
2083 		kfree(_evtack_node);
2084 	}
2085 }
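
/*
 * The pop-and-free loops above are equivalent to the common
 * list_for_each_entry_safe() idiom; a minimal sketch for the first list:
 *
 *	struct delayed_dev_rmhs_node *node, *tmp;
 *
 *	list_for_each_entry_safe(node, tmp, &mrioc->delayed_rmhs_list, list) {
 *		list_del(&node->list);
 *		kfree(node);
 *	}
 */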
2086 
2087 /**
2088  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
2089  * @mrioc: Adapter instance reference
2090  * @drv_cmd: Internal command tracker
2091  *
2092  * Issues a target reset TM to the firmware from the device
2093  * removal TM pend list or retries the removal handshake sequence
2094  * based on the IOU control request IOC status.
2095  *
2096  * Return: Nothing
2097  */
2098 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
2099 	struct mpi3mr_drv_cmd *drv_cmd)
2100 {
2101 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2102 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2103 
2104 	if (drv_cmd->state & MPI3MR_CMD_RESET)
2105 		goto clear_drv_cmd;
2106 
2107 	ioc_info(mrioc,
2108 	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
2109 	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
2110 	    drv_cmd->ioc_loginfo);
2111 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2112 		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
2113 			drv_cmd->retry_count++;
2114 			ioc_info(mrioc,
2115 			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
2116 			    __func__, drv_cmd->dev_handle,
2117 			    drv_cmd->retry_count);
2118 			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
2119 			    drv_cmd, drv_cmd->iou_rc);
2120 			return;
2121 		}
2122 		ioc_err(mrioc,
2123 		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
2124 		    __func__, drv_cmd->dev_handle);
2125 	} else {
2126 		ioc_info(mrioc,
2127 		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
2128 		    __func__, drv_cmd->dev_handle);
2129 		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
2130 	}
2131 
2132 	if (!list_empty(&mrioc->delayed_rmhs_list)) {
2133 		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
2134 		    struct delayed_dev_rmhs_node, list);
2135 		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
2136 		drv_cmd->retry_count = 0;
2137 		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
2138 		ioc_info(mrioc,
2139 		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
2140 		    __func__, drv_cmd->dev_handle);
2141 		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
2142 		    drv_cmd->iou_rc);
2143 		list_del(&delayed_dev_rmhs->list);
2144 		kfree(delayed_dev_rmhs);
2145 		return;
2146 	}
2147 
2148 clear_drv_cmd:
2149 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2150 	drv_cmd->callback = NULL;
2151 	drv_cmd->retry_count = 0;
2152 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2153 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
2154 }
2155 
2156 /**
2157  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
2158  * @mrioc: Adapter instance reference
2159  * @drv_cmd: Internal command tracker
2160  *
2161  * Issues a target reset TM to the firmware from the device
2162  * removal TM pend list or issues an IO unit control request as
2163  * part of device removal or hidden acknowledgment handshake.
2164  *
2165  * Return: Nothing
2166  */
2167 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
2168 	struct mpi3mr_drv_cmd *drv_cmd)
2169 {
2170 	struct mpi3_iounit_control_request iou_ctrl;
2171 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2172 	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
2173 	int retval;
2174 
2175 	if (drv_cmd->state & MPI3MR_CMD_RESET)
2176 		goto clear_drv_cmd;
2177 
2178 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
2179 		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
2180 
2181 	if (tm_reply)
2182 		pr_info(IOCNAME
2183 		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
2184 		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
2185 		    drv_cmd->ioc_loginfo,
2186 		    le32_to_cpu(tm_reply->termination_count));
2187 
2188 	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
2189 	    mrioc->name, drv_cmd->dev_handle, cmd_idx);
2190 
2191 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
2192 
2193 	drv_cmd->state = MPI3MR_CMD_PENDING;
2194 	drv_cmd->is_waiting = 0;
2195 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
2196 	iou_ctrl.operation = drv_cmd->iou_rc;
2197 	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
2198 	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
2199 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
2200 
2201 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
2202 	    1);
2203 	if (retval) {
2204 		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
2205 		    mrioc->name);
2206 		goto clear_drv_cmd;
2207 	}
2208 
2209 	return;
2210 clear_drv_cmd:
2211 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2212 	drv_cmd->callback = NULL;
2213 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2214 	drv_cmd->retry_count = 0;
2215 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
2216 }
2217 
2218 /**
2219  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
2220  * @mrioc: Adapter instance reference
2221  * @handle: Device handle
2222  * @cmdparam: Internal command tracker
2223  * @iou_rc: IO unit reason code
2224  *
2225  * Issues a target reset TM to the firmware or adds it to a pend
2226  * list as part of device removal or hidden acknowledgment
2227  * handshake.
2228  *
2229  * Return: Nothing
2230  */
2231 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
2232 	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
2233 {
2234 	struct mpi3_scsi_task_mgmt_request tm_req;
2235 	int retval = 0;
2236 	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
2237 	u8 retrycount = 5;
2238 	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2239 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
2240 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2241 	unsigned long flags;
2242 
2243 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2244 	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2245 	if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
2246 		tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
2247 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2248 
2249 	if (drv_cmd)
2250 		goto issue_cmd;
2251 	do {
2252 		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
2253 		    MPI3MR_NUM_DEVRMCMD);
2254 		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
2255 			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
2256 				break;
2257 			cmd_idx = MPI3MR_NUM_DEVRMCMD;
2258 		}
2259 	} while (retrycount--);
2260 
2261 	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
2262 		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
2263 		    GFP_ATOMIC);
2264 		if (!delayed_dev_rmhs)
2265 			return;
2266 		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
2267 		delayed_dev_rmhs->handle = handle;
2268 		delayed_dev_rmhs->iou_rc = iou_rc;
2269 		list_add_tail(&delayed_dev_rmhs->list,
2270 		    &mrioc->delayed_rmhs_list);
2271 		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
2272 		    __func__, handle);
2273 		return;
2274 	}
2275 	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
2276 
2277 issue_cmd:
2278 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
2279 	ioc_info(mrioc,
2280 	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
2281 	    __func__, handle, cmd_idx);
2282 
2283 	memset(&tm_req, 0, sizeof(tm_req));
2284 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2285 		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
2286 		goto out;
2287 	}
2288 	drv_cmd->state = MPI3MR_CMD_PENDING;
2289 	drv_cmd->is_waiting = 0;
2290 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
2291 	drv_cmd->dev_handle = handle;
2292 	drv_cmd->iou_rc = iou_rc;
2293 	tm_req.dev_handle = cpu_to_le16(handle);
2294 	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2295 	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2296 	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
2297 	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
2298 
2299 	set_bit(handle, mrioc->removepend_bitmap);
2300 	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
2301 	if (retval) {
2302 		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
2303 		    __func__);
2304 		goto out_failed;
2305 	}
2306 out:
2307 	return;
2308 out_failed:
2309 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2310 	drv_cmd->callback = NULL;
2311 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2312 	drv_cmd->retry_count = 0;
2313 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
2314 }
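
/*
 * Device removal handshake flow, as implied by the three handlers above
 * (descriptive sketch):
 *
 *	mpi3mr_dev_rmhs_send_tm()      - issue the target reset TM, or queue
 *	                                 a delayed_dev_rmhs_node when no free
 *	                                 dev_rmhs command slot is available
 *	mpi3mr_dev_rmhs_complete_tm()  - TM done; issue IO_UNIT_CONTROL with
 *	                                 the remove/hidden-ack reason code
 *	mpi3mr_dev_rmhs_complete_iou() - retry the handshake on failure, or
 *	                                 clear removepend_bitmap and kick off
 *	                                 the next delayed handshake
 */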
2315 
2316 /**
2317  * mpi3mr_complete_evt_ack - event ack request completion
2318  * @mrioc: Adapter instance reference
2319  * @drv_cmd: Internal command tracker
2320  *
2321  * This is the completion handler for the non-blocking event
2322  * acknowledgment sent to the firmware, and it will issue any
2323  * pending event acknowledgment request.
2324  *
2325  * Return: Nothing
2326  */
2327 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
2328 	struct mpi3mr_drv_cmd *drv_cmd)
2329 {
2330 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2331 	struct delayed_evt_ack_node *delayed_evtack = NULL;
2332 
2333 	if (drv_cmd->state & MPI3MR_CMD_RESET)
2334 		goto clear_drv_cmd;
2335 
2336 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
2337 		dprint_event_th(mrioc,
2338 		    "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
2339 		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
2340 		    drv_cmd->ioc_loginfo);
2341 	}
2342 
2343 	if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
2344 		delayed_evtack =
2345 			list_entry(mrioc->delayed_evtack_cmds_list.next,
2346 			    struct delayed_evt_ack_node, list);
2347 		mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
2348 		    delayed_evtack->event_ctx);
2349 		list_del(&delayed_evtack->list);
2350 		kfree(delayed_evtack);
2351 		return;
2352 	}
2353 clear_drv_cmd:
2354 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2355 	drv_cmd->callback = NULL;
2356 	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2357 }
2358 
2359 /**
2360  * mpi3mr_send_event_ack - Issue event acknowledgment request
2361  * @mrioc: Adapter instance reference
2362  * @event: MPI3 event id
2363  * @cmdparam: Internal command tracker
2364  * @event_ctx: event context
2365  *
2366  * Issues an event acknowledgment request to the firmware if there
2367  * is a free command to send the event ack, else adds it to a pend
2368  * list so that it will be processed on completion of a prior
2369  * event acknowledgment.
2370  *
2371  * Return: Nothing
2372  */
2373 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
2374 	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
2375 {
2376 	struct mpi3_event_ack_request evtack_req;
2377 	int retval = 0;
2378 	u8 retrycount = 5;
2379 	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
2380 	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
2381 	struct delayed_evt_ack_node *delayed_evtack = NULL;
2382 
2383 	if (drv_cmd) {
2384 		dprint_event_th(mrioc,
2385 		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2386 		    event, event_ctx);
2387 		goto issue_cmd;
2388 	}
2389 	dprint_event_th(mrioc,
2390 	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
2391 	    event, event_ctx);
2392 	do {
2393 		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
2394 		    MPI3MR_NUM_EVTACKCMD);
2395 		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
2396 			if (!test_and_set_bit(cmd_idx,
2397 			    mrioc->evtack_cmds_bitmap))
2398 				break;
2399 			cmd_idx = MPI3MR_NUM_EVTACKCMD;
2400 		}
2401 	} while (retrycount--);
2402 
2403 	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
2404 		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
2405 		    GFP_ATOMIC);
2406 		if (!delayed_evtack)
2407 			return;
2408 		INIT_LIST_HEAD(&delayed_evtack->list);
2409 		delayed_evtack->event = event;
2410 		delayed_evtack->event_ctx = event_ctx;
2411 		list_add_tail(&delayed_evtack->list,
2412 		    &mrioc->delayed_evtack_cmds_list);
2413 		dprint_event_th(mrioc,
2414 		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
2415 		    event, event_ctx);
2416 		return;
2417 	}
2418 	drv_cmd = &mrioc->evtack_cmds[cmd_idx];
2419 
2420 issue_cmd:
2421 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
2422 
2423 	memset(&evtack_req, 0, sizeof(evtack_req));
2424 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
2425 		dprint_event_th(mrioc,
2426 		    "sending event ack failed due to command in use\n");
2427 		goto out;
2428 	}
2429 	drv_cmd->state = MPI3MR_CMD_PENDING;
2430 	drv_cmd->is_waiting = 0;
2431 	drv_cmd->callback = mpi3mr_complete_evt_ack;
2432 	evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
2433 	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
2434 	evtack_req.event = event;
2435 	evtack_req.event_context = cpu_to_le32(event_ctx);
2436 	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
2437 	    sizeof(evtack_req), 1);
2438 	if (retval) {
2439 		dprint_event_th(mrioc,
2440 		    "posting event ack request is failed\n");
2441 		goto out_failed;
2442 	}
2443 
2444 	dprint_event_th(mrioc,
2445 	    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
2446 	    event, event_ctx);
2447 out:
2448 	return;
2449 out_failed:
2450 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
2451 	drv_cmd->callback = NULL;
2452 	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
2453 }
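
/*
 * Event acknowledgment flow (sketch): mpi3mr_send_event_ack() grabs a
 * free slot in evtack_cmds_bitmap and posts an MPI3_FUNCTION_EVENT_ACK
 * request; if all MPI3MR_NUM_EVTACKCMD slots are busy, the ack is parked
 * on delayed_evtack_cmds_list and replayed from
 * mpi3mr_complete_evt_ack() once a prior ack completes.
 */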
2454 
2455 /**
2456  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
2457  * @mrioc: Adapter instance reference
2458  * @event_reply: event data
2459  *
2460  * Checks the reason code and, based on that, either blocks I/O
2461  * to the device, unblocks I/O to the device, or starts the device
2462  * removal handshake with the firmware with reason as remove, for
2463  * PCIe devices.
2464  *
2465  * Return: Nothing
2466  */
2467 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
2468 	struct mpi3_event_notification_reply *event_reply)
2469 {
2470 	struct mpi3_event_data_pcie_topology_change_list *topo_evt =
2471 	    (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
2472 	int i;
2473 	u16 handle;
2474 	u8 reason_code;
2475 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2476 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2477 
2478 	for (i = 0; i < topo_evt->num_entries; i++) {
2479 		handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
2480 		if (!handle)
2481 			continue;
2482 		reason_code = topo_evt->port_entry[i].port_status;
2483 		scsi_tgt_priv_data = NULL;
2484 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2485 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2486 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2487 			    tgtdev->starget->hostdata;
2488 		switch (reason_code) {
2489 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
2490 			if (scsi_tgt_priv_data) {
2491 				scsi_tgt_priv_data->dev_removed = 1;
2492 				scsi_tgt_priv_data->dev_removedelay = 0;
2493 				atomic_set(&scsi_tgt_priv_data->block_io, 0);
2494 			}
2495 			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2496 			    MPI3_CTRL_OP_REMOVE_DEVICE);
2497 			break;
2498 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
2499 			if (scsi_tgt_priv_data) {
2500 				scsi_tgt_priv_data->dev_removedelay = 1;
2501 				atomic_inc(&scsi_tgt_priv_data->block_io);
2502 			}
2503 			break;
2504 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
2505 			if (scsi_tgt_priv_data &&
2506 			    scsi_tgt_priv_data->dev_removedelay) {
2507 				scsi_tgt_priv_data->dev_removedelay = 0;
2508 				atomic_dec_if_positive
2509 				    (&scsi_tgt_priv_data->block_io);
2510 			}
2511 			break;
2512 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
2513 		default:
2514 			break;
2515 		}
2516 		if (tgtdev)
2517 			mpi3mr_tgtdev_put(tgtdev);
2518 	}
2519 }
2520 
2521 /**
2522  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
2523  * @mrioc: Adapter instance reference
2524  * @event_reply: event data
2525  *
2526  * Checks the reason code and, based on that, either blocks I/O
2527  * to the device, unblocks I/O to the device, or starts the device
2528  * removal handshake with the firmware with reason as remove, for
2529  * SAS/SATA devices.
2530  *
2531  * Return: Nothing
2532  */
2533 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
2534 	struct mpi3_event_notification_reply *event_reply)
2535 {
2536 	struct mpi3_event_data_sas_topology_change_list *topo_evt =
2537 	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
2538 	int i;
2539 	u16 handle;
2540 	u8 reason_code;
2541 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2542 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2543 
2544 	for (i = 0; i < topo_evt->num_entries; i++) {
2545 		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
2546 		if (!handle)
2547 			continue;
2548 		reason_code = topo_evt->phy_entry[i].status &
2549 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
2550 		scsi_tgt_priv_data = NULL;
2551 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
2552 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
2553 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2554 			    tgtdev->starget->hostdata;
2555 		switch (reason_code) {
2556 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
2557 			if (scsi_tgt_priv_data) {
2558 				scsi_tgt_priv_data->dev_removed = 1;
2559 				scsi_tgt_priv_data->dev_removedelay = 0;
2560 				atomic_set(&scsi_tgt_priv_data->block_io, 0);
2561 			}
2562 			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
2563 			    MPI3_CTRL_OP_REMOVE_DEVICE);
2564 			break;
2565 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
2566 			if (scsi_tgt_priv_data) {
2567 				scsi_tgt_priv_data->dev_removedelay = 1;
2568 				atomic_inc(&scsi_tgt_priv_data->block_io);
2569 			}
2570 			break;
2571 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
2572 			if (scsi_tgt_priv_data &&
2573 			    scsi_tgt_priv_data->dev_removedelay) {
2574 				scsi_tgt_priv_data->dev_removedelay = 0;
2575 				atomic_dec_if_positive
2576 				    (&scsi_tgt_priv_data->block_io);
2577 			}
2578 			break;
2579 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
2580 		default:
2581 			break;
2582 		}
2583 		if (tgtdev)
2584 			mpi3mr_tgtdev_put(tgtdev);
2585 	}
2586 }
2587 
2588 /**
2589  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2590  * @mrioc: Adapter instance reference
2591  * @event_reply: event data
2592  *
2593  * Checks the reason code and, based on that, either blocks I/O
2594  * to the device, unblocks I/O to the device, or starts the device
2595  * removal handshake with the firmware with reason as remove/hide
2596  * acknowledgment.
2597  *
2598  * Return: Nothing
2599  */
2600 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2601 	struct mpi3_event_notification_reply *event_reply)
2602 {
2603 	u16 dev_handle = 0;
2604 	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2605 	struct mpi3mr_tgt_dev *tgtdev = NULL;
2606 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2607 	struct mpi3_event_data_device_status_change *evtdata =
2608 	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
2609 
2610 	if (mrioc->stop_drv_processing)
2611 		goto out;
2612 
2613 	dev_handle = le16_to_cpu(evtdata->dev_handle);
2614 
2615 	switch (evtdata->reason_code) {
2616 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2617 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2618 		block = 1;
2619 		break;
2620 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2621 		delete = 1;
2622 		hide = 1;
2623 		break;
2624 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2625 		delete = 1;
2626 		remove = 1;
2627 		break;
2628 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2629 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2630 		ublock = 1;
2631 		break;
2632 	default:
2633 		break;
2634 	}
2635 
2636 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2637 	if (!tgtdev)
2638 		goto out;
2639 	if (hide)
2640 		tgtdev->is_hidden = hide;
2641 	if (tgtdev->starget && tgtdev->starget->hostdata) {
2642 		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2643 		    tgtdev->starget->hostdata;
2644 		if (block)
2645 			atomic_inc(&scsi_tgt_priv_data->block_io);
2646 		if (delete)
2647 			scsi_tgt_priv_data->dev_removed = 1;
2648 		if (ublock)
2649 			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
2650 	}
2651 	if (remove)
2652 		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2653 		    MPI3_CTRL_OP_REMOVE_DEVICE);
2654 	if (hide)
2655 		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2656 		    MPI3_CTRL_OP_HIDDEN_ACK);
2657 
2658 out:
2659 	if (tgtdev)
2660 		mpi3mr_tgtdev_put(tgtdev);
2661 }
2662 
2663 /**
2664  * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2665  * @mrioc: Adapter instance reference
2666  * @event_reply: event data
2667  *
2668  * Blocks and unblocks host-level I/O based on the reason code.
2669  *
2670  * Return: Nothing
2671  */
2672 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2673 	struct mpi3_event_notification_reply *event_reply)
2674 {
2675 	struct mpi3_event_data_prepare_for_reset *evtdata =
2676 	    (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2677 
2678 	if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2679 		dprint_event_th(mrioc,
2680 		    "prepare for reset event top half with rc=start\n");
2681 		if (mrioc->prepare_for_reset)
2682 			return;
2683 		mrioc->prepare_for_reset = 1;
2684 		mrioc->prepare_for_reset_timeout_counter = 0;
2685 	} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2686 		dprint_event_th(mrioc,
2687 		    "prepare for reset top half with rc=abort\n");
2688 		mrioc->prepare_for_reset = 0;
2689 		mrioc->prepare_for_reset_timeout_counter = 0;
2690 	}
2691 	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2692 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2693 		mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2694 		    le32_to_cpu(event_reply->event_context));
2695 }
2696 
2697 /**
2698  * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2699  * @mrioc: Adapter instance reference
2700  * @event_reply: event data
2701  *
2702  * Identifies the new shutdown timeout value and updates it.
2703  *
2704  * Return: Nothing
2705  */
2706 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2707 	struct mpi3_event_notification_reply *event_reply)
2708 {
2709 	struct mpi3_event_data_energy_pack_change *evtdata =
2710 	    (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2711 	u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2712 
2713 	if (shutdown_timeout <= 0) {
2714 		ioc_warn(mrioc,
2715 		    "%s :Invalid Shutdown Timeout received = %d\n",
2716 		    __func__, shutdown_timeout);
2717 		return;
2718 	}
2719 
2720 	ioc_info(mrioc,
2721 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2722 	    __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2723 	mrioc->facts.shutdown_timeout = shutdown_timeout;
2724 }
2725 
2726 /**
2727  * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2728  * @mrioc: Adapter instance reference
2729  * @event_reply: event data
2730  *
2731  * Displays cable management event details.
2732  *
2733  * Return: Nothing
2734  */
2735 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2736 	struct mpi3_event_notification_reply *event_reply)
2737 {
2738 	struct mpi3_event_data_cable_management *evtdata =
2739 	    (struct mpi3_event_data_cable_management *)event_reply->event_data;
2740 
2741 	switch (evtdata->status) {
2742 	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2743 	{
2744 		ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2745 		    "Devices connected to this cable are not detected.\n"
2746 		    "This cable requires %d mW of power.\n",
2747 		    evtdata->receptacle_id,
2748 		    le32_to_cpu(evtdata->active_cable_power_requirement));
2749 		break;
2750 	}
2751 	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2752 	{
2753 		ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2754 		    evtdata->receptacle_id);
2755 		break;
2756 	}
2757 	default:
2758 		break;
2759 	}
2760 }
2761 
2762 /**
2763  * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
2764  * @mrioc: Adapter instance reference
2765  *
2766  * Adds a driver-specific event to make sure that the driver won't process
2767  * events until all the devices are refreshed during soft reset.
2768  *
2769  * Return: Nothing
2770  */
2771 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
2772 {
2773 	struct mpi3mr_fwevt *fwevt = NULL;
2774 
2775 	fwevt = mpi3mr_alloc_fwevt(0);
2776 	if (!fwevt) {
2777 		dprint_event_th(mrioc,
2778 		    "failed to schedule bottom half handler for event(0x%02x)\n",
2779 		    MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
2780 		return;
2781 	}
2782 	fwevt->mrioc = mrioc;
2783 	fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
2784 	fwevt->send_ack = 0;
2785 	fwevt->process_evt = 1;
2786 	fwevt->evt_ctx = 0;
2787 	fwevt->event_data_size = 0;
2788 	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2789 }
2790 
2791 /**
2792  * mpi3mr_os_handle_events - Firmware event handler
2793  * @mrioc: Adapter instance reference
2794  * @event_reply: event data
2795  *
2796  * Identifies whether the event has to be handled and acknowledged,
2797  * and either processes the event in the top half and/or schedules a
2798  * bottom half through mpi3mr_fwevt_worker.
2799  *
2800  * Return: Nothing
2801  */
2802 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
2803 	struct mpi3_event_notification_reply *event_reply)
2804 {
2805 	u16 evt_type, sz;
2806 	struct mpi3mr_fwevt *fwevt = NULL;
2807 	bool ack_req = 0, process_evt_bh = 0;
2808 
2809 	if (mrioc->stop_drv_processing)
2810 		return;
2811 
2812 	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2813 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2814 		ack_req = 1;
2815 
2816 	evt_type = event_reply->event;
2817 
2818 	switch (evt_type) {
2819 	case MPI3_EVENT_DEVICE_ADDED:
2820 	{
2821 		struct mpi3_device_page0 *dev_pg0 =
2822 		    (struct mpi3_device_page0 *)event_reply->event_data;
2823 		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
2824 			ioc_err(mrioc,
2825 			    "%s :Failed to add device in the device add event\n",
2826 			    __func__);
2827 		else
2828 			process_evt_bh = 1;
2829 		break;
2830 	}
2831 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
2832 	{
2833 		process_evt_bh = 1;
2834 		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
2835 		break;
2836 	}
2837 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
2838 	{
2839 		process_evt_bh = 1;
2840 		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
2841 		break;
2842 	}
2843 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
2844 	{
2845 		process_evt_bh = 1;
2846 		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
2847 		break;
2848 	}
2849 	case MPI3_EVENT_PREPARE_FOR_RESET:
2850 	{
2851 		mpi3mr_preparereset_evt_th(mrioc, event_reply);
2852 		ack_req = 0;
2853 		break;
2854 	}
2855 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
2856 	case MPI3_EVENT_LOG_DATA:
2857 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
2858 	case MPI3_EVENT_ENCL_DEVICE_ADDED:
2859 	{
2860 		process_evt_bh = 1;
2861 		break;
2862 	}
2863 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
2864 	{
2865 		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
2866 		break;
2867 	}
2868 	case MPI3_EVENT_CABLE_MGMT:
2869 	{
2870 		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
2871 		break;
2872 	}
2873 	case MPI3_EVENT_SAS_DISCOVERY:
2874 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
2875 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
2876 	case MPI3_EVENT_PCIE_ENUMERATION:
2877 		break;
2878 	default:
2879 		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
2880 		    __func__, evt_type);
2881 		break;
2882 	}
2883 	if (process_evt_bh || ack_req) {
2884 		sz = event_reply->event_data_length * 4;
2885 		fwevt = mpi3mr_alloc_fwevt(sz);
2886 		if (!fwevt) {
2887 			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
2888 			    __func__, __FILE__, __LINE__, __func__);
2889 			return;
2890 		}
2891 
2892 		memcpy(fwevt->event_data, event_reply->event_data, sz);
2893 		fwevt->mrioc = mrioc;
2894 		fwevt->event_id = evt_type;
2895 		fwevt->send_ack = ack_req;
2896 		fwevt->process_evt = process_evt_bh;
2897 		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
2898 		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
2899 	}
2900 }
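
/*
 * Top-half/bottom-half split (sketch): mpi3mr_os_handle_events() runs in
 * reply-processing context, performs the quick *_evt_th() work (I/O
 * blocking/unblocking, handshake kickoff) and, for events needing more
 * work, copies the event data into a mpi3mr_fwevt and queues it;
 * mpi3mr_fwevt_worker() later invokes mpi3mr_fwevt_bh() from workqueue
 * (process) context, where sleeping operations such as device
 * addition/removal are safe.
 */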
2901 
2902 /**
2903  * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
2904  * @mrioc: Adapter instance reference
2905  * @scmd: SCSI command reference
2906  * @scsiio_req: MPI3 SCSI IO request
2907  *
2908  * Identifies the protection information flags from the SCSI
2909  * command and sets the appropriate flags in the MPI3 SCSI IO
2910  * request.
2911  *
2912  * Return: Nothing
2913  */
2914 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
2915 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
2916 {
2917 	u16 eedp_flags = 0;
2918 	unsigned char prot_op = scsi_get_prot_op(scmd);
2919 
2920 	switch (prot_op) {
2921 	case SCSI_PROT_NORMAL:
2922 		return;
2923 	case SCSI_PROT_READ_STRIP:
2924 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
2925 		break;
2926 	case SCSI_PROT_WRITE_INSERT:
2927 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
2928 		break;
2929 	case SCSI_PROT_READ_INSERT:
2930 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
2931 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2932 		break;
2933 	case SCSI_PROT_WRITE_STRIP:
2934 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
2935 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2936 		break;
2937 	case SCSI_PROT_READ_PASS:
2938 		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
2939 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2940 		break;
2941 	case SCSI_PROT_WRITE_PASS:
2942 		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
2943 			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
2944 			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
2945 			    0xffff;
2946 		} else
2947 			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
2948 
2949 		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
2950 		break;
2951 	default:
2952 		return;
2953 	}
2954 
2955 	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
2956 		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;
2957 
2958 	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
2959 		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;
2960 
2961 	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
2962 		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
2963 			MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
2964 		scsiio_req->cdb.eedp32.primary_reference_tag =
2965 			cpu_to_be32(scsi_prot_ref_tag(scmd));
2966 	}
2967 
2968 	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
2969 		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
2970 
2971 	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;
2972 
2973 	switch (scsi_prot_interval(scmd)) {
2974 	case 512:
2975 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
2976 		break;
2977 	case 520:
2978 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
2979 		break;
2980 	case 4080:
2981 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
2982 		break;
2983 	case 4088:
2984 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
2985 		break;
2986 	case 4096:
2987 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
2988 		break;
2989 	case 4104:
2990 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
2991 		break;
2992 	case 4160:
2993 		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
2994 		break;
2995 	default:
2996 		break;
2997 	}
2998 
2999 	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
3000 	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
3001 }
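
/*
 * The user_data_size cases above cover common logical block sizes with
 * and without inline protection/metadata bytes; for example, 520 is a
 * 512-byte block plus 8 bytes of protection information, and 4104 and
 * 4160 are 4096-byte blocks plus 8 and 64 metadata bytes respectively
 * (an interpretation for readability, not taken from the MPI3 headers).
 */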
3002 
3003 /**
3004  * mpi3mr_build_sense_buffer - Map sense information
3005  * @desc: Sense type
3006  * @buf: Sense buffer to populate
3007  * @key: Sense key
3008  * @asc: Additional sense code
3009  * @ascq: Additional sense code qualifier
3010  *
3011  * Maps the given sense information into either descriptor or
3012  * fixed format sense data.
3013  *
3014  * Return: Nothing
3015  */
3016 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
3017 	u8 asc, u8 ascq)
3018 {
3019 	if (desc) {
3020 		buf[0] = 0x72;	/* descriptor, current */
3021 		buf[1] = key;
3022 		buf[2] = asc;
3023 		buf[3] = ascq;
3024 		buf[7] = 0;
3025 	} else {
3026 		buf[0] = 0x70;	/* fixed, current */
3027 		buf[2] = key;
3028 		buf[7] = 0xa;
3029 		buf[12] = asc;
3030 		buf[13] = ascq;
3031 	}
3032 }
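
/*
 * Resulting sense data layouts, per SPC (for reference):
 *
 *	descriptor format (0x72)           fixed format (0x70)
 *	byte 0: response code 0x72         byte 0: response code 0x70
 *	byte 1: sense key                  byte 2: sense key
 *	byte 2: ASC                        byte 7: additional length (0xa)
 *	byte 3: ASCQ                       byte 12: ASC
 *	byte 7: additional length (0)      byte 13: ASCQ
 */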
3033 
3034 /**
3035  * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
3036  * @scmd: SCSI command reference
3037  * @ioc_status: status of MPI3 request
3038  *
3039  * Maps the EEDP error status of the SCSI IO request to sense
3040  * data.
3041  *
3042  * Return: Nothing
3043  */
3044 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
3045 	u16 ioc_status)
3046 {
3047 	u8 ascq = 0;
3048 
3049 	switch (ioc_status) {
3050 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3051 		ascq = 0x01;
3052 		break;
3053 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3054 		ascq = 0x02;
3055 		break;
3056 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3057 		ascq = 0x03;
3058 		break;
3059 	default:
3060 		ascq = 0x00;
3061 		break;
3062 	}
3063 
3064 	mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
3065 	    0x10, ascq);
3066 	scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
3067 }
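
/*
 * The ASCQ values above follow the T10-defined additional sense codes
 * under ASC 0x10:
 *	0x10/0x01 LOGICAL BLOCK GUARD CHECK FAILED
 *	0x10/0x02 LOGICAL BLOCK APPLICATION TAG CHECK FAILED
 *	0x10/0x03 LOGICAL BLOCK REFERENCE TAG CHECK FAILED
 */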
3068 
3069 /**
3070  * mpi3mr_process_op_reply_desc - reply descriptor handler
3071  * @mrioc: Adapter instance reference
3072  * @reply_desc: Operational reply descriptor
3073  * @reply_dma: placeholder for the reply DMA address
3074  * @qidx: Operational queue index
3075  *
3076  * Processes the operational reply descriptor and identifies the
3077  * descriptor type. Based on the descriptor, maps the MPI3 request
3078  * status to a SCSI command status and calls the scsi_done
3079  * callback.
3080  *
3081  * Return: Nothing
3082  */
3083 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
3084 	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
3085 {
3086 	u16 reply_desc_type, host_tag = 0;
3087 	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
3088 	u32 ioc_loginfo = 0;
3089 	struct mpi3_status_reply_descriptor *status_desc = NULL;
3090 	struct mpi3_address_reply_descriptor *addr_desc = NULL;
3091 	struct mpi3_success_reply_descriptor *success_desc = NULL;
3092 	struct mpi3_scsi_io_reply *scsi_reply = NULL;
3093 	struct scsi_cmnd *scmd = NULL;
3094 	struct scmd_priv *priv = NULL;
3095 	u8 *sense_buf = NULL;
3096 	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
3097 	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
3098 	u16 dev_handle = 0xFFFF;
3099 	struct scsi_sense_hdr sshdr;
3100 	struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
3101 	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3102 	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
3103 	struct mpi3mr_throttle_group_info *tg = NULL;
3104 	u8 throttle_enabled_dev = 0;
3105 
3106 	*reply_dma = 0;
3107 	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
3108 	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
3109 	switch (reply_desc_type) {
3110 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
3111 		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
3112 		host_tag = le16_to_cpu(status_desc->host_tag);
3113 		ioc_status = le16_to_cpu(status_desc->ioc_status);
3114 		if (ioc_status &
3115 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3116 			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
3117 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3118 		break;
3119 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
3120 		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
3121 		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
3122 		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
3123 		    *reply_dma);
3124 		if (!scsi_reply) {
3125 			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
3126 			    mrioc->name);
3127 			goto out;
3128 		}
3129 		host_tag = le16_to_cpu(scsi_reply->host_tag);
3130 		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
3131 		scsi_status = scsi_reply->scsi_status;
3132 		scsi_state = scsi_reply->scsi_state;
3133 		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
3134 		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
3135 		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
3136 		sense_count = le32_to_cpu(scsi_reply->sense_count);
3137 		resp_data = le32_to_cpu(scsi_reply->response_data);
3138 		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
3139 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
3140 		if (ioc_status &
3141 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
3142 			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
3143 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
3144 		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
3145 			panic("%s: Ran out of sense buffers\n", mrioc->name);
3146 		break;
3147 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
3148 		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
3149 		host_tag = le16_to_cpu(success_desc->host_tag);
3150 		break;
3151 	default:
3152 		break;
3153 	}
3154 	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
3155 	if (!scmd) {
3156 		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
3157 		    mrioc->name, host_tag);
3158 		goto out;
3159 	}
3160 	priv = scsi_cmd_priv(scmd);
3161 
3162 	data_len_blks = scsi_bufflen(scmd) >> 9;
3163 	sdev_priv_data = scmd->device->hostdata;
3164 	if (sdev_priv_data) {
3165 		stgt_priv_data = sdev_priv_data->tgt_priv_data;
3166 		if (stgt_priv_data) {
3167 			tg = stgt_priv_data->throttle_group;
3168 			throttle_enabled_dev =
3169 			    stgt_priv_data->io_throttle_enabled;
3170 		}
3171 	}
3172 	if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
3173 	    throttle_enabled_dev)) {
3174 		ioc_pend_data_len = atomic_sub_return(data_len_blks,
3175 		    &mrioc->pend_large_data_sz);
3176 		if (tg) {
3177 			tg_pend_data_len = atomic_sub_return(data_len_blks,
3178 			    &tg->pend_large_data_sz);
3179 			if (tg->io_divert && ((ioc_pend_data_len <=
3180 			    mrioc->io_throttle_low) &&
3181 			    (tg_pend_data_len <= tg->low))) {
3182 				tg->io_divert = 0;
3183 				mpi3mr_set_io_divert_for_all_vd_in_tg(
3184 				    mrioc, tg, 0);
3185 			}
3186 		} else {
3187 			if (ioc_pend_data_len <= mrioc->io_throttle_low)
3188 				stgt_priv_data->io_divert = 0;
3189 		}
3190 	} else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
3191 		ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
3192 		if (!tg) {
3193 			if (ioc_pend_data_len <= mrioc->io_throttle_low)
3194 				stgt_priv_data->io_divert = 0;
3195 
3196 		} else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
3197 			tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
3198 			if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
3199 				tg->io_divert = 0;
3200 				mpi3mr_set_io_divert_for_all_vd_in_tg(
3201 				    mrioc, tg, 0);
3202 			}
3203 		}
3204 	}
3205 
3206 	if (success_desc) {
3207 		scmd->result = DID_OK << 16;
3208 		goto out_success;
3209 	}
3210 
3211 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
3212 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
3213 	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
3214 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
3215 	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
3216 		ioc_status = MPI3_IOCSTATUS_SUCCESS;
3217 
3218 	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
3219 	    sense_buf) {
3220 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
3221 
3222 		memcpy(scmd->sense_buffer, sense_buf, sz);
3223 	}
3224 
3225 	switch (ioc_status) {
3226 	case MPI3_IOCSTATUS_BUSY:
3227 	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
3228 		scmd->result = SAM_STAT_BUSY;
3229 		break;
3230 	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3231 		scmd->result = DID_NO_CONNECT << 16;
3232 		break;
3233 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3234 		scmd->result = DID_SOFT_ERROR << 16;
3235 		break;
3236 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
3237 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
3238 		scmd->result = DID_RESET << 16;
3239 		break;
3240 	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3241 		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
3242 			scmd->result = DID_SOFT_ERROR << 16;
3243 		else
3244 			scmd->result = (DID_OK << 16) | scsi_status;
3245 		break;
3246 	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
3247 		scmd->result = (DID_OK << 16) | scsi_status;
3248 		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
3249 			break;
3250 		if (xfer_count < scmd->underflow) {
3251 			if (scsi_status == SAM_STAT_BUSY)
3252 				scmd->result = SAM_STAT_BUSY;
3253 			else
3254 				scmd->result = DID_SOFT_ERROR << 16;
3255 		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3256 		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
3257 			scmd->result = DID_SOFT_ERROR << 16;
3258 		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3259 			scmd->result = DID_RESET << 16;
3260 		break;
3261 	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
3262 		scsi_set_resid(scmd, 0);
3263 		fallthrough;
3264 	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
3265 	case MPI3_IOCSTATUS_SUCCESS:
3266 		scmd->result = (DID_OK << 16) | scsi_status;
3267 		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
3268 		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
3269 			(sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
3270 			scmd->result = DID_SOFT_ERROR << 16;
3271 		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
3272 			scmd->result = DID_RESET << 16;
3273 		break;
3274 	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
3275 	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
3276 	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
3277 		mpi3mr_map_eedp_error(scmd, ioc_status);
3278 		break;
3279 	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3280 	case MPI3_IOCSTATUS_INVALID_FUNCTION:
3281 	case MPI3_IOCSTATUS_INVALID_SGL:
3282 	case MPI3_IOCSTATUS_INTERNAL_ERROR:
3283 	case MPI3_IOCSTATUS_INVALID_FIELD:
3284 	case MPI3_IOCSTATUS_INVALID_STATE:
3285 	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
3286 	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3287 	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
3288 	default:
3289 		scmd->result = DID_SOFT_ERROR << 16;
3290 		break;
3291 	}
3292 
3293 	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
3294 	    (scmd->cmnd[0] != ATA_16) &&
3295 	    mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) {
3296 		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
3297 		    scmd->result);
3298 		scsi_print_command(scmd);
3299 		ioc_info(mrioc,
3300 		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
3301 		    __func__, dev_handle, ioc_status, ioc_loginfo,
3302 		    priv->req_q_idx + 1);
3303 		ioc_info(mrioc,
3304 		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
3305 		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
3306 		if (sense_buf) {
3307 			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
3308 			ioc_info(mrioc,
3309 			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
3310 			    __func__, sense_count, sshdr.sense_key,
3311 			    sshdr.asc, sshdr.ascq);
3312 		}
3313 	}
3314 out_success:
3315 	if (priv->meta_sg_valid) {
3316 		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
3317 		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
3318 	}
3319 	mpi3mr_clear_scmd_priv(mrioc, scmd);
3320 	scsi_dma_unmap(scmd);
3321 	scsi_done(scmd);
3322 out:
3323 	if (sense_buf)
3324 		mpi3mr_repost_sense_buf(mrioc,
3325 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
3326 }
3327 
3328 /**
3329  * mpi3mr_get_chain_idx - get free chain buffer index
3330  * @mrioc: Adapter instance reference
3331  *
3332  * Try to get a free chain buffer index from the free pool.
3333  *
3334  * Return: -1 on failure or the free chain buffer index
3335  */
3336 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
3337 {
3338 	u8 retry_count = 5;
3339 	int cmd_idx = -1;
3340 	unsigned long flags;
3341 
3342 	spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
3343 	do {
3344 		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
3345 		    mrioc->chain_buf_count);
3346 		if (cmd_idx < mrioc->chain_buf_count) {
3347 			set_bit(cmd_idx, mrioc->chain_bitmap);
3348 			break;
3349 		}
3350 		cmd_idx = -1;
3351 	} while (retry_count--);
3352 	spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
3353 	return cmd_idx;
3354 }
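
/*
 * Usage sketch: a chain index obtained here is stashed in the command's
 * scmd_priv (priv->chain_idx or priv->meta_chain_idx, see
 * mpi3mr_prepare_sg_scmd() below) and is presumably released by clearing
 * the same bit in mrioc->chain_bitmap on the completion path, e.g.:
 *
 *	int idx = mpi3mr_get_chain_idx(mrioc);
 *
 *	if (idx < 0)
 *		return -1;
 *	... use mrioc->chain_sgl_list[idx] ...
 *	clear_bit(idx, mrioc->chain_bitmap);
 */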
3355 
3356 /**
3357  * mpi3mr_prepare_sg_scmd - build scatter gather list
3358  * @mrioc: Adapter instance reference
3359  * @scmd: SCSI command reference
3360  * @scsiio_req: MPI3 SCSI IO request
3361  *
3362  * This function maps the SCSI command's data and protection SGEs to
3363  * MPI request SGEs. If required, an additional 4K chain buffer is
3364  * used to send the SGEs.
3365  *
3366  * Return: 0 on success, -ENOMEM on dma_map_sg failure
3367  */
3368 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
3369 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3370 {
3371 	dma_addr_t chain_dma;
3372 	struct scatterlist *sg_scmd;
3373 	void *sg_local, *chain;
3374 	u32 chain_length;
3375 	int sges_left, chain_idx;
3376 	u32 sges_in_segment;
3377 	u8 simple_sgl_flags;
3378 	u8 simple_sgl_flags_last;
3379 	u8 last_chain_sgl_flags;
3380 	struct chain_element *chain_req;
3381 	struct scmd_priv *priv = NULL;
3382 	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
3383 	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;
3384 
3385 	priv = scsi_cmd_priv(scmd);
3386 
3387 	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
3388 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
3389 	simple_sgl_flags_last = simple_sgl_flags |
3390 	    MPI3_SGE_FLAGS_END_OF_LIST;
3391 	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
3392 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
3393 
3394 	if (meta_sg)
3395 		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
3396 	else
3397 		sg_local = &scsiio_req->sgl;
3398 
3399 	if (!scsiio_req->data_length && !meta_sg) {
3400 		mpi3mr_build_zero_len_sge(sg_local);
3401 		return 0;
3402 	}
3403 
3404 	if (meta_sg) {
3405 		sg_scmd = scsi_prot_sglist(scmd);
3406 		sges_left = dma_map_sg(&mrioc->pdev->dev,
3407 		    scsi_prot_sglist(scmd),
3408 		    scsi_prot_sg_count(scmd),
3409 		    scmd->sc_data_direction);
3410 		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
3411 	} else {
3412 		sg_scmd = scsi_sglist(scmd);
3413 		sges_left = scsi_dma_map(scmd);
3414 	}
3415 
3416 	if (sges_left < 0) {
3417 		sdev_printk(KERN_ERR, scmd->device,
3418 		    "scsi_dma_map failed: request for %d bytes!\n",
3419 		    scsi_bufflen(scmd));
3420 		return -ENOMEM;
3421 	}
3422 	if (sges_left > mrioc->max_sgl_entries) {
3423 		sdev_printk(KERN_ERR, scmd->device,
3424 		    "scsi_dma_map returned unsupported sge count %d!\n",
3425 		    sges_left);
3426 		return -ENOMEM;
3427 	}
3428 
3429 	sges_in_segment = (mrioc->facts.op_req_sz -
3430 	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
3431 
3432 	if (scsiio_req->sgl[0].eedp.flags ==
3433 	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
3434 		sg_local += sizeof(struct mpi3_sge_common);
3435 		sges_in_segment--;
3436 		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
3437 	}
3438 
3439 	if (scsiio_req->msg_flags ==
3440 	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
3441 		sges_in_segment--;
3442 		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
3443 	}
3444 
3445 	if (meta_sg)
3446 		sges_in_segment = 1;
3447 
3448 	if (sges_left <= sges_in_segment)
3449 		goto fill_in_last_segment;
3450 
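	/*
	 * Layout sketch when a chain is needed (example with a 4-SGE
	 * main segment):
	 *   main segment : SGE0 | SGE1 | SGE2 | LAST_CHAIN
	 *   chain buffer : SGE3 | ... | SGEn (END_OF_LIST)
	 * The LAST_CHAIN element carries the chain buffer's DMA
	 * address and the byte length of the remaining simple SGEs.
	 */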
3451 	/* fill in main message segment when there is a chain following */
3452 	while (sges_in_segment > 1) {
3453 		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3454 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3455 		sg_scmd = sg_next(sg_scmd);
3456 		sg_local += sizeof(struct mpi3_sge_common);
3457 		sges_left--;
3458 		sges_in_segment--;
3459 	}
3460 
3461 	chain_idx = mpi3mr_get_chain_idx(mrioc);
3462 	if (chain_idx < 0)
3463 		return -1;
3464 	chain_req = &mrioc->chain_sgl_list[chain_idx];
3465 	if (meta_sg)
3466 		priv->meta_chain_idx = chain_idx;
3467 	else
3468 		priv->chain_idx = chain_idx;
3469 
3470 	chain = chain_req->addr;
3471 	chain_dma = chain_req->dma_addr;
3472 	sges_in_segment = sges_left;
3473 	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
3474 
3475 	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
3476 	    chain_length, chain_dma);
3477 
3478 	sg_local = chain;
3479 
3480 fill_in_last_segment:
3481 	while (sges_left > 0) {
3482 		if (sges_left == 1)
3483 			mpi3mr_add_sg_single(sg_local,
3484 			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
3485 			    sg_dma_address(sg_scmd));
3486 		else
3487 			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
3488 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
3489 		sg_scmd = sg_next(sg_scmd);
3490 		sg_local += sizeof(struct mpi3_sge_common);
3491 		sges_left--;
3492 	}
3493 
3494 	return 0;
3495 }
3496 
3497 /**
3498  * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
3499  * @mrioc: Adapter instance reference
3500  * @scmd: SCSI command reference
3501  * @scsiio_req: MPI3 SCSI IO request
3502  *
3503  * This function calls mpi3mr_prepare_sg_scmd for constructing
3504  * both data SGEs and protection information SGEs in the MPI
3505  * format from the SCSI command, as appropriate.
3506  *
3507  * Return: return value of mpi3mr_prepare_sg_scmd.
3508  */
3509 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
3510 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
3511 {
3512 	int ret;
3513 
3514 	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3515 	if (ret)
3516 		return ret;
3517 
3518 	if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
3519 		/* There is a valid meta sg */
3520 		scsiio_req->flags |=
3521 		    cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
3522 		ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
3523 	}
3524 
3525 	return ret;
3526 }
3527 
3528 /**
3529  * mpi3mr_tm_response_name -  get TM response as a string
3530  * @resp_code: TM response code
3531  *
3532  * Convert a known task management response code to a readable
3533  * string.
3534  *
3535  * Return: response code string.
3536  */
3537 static const char *mpi3mr_tm_response_name(u8 resp_code)
3538 {
3539 	const char *desc;
3540 
3541 	switch (resp_code) {
3542 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3543 		desc = "task management request completed";
3544 		break;
3545 	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
3546 		desc = "invalid frame";
3547 		break;
3548 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
3549 		desc = "task management request not supported";
3550 		break;
3551 	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
3552 		desc = "task management request failed";
3553 		break;
3554 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3555 		desc = "task management request succeeded";
3556 		break;
3557 	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
3558 		desc = "invalid LUN";
3559 		break;
3560 	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
3561 		desc = "overlapped tag attempted";
3562 		break;
3563 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3564 		desc = "task queued, however not sent to target";
3565 		break;
3566 	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
3567 		desc = "task management request denied by NVMe device";
3568 		break;
3569 	default:
3570 		desc = "unknown";
3571 		break;
3572 	}
3573 
3574 	return desc;
3575 }
3576 
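/**
 * mpi3mr_poll_pend_io_completions - poll for pending I/O completions
 * @mrioc: Adapter instance reference
 *
 * Walk all of the adapter's operational reply queues and process
 * any posted reply descriptors, so that outstanding I/O
 * completions are drained without relying on interrupts.
 *
 * Return: Nothing.
 */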
3577 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
3578 {
3579 	int i;
3580 	int num_of_reply_queues =
3581 	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
3582 
3583 	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
3584 		mpi3mr_process_op_reply_q(mrioc,
3585 		    mrioc->intr_info[i].op_reply_q);
3586 }
3587 
3588 /**
3589  * mpi3mr_issue_tm - Issue Task Management request
3590  * @mrioc: Adapter instance reference
3591  * @tm_type: Task Management type
3592  * @handle: Device handle
3593  * @lun: lun ID
3594  * @htag: Host tag of the TM request
3595  * @timeout: TM timeout value
3596  * @drv_cmd: Internal command tracker
3597  * @resp_code: Response code place holder
3598  * @scmd: SCSI command
3599  *
3600  * Issues a Task Management Request to the controller for a
3601  * specified target, lun and command, waits for its completion,
3602  * and checks the TM response. If the TM times out, recovers by
3603  * issuing a controller reset.
3604  *
3605  * Return: 0 on success, non-zero on errors
3606  */
3607 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
3608 	u16 handle, uint lun, u16 htag, ulong timeout,
3609 	struct mpi3mr_drv_cmd *drv_cmd,
3610 	u8 *resp_code, struct scsi_cmnd *scmd)
3611 {
3612 	struct mpi3_scsi_task_mgmt_request tm_req;
3613 	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
3614 	int retval = 0;
3615 	struct mpi3mr_tgt_dev *tgtdev = NULL;
3616 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
3617 	struct scmd_priv *cmd_priv = NULL;
3618 	struct scsi_device *sdev = NULL;
3619 	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
3620 
3621 	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
3622 	     __func__, tm_type, handle);
3623 	if (mrioc->unrecoverable) {
3624 		retval = -1;
3625 		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
3626 		    __func__);
3627 		goto out;
3628 	}
3629 
3630 	memset(&tm_req, 0, sizeof(tm_req));
3631 	mutex_lock(&drv_cmd->mutex);
3632 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
3633 		retval = -1;
3634 		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
3635 		mutex_unlock(&drv_cmd->mutex);
3636 		goto out;
3637 	}
3638 	if (mrioc->reset_in_progress) {
3639 		retval = -1;
3640 		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
3641 		mutex_unlock(&drv_cmd->mutex);
3642 		goto out;
3643 	}
3644 
3645 	drv_cmd->state = MPI3MR_CMD_PENDING;
3646 	drv_cmd->is_waiting = 1;
3647 	drv_cmd->callback = NULL;
3648 	tm_req.dev_handle = cpu_to_le16(handle);
3649 	tm_req.task_type = tm_type;
3650 	tm_req.host_tag = cpu_to_le16(htag);
3651 
3652 	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
3653 	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
3654 
3655 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
3656 
3657 	if (scmd) {
3658 		sdev = scmd->device;
3659 		sdev_priv_data = sdev->hostdata;
3660 		scsi_tgt_priv_data = ((sdev_priv_data) ?
3661 		    sdev_priv_data->tgt_priv_data : NULL);
3662 	} else {
3663 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
3664 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
3665 			    tgtdev->starget->hostdata;
3666 	}
3667 
3668 	if (scsi_tgt_priv_data)
3669 		atomic_inc(&scsi_tgt_priv_data->block_io);
3670 
3671 	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
3672 		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
3673 			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
3674 		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
3675 			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
3676 	}
3677 
3678 	init_completion(&drv_cmd->done);
3679 	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
3680 	if (retval) {
3681 		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
3682 		goto out_unlock;
3683 	}
3684 	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
3685 
3686 	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
3687 		drv_cmd->is_waiting = 0;
3688 		retval = -1;
3689 		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
3690 			dprint_tm(mrioc,
3691 			    "task management request timed out after %ld seconds\n",
3692 			    timeout);
3693 			if (mrioc->logging_level & MPI3_DEBUG_TM)
3694 				dprint_dump_req(&tm_req, sizeof(tm_req)/4);
3695 			mpi3mr_soft_reset_handler(mrioc,
3696 			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
3697 		}
3698 		goto out_unlock;
3699 	}
3700 
3701 	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
3702 		dprint_tm(mrioc, "invalid task management reply message\n");
3703 		retval = -1;
3704 		goto out_unlock;
3705 	}
3706 
3707 	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
3708 
3709 	switch (drv_cmd->ioc_status) {
3710 	case MPI3_IOCSTATUS_SUCCESS:
3711 		*resp_code = le32_to_cpu(tm_reply->response_data) &
3712 			MPI3MR_RI_MASK_RESPCODE;
3713 		break;
3714 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
3715 		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
3716 		break;
3717 	default:
3718 		dprint_tm(mrioc,
3719 		    "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
3720 		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
3721 		retval = -1;
3722 		goto out_unlock;
3723 	}
3724 
3725 	switch (*resp_code) {
3726 	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
3727 	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
3728 		break;
3729 	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
3730 		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3731 			retval = -1;
3732 		break;
3733 	default:
3734 		retval = -1;
3735 		break;
3736 	}
3737 
3738 	dprint_tm(mrioc,
3739 	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
3740 	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
3741 	    le32_to_cpu(tm_reply->termination_count),
3742 	    mpi3mr_tm_response_name(*resp_code), *resp_code);
3743 
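	/*
	 * On a successful TM, drain any completions the firmware has
	 * already posted for the terminated I/Os before the pending
	 * commands on the target or LUN are counted below.
	 */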
3744 	if (!retval) {
3745 		mpi3mr_ioc_disable_intr(mrioc);
3746 		mpi3mr_poll_pend_io_completions(mrioc);
3747 		mpi3mr_ioc_enable_intr(mrioc);
3748 		mpi3mr_poll_pend_io_completions(mrioc);
3749 		mpi3mr_process_admin_reply_q(mrioc);
3750 	}
3751 	switch (tm_type) {
3752 	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3753 		if (!scsi_tgt_priv_data)
3754 			break;
3755 		scsi_tgt_priv_data->pend_count = 0;
3756 		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
3757 		    mpi3mr_count_tgt_pending,
3758 		    (void *)scsi_tgt_priv_data->starget);
3759 		break;
3760 	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3761 		if (!sdev_priv_data)
3762 			break;
3763 		sdev_priv_data->pend_count = 0;
3764 		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
3765 		    mpi3mr_count_dev_pending, (void *)sdev);
3766 		break;
3767 	default:
3768 		break;
3769 	}
3770 
3771 out_unlock:
3772 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3773 	mutex_unlock(&drv_cmd->mutex);
3774 	if (scsi_tgt_priv_data)
3775 		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3776 	if (tgtdev)
3777 		mpi3mr_tgtdev_put(tgtdev);
3778 out:
3779 	return retval;
3780 }
3781 
3782 /**
3783  * mpi3mr_bios_param - BIOS param callback
3784  * @sdev: SCSI device reference
3785  * @bdev: Block device reference
3786  * @capacity: Capacity in logical sectors
3787  * @params: Parameter array
3788  *
3789  * Just sets the geometry parameters: heads, sectors and cylinders.
3790  *
3791  * Return: 0 always
3792  */
3793 static int mpi3mr_bios_param(struct scsi_device *sdev,
3794 	struct block_device *bdev, sector_t capacity, int params[])
3795 {
3796 	int heads;
3797 	int sectors;
3798 	sector_t cylinders;
3799 	ulong dummy;
3800 
3801 	heads = 64;
3802 	sectors = 32;
3803 
3804 	dummy = heads * sectors;
3805 	cylinders = capacity;
3806 	sector_div(cylinders, dummy);
3807 
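	/*
	 * For capacities of 1 GiB and above (0x200000 512-byte
	 * sectors), switch to the conventional large-disk translation
	 * of 255 heads and 63 sectors per track.
	 */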
3808 	if ((ulong)capacity >= 0x200000) {
3809 		heads = 255;
3810 		sectors = 63;
3811 		dummy = heads * sectors;
3812 		cylinders = capacity;
3813 		sector_div(cylinders, dummy);
3814 	}
3815 
3816 	params[0] = heads;
3817 	params[1] = sectors;
3818 	params[2] = cylinders;
3819 	return 0;
3820 }
3821 
3822 /**
3823  * mpi3mr_map_queues - Map queues callback handler
3824  * @shost: SCSI host reference
3825  *
3826  * Maps default and poll queues.
3827  *
3828  * Return: Nothing.
3829  */
3830 static void mpi3mr_map_queues(struct Scsi_Host *shost)
3831 {
3832 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3833 	int i, qoff, offset;
3834 	struct blk_mq_queue_map *map = NULL;
3835 
3836 	offset = mrioc->op_reply_q_offset;
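	/*
	 * 'offset' is the starting MSI-x vector index of the
	 * operational queues; it advances per map so that
	 * blk_mq_pci_map_queues() can derive the CPU affinity of each
	 * IRQ-driven queue.
	 */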
3837 
3838 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3839 		map = &shost->tag_set.map[i];
3840 
3841 		map->nr_queues  = 0;
3842 
3843 		if (i == HCTX_TYPE_DEFAULT)
3844 			map->nr_queues = mrioc->default_qcount;
3845 		else if (i == HCTX_TYPE_POLL)
3846 			map->nr_queues = mrioc->active_poll_qcount;
3847 
3848 		if (!map->nr_queues) {
3849 			BUG_ON(i == HCTX_TYPE_DEFAULT);
3850 			continue;
3851 		}
3852 
3853 		/*
3854 		 * The poll queues don't have an IRQ (and hence no IRQ
3855 		 * affinity), so use the regular blk-mq cpu mapping
3856 		 */
3857 		map->queue_offset = qoff;
3858 		if (i != HCTX_TYPE_POLL)
3859 			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3860 		else
3861 			blk_mq_map_queues(map);
3862 
3863 		qoff += map->nr_queues;
3864 		offset += map->nr_queues;
3865 	}
3866 }
3867 
3868 /**
3869  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3870  * @mrioc: Adapter instance reference
3871  *
3872  * Calculate the pending I/Os for the controller and return.
3873  *
3874  * Return: Number of pending I/Os
3875  */
3876 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3877 {
3878 	u16 i;
3879 	uint pend_ios = 0;
3880 
3881 	for (i = 0; i < mrioc->num_op_reply_q; i++)
3882 		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3883 	return pend_ios;
3884 }
3885 
3886 /**
3887  * mpi3mr_print_pending_host_io - print pending I/Os
3888  * @mrioc: Adapter instance reference
3889  *
3890  * Print number of pending I/Os and each I/O details prior to
3891  * reset for debug purpose.
3892  *
3893  * Return: Nothing
3894  */
3895 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3896 {
3897 	struct Scsi_Host *shost = mrioc->shost;
3898 
3899 	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3900 	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
3901 	blk_mq_tagset_busy_iter(&shost->tag_set,
3902 	    mpi3mr_print_scmd, (void *)mrioc);
3903 }
3904 
3905 /**
3906  * mpi3mr_wait_for_host_io - block for I/Os to complete
3907  * @mrioc: Adapter instance reference
3908  * @timeout: timeout in seconds
3909  *
3910  * Waits for the adapter's pending I/Os to complete or time out.
3911  *
3912  * Return: Nothing
3913  */
3914 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3915 {
3916 	enum mpi3mr_iocstate iocstate;
3917 	int i = 0;
3918 
3919 	iocstate = mpi3mr_get_iocstate(mrioc);
3920 	if (iocstate != MRIOC_STATE_READY)
3921 		return;
3922 
3923 	if (!mpi3mr_get_fw_pending_ios(mrioc))
3924 		return;
3925 	ioc_info(mrioc,
3926 	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3927 	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3928 
3929 	for (i = 0; i < timeout; i++) {
3930 		if (!mpi3mr_get_fw_pending_ios(mrioc))
3931 			break;
3932 		iocstate = mpi3mr_get_iocstate(mrioc);
3933 		if (iocstate != MRIOC_STATE_READY)
3934 			break;
3935 		msleep(1000);
3936 	}
3937 
3938 	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3939 	    mpi3mr_get_fw_pending_ios(mrioc));
3940 }
3941 
3942 /**
3943  * mpi3mr_eh_host_reset - Host reset error handling callback
3944  * @scmd: SCSI command reference
3945  *
3946  * Issues a controller reset if the scmd is for a physical device;
3947  * if the scmd is for a RAID volume, waits for
3948  * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checks whether any I/Os
3949  * are still pending before issuing the reset to the controller.
3950  *
3951  * Return: SUCCESS on successful reset, else FAILED
3952  */
3953 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
3954 {
3955 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3956 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
3957 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
3958 	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
3959 	int retval = FAILED, ret;
3960 
3961 	sdev_priv_data = scmd->device->hostdata;
3962 	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
3963 		stgt_priv_data = sdev_priv_data->tgt_priv_data;
3964 		dev_type = stgt_priv_data->dev_type;
3965 	}
3966 
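	/*
	 * For RAID volumes, give outstanding I/Os a chance to drain
	 * first; if nothing is pending after the error-recovery wait,
	 * the controller reset can be skipped.
	 */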
3967 	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
3968 		mpi3mr_wait_for_host_io(mrioc,
3969 		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
3970 		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
3971 			retval = SUCCESS;
3972 			goto out;
3973 		}
3974 	}
3975 
3976 	mpi3mr_print_pending_host_io(mrioc);
3977 	ret = mpi3mr_soft_reset_handler(mrioc,
3978 	    MPI3MR_RESET_FROM_EH_HOS, 1);
3979 	if (ret)
3980 		goto out;
3981 
3982 	retval = SUCCESS;
3983 out:
3984 	sdev_printk(KERN_INFO, scmd->device,
3985 	    "Host reset is %s for scmd(%p)\n",
3986 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3987 
3988 	return retval;
3989 }
3990 
3991 /**
3992  * mpi3mr_eh_target_reset - Target reset error handling callback
3993  * @scmd: SCSI command reference
3994  *
3995  * Issues a Target Reset Task Management request, verifies that
3996  * the scmd is terminated successfully and returns status accordingly.
3997  *
3998  * Return: SUCCESS on successful termination of the scmd, else
3999  *         FAILED
4000  */
4001 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4002 {
4003 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4004 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4005 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4006 	u16 dev_handle;
4007 	u8 resp_code = 0;
4008 	int retval = FAILED, ret = 0;
4009 
4010 	sdev_printk(KERN_INFO, scmd->device,
4011 	    "Attempting Target Reset! scmd(%p)\n", scmd);
4012 	scsi_print_command(scmd);
4013 
4014 	sdev_priv_data = scmd->device->hostdata;
4015 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4016 		sdev_printk(KERN_INFO, scmd->device,
4017 		    "SCSI device is not available\n");
4018 		retval = SUCCESS;
4019 		goto out;
4020 	}
4021 
4022 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4023 	dev_handle = stgt_priv_data->dev_handle;
4024 	if (stgt_priv_data->dev_removed) {
4025 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4026 		sdev_printk(KERN_INFO, scmd->device,
4027 		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4028 		    mrioc->name, dev_handle);
4029 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4030 			retval = SUCCESS;
4031 		else
4032 			retval = FAILED;
4033 		goto out;
4034 	}
4035 	sdev_printk(KERN_INFO, scmd->device,
4036 	    "Target Reset is issued to handle(0x%04x)\n",
4037 	    dev_handle);
4038 
4039 	ret = mpi3mr_issue_tm(mrioc,
4040 	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4041 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4042 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4043 
4044 	if (ret)
4045 		goto out;
4046 
4047 	if (stgt_priv_data->pend_count) {
4048 		sdev_printk(KERN_INFO, scmd->device,
4049 		    "%s: target has %d pending commands, target reset is failed\n",
4050 		    mrioc->name, stgt_priv_data->pend_count);
4051 		goto out;
4052 	}
4053 
4054 	retval = SUCCESS;
4055 out:
4056 	sdev_printk(KERN_INFO, scmd->device,
4057 	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4058 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4059 
4060 	return retval;
4061 }
4062 
4063 /**
4064  * mpi3mr_eh_dev_reset- Device reset error handling callback
4065  * @scmd: SCSI command reference
4066  *
4067  * Issues a LUN Reset Task Management request, verifies that
4068  * the scmd is terminated successfully and returns status accordingly.
4069  *
4070  * Return: SUCCESS on successful termination of the scmd, else
4071  *         FAILED
4072  */
4073 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4074 {
4075 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4076 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4077 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4078 	u16 dev_handle;
4079 	u8 resp_code = 0;
4080 	int retval = FAILED, ret = 0;
4081 
4082 	sdev_printk(KERN_INFO, scmd->device,
4083 	    "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
4084 	scsi_print_command(scmd);
4085 
4086 	sdev_priv_data = scmd->device->hostdata;
4087 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4088 		sdev_printk(KERN_INFO, scmd->device,
4089 		    "SCSI device is not available\n");
4090 		retval = SUCCESS;
4091 		goto out;
4092 	}
4093 
4094 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4095 	dev_handle = stgt_priv_data->dev_handle;
4096 	if (stgt_priv_data->dev_removed) {
4097 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4098 		sdev_printk(KERN_INFO, scmd->device,
4099 		    "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4100 		    mrioc->name, dev_handle);
4101 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4102 			retval = SUCCESS;
4103 		else
4104 			retval = FAILED;
4105 		goto out;
4106 	}
4107 	sdev_printk(KERN_INFO, scmd->device,
4108 	    "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4109 
4110 	ret = mpi3mr_issue_tm(mrioc,
4111 	    MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4112 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4113 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4114 
4115 	if (ret)
4116 		goto out;
4117 
4118 	if (sdev_priv_data->pend_count) {
4119 		sdev_printk(KERN_INFO, scmd->device,
4120 		    "%s: device has %d pending commands, device(LUN) reset is failed\n",
4121 		    mrioc->name, sdev_priv_data->pend_count);
4122 		goto out;
4123 	}
4124 	retval = SUCCESS;
4125 out:
4126 	sdev_printk(KERN_INFO, scmd->device,
4127 	    "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4128 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4129 
4130 	return retval;
4131 }
4132 
4133 /**
4134  * mpi3mr_scan_start - Scan start callback handler
4135  * @shost: SCSI host reference
4136  *
4137  * Issue port enable request asynchronously.
4138  *
4139  * Return: Nothing
4140  */
4141 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4142 {
4143 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4144 
4145 	mrioc->scan_started = 1;
4146 	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4147 	if (mpi3mr_issue_port_enable(mrioc, 1)) {
4148 		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4149 		mrioc->scan_started = 0;
4150 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4151 	}
4152 }
4153 
4154 /**
4155  * mpi3mr_scan_finished - Scan finished callback handler
4156  * @shost: SCSI host reference
4157  * @time: Jiffies from the scan start
4158  *
4159  * Checks whether the port enable has completed, timed out or
4160  * failed, and sets the scan status accordingly after taking any
4161  * recovery action required.
4162  *
4163  * Return: 1 on scan finished or timed out, 0 for in progress
4164  */
4165 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
4166 	unsigned long time)
4167 {
4168 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4169 	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
4170 	u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
4171 
4172 	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
4173 	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
4174 		ioc_err(mrioc, "port enable failed due to fault or reset\n");
4175 		mpi3mr_print_fault_info(mrioc);
4176 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4177 		mrioc->scan_started = 0;
4178 		mrioc->init_cmds.is_waiting = 0;
4179 		mrioc->init_cmds.callback = NULL;
4180 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4181 	}
4182 
4183 	if (time >= (pe_timeout * HZ)) {
4184 		ioc_err(mrioc, "port enable failed due to time out\n");
4185 		mpi3mr_check_rh_fault_ioc(mrioc,
4186 		    MPI3MR_RESET_FROM_PE_TIMEOUT);
4187 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4188 		mrioc->scan_started = 0;
4189 		mrioc->init_cmds.is_waiting = 0;
4190 		mrioc->init_cmds.callback = NULL;
4191 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
4192 	}
4193 
4194 	if (mrioc->scan_started)
4195 		return 0;
4196 
4197 	if (mrioc->scan_failed) {
4198 		ioc_err(mrioc,
4199 		    "port enable failed with status=0x%04x\n",
4200 		    mrioc->scan_failed);
4201 	} else
4202 		ioc_info(mrioc, "port enable is successfully completed\n");
4203 
4204 	mpi3mr_start_watchdog(mrioc);
4205 	mrioc->is_driver_loading = 0;
4206 	mrioc->stop_bsgs = 0;
4207 	return 1;
4208 }
4209 
4210 /**
4211  * mpi3mr_slave_destroy - Slave destroy callback handler
4212  * @sdev: SCSI device reference
4213  *
4214  * Cleanup and free per device(lun) private data.
4215  *
4216  * Return: Nothing.
4217  */
4218 static void mpi3mr_slave_destroy(struct scsi_device *sdev)
4219 {
4220 	struct Scsi_Host *shost;
4221 	struct mpi3mr_ioc *mrioc;
4222 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4223 	struct mpi3mr_tgt_dev *tgt_dev = NULL;
4224 	unsigned long flags;
4225 	struct scsi_target *starget;
4226 	struct sas_rphy *rphy = NULL;
4227 
4228 	if (!sdev->hostdata)
4229 		return;
4230 
4231 	starget = scsi_target(sdev);
4232 	shost = dev_to_shost(&starget->dev);
4233 	mrioc = shost_priv(shost);
4234 	scsi_tgt_priv_data = starget->hostdata;
4235 
4236 	scsi_tgt_priv_data->num_luns--;
4237 
4238 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4239 	if (starget->channel == mrioc->scsi_device_channel)
4240 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4241 	else if (mrioc->sas_transport_enabled && !starget->channel) {
4242 		rphy = dev_to_rphy(starget->dev.parent);
4243 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4244 		    rphy->identify.sas_address, rphy);
4245 	}
4246 
4247 	if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
4248 		tgt_dev->starget = NULL;
4249 	if (tgt_dev)
4250 		mpi3mr_tgtdev_put(tgt_dev);
4251 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4252 
4253 	kfree(sdev->hostdata);
4254 	sdev->hostdata = NULL;
4255 }
4256 
4257 /**
4258  * mpi3mr_target_destroy - Target destroy callback handler
4259  * @starget: SCSI target reference
4260  *
4261  * Cleanup and free per target private data.
4262  *
4263  * Return: Nothing.
4264  */
4265 static void mpi3mr_target_destroy(struct scsi_target *starget)
4266 {
4267 	struct Scsi_Host *shost;
4268 	struct mpi3mr_ioc *mrioc;
4269 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4270 	struct mpi3mr_tgt_dev *tgt_dev;
4271 	unsigned long flags;
4272 
4273 	if (!starget->hostdata)
4274 		return;
4275 
4276 	shost = dev_to_shost(&starget->dev);
4277 	mrioc = shost_priv(shost);
4278 	scsi_tgt_priv_data = starget->hostdata;
4279 
4280 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4281 	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
4282 	if (tgt_dev && (tgt_dev->starget == starget) &&
4283 	    (tgt_dev->perst_id == starget->id))
4284 		tgt_dev->starget = NULL;
4285 	if (tgt_dev) {
4286 		scsi_tgt_priv_data->tgt_dev = NULL;
4287 		scsi_tgt_priv_data->perst_id = 0;
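		/*
		 * Drop two references: the one taken by the lookup
		 * above and the one that was held through the target
		 * private data.
		 */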
4288 		mpi3mr_tgtdev_put(tgt_dev);
4289 		mpi3mr_tgtdev_put(tgt_dev);
4290 	}
4291 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4292 
4293 	kfree(starget->hostdata);
4294 	starget->hostdata = NULL;
4295 }
4296 
4297 /**
4298  * mpi3mr_slave_configure - Slave configure callback handler
4299  * @sdev: SCSI device reference
4300  *
4301  * Configures the queue depth, max hardware sectors and virt
4302  * boundary as required.
4303  *
4304  * Return: 0 always.
4305  */
4306 static int mpi3mr_slave_configure(struct scsi_device *sdev)
4307 {
4308 	struct scsi_target *starget;
4309 	struct Scsi_Host *shost;
4310 	struct mpi3mr_ioc *mrioc;
4311 	struct mpi3mr_tgt_dev *tgt_dev = NULL;
4312 	unsigned long flags;
4313 	int retval = 0;
4314 	struct sas_rphy *rphy = NULL;
4315 
4316 	starget = scsi_target(sdev);
4317 	shost = dev_to_shost(&starget->dev);
4318 	mrioc = shost_priv(shost);
4319 
4320 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4321 	if (starget->channel == mrioc->scsi_device_channel)
4322 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4323 	else if (mrioc->sas_transport_enabled && !starget->channel) {
4324 		rphy = dev_to_rphy(starget->dev.parent);
4325 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4326 		    rphy->identify.sas_address, rphy);
4327 	}
4328 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4329 	if (!tgt_dev)
4330 		return -ENXIO;
4331 
4332 	mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
4333 
4334 	sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
4335 	blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
4336 
4337 	switch (tgt_dev->dev_type) {
4338 	case MPI3_DEVICE_DEVFORM_PCIE:
4339 		/* The block layer hw sector size = 512 */
4340 		if ((tgt_dev->dev_spec.pcie_inf.dev_info &
4341 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4342 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
4343 			blk_queue_max_hw_sectors(sdev->request_queue,
4344 			    tgt_dev->dev_spec.pcie_inf.mdts / 512);
4345 			if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
4346 				blk_queue_virt_boundary(sdev->request_queue,
4347 				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
4348 			else
4349 				blk_queue_virt_boundary(sdev->request_queue,
4350 				    ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
4351 		}
4352 		break;
4353 	default:
4354 		break;
4355 	}
4356 
4357 	mpi3mr_tgtdev_put(tgt_dev);
4358 
4359 	return retval;
4360 }
4361 
4362 /**
4363  * mpi3mr_slave_alloc -Slave alloc callback handler
4364  * @sdev: SCSI device reference
4365  *
4366  * Allocate per device(lun) private data and initialize it.
4367  *
4368  * Return: 0 on success -ENOMEM on memory allocation failure.
4369  */
4370 static int mpi3mr_slave_alloc(struct scsi_device *sdev)
4371 {
4372 	struct Scsi_Host *shost;
4373 	struct mpi3mr_ioc *mrioc;
4374 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4375 	struct mpi3mr_tgt_dev *tgt_dev = NULL;
4376 	struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
4377 	unsigned long flags;
4378 	struct scsi_target *starget;
4379 	int retval = 0;
4380 	struct sas_rphy *rphy = NULL;
4381 
4382 	starget = scsi_target(sdev);
4383 	shost = dev_to_shost(&starget->dev);
4384 	mrioc = shost_priv(shost);
4385 	scsi_tgt_priv_data = starget->hostdata;
4386 
4387 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4388 
4389 	if (starget->channel == mrioc->scsi_device_channel)
4390 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4391 	else if (mrioc->sas_transport_enabled && !starget->channel) {
4392 		rphy = dev_to_rphy(starget->dev.parent);
4393 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4394 		    rphy->identify.sas_address, rphy);
4395 	}
4396 
4397 	if (tgt_dev) {
4398 		if (tgt_dev->starget == NULL)
4399 			tgt_dev->starget = starget;
4400 		mpi3mr_tgtdev_put(tgt_dev);
4401 		retval = 0;
4402 	} else {
4403 		spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4404 		return -ENXIO;
4405 	}
4406 
4407 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4408 
4409 	scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
4410 	if (!scsi_dev_priv_data)
4411 		return -ENOMEM;
4412 
4413 	scsi_dev_priv_data->lun_id = sdev->lun;
4414 	scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
4415 	sdev->hostdata = scsi_dev_priv_data;
4416 
4417 	scsi_tgt_priv_data->num_luns++;
4418 
4419 	return retval;
4420 }
4421 
4422 /**
4423  * mpi3mr_target_alloc - Target alloc callback handler
4424  * @starget: SCSI target reference
4425  *
4426  * Allocate per target private data and initialize it.
4427  *
4428  * Return: 0 on success -ENOMEM on memory allocation failure.
4429  */
4430 static int mpi3mr_target_alloc(struct scsi_target *starget)
4431 {
4432 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4433 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4434 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4435 	struct mpi3mr_tgt_dev *tgt_dev;
4436 	unsigned long flags;
4437 	int retval = 0;
4438 	struct sas_rphy *rphy = NULL;
4439 	bool update_stgt_priv_data = false;
4440 
4441 	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4442 	if (!scsi_tgt_priv_data)
4443 		return -ENOMEM;
4444 
4445 	starget->hostdata = scsi_tgt_priv_data;
4446 
4447 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4448 
4449 	if (starget->channel == mrioc->scsi_device_channel) {
4450 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4451 		if (tgt_dev && !tgt_dev->is_hidden)
4452 			update_stgt_priv_data = true;
4453 		else
4454 			retval = -ENXIO;
4455 	} else if (mrioc->sas_transport_enabled && !starget->channel) {
4456 		rphy = dev_to_rphy(starget->dev.parent);
4457 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4458 		    rphy->identify.sas_address, rphy);
4459 		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4460 		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
4461 			update_stgt_priv_data = true;
4462 		else
4463 			retval = -ENXIO;
4464 	}
4465 
4466 	if (update_stgt_priv_data) {
4467 		scsi_tgt_priv_data->starget = starget;
4468 		scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4469 		scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4470 		scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4471 		scsi_tgt_priv_data->tgt_dev = tgt_dev;
4472 		tgt_dev->starget = starget;
4473 		atomic_set(&scsi_tgt_priv_data->block_io, 0);
4474 		retval = 0;
4475 		scsi_tgt_priv_data->io_throttle_enabled =
4476 		    tgt_dev->io_throttle_enabled;
4477 		if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4478 			scsi_tgt_priv_data->throttle_group =
4479 			    tgt_dev->dev_spec.vd_inf.tg;
4480 	}
4481 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4482 
4483 	return retval;
4484 }
4485 
4486 /**
4487  * mpi3mr_check_return_unmap - Whether an unmap is allowed
4488  * @mrioc: Adapter instance reference
4489  * @scmd: SCSI Command reference
4490  *
4491  * The controller hardware cannot handle certain unmap commands
4492  * for NVMe drives. This routine checks for those, completes the
4493  * SCSI command with the proper status and sense data, and
4494  * returns true.
4495  *
4496  * Return: TRUE for a disallowed unmap, FALSE otherwise.
4497  */
4498 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
4499 	struct scsi_cmnd *scmd)
4500 {
4501 	unsigned char *buf;
4502 	u16 param_len, desc_len, trunc_param_len;
4503 
4504 	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
4505 
4506 	if (mrioc->pdev->revision) {
4507 		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
4508 			trunc_param_len -= (param_len - 8) & 0xF;
4509 			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4510 			dprint_scsi_err(mrioc,
4511 			    "truncating param_len from (%d) to (%d)\n",
4512 			    param_len, trunc_param_len);
4513 			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4514 			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4515 		}
4516 		return false;
4517 	}
4518 
4519 	if (!param_len) {
4520 		ioc_warn(mrioc,
4521 		    "%s: cdb received with zero parameter length\n",
4522 		    __func__);
4523 		scsi_print_command(scmd);
4524 		scmd->result = DID_OK << 16;
4525 		scsi_done(scmd);
4526 		return true;
4527 	}
4528 
4529 	if (param_len < 24) {
4530 		ioc_warn(mrioc,
4531 		    "%s: cdb received with invalid param_len: %d\n",
4532 		    __func__, param_len);
4533 		scsi_print_command(scmd);
4534 		scmd->result = SAM_STAT_CHECK_CONDITION;
4535 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4536 		    0x1A, 0);
4537 		scsi_done(scmd);
4538 		return true;
4539 	}
4540 	if (param_len != scsi_bufflen(scmd)) {
4541 		ioc_warn(mrioc,
4542 		    "%s: cdb received with param_len: %d bufflen: %d\n",
4543 		    __func__, param_len, scsi_bufflen(scmd));
4544 		scsi_print_command(scmd);
4545 		scmd->result = SAM_STAT_CHECK_CONDITION;
4546 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4547 		    0x1A, 0);
4548 		scsi_done(scmd);
4549 		return true;
4550 	}
4551 	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
4552 	if (!buf) {
4553 		scsi_print_command(scmd);
4554 		scmd->result = SAM_STAT_CHECK_CONDITION;
4555 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4556 		    0x55, 0x03);
4557 		scsi_done(scmd);
4558 		return true;
4559 	}
4560 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
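	/*
	 * Per SBC, the UNMAP parameter list begins with an 8-byte
	 * header: bytes 0-1 carry the UNMAP data length, bytes 2-3 the
	 * block descriptor data length, and 16-byte block descriptors
	 * follow.
	 */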
4561 	desc_len = get_unaligned_be16(&buf[2]);
4562 
4563 	if (desc_len < 16) {
4564 		ioc_warn(mrioc,
4565 		    "%s: Invalid descriptor length in param list: %d\n",
4566 		    __func__, desc_len);
4567 		scsi_print_command(scmd);
4568 		scmd->result = SAM_STAT_CHECK_CONDITION;
4569 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4570 		    0x26, 0);
4571 		scsi_done(scmd);
4572 		kfree(buf);
4573 		return true;
4574 	}
4575 
4576 	if (param_len > (desc_len + 8)) {
4577 		trunc_param_len = desc_len + 8;
4578 		scsi_print_command(scmd);
4579 		dprint_scsi_err(mrioc,
4580 		    "truncating param_len(%d) to desc_len+8(%d)\n",
4581 		    param_len, trunc_param_len);
4582 		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4583 		scsi_print_command(scmd);
4584 	}
4585 
4586 	kfree(buf);
4587 	return false;
4588 }
4589 
4590 /**
4591  * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
4592  * @scmd: SCSI Command reference
4593  *
4594  * Checks whether a cdb is allowed during shutdown or not.
4595  *
4596  * Return: TRUE for allowed commands, FALSE otherwise.
4597  */
4599 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
4600 {
4601 	switch (scmd->cmnd[0]) {
4602 	case SYNCHRONIZE_CACHE:
4603 	case START_STOP:
4604 		return true;
4605 	default:
4606 		return false;
4607 	}
4608 }
4609 
4610 /**
4611  * mpi3mr_qcmd - I/O request dispatcher
4612  * @shost: SCSI Host reference
4613  * @scmd: SCSI Command reference
4614  *
4615  * Issues the SCSI Command as an MPI3 request.
4616  *
4617  * Return: 0 on successful queueing of the request or if the
4618  *         request is completed with failure.
4619  *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
4620  *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
4621  */
4622 static int mpi3mr_qcmd(struct Scsi_Host *shost,
4623 	struct scsi_cmnd *scmd)
4624 {
4625 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4626 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4627 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4628 	struct scmd_priv *scmd_priv_data = NULL;
4629 	struct mpi3_scsi_io_request *scsiio_req = NULL;
4630 	struct op_req_qinfo *op_req_q = NULL;
4631 	int retval = 0;
4632 	u16 dev_handle;
4633 	u16 host_tag;
4634 	u32 scsiio_flags = 0, data_len_blks = 0;
4635 	struct request *rq = scsi_cmd_to_rq(scmd);
4636 	int iprio_class;
4637 	u8 is_pcie_dev = 0;
4638 	u32 tracked_io_sz = 0;
4639 	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
4640 	struct mpi3mr_throttle_group_info *tg = NULL;
4641 
4642 	if (mrioc->unrecoverable) {
4643 		scmd->result = DID_ERROR << 16;
4644 		scsi_done(scmd);
4645 		goto out;
4646 	}
4647 
4648 	sdev_priv_data = scmd->device->hostdata;
4649 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4650 		scmd->result = DID_NO_CONNECT << 16;
4651 		scsi_done(scmd);
4652 		goto out;
4653 	}
4654 
4655 	if (mrioc->stop_drv_processing &&
4656 	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
4657 		scmd->result = DID_NO_CONNECT << 16;
4658 		scsi_done(scmd);
4659 		goto out;
4660 	}
4661 
4662 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4663 	dev_handle = stgt_priv_data->dev_handle;
4664 
4665 	/* Avoid error handling escalation when device is removed or blocked */
4666 
4667 	if (scmd->device->host->shost_state == SHOST_RECOVERY &&
4668 		scmd->cmnd[0] == TEST_UNIT_READY &&
4669 		(stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) {
4670 		scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
4671 		scsi_done(scmd);
4672 		goto out;
4673 	}
4674 
4675 	if (mrioc->reset_in_progress) {
4676 		retval = SCSI_MLQUEUE_HOST_BUSY;
4677 		goto out;
4678 	}
4679 
4680 	if (atomic_read(&stgt_priv_data->block_io)) {
4681 		if (mrioc->stop_drv_processing) {
4682 			scmd->result = DID_NO_CONNECT << 16;
4683 			scsi_done(scmd);
4684 			goto out;
4685 		}
4686 		retval = SCSI_MLQUEUE_DEVICE_BUSY;
4687 		goto out;
4688 	}
4689 
4690 	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
4691 		scmd->result = DID_NO_CONNECT << 16;
4692 		scsi_done(scmd);
4693 		goto out;
4694 	}
4695 	if (stgt_priv_data->dev_removed) {
4696 		scmd->result = DID_NO_CONNECT << 16;
4697 		scsi_done(scmd);
4698 		goto out;
4699 	}
4700 
4701 	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
4702 		is_pcie_dev = 1;
4703 	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
4704 	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
4705 	    mpi3mr_check_return_unmap(mrioc, scmd))
4706 		goto out;
4707 
4708 	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
4709 	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
4710 		scmd->result = DID_ERROR << 16;
4711 		scsi_done(scmd);
4712 		goto out;
4713 	}
4714 
4715 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4716 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
4717 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4718 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
4719 	else
4720 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
4721 
4722 	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
4723 
4724 	if (sdev_priv_data->ncq_prio_enable) {
4725 		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4726 		if (iprio_class == IOPRIO_CLASS_RT)
4727 			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
4728 	}
4729 
4730 	if (scmd->cmd_len > 16)
4731 		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
4732 
4733 	scmd_priv_data = scsi_cmd_priv(scmd);
4734 	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
4735 	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
4736 	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
4737 	scsiio_req->host_tag = cpu_to_le16(host_tag);
4738 
4739 	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
4740 
4741 	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
4742 	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
4743 	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
4744 	scsiio_req->flags = cpu_to_le32(scsiio_flags);
4745 	int_to_scsilun(sdev_priv_data->lun_id,
4746 	    (struct scsi_lun *)scsiio_req->lun);
4747 
4748 	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
4749 		mpi3mr_clear_scmd_priv(mrioc, scmd);
4750 		retval = SCSI_MLQUEUE_HOST_BUSY;
4751 		goto out;
4752 	}
4753 	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
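	/*
	 * Large-I/O throttling: account the transfer length in
	 * 512-byte blocks against the adapter-wide and throttle-group
	 * pending counters, and divert I/O to the firmware once either
	 * high-water mark is crossed.
	 */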
4754 	data_len_blks = scsi_bufflen(scmd) >> 9;
4755 	if ((data_len_blks >= mrioc->io_throttle_data_length) &&
4756 	    stgt_priv_data->io_throttle_enabled) {
4757 		tracked_io_sz = data_len_blks;
4758 		tg = stgt_priv_data->throttle_group;
4759 		if (tg) {
4760 			ioc_pend_data_len = atomic_add_return(data_len_blks,
4761 			    &mrioc->pend_large_data_sz);
4762 			tg_pend_data_len = atomic_add_return(data_len_blks,
4763 			    &tg->pend_large_data_sz);
4764 			if (!tg->io_divert && ((ioc_pend_data_len >=
4765 			    mrioc->io_throttle_high) ||
4766 			    (tg_pend_data_len >= tg->high))) {
4767 				tg->io_divert = 1;
4768 				tg->need_qd_reduction = 1;
4769 				mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
4770 				    tg, 1);
4771 				mpi3mr_queue_qd_reduction_event(mrioc, tg);
4772 			}
4773 		} else {
4774 			ioc_pend_data_len = atomic_add_return(data_len_blks,
4775 			    &mrioc->pend_large_data_sz);
4776 			if (ioc_pend_data_len >= mrioc->io_throttle_high)
4777 				stgt_priv_data->io_divert = 1;
4778 		}
4779 	}
4780 
4781 	if (stgt_priv_data->io_divert) {
4782 		scsiio_req->msg_flags |=
4783 		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4784 		scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
4785 	}
4786 	scsiio_req->flags = cpu_to_le32(scsiio_flags);
4787 
4788 	if (mpi3mr_op_request_post(mrioc, op_req_q,
4789 	    scmd_priv_data->mpi3mr_scsiio_req)) {
4790 		mpi3mr_clear_scmd_priv(mrioc, scmd);
4791 		retval = SCSI_MLQUEUE_HOST_BUSY;
4792 		if (tracked_io_sz) {
4793 			atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
4794 			if (tg)
4795 				atomic_sub(tracked_io_sz,
4796 				    &tg->pend_large_data_sz);
4797 		}
4798 		goto out;
4799 	}
4800 
4801 out:
4802 	return retval;
4803 }
4804 
4805 static const struct scsi_host_template mpi3mr_driver_template = {
4806 	.module				= THIS_MODULE,
4807 	.name				= "MPI3 Storage Controller",
4808 	.proc_name			= MPI3MR_DRIVER_NAME,
4809 	.queuecommand			= mpi3mr_qcmd,
4810 	.target_alloc			= mpi3mr_target_alloc,
4811 	.slave_alloc			= mpi3mr_slave_alloc,
4812 	.slave_configure		= mpi3mr_slave_configure,
4813 	.target_destroy			= mpi3mr_target_destroy,
4814 	.slave_destroy			= mpi3mr_slave_destroy,
4815 	.scan_finished			= mpi3mr_scan_finished,
4816 	.scan_start			= mpi3mr_scan_start,
4817 	.change_queue_depth		= mpi3mr_change_queue_depth,
4818 	.eh_device_reset_handler	= mpi3mr_eh_dev_reset,
4819 	.eh_target_reset_handler	= mpi3mr_eh_target_reset,
4820 	.eh_host_reset_handler		= mpi3mr_eh_host_reset,
4821 	.bios_param			= mpi3mr_bios_param,
4822 	.map_queues			= mpi3mr_map_queues,
4823 	.mq_poll                        = mpi3mr_blk_mq_poll,
4824 	.no_write_same			= 1,
4825 	.can_queue			= 1,
4826 	.this_id			= -1,
4827 	.sg_tablesize			= MPI3MR_DEFAULT_SGL_ENTRIES,
4828 	/* max xfer supported is 1M (2K sectors of 512 bytes) */
4830 	.max_sectors			= (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
4831 	.cmd_per_lun			= MPI3MR_MAX_CMDS_LUN,
4832 	.max_segment_size		= 0xffffffff,
4833 	.track_queue_depth		= 1,
4834 	.cmd_size			= sizeof(struct scmd_priv),
4835 	.shost_groups			= mpi3mr_host_groups,
4836 	.sdev_groups			= mpi3mr_dev_groups,
4837 };
4838 
4839 /**
4840  * mpi3mr_init_drv_cmd - Initialize internal command tracker
4841  * @cmdptr: Internal command tracker
4842  * @host_tag: Host tag used for the specific command
4843  *
4844  * Initialize the internal command tracker structure with
4845  * specified host tag.
4846  *
4847  * Return: Nothing.
4848  */
4849 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4850 	u16 host_tag)
4851 {
4852 	mutex_init(&cmdptr->mutex);
4853 	cmdptr->reply = NULL;
4854 	cmdptr->state = MPI3MR_CMD_NOTUSED;
4855 	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4856 	cmdptr->host_tag = host_tag;
4857 }
4858 
4859 /**
4860  * osintfc_mrioc_security_status -Check controller secure status
4861  * @pdev: PCI device instance
4862  *
4863  * Read the Device Serial Number capability from PCI config
4864  * space and decide whether the controller is secure or not.
4865  *
4866  * Return: 0 on success, non-zero on failure.
4867  */
4868 static int
4869 osintfc_mrioc_security_status(struct pci_dev *pdev)
4870 {
4871 	u32 cap_data;
4872 	int base;
4873 	u32 ctlr_status;
4874 	u32 debug_status;
4875 	int retval = 0;
4876 
4877 	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4878 	if (!base) {
4879 		dev_err(&pdev->dev,
4880 		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
4881 		return -1;
4882 	}
4883 
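	/*
	 * The first dword after the DSN capability header is used by
	 * these controllers to report vendor-specific security status:
	 * the secure debug status and the controller security state.
	 */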
4884 	pci_read_config_dword(pdev, base + 4, &cap_data);
4885 
4886 	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4887 	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
4888 
4889 	switch (ctlr_status) {
4890 	case MPI3MR_INVALID_DEVICE:
4891 		dev_err(&pdev->dev,
4892 		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4893 		    __func__, pdev->device, pdev->subsystem_vendor,
4894 		    pdev->subsystem_device);
4895 		retval = -1;
4896 		break;
4897 	case MPI3MR_CONFIG_SECURE_DEVICE:
4898 		if (!debug_status)
4899 			dev_info(&pdev->dev,
4900 			    "%s: Config secure ctlr is detected\n",
4901 			    __func__);
4902 		break;
4903 	case MPI3MR_HARD_SECURE_DEVICE:
4904 		break;
4905 	case MPI3MR_TAMPERED_DEVICE:
4906 		dev_err(&pdev->dev,
4907 		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4908 		    __func__, pdev->device, pdev->subsystem_vendor,
4909 		    pdev->subsystem_device);
4910 		retval = -1;
4911 		break;
4912 	default:
4913 		retval = -1;
4914 		break;
4915 	}
4916 
4917 	if (!retval && debug_status) {
4918 		dev_err(&pdev->dev,
4919 		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4920 		    __func__, pdev->device, pdev->subsystem_vendor,
4921 		    pdev->subsystem_device);
4922 		retval = -1;
4923 	}
4924 
4925 	return retval;
4926 }
4927 
4928 /**
4929  * mpi3mr_probe - PCI probe callback
4930  * @pdev: PCI device instance
4931  * @id: PCI device ID details
4932  *
4933  * Controller initialization routine. Checks the security status
4934  * of the controller and, if it is invalid or tampered, returns
4935  * from the probe without initializing the controller. Otherwise,
4936  * allocates the per adapter instance through shost_priv,
4937  * initializes controller specific data structures, initializes
4938  * the controller hardware and adds the shost to the SCSI subsystem.
4939  *
4940  * Return: 0 on success, non-zero on failure.
4941  */
4943 static int
4944 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4945 {
4946 	struct mpi3mr_ioc *mrioc = NULL;
4947 	struct Scsi_Host *shost = NULL;
4948 	int retval = 0, i;
4949 
4950 	if (osintfc_mrioc_security_status(pdev)) {
4951 		warn_non_secure_ctlr = 1;
4952 		return 1; /* For Invalid and Tampered device */
4953 	}
4954 
4955 	shost = scsi_host_alloc(&mpi3mr_driver_template,
4956 	    sizeof(struct mpi3mr_ioc));
4957 	if (!shost) {
4958 		retval = -ENODEV;
4959 		goto shost_failed;
4960 	}
4961 
4962 	mrioc = shost_priv(shost);
4963 	mrioc->id = mrioc_ids++;
4964 	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
4965 	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
4966 	INIT_LIST_HEAD(&mrioc->list);
4967 	spin_lock(&mrioc_list_lock);
4968 	list_add_tail(&mrioc->list, &mrioc_list);
4969 	spin_unlock(&mrioc_list_lock);
4970 
4971 	spin_lock_init(&mrioc->admin_req_lock);
4972 	spin_lock_init(&mrioc->reply_free_queue_lock);
4973 	spin_lock_init(&mrioc->sbq_lock);
4974 	spin_lock_init(&mrioc->fwevt_lock);
4975 	spin_lock_init(&mrioc->tgtdev_lock);
4976 	spin_lock_init(&mrioc->watchdog_lock);
4977 	spin_lock_init(&mrioc->chain_buf_lock);
4978 	spin_lock_init(&mrioc->sas_node_lock);
4979 
4980 	INIT_LIST_HEAD(&mrioc->fwevt_list);
4981 	INIT_LIST_HEAD(&mrioc->tgtdev_list);
4982 	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
4983 	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
4984 	INIT_LIST_HEAD(&mrioc->sas_expander_list);
4985 	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
4986 	INIT_LIST_HEAD(&mrioc->enclosure_list);
4987 
4988 	mutex_init(&mrioc->reset_mutex);
4989 	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
4990 	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
4991 	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
4992 	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
4993 	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
4994 	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);
4995 
4996 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
4997 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
4998 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
4999 
5000 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5001 		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5002 				    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5003 
5004 	if (pdev->revision)
5005 		mrioc->enable_segqueue = true;
5006 
5007 	init_waitqueue_head(&mrioc->reset_waitq);
5008 	mrioc->logging_level = logging_level;
5009 	mrioc->shost = shost;
5010 	mrioc->pdev = pdev;
5011 	mrioc->stop_bsgs = 1;
5012 
5013 	mrioc->max_sgl_entries = max_sgl_entries;
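	/*
	 * Clamp max_sgl_entries to [MPI3MR_DEFAULT_SGL_ENTRIES,
	 * MPI3MR_MAX_SGL_ENTRIES] and round it down to a multiple of
	 * MPI3MR_DEFAULT_SGL_ENTRIES.
	 */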
5014 	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5015 		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5016 	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5017 		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5018 	else {
5019 		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
5020 		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
5021 	}
5022 
5023 	/* init shost parameters */
5024 	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
5025 	shost->max_lun = -1;
5026 	shost->unique_id = mrioc->id;
5027 
5028 	shost->max_channel = 0;
5029 	shost->max_id = 0xFFFFFFFF;
5030 
5031 	shost->host_tagset = 1;
5032 
5033 	if (prot_mask >= 0) {
5034 		scsi_host_set_prot(shost, prot_mask);
5035 	} else {
5036 		prot_mask = SHOST_DIF_TYPE1_PROTECTION
5037 		    | SHOST_DIF_TYPE2_PROTECTION
5038 		    | SHOST_DIF_TYPE3_PROTECTION;
5039 		scsi_host_set_prot(shost, prot_mask);
5040 	}
5041 
5042 	ioc_info(mrioc,
5043 	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
5044 	    __func__,
5045 	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5046 	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5047 	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5048 	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5049 	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5050 	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5051 	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5052 
5053 	if (prot_guard_mask)
5054 		scsi_host_set_guard(shost, (prot_guard_mask & 3));
5055 	else
5056 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
5057 
5058 	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
5059 	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
5060 	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
5061 	    mrioc->fwevt_worker_name, 0);
5062 	if (!mrioc->fwevt_worker_thread) {
5063 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5064 		    __FILE__, __LINE__, __func__);
5065 		retval = -ENODEV;
5066 		goto fwevtthread_failed;
5067 	}
5068 
5069 	mrioc->is_driver_loading = 1;
5070 	mrioc->cpu_count = num_online_cpus();
5071 	if (mpi3mr_setup_resources(mrioc)) {
5072 		ioc_err(mrioc, "setup resources failed\n");
5073 		retval = -ENODEV;
5074 		goto resource_alloc_failed;
5075 	}
5076 	if (mpi3mr_init_ioc(mrioc)) {
5077 		ioc_err(mrioc, "initializing IOC failed\n");
5078 		retval = -ENODEV;
5079 		goto init_ioc_failed;
5080 	}
5081 
5082 	shost->nr_hw_queues = mrioc->num_op_reply_q;
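	/* default, read and poll blk-mq queue maps when poll queues exist */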
5083 	if (mrioc->active_poll_qcount)
5084 		shost->nr_maps = 3;
5085 
5086 	shost->can_queue = mrioc->max_host_ios;
5087 	shost->sg_tablesize = mrioc->max_sgl_entries;
5088 	shost->max_id = mrioc->facts.max_perids + 1;
5089 
5090 	retval = scsi_add_host(shost, &pdev->dev);
5091 	if (retval) {
5092 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5093 		    __FILE__, __LINE__, __func__);
5094 		goto addhost_failed;
5095 	}
5096 
5097 	scsi_scan_host(shost);
5098 	mpi3mr_bsg_init(mrioc);
5099 	return retval;
5100 
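	/* the error unwind below mirrors the setup order above */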
5101 addhost_failed:
5102 	mpi3mr_stop_watchdog(mrioc);
5103 	mpi3mr_cleanup_ioc(mrioc);
5104 init_ioc_failed:
5105 	mpi3mr_free_mem(mrioc);
5106 	mpi3mr_cleanup_resources(mrioc);
5107 resource_alloc_failed:
5108 	destroy_workqueue(mrioc->fwevt_worker_thread);
5109 fwevtthread_failed:
5110 	spin_lock(&mrioc_list_lock);
5111 	list_del(&mrioc->list);
5112 	spin_unlock(&mrioc_list_lock);
5113 	scsi_host_put(shost);
5114 shost_failed:
5115 	return retval;
5116 }
5117 
5118 /**
5119  * mpi3mr_remove - PCI remove callback
5120  * @pdev: PCI device instance
5121  *
5122  * Clean up the IOC by issuing MUR and shutdown notification.
5123  * Free up all memory and resources associated with the
5124  * controller and target devices, and unregister the shost.
5125  *
5126  * Return: Nothing.
5127  */
5128 static void mpi3mr_remove(struct pci_dev *pdev)
5129 {
5130 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5131 	struct mpi3mr_ioc *mrioc;
5132 	struct workqueue_struct	*wq;
5133 	unsigned long flags;
5134 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
5135 	struct mpi3mr_hba_port *port, *hba_port_next;
5136 	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
5137 
5138 	if (!shost)
5139 		return;
5140 
5141 	mrioc = shost_priv(shost);
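	/* wait for any in-flight reset and for initial discovery to finish */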
5142 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
5143 		ssleep(1);
5144 
5145 	if (!pci_device_is_present(mrioc->pdev)) {
5146 		mrioc->unrecoverable = 1;
5147 		mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
5148 	}
5149 
5150 	mpi3mr_bsg_exit(mrioc);
5151 	mrioc->stop_drv_processing = 1;
5152 	mpi3mr_cleanup_fwevt_list(mrioc);
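	/*
	 * Detach the firmware-event workqueue under fwevt_lock so that no
	 * new work can be queued to it, then destroy it outside the lock.
	 */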
5153 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
5154 	wq = mrioc->fwevt_worker_thread;
5155 	mrioc->fwevt_worker_thread = NULL;
5156 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
5157 	if (wq)
5158 		destroy_workqueue(wq);
5159 
5160 	if (mrioc->sas_transport_enabled)
5161 		sas_remove_host(shost);
5162 	else
5163 		scsi_remove_host(shost);
5164 
5165 	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
5166 	    list) {
5167 		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
5168 		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
5169 		mpi3mr_tgtdev_put(tgtdev);
5170 	}
5171 	mpi3mr_stop_watchdog(mrioc);
5172 	mpi3mr_cleanup_ioc(mrioc);
5173 	mpi3mr_free_mem(mrioc);
5174 	mpi3mr_cleanup_resources(mrioc);
5175 
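	/*
	 * sas_node_lock is dropped around each removal because
	 * mpi3mr_expander_node_remove() can sleep and so must not be
	 * called with the spinlock held.
	 */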
5176 	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
5177 	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
5178 	    &mrioc->sas_expander_list, list) {
5179 		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
5180 		mpi3mr_expander_node_remove(mrioc, sas_expander);
5181 		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
5182 	}
5183 	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
5184 		ioc_info(mrioc,
5185 		    "removing hba_port entry: %p port: %d from hba_port list\n",
5186 		    port, port->port_id);
5187 		list_del(&port->list);
5188 		kfree(port);
5189 	}
5190 	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
5191 
5192 	if (mrioc->sas_hba.num_phys) {
5193 		kfree(mrioc->sas_hba.phy);
5194 		mrioc->sas_hba.phy = NULL;
5195 		mrioc->sas_hba.num_phys = 0;
5196 	}
5197 
5198 	spin_lock(&mrioc_list_lock);
5199 	list_del(&mrioc->list);
5200 	spin_unlock(&mrioc_list_lock);
5201 
5202 	scsi_host_put(shost);
5203 }
5204 
5205 /**
5206  * mpi3mr_shutdown - PCI shutdown callback
5207  * @pdev: PCI device instance
5208  *
5209  * Stop the watchdog, clean up the IOC and release the
5210  * resources associated with the controller.
5211  *
5212  * Return: Nothing.
5213  */
5214 static void mpi3mr_shutdown(struct pci_dev *pdev)
5215 {
5216 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5217 	struct mpi3mr_ioc *mrioc;
5218 	struct workqueue_struct	*wq;
5219 	unsigned long flags;
5220 
5221 	if (!shost)
5222 		return;
5223 
5224 	mrioc = shost_priv(shost);
5225 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
5226 		ssleep(1);
5227 
5228 	mrioc->stop_drv_processing = 1;
5229 	mpi3mr_cleanup_fwevt_list(mrioc);
5230 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
5231 	wq = mrioc->fwevt_worker_thread;
5232 	mrioc->fwevt_worker_thread = NULL;
5233 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
5234 	if (wq)
5235 		destroy_workqueue(wq);
5236 
5237 	mpi3mr_stop_watchdog(mrioc);
5238 	mpi3mr_cleanup_ioc(mrioc);
5239 	mpi3mr_cleanup_resources(mrioc);
5240 }
5241 
5242 /**
5243  * mpi3mr_suspend - PCI power management suspend callback
5244  * @dev: Device struct
5245  *
5246  * Clean up the IOC by issuing MUR and shutdown notification
5247  * before the device is moved to the target power state.
5248  *
5249  * Return: 0 always.
5250  */
5251 static int __maybe_unused
5252 mpi3mr_suspend(struct device *dev)
5253 {
5254 	struct pci_dev *pdev = to_pci_dev(dev);
5255 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5256 	struct mpi3mr_ioc *mrioc;
5257 
5258 	if (!shost)
5259 		return 0;
5260 
5261 	mrioc = shost_priv(shost);
5262 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
5263 		ssleep(1);
5264 	mrioc->stop_drv_processing = 1;
5265 	mpi3mr_cleanup_fwevt_list(mrioc);
5266 	scsi_block_requests(shost);
5267 	mpi3mr_stop_watchdog(mrioc);
5268 	mpi3mr_cleanup_ioc(mrioc);
5269 
5270 	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering suspend state\n",
5271 	    pdev, pci_name(pdev));
5272 	mpi3mr_cleanup_resources(mrioc);
5273 
5274 	return 0;
5275 }
5276 
5277 /**
5278  * mpi3mr_resume - PCI power management resume callback
5279  * @dev: Device struct
5280  *
5281  * Restore the power state to D0, reinitialize the controller,
5282  * and resume I/O operations to the target devices.
5283  *
5284  * Return: 0 on success, non-zero on failure
5285  */
5286 static int __maybe_unused
5287 mpi3mr_resume(struct device *dev)
5288 {
5289 	struct pci_dev *pdev = to_pci_dev(dev);
5290 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
5291 	struct mpi3mr_ioc *mrioc;
5292 	pci_power_t device_state = pdev->current_state;
5293 	int r;
5294 
5295 	if (!shost)
5296 		return 0;
5297 
5298 	mrioc = shost_priv(shost);
5299 
5300 	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
5301 	    pdev, pci_name(pdev), device_state);
5302 	mrioc->pdev = pdev;
5303 	mrioc->cpu_count = num_online_cpus();
5304 	r = mpi3mr_setup_resources(mrioc);
5305 	if (r) {
5306 		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
5307 		    __func__, r);
5308 		return r;
5309 	}
5310 
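	/* discard stale device handles and buffered state before re-init */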
5311 	mrioc->stop_drv_processing = 0;
5312 	mpi3mr_invalidate_devhandles(mrioc);
5313 	mpi3mr_free_enclosure_list(mrioc);
5314 	mpi3mr_memset_buffers(mrioc);
5315 	r = mpi3mr_reinit_ioc(mrioc, 1);
5316 	if (r) {
5317 		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
5318 		return r;
5319 	}
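	/* let the refreshed device topology settle before unblocking I/O */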
5320 	ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
5321 	scsi_unblock_requests(shost);
5322 	mrioc->device_refresh_on = 0;
5323 	mpi3mr_start_watchdog(mrioc);
5324 
5325 	return 0;
5326 }
5327 
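/*
 * PCI IDs served by this driver; MODULE_DEVICE_TABLE below exports the
 * table so udev/modprobe can autoload the module via the modalias of a
 * matching PCI function.
 */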
5328 static const struct pci_device_id mpi3mr_pci_id_table[] = {
5329 	{
5330 		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
5331 		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
5332 	},
5333 	{ 0 }
5334 };
5335 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
5336 
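/*
 * SIMPLE_DEV_PM_OPS wires the suspend/resume handlers into the system
 * sleep transitions only (suspend and hibernate); runtime PM is not used.
 */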
5337 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
5338 
5339 static struct pci_driver mpi3mr_pci_driver = {
5340 	.name = MPI3MR_DRIVER_NAME,
5341 	.id_table = mpi3mr_pci_id_table,
5342 	.probe = mpi3mr_probe,
5343 	.remove = mpi3mr_remove,
5344 	.shutdown = mpi3mr_shutdown,
5345 	.driver.pm = &mpi3mr_pm_ops,
5346 };
5347 
5348 static ssize_t event_counter_show(struct device_driver *dd, char *buf)
5349 {
5350 	return sysfs_emit(buf, "%llu\n", atomic64_read(&event_counter));
5351 }
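/*
 * DRIVER_ATTR_RO() creates a read-only driver attribute, readable as
 * e.g. "cat /sys/bus/pci/drivers/mpi3mr/event_counter".
 */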
5352 static DRIVER_ATTR_RO(event_counter);
5353 
5354 static int __init mpi3mr_init(void)
5355 {
5356 	int ret_val;
5357 
5358 	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
5359 	    MPI3MR_DRIVER_VERSION);
5360 
5361 	mpi3mr_transport_template =
5362 	    sas_attach_transport(&mpi3mr_transport_functions);
5363 	if (!mpi3mr_transport_template) {
5364 		pr_err("%s failed to load due to sas transport attach failure\n",
5365 		    MPI3MR_DRIVER_NAME);
5366 		return -ENODEV;
5367 	}
5368 
5369 	ret_val = pci_register_driver(&mpi3mr_pci_driver);
5370 	if (ret_val) {
5371 		pr_err("%s failed to load due to pci register driver failure\n",
5372 		    MPI3MR_DRIVER_NAME);
5373 		goto err_pci_reg_fail;
5374 	}
5375 
5376 	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
5377 				     &driver_attr_event_counter);
5378 	if (ret_val)
5379 		goto err_event_counter;
5380 
5381 	return ret_val;
5382 
5383 err_event_counter:
5384 	pci_unregister_driver(&mpi3mr_pci_driver);
5385 
5386 err_pci_reg_fail:
5387 	sas_release_transport(mpi3mr_transport_template);
5388 	return ret_val;
5389 }
5390 
5391 static void __exit mpi3mr_exit(void)
5392 {
5393 	if (warn_non_secure_ctlr)
5394 		pr_warn(
5395 		    "Unloading %s version %s while managing a non secure controller\n",
5396 		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
5397 	else
5398 		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
5399 		    MPI3MR_DRIVER_VERSION);
5400 
5401 	driver_remove_file(&mpi3mr_pci_driver.driver,
5402 			   &driver_attr_event_counter);
5403 	pci_unregister_driver(&mpi3mr_pci_driver);
5404 	sas_release_transport(mpi3mr_transport_template);
5405 }
5406 
5407 module_init(mpi3mr_init);
5408 module_exit(mpi3mr_exit);
5409