xref: /openbmc/linux/drivers/scsi/mpi3mr/mpi3mr_os.c (revision e36710dc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Driver for Broadcom MPI3 Storage Controllers
4  *
5  * Copyright (C) 2017-2021 Broadcom Inc.
6  *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7  *
8  */
9 
10 #include "mpi3mr.h"
11 
12 /* global driver scope variables */
13 LIST_HEAD(mrioc_list);
14 DEFINE_SPINLOCK(mrioc_list_lock);
15 static int mrioc_ids;
16 static int warn_non_secure_ctlr;
17 
18 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
19 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
20 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
21 MODULE_VERSION(MPI3MR_DRIVER_VERSION);
22 
23 /* Module parameters */
24 int logging_level;
25 module_param(logging_level, int, 0);
26 MODULE_PARM_DESC(logging_level,
27 	" bits for enabling additional logging info (default=0)");
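
/*
 * Usage note (editorial addition, not part of the original source): with a
 * permission value of 0 the parameter is not exposed in sysfs, so the
 * logging level is set only at load time, e.g. "modprobe mpi3mr
 * logging_level=1" or "mpi3mr.logging_level=1" on the kernel command line.
 */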
28 
29 /* Forward declarations */
30 /**
31  * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
32  * @mrioc: Adapter instance reference
33  * @scmd: SCSI command reference
34  *
35  * Calculate the host tag based on block tag for a given scmd.
36  *
37  * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
38  */
39 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
40 	struct scsi_cmnd *scmd)
41 {
42 	struct scmd_priv *priv = NULL;
43 	u32 unique_tag;
44 	u16 host_tag, hw_queue;
45 
46 	unique_tag = blk_mq_unique_tag(scmd->request);
47 
48 	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
49 	if (hw_queue >= mrioc->num_op_reply_q)
50 		return MPI3MR_HOSTTAG_INVALID;
51 	host_tag = blk_mq_unique_tag_to_tag(unique_tag);
52 
53 	if (WARN_ON(host_tag >= mrioc->max_host_ios))
54 		return MPI3MR_HOSTTAG_INVALID;
55 
56 	priv = scsi_cmd_priv(scmd);
57 	/* host_tag 0 is invalid, hence incrementing by 1 */
58 	priv->host_tag = host_tag + 1;
59 	priv->scmd = scmd;
60 	priv->in_lld_scope = 1;
61 	priv->req_q_idx = hw_queue;
62 	priv->chain_idx = -1;
63 	return priv->host_tag;
64 }
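
/*
 * Worked example (editorial addition): with the generic blk-mq encoding,
 * a command on hardware queue 2 whose block layer tag is 10 gives
 * unique_tag = (2 << BLK_MQ_UNIQUE_TAG_BITS) | 10, so hw_queue = 2 and
 * host_tag = 10; the tag stored in priv->host_tag and sent to the
 * firmware is therefore 10 + 1 = 11.
 */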
65 
66 /**
67  * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
68  * @mrioc: Adapter instance reference
69  * @host_tag: Host tag
70  * @qidx: Operational queue index
71  *
72  * Identify the block tag from the host tag and queue index and
73  * retrieve the associated SCSI command using scsi_host_find_tag().
74  *
75  * Return: SCSI command reference or NULL.
76  */
77 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
78 	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
79 {
80 	struct scsi_cmnd *scmd = NULL;
81 	struct scmd_priv *priv = NULL;
82 	u32 unique_tag = host_tag - 1;
83 
84 	if (WARN_ON(host_tag > mrioc->max_host_ios))
85 		goto out;
86 
87 	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
88 
89 	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
90 	if (scmd) {
91 		priv = scsi_cmd_priv(scmd);
92 		if (!priv->in_lld_scope)
93 			scmd = NULL;
94 	}
95 out:
96 	return scmd;
97 }
98 
99 /**
100  * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
101  * @mrioc: Adapter instance reference
102  * @scmd: SCSI command reference
103  *
104  * Invalidate the SCSI command private data to mark that the
105  * command is no longer in LLD scope.
106  *
107  * Return: Nothing.
108  */
109 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
110 	struct scsi_cmnd *scmd)
111 {
112 	struct scmd_priv *priv = NULL;
113 
114 	priv = scsi_cmd_priv(scmd);
115 
116 	if (WARN_ON(priv->in_lld_scope == 0))
117 		return;
118 	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
119 	priv->req_q_idx = 0xFFFF;
120 	priv->scmd = NULL;
121 	priv->in_lld_scope = 0;
122 	if (priv->chain_idx >= 0) {
123 		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
124 		priv->chain_idx = -1;
125 	}
126 }
127 
128 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
129 	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
130 static void mpi3mr_fwevt_worker(struct work_struct *work);
131 
132 /**
133  * mpi3mr_fwevt_free - firmware event memory deallocator
134  * @r: kref pointer of the firmware event
135  *
136  * Free the firmware event memory when the last reference is dropped.
137  */
138 static void mpi3mr_fwevt_free(struct kref *r)
139 {
140 	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
141 }
142 
143 /**
144  * mpi3mr_fwevt_get - kref incrementer
145  * @fwevt: Firmware event reference
146  *
147  * Increment the firmware event reference count.
148  */
149 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
150 {
151 	kref_get(&fwevt->ref_count);
152 }
153 
154 /**
155  * mpi3mr_fwevt_put - kref decrementer
156  * @fwevt: Firmware event reference
157  *
158  * Decrement the firmware event reference count.
159  */
160 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
161 {
162 	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
163 }
164 
165 /**
166  * mpi3mr_alloc_fwevt - Allocate firmware event
167  * @len: length of firmware event data to allocate
168  *
169  * Allocate firmware event with required length and initialize
170  * the reference counter.
171  *
172  * Return: firmware event reference.
173  */
174 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
175 {
176 	struct mpi3mr_fwevt *fwevt;
177 
178 	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
179 	if (!fwevt)
180 		return NULL;
181 
182 	kref_init(&fwevt->ref_count);
183 	return fwevt;
184 }
185 
186 /**
187  * mpi3mr_fwevt_add_to_list - Add firmware event to the list
188  * @mrioc: Adapter instance reference
189  * @fwevt: Firmware event reference
190  *
191  * Add the given firmware event to the firmware event list.
192  *
193  * Return: Nothing.
194  */
195 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
196 	struct mpi3mr_fwevt *fwevt)
197 {
198 	unsigned long flags;
199 
200 	if (!mrioc->fwevt_worker_thread)
201 		return;
202 
203 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
204 	/* get fwevt reference count while adding it to fwevt_list */
205 	mpi3mr_fwevt_get(fwevt);
206 	INIT_LIST_HEAD(&fwevt->list);
207 	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
208 	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
209 	/* get fwevt reference count while enqueueing it to worker queue */
210 	mpi3mr_fwevt_get(fwevt);
211 	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
212 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
213 }
214 
215 /**
216  * mpi3mr_fwevt_del_from_list - Delete firmware event from list
217  * @mrioc: Adapter instance reference
218  * @fwevt: Firmware event reference
219  *
220  * Delete the given firmware event from the firmware event list.
221  *
222  * Return: Nothing.
223  */
224 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
225 	struct mpi3mr_fwevt *fwevt)
226 {
227 	unsigned long flags;
228 
229 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
230 	if (!list_empty(&fwevt->list)) {
231 		list_del_init(&fwevt->list);
232 		/*
233 		 * Put fwevt reference count after
234 		 * removing it from fwevt_list
235 		 */
236 		mpi3mr_fwevt_put(fwevt);
237 	}
238 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
239 }
240 
241 /**
242  * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
243  * @mrioc: Adapter instance reference
244  *
245  * Dequeue a firmware event from the firmware event list.
246  *
247  * Return: firmware event.
248  */
249 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
250 	struct mpi3mr_ioc *mrioc)
251 {
252 	unsigned long flags;
253 	struct mpi3mr_fwevt *fwevt = NULL;
254 
255 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
256 	if (!list_empty(&mrioc->fwevt_list)) {
257 		fwevt = list_first_entry(&mrioc->fwevt_list,
258 		    struct mpi3mr_fwevt, list);
259 		list_del_init(&fwevt->list);
260 		/*
261 		 * Put fwevt reference count after
262 		 * removing it from fwevt_list
263 		 */
264 		mpi3mr_fwevt_put(fwevt);
265 	}
266 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
267 
268 	return fwevt;
269 }
270 
271 /**
272  * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
273  * @mrioc: Adapter instance reference
274  *
275  * Flush all pending firmware events from the firmware event
276  * list.
277  *
278  * Return: Nothing.
279  */
280 void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
281 {
282 	struct mpi3mr_fwevt *fwevt = NULL;
283 
284 	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
285 	    !mrioc->fwevt_worker_thread)
286 		return;
287 
288 	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
289 	    (fwevt = mrioc->current_event)) {
290 		/*
291 		 * Wait on the fwevt to complete. If this returns 1, then
292 		 * the event was never executed, and we need a put for the
293 		 * reference the work had on the fwevt.
294 		 *
295 		 * If it did execute, we wait for it to finish, and the put will
296 		 * happen from mpi3mr_process_fwevt()
297 		 */
298 		if (cancel_work_sync(&fwevt->work)) {
299 			/*
300 			 * Put fwevt reference count after
301 			 * dequeuing it from worker queue
302 			 */
303 			mpi3mr_fwevt_put(fwevt);
304 			/*
305 			 * Put fwevt reference count to neutralize
306 			 * kref_init increment
307 			 */
308 			mpi3mr_fwevt_put(fwevt);
309 		}
310 	}
311 }
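
/*
 * Reference counting summary (editorial addition): every firmware event
 * starts with one reference from kref_init(), takes one more when added
 * to fwevt_list and another when queued to the worker. The list reference
 * is dropped on dequeue/removal, the worker reference after the bottom
 * half runs, and the initial reference at the end of mpi3mr_fwevt_bh();
 * cancel_work_sync() returning true above means the work never ran, so
 * both of the latter are dropped here instead.
 */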
312 
313 /**
314  * mpi3mr_alloc_tgtdev - target device allocator
315  *
316  * Allocate a target device instance and initialize the reference
317  * count.
318  *
319  * Return: target device instance.
320  */
321 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
322 {
323 	struct mpi3mr_tgt_dev *tgtdev;
324 
325 	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
326 	if (!tgtdev)
327 		return NULL;
328 	kref_init(&tgtdev->ref_count);
329 	return tgtdev;
330 }
331 
332 /**
333  * mpi3mr_tgtdev_add_to_list - Add target device to the list
334  * @mrioc: Adapter instance reference
335  * @tgtdev: Target device
336  *
337  * Add the target device to the target device list
338  *
339  * Return: Nothing.
340  */
341 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
342 	struct mpi3mr_tgt_dev *tgtdev)
343 {
344 	unsigned long flags;
345 
346 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
347 	mpi3mr_tgtdev_get(tgtdev);
348 	INIT_LIST_HEAD(&tgtdev->list);
349 	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
350 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
351 }
352 
353 /**
354  * mpi3mr_tgtdev_del_from_list - Delete target device from the list
355  * @mrioc: Adapter instance reference
356  * @tgtdev: Target device
357  *
358  * Remove the target device from the target device list
359  *
360  * Return: Nothing.
361  */
362 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
363 	struct mpi3mr_tgt_dev *tgtdev)
364 {
365 	unsigned long flags;
366 
367 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
368 	if (!list_empty(&tgtdev->list)) {
369 		list_del_init(&tgtdev->list);
370 		mpi3mr_tgtdev_put(tgtdev);
371 	}
372 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
373 }
374 
375 /**
376  * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
377  * @mrioc: Adapter instance reference
378  * @handle: Device handle
379  *
380  * Accessor to retrieve target device from the device handle.
381  * Non Lock version
382  *
383  * Return: Target device reference.
384  */
385 static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_by_handle(
386 	struct mpi3mr_ioc *mrioc, u16 handle)
387 {
388 	struct mpi3mr_tgt_dev *tgtdev;
389 
390 	assert_spin_locked(&mrioc->tgtdev_lock);
391 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
392 		if (tgtdev->dev_handle == handle)
393 			goto found_tgtdev;
394 	return NULL;
395 
396 found_tgtdev:
397 	mpi3mr_tgtdev_get(tgtdev);
398 	return tgtdev;
399 }
400 
401 /**
402  * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
403  * @mrioc: Adapter instance reference
404  * @handle: Device handle
405  *
406  * Accessor to retrieve target device from the device handle.
407  * Lock version
408  *
409  * Return: Target device reference.
410  */
411 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
412 	struct mpi3mr_ioc *mrioc, u16 handle)
413 {
414 	struct mpi3mr_tgt_dev *tgtdev;
415 	unsigned long flags;
416 
417 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
418 	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
419 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
420 	return tgtdev;
421 }
422 
423 /**
424  * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
425  * @mrioc: Adapter instance reference
426  * @persist_id: Persistent ID
427  *
428  * Accessor to retrieve target device from the Persistent ID.
429  * Non Lock version
430  *
431  * Return: Target device reference.
432  */
433 static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_by_perst_id(
434 	struct mpi3mr_ioc *mrioc, u16 persist_id)
435 {
436 	struct mpi3mr_tgt_dev *tgtdev;
437 
438 	assert_spin_locked(&mrioc->tgtdev_lock);
439 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
440 		if (tgtdev->perst_id == persist_id)
441 			goto found_tgtdev;
442 	return NULL;
443 
444 found_tgtdev:
445 	mpi3mr_tgtdev_get(tgtdev);
446 	return tgtdev;
447 }
448 
449 /**
450  * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
451  * @mrioc: Adapter instance reference
452  * @persist_id: Persistent ID
453  *
454  * Accessor to retrieve target device from the Persistent ID.
455  * Lock version
456  *
457  * Return: Target device reference.
458  */
459 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
460 	struct mpi3mr_ioc *mrioc, u16 persist_id)
461 {
462 	struct mpi3mr_tgt_dev *tgtdev;
463 	unsigned long flags;
464 
465 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
466 	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
467 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
468 	return tgtdev;
469 }
470 
471 /**
472  * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
473  * @mrioc: Adapter instance reference
474  * @tgt_priv: Target private data
475  *
476  * Accessor to return target device from the target private
477  * data. Non Lock version
478  *
479  * Return: Target device reference.
480  */
481 static struct mpi3mr_tgt_dev  *__mpi3mr_get_tgtdev_from_tgtpriv(
482 	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
483 {
484 	struct mpi3mr_tgt_dev *tgtdev;
485 
486 	assert_spin_locked(&mrioc->tgtdev_lock);
487 	tgtdev = tgt_priv->tgt_dev;
488 	if (tgtdev)
489 		mpi3mr_tgtdev_get(tgtdev);
490 	return tgtdev;
491 }
492 
493 /**
494  * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
495  * @mrioc: Adapter instance reference
496  * @tgtdev: Target device structure
497  *
498  * Checks whether the device is exposed to the upper layers and,
499  * if it is, removes it from the upper layers by calling
500  * scsi_remove_target().
501  *
502  * Return: Nothing.
503  */
504 static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
505 	struct mpi3mr_tgt_dev *tgtdev)
506 {
507 	struct mpi3mr_stgt_priv_data *tgt_priv;
508 
509 	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
510 	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
511 	if (tgtdev->starget && tgtdev->starget->hostdata) {
512 		tgt_priv = tgtdev->starget->hostdata;
513 		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
514 	}
515 
516 	if (tgtdev->starget) {
517 		scsi_remove_target(&tgtdev->starget->dev);
518 		tgtdev->host_exposed = 0;
519 	}
520 	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
521 	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
522 }
523 
524 /**
525  * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
526  * @mrioc: Adapter instance reference
527  * @perst_id: Persistent ID of the device
528  *
529  * Checks whether the device can be exposed to the upper layers
530  * and, if it is not already exposed, exposes it by calling
531  * scsi_scan_target().
532  *
533  * Return: 0 on success, non zero on failure.
534  */
535 static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
536 	u16 perst_id)
537 {
538 	int retval = 0;
539 	struct mpi3mr_tgt_dev *tgtdev;
540 
541 	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
542 	if (!tgtdev) {
543 		retval = -1;
544 		goto out;
545 	}
546 	if (tgtdev->is_hidden) {
547 		retval = -1;
548 		goto out;
549 	}
550 	if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
551 		tgtdev->host_exposed = 1;
552 		scsi_scan_target(&mrioc->shost->shost_gendev, 0,
553 		    tgtdev->perst_id,
554 		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
555 		if (!tgtdev->starget)
556 			tgtdev->host_exposed = 0;
557 	}
558 out:
559 	if (tgtdev)
560 		mpi3mr_tgtdev_put(tgtdev);
561 
562 	return retval;
563 }
564 
565 /**
566  * mpi3mr_update_sdev - Update SCSI device information
567  * @sdev: SCSI device reference
568  * @data: target device reference
569  *
570  * This is an iterator function called for each SCSI device in a
571  * target to update the target specific information into each
572  * SCSI device.
573  *
574  * Return: Nothing.
575  */
576 static void
577 mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
578 {
579 	struct mpi3mr_tgt_dev *tgtdev;
580 
581 	tgtdev = (struct mpi3mr_tgt_dev *)data;
582 	if (!tgtdev)
583 		return;
584 
585 	switch (tgtdev->dev_type) {
586 	case MPI3_DEVICE_DEVFORM_PCIE:
587 		/* The block layer hw sector size = 512 */
588 		blk_queue_max_hw_sectors(sdev->request_queue,
589 		    tgtdev->dev_spec.pcie_inf.mdts / 512);
590 		blk_queue_virt_boundary(sdev->request_queue,
591 		    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
592 
593 		break;
594 	default:
595 		break;
596 	}
597 }
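
/*
 * Worked example (editorial addition): an NVMe device reporting a 1 MiB
 * MDTS with a 4 KiB controller page size (pgsz = 12) ends up with
 * max_hw_sectors = 1048576 / 512 = 2048 and a virt boundary mask of
 * (1 << 12) - 1 = 0xfff.
 */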
598 
599 /**
600  * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
601  * @mrioc: Adapter instance reference
602  *
603  * This is executed post controller reset to identify any devices
604  * that went missing during the reset and remove them from the upper
605  * layers, or to expose any newly detected device to the upper layers.
606  *
607  * Return: Nothing.
608  */
609 
610 void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
611 {
612 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
613 
614 	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
615 	    list) {
616 		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
617 		    tgtdev->host_exposed) {
618 			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
619 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
620 			mpi3mr_tgtdev_put(tgtdev);
621 		}
622 	}
623 
624 	tgtdev = NULL;
625 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
626 		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
627 		    !tgtdev->is_hidden && !tgtdev->host_exposed)
628 			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
629 	}
630 }
631 
632 /**
633  * mpi3mr_update_tgtdev - Update cached target device information
634  * @mrioc: Adapter instance reference
635  * @tgtdev: Target device internal structure
636  * @dev_pg0: New device page0
637  *
638  * Update the information from the device page0 into the driver
639  * cached target device structure.
640  *
641  * Return: Nothing.
642  */
643 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
644 	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
645 {
646 	u16 flags = 0;
647 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
648 
649 	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
650 	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
651 	tgtdev->dev_type = dev_pg0->device_form;
652 	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
653 	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
654 	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
655 	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
656 	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
657 
658 	flags = le16_to_cpu(dev_pg0->flags);
659 	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
660 
661 	if (tgtdev->starget && tgtdev->starget->hostdata) {
662 		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
663 		    tgtdev->starget->hostdata;
664 		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
665 		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
666 		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
667 	}
668 
669 	switch (tgtdev->dev_type) {
670 	case MPI3_DEVICE_DEVFORM_SAS_SATA:
671 	{
672 		struct mpi3_device0_sas_sata_format *sasinf =
673 		    &dev_pg0->device_specific.sas_sata_format;
674 		u16 dev_info = le16_to_cpu(sasinf->device_info);
675 
676 		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
677 		tgtdev->dev_spec.sas_sata_inf.sas_address =
678 		    le64_to_cpu(sasinf->sas_address);
679 		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
680 		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
681 			tgtdev->is_hidden = 1;
682 		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
683 		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
684 			tgtdev->is_hidden = 1;
685 		break;
686 	}
687 	case MPI3_DEVICE_DEVFORM_PCIE:
688 	{
689 		struct mpi3_device0_pcie_format *pcieinf =
690 		    &dev_pg0->device_specific.pcie_format;
691 		u16 dev_info = le16_to_cpu(pcieinf->device_info);
692 
693 		tgtdev->dev_spec.pcie_inf.capb =
694 		    le32_to_cpu(pcieinf->capabilities);
695 		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
696 		/* 2^12 = 4096 */
697 		tgtdev->dev_spec.pcie_inf.pgsz = 12;
698 		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
699 			tgtdev->dev_spec.pcie_inf.mdts =
700 			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
701 			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
702 			tgtdev->dev_spec.pcie_inf.reset_to =
703 			    pcieinf->controller_reset_to;
704 			tgtdev->dev_spec.pcie_inf.abort_to =
705 			    pcieinf->nv_me_abort_to;
706 		}
707 		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
708 			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
709 		if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
710 		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
711 			tgtdev->is_hidden = 1;
712 		break;
713 	}
714 	case MPI3_DEVICE_DEVFORM_VD:
715 	{
716 		struct mpi3_device0_vd_format *vdinf =
717 		    &dev_pg0->device_specific.vd_format;
718 
719 		tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
720 		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
721 			tgtdev->is_hidden = 1;
722 		break;
723 	}
724 	default:
725 		break;
726 	}
727 }
728 
729 /**
730  * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
731  * @mrioc: Adapter instance reference
732  * @fwevt: Firmware event information.
733  *
734  * Process the Device Status Change event and, based on the device's
735  * new information, either expose the device to the upper layers or
736  * remove it from the upper layers.
737  *
738  * Return: Nothing.
739  */
740 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
741 	struct mpi3mr_fwevt *fwevt)
742 {
743 	u16 dev_handle = 0;
744 	u8 uhide = 0, delete = 0, cleanup = 0;
745 	struct mpi3mr_tgt_dev *tgtdev = NULL;
746 	struct mpi3_event_data_device_status_change *evtdata =
747 	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;
748 
749 	dev_handle = le16_to_cpu(evtdata->dev_handle);
750 	ioc_info(mrioc,
751 	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
752 	    __func__, dev_handle, evtdata->reason_code);
753 	switch (evtdata->reason_code) {
754 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
755 		delete = 1;
756 		break;
757 	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
758 		uhide = 1;
759 		break;
760 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
761 		delete = 1;
762 		cleanup = 1;
763 		break;
764 	default:
765 		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
766 		    evtdata->reason_code);
767 		break;
768 	}
769 
770 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
771 	if (!tgtdev)
772 		goto out;
773 	if (uhide) {
774 		tgtdev->is_hidden = 0;
775 		if (!tgtdev->host_exposed)
776 			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
777 	}
778 	if (tgtdev->starget && tgtdev->starget->hostdata) {
779 		if (delete)
780 			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
781 	}
782 	if (cleanup) {
783 		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
784 		mpi3mr_tgtdev_put(tgtdev);
785 	}
786 
787 out:
788 	if (tgtdev)
789 		mpi3mr_tgtdev_put(tgtdev);
790 }
791 
792 /**
793  * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
794  * @mrioc: Adapter instance reference
795  * @dev_pg0: New device page0
796  *
797  * Process the Device Info Change event and, based on the device's
798  * new information, either expose the device to the upper layers,
799  * remove it from the upper layers, or update the details of the
800  * device.
801  *
802  * Return: Nothing.
803  */
804 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
805 	struct mpi3_device_page0 *dev_pg0)
806 {
807 	struct mpi3mr_tgt_dev *tgtdev = NULL;
808 	u16 dev_handle = 0, perst_id = 0;
809 
810 	perst_id = le16_to_cpu(dev_pg0->persistent_id);
811 	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
812 	ioc_info(mrioc,
813 	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
814 	    __func__, dev_handle, perst_id);
815 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
816 	if (!tgtdev)
817 		goto out;
818 	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
819 	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
820 		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
821 	if (tgtdev->is_hidden && tgtdev->host_exposed)
822 		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
823 	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
824 		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
825 		    mpi3mr_update_sdev);
826 out:
827 	if (tgtdev)
828 		mpi3mr_tgtdev_put(tgtdev);
829 }
830 
831 /**
832  * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
833  * @mrioc: Adapter instance reference
834  * @fwevt: Firmware event reference
835  *
836  * Prints information about the SAS topology change event and
837  * for "not responding" event code, removes the device from the
838  * upper layers.
839  *
840  * Return: Nothing.
841  */
842 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
843 	struct mpi3mr_fwevt *fwevt)
844 {
845 	struct mpi3_event_data_sas_topology_change_list *event_data =
846 	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
847 	int i;
848 	u16 handle;
849 	u8 reason_code;
850 	struct mpi3mr_tgt_dev *tgtdev = NULL;
851 
852 	for (i = 0; i < event_data->num_entries; i++) {
853 		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
854 		if (!handle)
855 			continue;
856 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
857 		if (!tgtdev)
858 			continue;
859 
860 		reason_code = event_data->phy_entry[i].status &
861 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
862 
863 		switch (reason_code) {
864 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
865 			if (tgtdev->host_exposed)
866 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
867 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
868 			mpi3mr_tgtdev_put(tgtdev);
869 			break;
870 		default:
871 			break;
872 		}
873 		if (tgtdev)
874 			mpi3mr_tgtdev_put(tgtdev);
875 	}
876 }
877 
878 /**
879  * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
880  * @mrioc: Adapter instance reference
881  * @fwevt: Firmware event reference
882  *
883  * Prints information about the PCIe topology change event and
884  * for "not responding" event code, removes the device from the
885  * upper layers.
886  *
887  * Return: Nothing.
888  */
889 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
890 	struct mpi3mr_fwevt *fwevt)
891 {
892 	struct mpi3_event_data_pcie_topology_change_list *event_data =
893 	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
894 	int i;
895 	u16 handle;
896 	u8 reason_code;
897 	struct mpi3mr_tgt_dev *tgtdev = NULL;
898 
899 	for (i = 0; i < event_data->num_entries; i++) {
900 		handle =
901 		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
902 		if (!handle)
903 			continue;
904 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
905 		if (!tgtdev)
906 			continue;
907 
908 		reason_code = event_data->port_entry[i].port_status;
909 
910 		switch (reason_code) {
911 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
912 			if (tgtdev->host_exposed)
913 				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
914 			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
915 			mpi3mr_tgtdev_put(tgtdev);
916 			break;
917 		default:
918 			break;
919 		}
920 		if (tgtdev)
921 			mpi3mr_tgtdev_put(tgtdev);
922 	}
923 }
924 
925 /**
926  * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
927  * @mrioc: Adapter instance reference
928  * @fwevt: Firmware event reference
929  *
930  * Identifies the firmware event and calls the corresponding bottom
931  * half handler and sends an event acknowledgment if required.
932  *
933  * Return: Nothing.
934  */
935 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
936 	struct mpi3mr_fwevt *fwevt)
937 {
938 	mrioc->current_event = fwevt;
939 	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
940 
941 	if (mrioc->stop_drv_processing)
942 		goto out;
943 
944 	if (!fwevt->process_evt)
945 		goto evt_ack;
946 
947 	switch (fwevt->event_id) {
948 	case MPI3_EVENT_DEVICE_ADDED:
949 	{
950 		struct mpi3_device_page0 *dev_pg0 =
951 		    (struct mpi3_device_page0 *)fwevt->event_data;
952 		mpi3mr_report_tgtdev_to_host(mrioc,
953 		    le16_to_cpu(dev_pg0->persistent_id));
954 		break;
955 	}
956 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
957 	{
958 		mpi3mr_devinfochg_evt_bh(mrioc,
959 		    (struct mpi3_device_page0 *)fwevt->event_data);
960 		break;
961 	}
962 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
963 	{
964 		mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
965 		break;
966 	}
967 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
968 	{
969 		mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
970 		break;
971 	}
972 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
973 	{
974 		mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
975 		break;
976 	}
977 	default:
978 		break;
979 	}
980 
981 evt_ack:
982 	if (fwevt->send_ack)
983 		mpi3mr_send_event_ack(mrioc, fwevt->event_id,
984 		    fwevt->evt_ctx);
985 out:
986 	/* Put fwevt reference count to neutralize kref_init increment */
987 	mpi3mr_fwevt_put(fwevt);
988 	mrioc->current_event = NULL;
989 }
990 
991 /**
992  * mpi3mr_fwevt_worker - Firmware event worker
993  * @work: Work struct containing firmware event
994  *
995  * Extracts the firmware event and calls mpi3mr_fwevt_bh.
996  *
997  * Return: Nothing.
998  */
999 static void mpi3mr_fwevt_worker(struct work_struct *work)
1000 {
1001 	struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
1002 	    work);
1003 	mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
1004 	/*
1005 	 * Put fwevt reference count after
1006 	 * dequeuing it from worker queue
1007 	 */
1008 	mpi3mr_fwevt_put(fwevt);
1009 }
1010 
1011 /**
1012  * mpi3mr_create_tgtdev - Create and add a target device
1013  * @mrioc: Adapter instance reference
1014  * @dev_pg0: Device Page 0 data
1015  *
1016  * If the device specified by the device page 0 data is not
1017  * present in the driver's internal list, allocate the memory
1018  * for the device, populate the data and add to the list, else
1019  * update the device data. The key is the persistent ID.
1020  *
1021  * Return: 0 on success, -ENOMEM on memory allocation failure
1022  */
1023 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
1024 	struct mpi3_device_page0 *dev_pg0)
1025 {
1026 	int retval = 0;
1027 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1028 	u16 perst_id = 0;
1029 
1030 	perst_id = le16_to_cpu(dev_pg0->persistent_id);
1031 	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
1032 	if (tgtdev) {
1033 		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1034 		mpi3mr_tgtdev_put(tgtdev);
1035 	} else {
1036 		tgtdev = mpi3mr_alloc_tgtdev();
1037 		if (!tgtdev)
1038 			return -ENOMEM;
1039 		mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1040 		mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
1041 	}
1042 
1043 	return retval;
1044 }
1045 
1046 /**
1047  * mpi3mr_flush_delayed_rmhs_list - Flush pending commands
1048  * @mrioc: Adapter instance reference
1049  *
1050  * Flush pending commands in the delayed removal handshake list
1051  * due to a controller reset or driver removal as a cleanup.
1052  *
1053  * Return: Nothing
1054  */
1055 void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc)
1056 {
1057 	struct delayed_dev_rmhs_node *_rmhs_node;
1058 
1059 	while (!list_empty(&mrioc->delayed_rmhs_list)) {
1060 		_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
1061 		    struct delayed_dev_rmhs_node, list);
1062 		list_del(&_rmhs_node->list);
1063 		kfree(_rmhs_node);
1064 	}
1065 }
1066 
1067 /**
1068  * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
1069  * @mrioc: Adapter instance reference
1070  * @drv_cmd: Internal command tracker
1071  *
1072  * Issues a target reset TM to the firmware from the device
1073  * removal TM pend list or retries the removal handshake sequence
1074  * based on the IOU control request IOC status.
1075  *
1076  * Return: Nothing
1077  */
1078 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
1079 	struct mpi3mr_drv_cmd *drv_cmd)
1080 {
1081 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1082 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
1083 
1084 	ioc_info(mrioc,
1085 	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
1086 	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
1087 	    drv_cmd->ioc_loginfo);
1088 	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
1089 		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
1090 			drv_cmd->retry_count++;
1091 			ioc_info(mrioc,
1092 			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
1093 			    __func__, drv_cmd->dev_handle,
1094 			    drv_cmd->retry_count);
1095 			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
1096 			    drv_cmd, drv_cmd->iou_rc);
1097 			return;
1098 		}
1099 		ioc_err(mrioc,
1100 		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
1101 		    __func__, drv_cmd->dev_handle);
1102 	} else {
1103 		ioc_info(mrioc,
1104 		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
1105 		    __func__, drv_cmd->dev_handle);
1106 		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
1107 	}
1108 
1109 	if (!list_empty(&mrioc->delayed_rmhs_list)) {
1110 		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
1111 		    struct delayed_dev_rmhs_node, list);
1112 		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
1113 		drv_cmd->retry_count = 0;
1114 		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
1115 		ioc_info(mrioc,
1116 		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
1117 		    __func__, drv_cmd->dev_handle);
1118 		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
1119 		    drv_cmd->iou_rc);
1120 		list_del(&delayed_dev_rmhs->list);
1121 		kfree(delayed_dev_rmhs);
1122 		return;
1123 	}
1124 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
1125 	drv_cmd->callback = NULL;
1126 	drv_cmd->retry_count = 0;
1127 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1128 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
1129 }
1130 
1131 /**
1132  * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
1133  * @mrioc: Adapter instance reference
1134  * @drv_cmd: Internal command tracker
1135  *
1136  * Issues a target reset TM to the firmware from the device
1137  * removal TM pend list or issues an IO unit control request as
1138  * part of the device removal or hidden acknowledgment handshake.
1139  *
1140  * Return: Nothing
1141  */
1142 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
1143 	struct mpi3mr_drv_cmd *drv_cmd)
1144 {
1145 	struct mpi3_iounit_control_request iou_ctrl;
1146 	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1147 	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
1148 	int retval;
1149 
1150 	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
1151 		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
1152 
1153 	if (tm_reply)
1154 		pr_info(IOCNAME
1155 		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
1156 		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
1157 		    drv_cmd->ioc_loginfo,
1158 		    le32_to_cpu(tm_reply->termination_count));
1159 
1160 	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
1161 	    mrioc->name, drv_cmd->dev_handle, cmd_idx);
1162 
1163 	memset(&iou_ctrl, 0, sizeof(iou_ctrl));
1164 
1165 	drv_cmd->state = MPI3MR_CMD_PENDING;
1166 	drv_cmd->is_waiting = 0;
1167 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
1168 	iou_ctrl.operation = drv_cmd->iou_rc;
1169 	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
1170 	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
1171 	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
1172 
1173 	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
1174 	    1);
1175 	if (retval) {
1176 		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
1177 		    mrioc->name);
1178 		goto out_failed;
1179 	}
1180 
1181 	return;
1182 out_failed:
1183 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
1184 	drv_cmd->callback = NULL;
1185 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1186 	drv_cmd->retry_count = 0;
1187 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
1188 }
1189 
1190 /**
1191  * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
1192  * @mrioc: Adapter instance reference
1193  * @handle: Device handle
1194  * @cmdparam: Internal command tracker
1195  * @iou_rc: IO unit reason code
1196  *
1197  * Issues a target reset TM to the firmware or adds it to a pend
1198  * list as part of the device removal or hidden acknowledgment
1199  * handshake.
1200  *
1201  * Return: Nothing
1202  */
1203 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
1204 	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
1205 {
1206 	struct mpi3_scsi_task_mgmt_request tm_req;
1207 	int retval = 0;
1208 	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
1209 	u8 retrycount = 5;
1210 	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
1211 	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
1212 
1213 	if (drv_cmd)
1214 		goto issue_cmd;
1215 	do {
1216 		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
1217 		    MPI3MR_NUM_DEVRMCMD);
1218 		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
1219 			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
1220 				break;
1221 			cmd_idx = MPI3MR_NUM_DEVRMCMD;
1222 		}
1223 	} while (retrycount--);
1224 
1225 	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
1226 		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
1227 		    GFP_ATOMIC);
1228 		if (!delayed_dev_rmhs)
1229 			return;
1230 		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
1231 		delayed_dev_rmhs->handle = handle;
1232 		delayed_dev_rmhs->iou_rc = iou_rc;
1233 		list_add_tail(&delayed_dev_rmhs->list,
1234 		    &mrioc->delayed_rmhs_list);
1235 		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
1236 		    __func__, handle);
1237 		return;
1238 	}
1239 	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];
1240 
1241 issue_cmd:
1242 	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1243 	ioc_info(mrioc,
1244 	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
1245 	    __func__, handle, cmd_idx);
1246 
1247 	memset(&tm_req, 0, sizeof(tm_req));
1248 	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
1249 		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
1250 		goto out;
1251 	}
1252 	drv_cmd->state = MPI3MR_CMD_PENDING;
1253 	drv_cmd->is_waiting = 0;
1254 	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
1255 	drv_cmd->dev_handle = handle;
1256 	drv_cmd->iou_rc = iou_rc;
1257 	tm_req.dev_handle = cpu_to_le16(handle);
1258 	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
1259 	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
1260 	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
1261 	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
1262 
1263 	set_bit(handle, mrioc->removepend_bitmap);
1264 	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
1265 	if (retval) {
1266 		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
1267 		    __func__);
1268 		goto out_failed;
1269 	}
1270 out:
1271 	return;
1272 out_failed:
1273 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
1274 	drv_cmd->callback = NULL;
1275 	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1276 	drv_cmd->retry_count = 0;
1277 	clear_bit(cmd_idx, mrioc->devrem_bitmap);
1278 }
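
/*
 * Handshake overview (editorial addition): device removal / hidden-ack is
 * a two-step exchange. mpi3mr_dev_rmhs_send_tm() issues the target reset
 * TM (or defers it to delayed_rmhs_list when no dev_rmhs command slot is
 * free), mpi3mr_dev_rmhs_complete_tm() then sends the IO unit control
 * request, and mpi3mr_dev_rmhs_complete_iou() either retries the
 * handshake, picks up a delayed entry, or releases the command slot.
 */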
1279 
1280 /**
1281  * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
1282  * @mrioc: Adapter instance reference
1283  * @event_reply: event data
1284  *
1285  * Checks the reason code and, based on that, either blocks I/O to
1286  * the device, unblocks I/O to the device, or starts the device
1287  * removal handshake with the firmware with the remove reason for
1288  * PCIe devices.
1289  *
1290  * Return: Nothing
1291  */
1292 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
1293 	struct mpi3_event_notification_reply *event_reply)
1294 {
1295 	struct mpi3_event_data_pcie_topology_change_list *topo_evt =
1296 	    (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
1297 	int i;
1298 	u16 handle;
1299 	u8 reason_code;
1300 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1301 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
1302 
1303 	for (i = 0; i < topo_evt->num_entries; i++) {
1304 		handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
1305 		if (!handle)
1306 			continue;
1307 		reason_code = topo_evt->port_entry[i].port_status;
1308 		scsi_tgt_priv_data =  NULL;
1309 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1310 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
1311 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
1312 			    tgtdev->starget->hostdata;
1313 		switch (reason_code) {
1314 		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1315 			if (scsi_tgt_priv_data) {
1316 				scsi_tgt_priv_data->dev_removed = 1;
1317 				scsi_tgt_priv_data->dev_removedelay = 0;
1318 				atomic_set(&scsi_tgt_priv_data->block_io, 0);
1319 			}
1320 			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
1321 			    MPI3_CTRL_OP_REMOVE_DEVICE);
1322 			break;
1323 		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1324 			if (scsi_tgt_priv_data) {
1325 				scsi_tgt_priv_data->dev_removedelay = 1;
1326 				atomic_inc(&scsi_tgt_priv_data->block_io);
1327 			}
1328 			break;
1329 		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1330 			if (scsi_tgt_priv_data &&
1331 			    scsi_tgt_priv_data->dev_removedelay) {
1332 				scsi_tgt_priv_data->dev_removedelay = 0;
1333 				atomic_dec_if_positive
1334 				    (&scsi_tgt_priv_data->block_io);
1335 			}
1336 			break;
1337 		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1338 		default:
1339 			break;
1340 		}
1341 		if (tgtdev)
1342 			mpi3mr_tgtdev_put(tgtdev);
1343 	}
1344 }
1345 
1346 /**
1347  * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
1348  * @mrioc: Adapter instance reference
1349  * @event_reply: event data
1350  *
1351  * Checks the reason code and, based on that, either blocks I/O to
1352  * the device, unblocks I/O to the device, or starts the device
1353  * removal handshake with the firmware with the remove reason for
1354  * SAS/SATA devices.
1355  *
1356  * Return: Nothing
1357  */
1358 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
1359 	struct mpi3_event_notification_reply *event_reply)
1360 {
1361 	struct mpi3_event_data_sas_topology_change_list *topo_evt =
1362 	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
1363 	int i;
1364 	u16 handle;
1365 	u8 reason_code;
1366 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1367 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
1368 
1369 	for (i = 0; i < topo_evt->num_entries; i++) {
1370 		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
1371 		if (!handle)
1372 			continue;
1373 		reason_code = topo_evt->phy_entry[i].status &
1374 		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1375 		scsi_tgt_priv_data =  NULL;
1376 		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1377 		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
1378 			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
1379 			    tgtdev->starget->hostdata;
1380 		switch (reason_code) {
1381 		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1382 			if (scsi_tgt_priv_data) {
1383 				scsi_tgt_priv_data->dev_removed = 1;
1384 				scsi_tgt_priv_data->dev_removedelay = 0;
1385 				atomic_set(&scsi_tgt_priv_data->block_io, 0);
1386 			}
1387 			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
1388 			    MPI3_CTRL_OP_REMOVE_DEVICE);
1389 			break;
1390 		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1391 			if (scsi_tgt_priv_data) {
1392 				scsi_tgt_priv_data->dev_removedelay = 1;
1393 				atomic_inc(&scsi_tgt_priv_data->block_io);
1394 			}
1395 			break;
1396 		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1397 			if (scsi_tgt_priv_data &&
1398 			    scsi_tgt_priv_data->dev_removedelay) {
1399 				scsi_tgt_priv_data->dev_removedelay = 0;
1400 				atomic_dec_if_positive
1401 				    (&scsi_tgt_priv_data->block_io);
1402 			}
1403 		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1404 		default:
1405 			break;
1406 		}
1407 		if (tgtdev)
1408 			mpi3mr_tgtdev_put(tgtdev);
1409 	}
1410 }
1411 
1412 /**
1413  * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
1414  * @mrioc: Adapter instance reference
1415  * @event_reply: event data
1416  *
1417  * Checks the reason code and, based on that, either blocks I/O to
1418  * the device, unblocks I/O to the device, or starts the device
1419  * removal handshake with the firmware with the remove or hidden
1420  * acknowledgment reason.
1421  *
1422  * Return: Nothing
1423  */
1424 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
1425 	struct mpi3_event_notification_reply *event_reply)
1426 {
1427 	u16 dev_handle = 0;
1428 	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
1429 	struct mpi3mr_tgt_dev *tgtdev = NULL;
1430 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
1431 	struct mpi3_event_data_device_status_change *evtdata =
1432 	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;
1433 
1434 	if (mrioc->stop_drv_processing)
1435 		goto out;
1436 
1437 	dev_handle = le16_to_cpu(evtdata->dev_handle);
1438 
1439 	switch (evtdata->reason_code) {
1440 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
1441 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
1442 		block = 1;
1443 		break;
1444 	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
1445 		delete = 1;
1446 		hide = 1;
1447 		break;
1448 	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
1449 		delete = 1;
1450 		remove = 1;
1451 		break;
1452 	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
1453 	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
1454 		ublock = 1;
1455 		break;
1456 	default:
1457 		break;
1458 	}
1459 
1460 	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1461 	if (!tgtdev)
1462 		goto out;
1463 	if (hide)
1464 		tgtdev->is_hidden = hide;
1465 	if (tgtdev->starget && tgtdev->starget->hostdata) {
1466 		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
1467 		    tgtdev->starget->hostdata;
1468 		if (block)
1469 			atomic_inc(&scsi_tgt_priv_data->block_io);
1470 		if (delete)
1471 			scsi_tgt_priv_data->dev_removed = 1;
1472 		if (ublock)
1473 			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
1474 	}
1475 	if (remove)
1476 		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
1477 		    MPI3_CTRL_OP_REMOVE_DEVICE);
1478 	if (hide)
1479 		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
1480 		    MPI3_CTRL_OP_HIDDEN_ACK);
1481 
1482 out:
1483 	if (tgtdev)
1484 		mpi3mr_tgtdev_put(tgtdev);
1485 }
1486 
1487 /**
1488  * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
1489  * @mrioc: Adapter instance reference
1490  * @event_reply: event data
1491  *
1492  * Identifies the new shutdown timeout value and updates it.
1493  *
1494  * Return: Nothing
1495  */
1496 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
1497 	struct mpi3_event_notification_reply *event_reply)
1498 {
1499 	struct mpi3_event_data_energy_pack_change *evtdata =
1500 	    (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
1501 	u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
1502 
1503 	if (shutdown_timeout <= 0) {
1504 		ioc_warn(mrioc,
1505 		    "%s :Invalid Shutdown Timeout received = %d\n",
1506 		    __func__, shutdown_timeout);
1507 		return;
1508 	}
1509 
1510 	ioc_info(mrioc,
1511 	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
1512 	    __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
1513 	mrioc->facts.shutdown_timeout = shutdown_timeout;
1514 }
1515 
1516 /**
1517  * mpi3mr_os_handle_events - Firmware event handler
1518  * @mrioc: Adapter instance reference
1519  * @event_reply: event data
1520  *
1521  * Identify whether the event has to be handled and acknowledged,
1522  * and either process the event in the top half and/or schedule a
1523  * bottom half through mpi3mr_fwevt_worker().
1524  *
1525  * Return: Nothing
1526  */
1527 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
1528 	struct mpi3_event_notification_reply *event_reply)
1529 {
1530 	u16 evt_type, sz;
1531 	struct mpi3mr_fwevt *fwevt = NULL;
1532 	bool ack_req = 0, process_evt_bh = 0;
1533 
1534 	if (mrioc->stop_drv_processing)
1535 		return;
1536 
1537 	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
1538 	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
1539 		ack_req = 1;
1540 
1541 	evt_type = event_reply->event;
1542 
1543 	switch (evt_type) {
1544 	case MPI3_EVENT_DEVICE_ADDED:
1545 	{
1546 		struct mpi3_device_page0 *dev_pg0 =
1547 		    (struct mpi3_device_page0 *)event_reply->event_data;
1548 		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
1549 			ioc_err(mrioc,
1550 			    "%s :Failed to add device in the device add event\n",
1551 			    __func__);
1552 		else
1553 			process_evt_bh = 1;
1554 		break;
1555 	}
1556 	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1557 	{
1558 		process_evt_bh = 1;
1559 		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
1560 		break;
1561 	}
1562 	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1563 	{
1564 		process_evt_bh = 1;
1565 		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
1566 		break;
1567 	}
1568 	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1569 	{
1570 		process_evt_bh = 1;
1571 		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
1572 		break;
1573 	}
1574 	case MPI3_EVENT_DEVICE_INFO_CHANGED:
1575 	{
1576 		process_evt_bh = 1;
1577 		break;
1578 	}
1579 	case MPI3_EVENT_ENERGY_PACK_CHANGE:
1580 	{
1581 		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
1582 		break;
1583 	}
1584 	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
1585 	case MPI3_EVENT_SAS_DISCOVERY:
1586 	case MPI3_EVENT_CABLE_MGMT:
1587 	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1588 	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
1589 	case MPI3_EVENT_PCIE_ENUMERATION:
1590 		break;
1591 	default:
1592 		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
1593 		    __func__, evt_type);
1594 		break;
1595 	}
1596 	if (process_evt_bh || ack_req) {
1597 		sz = event_reply->event_data_length * 4;
1598 		fwevt = mpi3mr_alloc_fwevt(sz);
1599 		if (!fwevt) {
1600 			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
1601 			    __func__, __FILE__, __LINE__, __func__);
1602 			return;
1603 		}
1604 
1605 		memcpy(fwevt->event_data, event_reply->event_data, sz);
1606 		fwevt->mrioc = mrioc;
1607 		fwevt->event_id = evt_type;
1608 		fwevt->send_ack = ack_req;
1609 		fwevt->process_evt = process_evt_bh;
1610 		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
1611 		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
1612 	}
1613 }
1614 
1615 /**
1616  * mpi3mr_process_op_reply_desc - reply descriptor handler
1617  * @mrioc: Adapter instance reference
1618  * @reply_desc: Operational reply descriptor
1619  * @reply_dma: Placeholder for the reply DMA address
1620  * @qidx: Operational queue index
1621  *
1622  * Process the operational reply descriptor and identify the
1623  * descriptor type. Based on the descriptor, map the MPI3 request
1624  * status to a SCSI command status and call the scsi_done
1625  * callback.
1626  *
1627  * Return: Nothing
1628  */
1629 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
1630 	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
1631 {
1632 	u16 reply_desc_type, host_tag = 0;
1633 	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
1634 	u32 ioc_loginfo = 0;
1635 	struct mpi3_status_reply_descriptor *status_desc = NULL;
1636 	struct mpi3_address_reply_descriptor *addr_desc = NULL;
1637 	struct mpi3_success_reply_descriptor *success_desc = NULL;
1638 	struct mpi3_scsi_io_reply *scsi_reply = NULL;
1639 	struct scsi_cmnd *scmd = NULL;
1640 	struct scmd_priv *priv = NULL;
1641 	u8 *sense_buf = NULL;
1642 	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
1643 	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
1644 	u16 dev_handle = 0xFFFF;
1645 	struct scsi_sense_hdr sshdr;
1646 
1647 	*reply_dma = 0;
1648 	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
1649 	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
1650 	switch (reply_desc_type) {
1651 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
1652 		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
1653 		host_tag = le16_to_cpu(status_desc->host_tag);
1654 		ioc_status = le16_to_cpu(status_desc->ioc_status);
1655 		if (ioc_status &
1656 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
1657 			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
1658 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
1659 		break;
1660 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
1661 		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
1662 		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
1663 		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
1664 		    *reply_dma);
1665 		if (!scsi_reply) {
1666 			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
1667 			    mrioc->name);
1668 			goto out;
1669 		}
1670 		host_tag = le16_to_cpu(scsi_reply->host_tag);
1671 		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
1672 		scsi_status = scsi_reply->scsi_status;
1673 		scsi_state = scsi_reply->scsi_state;
1674 		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
1675 		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
1676 		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
1677 		sense_count = le32_to_cpu(scsi_reply->sense_count);
1678 		resp_data = le32_to_cpu(scsi_reply->response_data);
1679 		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
1680 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
1681 		if (ioc_status &
1682 		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
1683 			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
1684 		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
1685 		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
1686 			panic("%s: Ran out of sense buffers\n", mrioc->name);
1687 		break;
1688 	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
1689 		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
1690 		host_tag = le16_to_cpu(success_desc->host_tag);
1691 		break;
1692 	default:
1693 		break;
1694 	}
1695 	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
1696 	if (!scmd) {
1697 		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
1698 		    mrioc->name, host_tag);
1699 		goto out;
1700 	}
1701 	priv = scsi_cmd_priv(scmd);
1702 	if (success_desc) {
1703 		scmd->result = DID_OK << 16;
1704 		goto out_success;
1705 	}
1706 	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
1707 	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
1708 	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
1709 	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
1710 		ioc_status = MPI3_IOCSTATUS_SUCCESS;
1711 
1712 	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
1713 	    sense_buf) {
1714 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);
1715 
1716 		memcpy(scmd->sense_buffer, sense_buf, sz);
1717 	}
1718 
1719 	switch (ioc_status) {
1720 	case MPI3_IOCSTATUS_BUSY:
1721 	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
1722 		scmd->result = SAM_STAT_BUSY;
1723 		break;
1724 	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1725 		scmd->result = DID_NO_CONNECT << 16;
1726 		break;
1727 	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
1728 		scmd->result = DID_SOFT_ERROR << 16;
1729 		break;
1730 	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
1731 	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
1732 		scmd->result = DID_RESET << 16;
1733 		break;
1734 	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1735 		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
1736 			scmd->result = DID_SOFT_ERROR << 16;
1737 		else
1738 			scmd->result = (DID_OK << 16) | scsi_status;
1739 		break;
1740 	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
1741 		scmd->result = (DID_OK << 16) | scsi_status;
1742 		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
1743 			break;
1744 		if (xfer_count < scmd->underflow) {
1745 			if (scsi_status == SAM_STAT_BUSY)
1746 				scmd->result = SAM_STAT_BUSY;
1747 			else
1748 				scmd->result = DID_SOFT_ERROR << 16;
1749 		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
1750 		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
1751 			scmd->result = DID_SOFT_ERROR << 16;
1752 		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
1753 			scmd->result = DID_RESET << 16;
1754 		break;
1755 	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
1756 		scsi_set_resid(scmd, 0);
1757 		fallthrough;
1758 	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
1759 	case MPI3_IOCSTATUS_SUCCESS:
1760 		scmd->result = (DID_OK << 16) | scsi_status;
1761 		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
1762 		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
1763 			(sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
1764 			scmd->result = DID_SOFT_ERROR << 16;
1765 		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
1766 			scmd->result = DID_RESET << 16;
1767 		break;
1768 	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1769 	case MPI3_IOCSTATUS_INVALID_FUNCTION:
1770 	case MPI3_IOCSTATUS_INVALID_SGL:
1771 	case MPI3_IOCSTATUS_INTERNAL_ERROR:
1772 	case MPI3_IOCSTATUS_INVALID_FIELD:
1773 	case MPI3_IOCSTATUS_INVALID_STATE:
1774 	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
1775 	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1776 	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
1777 	default:
1778 		scmd->result = DID_SOFT_ERROR << 16;
1779 		break;
1780 	}
1781 
1782 	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
1783 	    (scmd->cmnd[0] != ATA_16)) {
1784 		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
1785 		    scmd->result);
1786 		scsi_print_command(scmd);
1787 		ioc_info(mrioc,
1788 		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
1789 		    __func__, dev_handle, ioc_status, ioc_loginfo,
1790 		    priv->req_q_idx + 1);
1791 		ioc_info(mrioc,
1792 		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
1793 		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
1794 		if (sense_buf) {
1795 			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
1796 			ioc_info(mrioc,
1797 			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
1798 			    __func__, sense_count, sshdr.sense_key,
1799 			    sshdr.asc, sshdr.ascq);
1800 		}
1801 	}
1802 out_success:
1803 	mpi3mr_clear_scmd_priv(mrioc, scmd);
1804 	scsi_dma_unmap(scmd);
1805 	scmd->scsi_done(scmd);
1806 out:
1807 	if (sense_buf)
1808 		mpi3mr_repost_sense_buf(mrioc,
1809 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
1810 }
1811 
1812 /**
1813  * mpi3mr_get_chain_idx - get free chain buffer index
1814  * @mrioc: Adapter instance reference
1815  *
1816  * Try to get a free chain buffer index from the free pool.
1817  *
1818  * Return: -1 on failure or the free chain buffer index
1819  */
1820 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
1821 {
1822 	u8 retry_count = 5;
1823 	int cmd_idx = -1;
1824 
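	/*
	 * All chain buffers may be transiently in use; rescan the bitmap a
	 * few times before giving up and returning -1.
	 */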
1825 	do {
1826 		spin_lock(&mrioc->chain_buf_lock);
1827 		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
1828 		    mrioc->chain_buf_count);
1829 		if (cmd_idx < mrioc->chain_buf_count) {
1830 			set_bit(cmd_idx, mrioc->chain_bitmap);
1831 			spin_unlock(&mrioc->chain_buf_lock);
1832 			break;
1833 		}
1834 		spin_unlock(&mrioc->chain_buf_lock);
1835 		cmd_idx = -1;
1836 	} while (retry_count--);
1837 	return cmd_idx;
1838 }
1839 
1840 /**
1841  * mpi3mr_prepare_sg_scmd - build scatter gather list
1842  * @mrioc: Adapter instance reference
1843  * @scmd: SCSI command reference
1844  * @scsiio_req: MPI3 SCSI IO request
1845  *
1846  * This function maps SCSI command's data and protection SGEs to
1847  * This function maps the SCSI command's data and protection SGEs to
1848  * MPI request SGEs. If required, an additional 4K chain buffer is
1849  * used to send the SGEs.
1850  *
1851  * Return: 0 on success, -ENOMEM on scsi_dma_map() failure, -1 if no chain buffer is free.
1852 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
1853 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
1854 {
1855 	dma_addr_t chain_dma;
1856 	struct scatterlist *sg_scmd;
1857 	void *sg_local, *chain;
1858 	u32 chain_length;
1859 	int sges_left, chain_idx;
1860 	u32 sges_in_segment;
1861 	u8 simple_sgl_flags;
1862 	u8 simple_sgl_flags_last;
1863 	u8 last_chain_sgl_flags;
1864 	struct chain_element *chain_req;
1865 	struct scmd_priv *priv = NULL;
1866 
1867 	priv = scsi_cmd_priv(scmd);
1868 
1869 	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
1870 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
1871 	simple_sgl_flags_last = simple_sgl_flags |
1872 	    MPI3_SGE_FLAGS_END_OF_LIST;
1873 	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
1874 	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
1875 
1876 	sg_local = &scsiio_req->sgl;
1877 
1878 	if (!scsiio_req->data_length) {
1879 		mpi3mr_build_zero_len_sge(sg_local);
1880 		return 0;
1881 	}
1882 
1883 	sg_scmd = scsi_sglist(scmd);
1884 	sges_left = scsi_dma_map(scmd);
1885 
1886 	if (sges_left < 0) {
1887 		sdev_printk(KERN_ERR, scmd->device,
1888 		    "scsi_dma_map failed: request for %d bytes!\n",
1889 		    scsi_bufflen(scmd));
1890 		return -ENOMEM;
1891 	}
1892 	if (sges_left > MPI3MR_SG_DEPTH) {
1893 		sdev_printk(KERN_ERR, scmd->device,
1894 		    "scsi_dma_map returned unsupported sge count %d!\n",
1895 		    sges_left);
1896 		return -ENOMEM;
1897 	}
1898 
1899 	sges_in_segment = (mrioc->facts.op_req_sz -
1900 	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);
1901 
1902 	if (sges_left <= sges_in_segment)
1903 		goto fill_in_last_segment;
1904 
1905 	/* fill in main message segment when there is a chain following */
1906 	while (sges_in_segment > 1) {
1907 		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
1908 		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1909 		sg_scmd = sg_next(sg_scmd);
1910 		sg_local += sizeof(struct mpi3_sge_common);
1911 		sges_left--;
1912 		sges_in_segment--;
1913 	}
1914 
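	/*
	 * The remaining SGEs do not fit in the request frame; place them in
	 * a chain buffer and reference it with a LAST_CHAIN element.
	 */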
1915 	chain_idx = mpi3mr_get_chain_idx(mrioc);
1916 	if (chain_idx < 0)
1917 		return -1;
1918 	chain_req = &mrioc->chain_sgl_list[chain_idx];
1919 	priv->chain_idx = chain_idx;
1920 
1921 	chain = chain_req->addr;
1922 	chain_dma = chain_req->dma_addr;
1923 	sges_in_segment = sges_left;
1924 	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);
1925 
1926 	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
1927 	    chain_length, chain_dma);
1928 
1929 	sg_local = chain;
1930 
1931 fill_in_last_segment:
1932 	while (sges_left > 0) {
1933 		if (sges_left == 1)
1934 			mpi3mr_add_sg_single(sg_local,
1935 			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
1936 			    sg_dma_address(sg_scmd));
1937 		else
1938 			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
1939 			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
1940 		sg_scmd = sg_next(sg_scmd);
1941 		sg_local += sizeof(struct mpi3_sge_common);
1942 		sges_left--;
1943 	}
1944 
1945 	return 0;
1946 }
1947 
1948 /**
1949  * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
1950  * @mrioc: Adapter instance reference
1951  * @scmd: SCSI command reference
1952  * @scsiio_req: MPI3 SCSI IO request
1953  *
1954  * This function calls mpi3mr_prepare_sg_scmd() to construct
1955  * both data SGEs and protection information SGEs in the MPI
1956  * format from the SCSI command, as appropriate.
1957  *
1958  * Return: return value of mpi3mr_prepare_sg_scmd().
1959  */
1960 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
1961 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
1962 {
1963 	int ret;
1964 
1965 	ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
1966 	if (ret)
1967 		return ret;
1968 
1969 	return ret;
1970 }
1971 
1972 /**
1973  * mpi3mr_map_queues - Map queues callback handler
1974  * @shost: SCSI host reference
1975  *
1976  * Call blk_mq_pci_map_queues() with the operational reply queue
1977  * offset from which the mapping has to be done.
1978  *
1979  * Return: return value of blk_mq_pci_map_queues().
1980  */
1981 static int mpi3mr_map_queues(struct Scsi_Host *shost)
1982 {
1983 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
1984 
1985 	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
1986 	    mrioc->pdev, mrioc->op_reply_q_offset);
1987 }
1988 
1989 /**
1990  * mpi3mr_scan_start - Scan start callback handler
1991  * @shost: SCSI host reference
1992  *
1993  * Issue port enable request asynchronously.
1994  *
1995  * Return: Nothing
1996  */
1997 static void mpi3mr_scan_start(struct Scsi_Host *shost)
1998 {
1999 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
2000 
2001 	mrioc->scan_started = 1;
2002 	ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
2003 	if (mpi3mr_issue_port_enable(mrioc, 1)) {
2004 		ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
2005 		mrioc->scan_started = 0;
2006 		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
2007 	}
2008 }
2009 
2010 /**
2011  * mpi3mr_scan_finished - Scan finished callback handler
2012  * @shost: SCSI host reference
2013  * @time: Jiffies from the scan start
2014  *
2015  * Checks whether the port enable has completed, timed out or
2016  * failed, and sets the scan status accordingly after taking any
2017  * required recovery action.
2018  *
2019  * Return: 1 on scan finished or timed out, 0 for in progress
2020  */
2021 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
2022 	unsigned long time)
2023 {
2024 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
2025 	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
2026 
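	/*
	 * On a port enable timeout, give up on the pending init command and
	 * recover the controller with a soft reset.
	 */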
2027 	if (time >= (pe_timeout * HZ)) {
2028 		mrioc->init_cmds.is_waiting = 0;
2029 		mrioc->init_cmds.callback = NULL;
2030 		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
2031 		ioc_err(mrioc, "%s :port enable request timed out\n", __func__);
2032 		mrioc->is_driver_loading = 0;
2033 		mpi3mr_soft_reset_handler(mrioc,
2034 		    MPI3MR_RESET_FROM_PE_TIMEOUT, 1);
2035 	}
2036 
2037 	if (mrioc->scan_failed) {
2038 		ioc_err(mrioc,
2039 		    "%s :port enable failed with (ioc_status=0x%08x)\n",
2040 		    __func__, mrioc->scan_failed);
2041 		mrioc->is_driver_loading = 0;
2042 		mrioc->stop_drv_processing = 1;
2043 		return 1;
2044 	}
2045 
2046 	if (mrioc->scan_started)
2047 		return 0;
2048 	ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__);
2049 	mpi3mr_start_watchdog(mrioc);
2050 	mrioc->is_driver_loading = 0;
2051 
2052 	return 1;
2053 }
2054 
2055 /**
2056  * mpi3mr_slave_destroy - Slave destroy callback handler
2057  * @sdev: SCSI device reference
2058  *
2059  * Clean up and free the per-device (LUN) private data.
2060  *
2061  * Return: Nothing.
2062  */
2063 static void mpi3mr_slave_destroy(struct scsi_device *sdev)
2064 {
2065 	struct Scsi_Host *shost;
2066 	struct mpi3mr_ioc *mrioc;
2067 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
2068 	struct mpi3mr_tgt_dev *tgt_dev;
2069 	unsigned long flags;
2070 	struct scsi_target *starget;
2071 
2072 	if (!sdev->hostdata)
2073 		return;
2074 
2075 	starget = scsi_target(sdev);
2076 	shost = dev_to_shost(&starget->dev);
2077 	mrioc = shost_priv(shost);
2078 	scsi_tgt_priv_data = starget->hostdata;
2079 
2080 	scsi_tgt_priv_data->num_luns--;
2081 
2082 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2083 	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
2084 	if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
2085 		tgt_dev->starget = NULL;
2086 	if (tgt_dev)
2087 		mpi3mr_tgtdev_put(tgt_dev);
2088 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2089 
2090 	kfree(sdev->hostdata);
2091 	sdev->hostdata = NULL;
2092 }
2093 
2094 /**
2095  * mpi3mr_target_destroy - Target destroy callback handler
2096  * @starget: SCSI target reference
2097  *
2098  * Clean up and free the per-target private data.
2099  *
2100  * Return: Nothing.
2101  */
2102 static void mpi3mr_target_destroy(struct scsi_target *starget)
2103 {
2104 	struct Scsi_Host *shost;
2105 	struct mpi3mr_ioc *mrioc;
2106 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
2107 	struct mpi3mr_tgt_dev *tgt_dev;
2108 	unsigned long flags;
2109 
2110 	if (!starget->hostdata)
2111 		return;
2112 
2113 	shost = dev_to_shost(&starget->dev);
2114 	mrioc = shost_priv(shost);
2115 	scsi_tgt_priv_data = starget->hostdata;
2116 
2117 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2118 	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
2119 	if (tgt_dev && (tgt_dev->starget == starget) &&
2120 	    (tgt_dev->perst_id == starget->id))
2121 		tgt_dev->starget = NULL;
2122 	if (tgt_dev) {
2123 		scsi_tgt_priv_data->tgt_dev = NULL;
2124 		scsi_tgt_priv_data->perst_id = 0;
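		/*
		 * Drop both the reference taken by
		 * __mpi3mr_get_tgtdev_from_tgtpriv() above and the reference
		 * held through scsi_tgt_priv_data->tgt_dev.
		 */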
2125 		mpi3mr_tgtdev_put(tgt_dev);
2126 		mpi3mr_tgtdev_put(tgt_dev);
2127 	}
2128 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2129 
2130 	kfree(starget->hostdata);
2131 	starget->hostdata = NULL;
2132 }
2133 
2134 /**
2135  * mpi3mr_slave_configure - Slave configure callback handler
2136  * @sdev: SCSI device reference
2137  *
2138  * Configure queue depth, max hardware sectors and virt boundary
2139  * as required.
2140  *
2141  * Return: 0 on success, -ENXIO if the target device is not found.
2142  */
2143 static int mpi3mr_slave_configure(struct scsi_device *sdev)
2144 {
2145 	struct scsi_target *starget;
2146 	struct Scsi_Host *shost;
2147 	struct mpi3mr_ioc *mrioc;
2148 	struct mpi3mr_tgt_dev *tgt_dev;
2149 	unsigned long flags;
2150 	int retval = 0;
2151 
2152 	starget = scsi_target(sdev);
2153 	shost = dev_to_shost(&starget->dev);
2154 	mrioc = shost_priv(shost);
2155 
2156 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2157 	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
2158 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2159 	if (!tgt_dev)
2160 		return -ENXIO;
2161 
2162 	switch (tgt_dev->dev_type) {
2163 	case MPI3_DEVICE_DEVFORM_PCIE:
2164 		/* The block layer hw sector size = 512 */
2165 		blk_queue_max_hw_sectors(sdev->request_queue,
2166 		    tgt_dev->dev_spec.pcie_inf.mdts / 512);
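		/*
		 * Align scatter/gather segments to the device page size
		 * reported by the controller.
		 */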
2167 		blk_queue_virt_boundary(sdev->request_queue,
2168 		    ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
2169 		break;
2170 	default:
2171 		break;
2172 	}
2173 
2174 	mpi3mr_tgtdev_put(tgt_dev);
2175 
2176 	return retval;
2177 }
2178 
2179 /**
2180  * mpi3mr_slave_alloc - Slave alloc callback handler
2181  * @sdev: SCSI device reference
2182  *
2183  * Allocate per-device (LUN) private data and initialize it.
2184  *
2185  * Return: 0 on success, -ENOMEM on memory allocation failure, -ENXIO if the target device is not found.
2186  */
2187 static int mpi3mr_slave_alloc(struct scsi_device *sdev)
2188 {
2189 	struct Scsi_Host *shost;
2190 	struct mpi3mr_ioc *mrioc;
2191 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
2192 	struct mpi3mr_tgt_dev *tgt_dev;
2193 	struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
2194 	unsigned long flags;
2195 	struct scsi_target *starget;
2196 	int retval = 0;
2197 
2198 	starget = scsi_target(sdev);
2199 	shost = dev_to_shost(&starget->dev);
2200 	mrioc = shost_priv(shost);
2201 	scsi_tgt_priv_data = starget->hostdata;
2202 
2203 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2204 	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
2205 
2206 	if (tgt_dev) {
2207 		if (tgt_dev->starget == NULL)
2208 			tgt_dev->starget = starget;
2209 		mpi3mr_tgtdev_put(tgt_dev);
2210 		retval = 0;
2211 	} else {
2212 		spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2213 		return -ENXIO;
2214 	}
2215 
2216 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2217 
2218 	scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
2219 	if (!scsi_dev_priv_data)
2220 		return -ENOMEM;
2221 
2222 	scsi_dev_priv_data->lun_id = sdev->lun;
2223 	scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
2224 	sdev->hostdata = scsi_dev_priv_data;
2225 
2226 	scsi_tgt_priv_data->num_luns++;
2227 
2228 	return retval;
2229 }
2230 
2231 /**
2232  * mpi3mr_target_alloc - Target alloc callback handler
2233  * @starget: SCSI target reference
2234  *
2235  * Allocate per target private data and initialize it.
2236  *
2237  * Return: 0 on success, -ENOMEM on memory allocation failure, -ENXIO if the target device is not present or is hidden.
2238  */
2239 static int mpi3mr_target_alloc(struct scsi_target *starget)
2240 {
2241 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
2242 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
2243 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
2244 	struct mpi3mr_tgt_dev *tgt_dev;
2245 	unsigned long flags;
2246 	int retval = 0;
2247 
2248 	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
2249 	if (!scsi_tgt_priv_data)
2250 		return -ENOMEM;
2251 
2252 	starget->hostdata = scsi_tgt_priv_data;
2253 	scsi_tgt_priv_data->starget = starget;
2254 	scsi_tgt_priv_data->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2255 
2256 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
2257 	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
2258 	if (tgt_dev && !tgt_dev->is_hidden) {
2259 		starget->hostdata = scsi_tgt_priv_data;
2260 		scsi_tgt_priv_data->starget = starget;
2261 		scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
2262 		scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
2263 		scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
2264 		scsi_tgt_priv_data->tgt_dev = tgt_dev;
2265 		tgt_dev->starget = starget;
2266 		atomic_set(&scsi_tgt_priv_data->block_io, 0);
2267 		retval = 0;
2268 	} else {
2269 		kfree(scsi_tgt_priv_data);
2270 		retval = -ENXIO;
2271 	}
2272 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
2273 
2274 	return retval;
2275 }
2276 
2277 /**
2278  * mpi3mr_qcmd - I/O request dispatcher
2279  * @shost: SCSI Host reference
2280  * @scmd: SCSI Command reference
2281  *
2282  * Issues the SCSI Command as an MPI3 request.
2283  *
2284  * Return: 0 on successful queueing of the request or if the
2285  *         request is completed with failure.
2286  *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
2287  *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
2288  */
2289 static int mpi3mr_qcmd(struct Scsi_Host *shost,
2290 	struct scsi_cmnd *scmd)
2291 {
2292 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
2293 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
2294 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
2295 	struct scmd_priv *scmd_priv_data = NULL;
2296 	struct mpi3_scsi_io_request *scsiio_req = NULL;
2297 	struct op_req_qinfo *op_req_q = NULL;
2298 	int retval = 0;
2299 	u16 dev_handle;
2300 	u16 host_tag;
2301 	u32 scsiio_flags = 0;
2302 	struct request *rq = scmd->request;
2303 	int iprio_class;
2304 
2305 	sdev_priv_data = scmd->device->hostdata;
2306 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
2307 		scmd->result = DID_NO_CONNECT << 16;
2308 		scmd->scsi_done(scmd);
2309 		goto out;
2310 	}
2311 
2312 	if (mrioc->stop_drv_processing) {
2313 		scmd->result = DID_NO_CONNECT << 16;
2314 		scmd->scsi_done(scmd);
2315 		goto out;
2316 	}
2317 
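	/* Ask the midlayer to retry the command while a reset is in progress */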
2318 	if (mrioc->reset_in_progress) {
2319 		retval = SCSI_MLQUEUE_HOST_BUSY;
2320 		goto out;
2321 	}
2322 
2323 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
2324 
2325 	dev_handle = stgt_priv_data->dev_handle;
2326 	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
2327 		scmd->result = DID_NO_CONNECT << 16;
2328 		scmd->scsi_done(scmd);
2329 		goto out;
2330 	}
2331 	if (stgt_priv_data->dev_removed) {
2332 		scmd->result = DID_NO_CONNECT << 16;
2333 		scmd->scsi_done(scmd);
2334 		goto out;
2335 	}
2336 
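	/*
	 * The driver has temporarily blocked I/O to this device; complete the
	 * command with DID_NO_CONNECT if the driver is stopping, otherwise
	 * ask the midlayer to retry.
	 */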
2337 	if (atomic_read(&stgt_priv_data->block_io)) {
2338 		if (mrioc->stop_drv_processing) {
2339 			scmd->result = DID_NO_CONNECT << 16;
2340 			scmd->scsi_done(scmd);
2341 			goto out;
2342 		}
2343 		retval = SCSI_MLQUEUE_DEVICE_BUSY;
2344 		goto out;
2345 	}
2346 
2347 	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
2348 	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
2349 		scmd->result = DID_ERROR << 16;
2350 		scmd->scsi_done(scmd);
2351 		goto out;
2352 	}
2353 
2354 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2355 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
2356 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
2357 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
2358 	else
2359 		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;
2360 
2361 	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;
2362 
2363 	if (sdev_priv_data->ncq_prio_enable) {
2364 		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
2365 		if (iprio_class == IOPRIO_CLASS_RT)
2366 			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
2367 	}
2368 
2369 	if (scmd->cmd_len > 16)
2370 		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;
2371 
2372 	scmd_priv_data = scsi_cmd_priv(scmd);
2373 	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
2374 	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
2375 	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
2376 	scsiio_req->host_tag = cpu_to_le16(host_tag);
2377 
2378 	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
2379 	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
2380 	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
2381 	scsiio_req->flags = cpu_to_le32(scsiio_flags);
2382 	int_to_scsilun(sdev_priv_data->lun_id,
2383 	    (struct scsi_lun *)scsiio_req->lun);
2384 
2385 	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
2386 		mpi3mr_clear_scmd_priv(mrioc, scmd);
2387 		retval = SCSI_MLQUEUE_HOST_BUSY;
2388 		goto out;
2389 	}
2390 	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
2391 
2392 	if (mpi3mr_op_request_post(mrioc, op_req_q,
2393 	    scmd_priv_data->mpi3mr_scsiio_req)) {
2394 		mpi3mr_clear_scmd_priv(mrioc, scmd);
2395 		retval = SCSI_MLQUEUE_HOST_BUSY;
2396 		goto out;
2397 	}
2398 
2399 out:
2400 	return retval;
2401 }
2402 
2403 static struct scsi_host_template mpi3mr_driver_template = {
2404 	.module				= THIS_MODULE,
2405 	.name				= "MPI3 Storage Controller",
2406 	.proc_name			= MPI3MR_DRIVER_NAME,
2407 	.queuecommand			= mpi3mr_qcmd,
2408 	.target_alloc			= mpi3mr_target_alloc,
2409 	.slave_alloc			= mpi3mr_slave_alloc,
2410 	.slave_configure		= mpi3mr_slave_configure,
2411 	.target_destroy			= mpi3mr_target_destroy,
2412 	.slave_destroy			= mpi3mr_slave_destroy,
2413 	.scan_finished			= mpi3mr_scan_finished,
2414 	.scan_start			= mpi3mr_scan_start,
2415 	.map_queues			= mpi3mr_map_queues,
2416 	.no_write_same			= 1,
2417 	.can_queue			= 1,
2418 	.this_id			= -1,
2419 	.sg_tablesize			= MPI3MR_SG_DEPTH,
2420 	/* max xfer supported is 1M (2K in 512 byte sized sectors)
2421 	 */
2422 	.max_sectors			= 2048,
2423 	.cmd_per_lun			= MPI3MR_MAX_CMDS_LUN,
2424 	.track_queue_depth		= 1,
2425 	.cmd_size			= sizeof(struct scmd_priv),
2426 };
2427 
2428 /**
2429  * mpi3mr_init_drv_cmd - Initialize internal command tracker
2430  * @cmdptr: Internal command tracker
2431  * @host_tag: Host tag used for the specific command
2432  *
2433  * Initialize the internal command tracker structure with
2434  * specified host tag.
2435  *
2436  * Return: Nothing.
2437  */
2438 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
2439 	u16 host_tag)
2440 {
2441 	mutex_init(&cmdptr->mutex);
2442 	cmdptr->reply = NULL;
2443 	cmdptr->state = MPI3MR_CMD_NOTUSED;
2444 	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
2445 	cmdptr->host_tag = host_tag;
2446 }
2447 
2448 /**
2449  * mpi3mr_probe - PCI probe callback
2450  * @pdev: PCI device instance
2451  * @id: PCI device ID details
2452  *
2453  * Controller initialization routine. Checks the security status
2454  * of the controller and, if it is invalid or tampered with, returns
2455  * from the probe without initializing the controller. Otherwise,
2456  * allocates the per-adapter instance through shost_priv(),
2457  * initializes controller-specific data structures, initializes
2458  * the controller hardware and adds the shost to the SCSI subsystem.
2459  *
2460  * Return: 0 on success, non-zero on failure.
2461  */
2462 
2463 static int
2464 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2465 {
2466 	struct mpi3mr_ioc *mrioc = NULL;
2467 	struct Scsi_Host *shost = NULL;
2468 	int retval = 0, i;
2469 
2470 	shost = scsi_host_alloc(&mpi3mr_driver_template,
2471 	    sizeof(struct mpi3mr_ioc));
2472 	if (!shost) {
2473 		retval = -ENODEV;
2474 		goto shost_failed;
2475 	}
2476 
2477 	mrioc = shost_priv(shost);
2478 	mrioc->id = mrioc_ids++;
2479 	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
2480 	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
2481 	INIT_LIST_HEAD(&mrioc->list);
2482 	spin_lock(&mrioc_list_lock);
2483 	list_add_tail(&mrioc->list, &mrioc_list);
2484 	spin_unlock(&mrioc_list_lock);
2485 
2486 	spin_lock_init(&mrioc->admin_req_lock);
2487 	spin_lock_init(&mrioc->reply_free_queue_lock);
2488 	spin_lock_init(&mrioc->sbq_lock);
2489 	spin_lock_init(&mrioc->fwevt_lock);
2490 	spin_lock_init(&mrioc->tgtdev_lock);
2491 	spin_lock_init(&mrioc->watchdog_lock);
2492 	spin_lock_init(&mrioc->chain_buf_lock);
2493 
2494 	INIT_LIST_HEAD(&mrioc->fwevt_list);
2495 	INIT_LIST_HEAD(&mrioc->tgtdev_list);
2496 	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
2497 
2498 	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
2499 
2500 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
2501 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
2502 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
2503 
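	/* Segmented operational queues are enabled on non-zero PCI revisions */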
2504 	if (pdev->revision)
2505 		mrioc->enable_segqueue = true;
2506 
2507 	mrioc->logging_level = logging_level;
2508 	mrioc->shost = shost;
2509 	mrioc->pdev = pdev;
2510 
2511 	/* init shost parameters */
2512 	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
2513 	shost->max_lun = -1;
2514 	shost->unique_id = mrioc->id;
2515 
2516 	shost->max_channel = 1;
2517 	shost->max_id = 0xFFFFFFFF;
2518 
2519 	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
2520 	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
2521 	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
2522 	    mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
2523 	if (!mrioc->fwevt_worker_thread) {
2524 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
2525 		    __FILE__, __LINE__, __func__);
2526 		retval = -ENODEV;
2527 		goto out_fwevtthread_failed;
2528 	}
2529 
2530 	mrioc->is_driver_loading = 1;
2531 	if (mpi3mr_init_ioc(mrioc)) {
2532 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
2533 		    __FILE__, __LINE__, __func__);
2534 		retval = -ENODEV;
2535 		goto out_iocinit_failed;
2536 	}
2537 
2538 	shost->nr_hw_queues = mrioc->num_op_reply_q;
2539 	shost->can_queue = mrioc->max_host_ios;
2540 	shost->sg_tablesize = MPI3MR_SG_DEPTH;
2541 	shost->max_id = mrioc->facts.max_perids;
2542 
2543 	retval = scsi_add_host(shost, &pdev->dev);
2544 	if (retval) {
2545 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
2546 		    __FILE__, __LINE__, __func__);
2547 		goto addhost_failed;
2548 	}
2549 
2550 	scsi_scan_host(shost);
2551 	return retval;
2552 
2553 addhost_failed:
2554 	mpi3mr_cleanup_ioc(mrioc);
2555 out_iocinit_failed:
2556 	destroy_workqueue(mrioc->fwevt_worker_thread);
2557 out_fwevtthread_failed:
2558 	spin_lock(&mrioc_list_lock);
2559 	list_del(&mrioc->list);
2560 	spin_unlock(&mrioc_list_lock);
2561 	scsi_host_put(shost);
2562 shost_failed:
2563 	return retval;
2564 }
2565 
2566 /**
2567  * mpi3mr_remove - PCI remove callback
2568  * @pdev: PCI device instance
2569  *
2570  * Free up all memory and resources associated with the
2571  * controller and target devices, and unregister the shost.
2572  *
2573  * Return: Nothing.
2574  */
2575 static void mpi3mr_remove(struct pci_dev *pdev)
2576 {
2577 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
2578 	struct mpi3mr_ioc *mrioc;
2579 	struct workqueue_struct	*wq;
2580 	unsigned long flags;
2581 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
2582 
2583 	mrioc = shost_priv(shost);
2584 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
2585 		ssleep(1);
2586 
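	/*
	 * Stop accepting new I/O and tear down the firmware event worker
	 * before removing the SCSI host.
	 */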
2587 	mrioc->stop_drv_processing = 1;
2588 	mpi3mr_cleanup_fwevt_list(mrioc);
2589 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
2590 	wq = mrioc->fwevt_worker_thread;
2591 	mrioc->fwevt_worker_thread = NULL;
2592 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
2593 	if (wq)
2594 		destroy_workqueue(wq);
2595 	scsi_remove_host(shost);
2596 
2597 	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
2598 	    list) {
2599 		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
2600 		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
2601 		mpi3mr_tgtdev_put(tgtdev);
2602 	}
2603 	mpi3mr_cleanup_ioc(mrioc);
2604 
2605 	spin_lock(&mrioc_list_lock);
2606 	list_del(&mrioc->list);
2607 	spin_unlock(&mrioc_list_lock);
2608 
2609 	scsi_host_put(shost);
2610 }
2611 
2612 /**
2613  * mpi3mr_shutdown - PCI shutdown callback
2614  * @pdev: PCI device instance
2615  *
2616  * Free up all memory and resources associated with the
2617  * controller.
2618  *
2619  * Return: Nothing.
2620  */
2621 static void mpi3mr_shutdown(struct pci_dev *pdev)
2622 {
2623 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
2624 	struct mpi3mr_ioc *mrioc;
2625 	struct workqueue_struct	*wq;
2626 	unsigned long flags;
2627 
2628 	if (!shost)
2629 		return;
2630 
2631 	mrioc = shost_priv(shost);
2632 	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
2633 		ssleep(1);
2634 
2635 	mrioc->stop_drv_processing = 1;
2636 	mpi3mr_cleanup_fwevt_list(mrioc);
2637 	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
2638 	wq = mrioc->fwevt_worker_thread;
2639 	mrioc->fwevt_worker_thread = NULL;
2640 	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
2641 	if (wq)
2642 		destroy_workqueue(wq);
2643 	mpi3mr_cleanup_ioc(mrioc);
2644 }
2645 
2646 static const struct pci_device_id mpi3mr_pci_id_table[] = {
2647 	{
2648 		PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5,
2649 		    PCI_ANY_ID, PCI_ANY_ID)
2650 	},
2651 	{ 0 }
2652 };
2653 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
2654 
2655 static struct pci_driver mpi3mr_pci_driver = {
2656 	.name = MPI3MR_DRIVER_NAME,
2657 	.id_table = mpi3mr_pci_id_table,
2658 	.probe = mpi3mr_probe,
2659 	.remove = mpi3mr_remove,
2660 	.shutdown = mpi3mr_shutdown,
2661 };
2662 
2663 static int __init mpi3mr_init(void)
2664 {
2665 	int ret_val;
2666 
2667 	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
2668 	    MPI3MR_DRIVER_VERSION);
2669 
2670 	ret_val = pci_register_driver(&mpi3mr_pci_driver);
2671 
2672 	return ret_val;
2673 }
2674 
2675 static void __exit mpi3mr_exit(void)
2676 {
2677 	if (warn_non_secure_ctlr)
2678 		pr_warn(
2679 		    "Unloading %s version %s while managing a non secure controller\n",
2680 		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
2681 	else
2682 		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
2683 		    MPI3MR_DRIVER_VERSION);
2684 
2685 	pci_unregister_driver(&mpi3mr_pci_driver);
2686 }
2687 
2688 module_init(mpi3mr_init);
2689 module_exit(mpi3mr_exit);
2690