1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <asm/unaligned.h>
58 
59 #include "mpt3sas_base.h"
60 
61 #define RAID_CHANNEL 1
62 
63 #define PCIE_CHANNEL 2
64 
65 /* forward proto's */
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 	struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
69 
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 	struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 	u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 	struct _pcie_device *pcie_device);
77 static void
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 
81 /* global parameters */
82 LIST_HEAD(mpt3sas_ioc_list);
83 /* global ioc lock for list operations */
84 DEFINE_SPINLOCK(gioc_lock);
85 
86 MODULE_AUTHOR(MPT3SAS_AUTHOR);
87 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90 MODULE_ALIAS("mpt2sas");
91 
92 /* local parameters */
93 static u8 scsi_io_cb_idx = -1;
94 static u8 tm_cb_idx = -1;
95 static u8 ctl_cb_idx = -1;
96 static u8 base_cb_idx = -1;
97 static u8 port_enable_cb_idx = -1;
98 static u8 transport_cb_idx = -1;
99 static u8 scsih_cb_idx = -1;
100 static u8 config_cb_idx = -1;
101 static int mpt2_ids;
102 static int mpt3_ids;
103 
104 static u8 tm_tr_cb_idx = -1 ;
105 static u8 tm_tr_volume_cb_idx = -1 ;
106 static u8 tm_sas_control_cb_idx = -1;
107 
108 /* command line options */
109 static u32 logging_level;
110 MODULE_PARM_DESC(logging_level,
111 	" bits for enabling additional logging info (default=0)");
112 
113 
114 static ushort max_sectors = 0xFFFF;
115 module_param(max_sectors, ushort, 0444);
116 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
117 
118 
119 static int missing_delay[2] = {-1, -1};
120 module_param_array(missing_delay, int, NULL, 0444);
121 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
122 
123 /* scsi mid-layer global parameter is max_report_luns, which is 511 */
124 #define MPT3SAS_MAX_LUN (16895)
125 static u64 max_lun = MPT3SAS_MAX_LUN;
126 module_param(max_lun, ullong, 0444);
127 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
128 
129 static ushort hbas_to_enumerate;
130 module_param(hbas_to_enumerate, ushort, 0444);
131 MODULE_PARM_DESC(hbas_to_enumerate,
132 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 		  1 - enumerates only SAS 2.0 generation HBAs\n \
134 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
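
/*
 * Example (hypothetical values): the command-line options above can be
 * combined at load time, e.g.:
 *
 *   modprobe mpt3sas logging_level=0x310 max_sectors=8192 hbas_to_enumerate=2
 */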
135 
136 /* diag_buffer_enable is bitwise
137  * bit 0 set = TRACE
138  * bit 1 set = SNAPSHOT
139  * bit 2 set = EXTENDED
140  *
141  * Any combination of these bits can be set.
142  */
143 static int diag_buffer_enable = -1;
144 module_param(diag_buffer_enable, int, 0444);
145 MODULE_PARM_DESC(diag_buffer_enable,
146 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
147 static int disable_discovery = -1;
148 module_param(disable_discovery, int, 0444);
149 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
150 
151 
152 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153 static int prot_mask = -1;
154 module_param(prot_mask, int, 0444);
155 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
156 
157 static bool enable_sdev_max_qd;
158 module_param(enable_sdev_max_qd, bool, 0444);
159 MODULE_PARM_DESC(enable_sdev_max_qd,
160 	"Enable sdev max qd as can_queue, def=disabled(0)");
161 
162 /* raid transport support */
163 static struct raid_template *mpt3sas_raid_template;
164 static struct raid_template *mpt2sas_raid_template;
165 
166 
167 /**
168  * struct sense_info - common structure for obtaining sense keys
169  * @skey: sense key
170  * @asc: additional sense code
171  * @ascq: additional sense code qualifier
172  */
173 struct sense_info {
174 	u8 skey;
175 	u8 asc;
176 	u8 ascq;
177 };
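
/*
 * Example: a check condition with skey 0x5 (ILLEGAL REQUEST), asc 0x24,
 * ascq 0x00 corresponds to "INVALID FIELD IN CDB".
 */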
178 
179 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
180 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
181 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
182 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
183 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
184 /**
185  * struct fw_event_work - firmware event struct
186  * @list: linked list member (stored on ioc->fw_event_list)
187  * @work: work object (ioc->fault_reset_work_q)
188  * @ioc: per adapter object
189  * @device_handle: device handle
190  * @VF_ID: virtual function id
191  * @VP_ID: virtual port id
192  * @ignore: flag meaning this event has been marked to ignore
193  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
194  * @refcount: kref for this event
195  * @event_data: reply event data payload follows
196  *
197  * This object is stored on ioc->fw_event_list.
198  */
199 struct fw_event_work {
200 	struct list_head	list;
201 	struct work_struct	work;
202 
203 	struct MPT3SAS_ADAPTER *ioc;
204 	u16			device_handle;
205 	u8			VF_ID;
206 	u8			VP_ID;
207 	u8			ignore;
208 	u16			event;
209 	struct kref		refcount;
210 	char			event_data[] __aligned(4);
211 };
212 
213 static void fw_event_work_free(struct kref *r)
214 {
215 	kfree(container_of(r, struct fw_event_work, refcount));
216 }
217 
218 static void fw_event_work_get(struct fw_event_work *fw_work)
219 {
220 	kref_get(&fw_work->refcount);
221 }
222 
223 static void fw_event_work_put(struct fw_event_work *fw_work)
224 {
225 	kref_put(&fw_work->refcount, fw_event_work_free);
226 }
227 
228 static struct fw_event_work *alloc_fw_event_work(int len)
229 {
230 	struct fw_event_work *fw_event;
231 
232 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
233 	if (!fw_event)
234 		return NULL;
235 
236 	kref_init(&fw_event->refcount);
237 	return fw_event;
238 }
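
/*
 * Lifetime of a fw_event_work object: alloc_fw_event_work() returns the
 * object with an initial reference taken by kref_init(); additional users
 * take a reference with fw_event_work_get(), and every reference is dropped
 * with fw_event_work_put(), which frees the object once the count reaches
 * zero.
 */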
239 
240 /**
241  * struct _scsi_io_transfer - scsi io transfer
242  * @handle: sas device handle (assigned by firmware)
243  * @is_raid: flag set for hidden raid components
244  * @dir: data direction (DMA_TO_DEVICE or DMA_FROM_DEVICE)
245  * @data_length: data transfer length
246  * @data_dma: dma pointer to data
247  * @sense: sense data
248  * @lun: lun number
249  * @cdb_length: cdb length
250  * @cdb: cdb contents
251  * @timeout: timeout for this command
252  * @VF_ID: virtual function id
253  * @VP_ID: virtual port id
254  * @valid_reply: flag set for reply message
255  * @sense_length: sense length
256  * @ioc_status: ioc status
257  * @scsi_state: scsi state
258  * @scsi_status: scsi status
259  * @log_info: log information
260  * @transfer_length: data length transferred when there is a reply message
261  *
262  * Used for sending internal scsi commands to devices within this module.
263  * Refer to _scsi_send_scsi_io().
264  */
265 struct _scsi_io_transfer {
266 	u16	handle;
267 	u8	is_raid;
268 	enum dma_data_direction dir;
269 	u32	data_length;
270 	dma_addr_t data_dma;
271 	u8	sense[SCSI_SENSE_BUFFERSIZE];
272 	u32	lun;
273 	u8	cdb_length;
274 	u8	cdb[32];
275 	u8	timeout;
276 	u8	VF_ID;
277 	u8	VP_ID;
278 	u8	valid_reply;
279   /* the following fields are only valid when 'valid_reply = 1' */
280 	u32	sense_length;
281 	u16	ioc_status;
282 	u8	scsi_state;
283 	u8	scsi_status;
284 	u32	log_info;
285 	u32	transfer_length;
286 };
287 
288 /**
289  * _scsih_set_debug_level - global setting of ioc->logging_level.
290  * @val: value supplied for the logging_level module parameter
291  * @kp: kernel parameter descriptor
292  *
293  * Note: The logging levels are defined in mpt3sas_debug.h.
294  */
295 static int
296 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
297 {
298 	int ret = param_set_int(val, kp);
299 	struct MPT3SAS_ADAPTER *ioc;
300 
301 	if (ret)
302 		return ret;
303 
304 	pr_info("setting logging_level(0x%08x)\n", logging_level);
305 	spin_lock(&gioc_lock);
306 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
307 		ioc->logging_level = logging_level;
308 	spin_unlock(&gioc_lock);
309 	return 0;
310 }
311 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
312 	&logging_level, 0644);
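
/*
 * Since logging_level is registered with 0644 permissions, it can also be
 * changed at runtime through sysfs, for example:
 *   echo 0x310 > /sys/module/mpt3sas/parameters/logging_level
 * (the value shown is only an example; the individual bits are defined in
 * mpt3sas_debug.h)
 */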
313 
314 /**
315  * _scsih_srch_boot_sas_address - search based on sas_address
316  * @sas_address: sas address
317  * @boot_device: boot device object from bios page 2
318  *
319  * Return: 1 when there's a match, 0 means no match.
320  */
321 static inline int
322 _scsih_srch_boot_sas_address(u64 sas_address,
323 	Mpi2BootDeviceSasWwid_t *boot_device)
324 {
325 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
326 }
327 
328 /**
329  * _scsih_srch_boot_device_name - search based on device name
330  * @device_name: device name specified in the IDENTIFY frame
331  * @boot_device: boot device object from bios page 2
332  *
333  * Return: 1 when there's a match, 0 means no match.
334  */
335 static inline int
336 _scsih_srch_boot_device_name(u64 device_name,
337 	Mpi2BootDeviceDeviceName_t *boot_device)
338 {
339 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
340 }
341 
342 /**
343  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
344  * @enclosure_logical_id: enclosure logical id
345  * @slot_number: slot number
346  * @boot_device: boot device object from bios page 2
347  *
348  * Return: 1 when there's a match, 0 means no match.
349  */
350 static inline int
351 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
352 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
353 {
354 	return (enclosure_logical_id == le64_to_cpu(boot_device->
355 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
356 	    SlotNumber)) ? 1 : 0;
357 }
358 
359 /**
360  * _scsih_is_boot_device - search for matching boot device.
361  * @sas_address: sas address
362  * @device_name: device name specified in the IDENTIFY frame
363  * @enclosure_logical_id: enclosure logical id
364  * @slot: slot number
365  * @form: specifies boot device form
366  * @boot_device: boot device object from bios page 2
367  *
368  * Return: 1 when there's a match, 0 means no match.
369  */
370 static int
371 _scsih_is_boot_device(u64 sas_address, u64 device_name,
372 	u64 enclosure_logical_id, u16 slot, u8 form,
373 	Mpi2BiosPage2BootDevice_t *boot_device)
374 {
375 	int rc = 0;
376 
377 	switch (form) {
378 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
379 		if (!sas_address)
380 			break;
381 		rc = _scsih_srch_boot_sas_address(
382 		    sas_address, &boot_device->SasWwid);
383 		break;
384 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
385 		if (!enclosure_logical_id)
386 			break;
387 		rc = _scsih_srch_boot_encl_slot(
388 		    enclosure_logical_id,
389 		    slot, &boot_device->EnclosureSlot);
390 		break;
391 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
392 		if (!device_name)
393 			break;
394 		rc = _scsih_srch_boot_device_name(
395 		    device_name, &boot_device->DeviceName);
396 		break;
397 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
398 		break;
399 	}
400 
401 	return rc;
402 }
403 
404 /**
405  * _scsih_get_sas_address - get the sas_address for the given device handle
406  * @ioc: per adapter object
407  * @handle: device handle
408  * @sas_address: sas address (returned)
409  *
410  * Return: 0 success, non-zero when failure
411  */
412 static int
413 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
414 	u64 *sas_address)
415 {
416 	Mpi2SasDevicePage0_t sas_device_pg0;
417 	Mpi2ConfigReply_t mpi_reply;
418 	u32 ioc_status;
419 
420 	*sas_address = 0;
421 
422 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
423 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
424 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
425 			__FILE__, __LINE__, __func__);
426 		return -ENXIO;
427 	}
428 
429 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
430 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
431 		/* For a direct-attached handle (handle <= num HBA phys),
432 		 * report the HBA SAS address, except for the virtual SES
433 		 * (SEP), which reports its own SAS address. */
434 		if ((handle <= ioc->sas_hba.num_phys) &&
435 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
436 		   MPI2_SAS_DEVICE_INFO_SEP)))
437 			*sas_address = ioc->sas_hba.sas_address;
438 		else
439 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
440 		return 0;
441 	}
442 
443 	/* we hit this because the given parent handle doesn't exist */
444 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
445 		return -ENXIO;
446 
447 	/* else error case */
448 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
449 		handle, ioc_status, __FILE__, __LINE__, __func__);
450 	return -EIO;
451 }
452 
453 /**
454  * _scsih_determine_boot_device - determine boot device.
455  * @ioc: per adapter object
456  * @device: sas_device or pcie_device object
457  * @channel: SAS or PCIe channel
458  *
459  * Determines whether this device should be the first device reported to
460  * scsi-ml or the sas transport; the purpose is persistent boot device support.
461  * There are primary, alternate, and current entries in bios page 2. The order
462  * of priority is primary, alternate, then current. This routine saves
463  * the corresponding device object.
464  * The saved data is used later in _scsih_probe_boot_devices().
465  */
466 static void
467 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
468 	u32 channel)
469 {
470 	struct _sas_device *sas_device;
471 	struct _pcie_device *pcie_device;
472 	struct _raid_device *raid_device;
473 	u64 sas_address;
474 	u64 device_name;
475 	u64 enclosure_logical_id;
476 	u16 slot;
477 
478 	 /* only process this function when driver loads */
479 	if (!ioc->is_driver_loading)
480 		return;
481 
482 	 /* no BIOS, return immediately */
483 	if (!ioc->bios_pg3.BiosVersion)
484 		return;
485 
486 	if (channel == RAID_CHANNEL) {
487 		raid_device = device;
488 		sas_address = raid_device->wwid;
489 		device_name = 0;
490 		enclosure_logical_id = 0;
491 		slot = 0;
492 	} else if (channel == PCIE_CHANNEL) {
493 		pcie_device = device;
494 		sas_address = pcie_device->wwid;
495 		device_name = 0;
496 		enclosure_logical_id = 0;
497 		slot = 0;
498 	} else {
499 		sas_device = device;
500 		sas_address = sas_device->sas_address;
501 		device_name = sas_device->device_name;
502 		enclosure_logical_id = sas_device->enclosure_logical_id;
503 		slot = sas_device->slot;
504 	}
505 
506 	if (!ioc->req_boot_device.device) {
507 		if (_scsih_is_boot_device(sas_address, device_name,
508 		    enclosure_logical_id, slot,
509 		    (ioc->bios_pg2.ReqBootDeviceForm &
510 		    MPI2_BIOSPAGE2_FORM_MASK),
511 		    &ioc->bios_pg2.RequestedBootDevice)) {
512 			dinitprintk(ioc,
513 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
514 					     __func__, (u64)sas_address));
515 			ioc->req_boot_device.device = device;
516 			ioc->req_boot_device.channel = channel;
517 		}
518 	}
519 
520 	if (!ioc->req_alt_boot_device.device) {
521 		if (_scsih_is_boot_device(sas_address, device_name,
522 		    enclosure_logical_id, slot,
523 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
524 		    MPI2_BIOSPAGE2_FORM_MASK),
525 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
526 			dinitprintk(ioc,
527 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
528 					     __func__, (u64)sas_address));
529 			ioc->req_alt_boot_device.device = device;
530 			ioc->req_alt_boot_device.channel = channel;
531 		}
532 	}
533 
534 	if (!ioc->current_boot_device.device) {
535 		if (_scsih_is_boot_device(sas_address, device_name,
536 		    enclosure_logical_id, slot,
537 		    (ioc->bios_pg2.CurrentBootDeviceForm &
538 		    MPI2_BIOSPAGE2_FORM_MASK),
539 		    &ioc->bios_pg2.CurrentBootDevice)) {
540 			dinitprintk(ioc,
541 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
542 					     __func__, (u64)sas_address));
543 			ioc->current_boot_device.device = device;
544 			ioc->current_boot_device.channel = channel;
545 		}
546 	}
547 }
548 
549 static struct _sas_device *
550 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
551 		struct MPT3SAS_TARGET *tgt_priv)
552 {
553 	struct _sas_device *ret;
554 
555 	assert_spin_locked(&ioc->sas_device_lock);
556 
557 	ret = tgt_priv->sas_dev;
558 	if (ret)
559 		sas_device_get(ret);
560 
561 	return ret;
562 }
563 
564 static struct _sas_device *
565 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
566 		struct MPT3SAS_TARGET *tgt_priv)
567 {
568 	struct _sas_device *ret;
569 	unsigned long flags;
570 
571 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
572 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
573 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
574 
575 	return ret;
576 }
577 
578 static struct _pcie_device *
579 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
580 	struct MPT3SAS_TARGET *tgt_priv)
581 {
582 	struct _pcie_device *ret;
583 
584 	assert_spin_locked(&ioc->pcie_device_lock);
585 
586 	ret = tgt_priv->pcie_dev;
587 	if (ret)
588 		pcie_device_get(ret);
589 
590 	return ret;
591 }
592 
593 /**
594  * mpt3sas_get_pdev_from_target - pcie device search
595  * @ioc: per adapter object
596  * @tgt_priv: starget private object
597  *
598  * Context: This function will acquire ioc->pcie_device_lock and will release
599  * it before returning the pcie_device object.
600  *
601  * This searches for the pcie_device from the target, then returns it.
602  */
603 static struct _pcie_device *
604 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
605 	struct MPT3SAS_TARGET *tgt_priv)
606 {
607 	struct _pcie_device *ret;
608 	unsigned long flags;
609 
610 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
611 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
612 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
613 
614 	return ret;
615 }
616 
617 struct _sas_device *
618 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
619 					u64 sas_address)
620 {
621 	struct _sas_device *sas_device;
622 
623 	assert_spin_locked(&ioc->sas_device_lock);
624 
625 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
626 		if (sas_device->sas_address == sas_address)
627 			goto found_device;
628 
629 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
630 		if (sas_device->sas_address == sas_address)
631 			goto found_device;
632 
633 	return NULL;
634 
635 found_device:
636 	sas_device_get(sas_device);
637 	return sas_device;
638 }
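
/*
 * Note: the __mpt3sas_get_sdev_* helpers must be called with
 * ioc->sas_device_lock held (enforced by assert_spin_locked()) and return a
 * sas_device with an extra reference taken; callers release it with
 * sas_device_put().
 */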
639 
640 /**
641  * mpt3sas_get_sdev_by_addr - sas device search
642  * @ioc: per adapter object
643  * @sas_address: sas address
644  * Context: This function acquires and releases ioc->sas_device_lock itself.
645  *
646  * This searches for sas_device based on sas_address, then returns a
647  * referenced sas_device object (release with sas_device_put()).
648  */
649 struct _sas_device *
650 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
651 	u64 sas_address)
652 {
653 	struct _sas_device *sas_device;
654 	unsigned long flags;
655 
656 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
657 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
658 			sas_address);
659 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
660 
661 	return sas_device;
662 }
663 
664 static struct _sas_device *
665 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
666 {
667 	struct _sas_device *sas_device;
668 
669 	assert_spin_locked(&ioc->sas_device_lock);
670 
671 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
672 		if (sas_device->handle == handle)
673 			goto found_device;
674 
675 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
676 		if (sas_device->handle == handle)
677 			goto found_device;
678 
679 	return NULL;
680 
681 found_device:
682 	sas_device_get(sas_device);
683 	return sas_device;
684 }
685 
686 /**
687  * mpt3sas_get_sdev_by_handle - sas device search
688  * @ioc: per adapter object
689  * @handle: sas device handle (assigned by firmware)
690  * Context: This function acquires and releases ioc->sas_device_lock itself.
691  *
692  * This searches for sas_device based on handle, then returns a referenced
693  * sas_device object (release with sas_device_put()).
694  */
695 struct _sas_device *
696 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
697 {
698 	struct _sas_device *sas_device;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
702 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
703 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
704 
705 	return sas_device;
706 }
707 
708 /**
709  * _scsih_display_enclosure_chassis_info - display device location info
710  * @ioc: per adapter object
711  * @sas_device: per sas device object
712  * @sdev: scsi device struct
713  * @starget: scsi target struct
714  */
715 static void
716 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
717 	struct _sas_device *sas_device, struct scsi_device *sdev,
718 	struct scsi_target *starget)
719 {
720 	if (sdev) {
721 		if (sas_device->enclosure_handle != 0)
722 			sdev_printk(KERN_INFO, sdev,
723 			    "enclosure logical id (0x%016llx), slot(%d) \n",
724 			    (unsigned long long)
725 			    sas_device->enclosure_logical_id,
726 			    sas_device->slot);
727 		if (sas_device->connector_name[0] != '\0')
728 			sdev_printk(KERN_INFO, sdev,
729 			    "enclosure level(0x%04x), connector name( %s)\n",
730 			    sas_device->enclosure_level,
731 			    sas_device->connector_name);
732 		if (sas_device->is_chassis_slot_valid)
733 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
734 			    sas_device->chassis_slot);
735 	} else if (starget) {
736 		if (sas_device->enclosure_handle != 0)
737 			starget_printk(KERN_INFO, starget,
738 			    "enclosure logical id(0x%016llx), slot(%d) \n",
739 			    (unsigned long long)
740 			    sas_device->enclosure_logical_id,
741 			    sas_device->slot);
742 		if (sas_device->connector_name[0] != '\0')
743 			starget_printk(KERN_INFO, starget,
744 			    "enclosure level(0x%04x), connector name( %s)\n",
745 			    sas_device->enclosure_level,
746 			    sas_device->connector_name);
747 		if (sas_device->is_chassis_slot_valid)
748 			starget_printk(KERN_INFO, starget,
749 			    "chassis slot(0x%04x)\n",
750 			    sas_device->chassis_slot);
751 	} else {
752 		if (sas_device->enclosure_handle != 0)
753 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
754 				 (u64)sas_device->enclosure_logical_id,
755 				 sas_device->slot);
756 		if (sas_device->connector_name[0] != '\0')
757 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
758 				 sas_device->enclosure_level,
759 				 sas_device->connector_name);
760 		if (sas_device->is_chassis_slot_valid)
761 			ioc_info(ioc, "chassis slot(0x%04x)\n",
762 				 sas_device->chassis_slot);
763 	}
764 }
765 
766 /**
767  * _scsih_sas_device_remove - remove sas_device from list.
768  * @ioc: per adapter object
769  * @sas_device: the sas_device object
770  * Context: This function will acquire ioc->sas_device_lock.
771  *
772  * If sas_device is on the list, remove it and decrement its reference count.
773  */
774 static void
775 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
776 	struct _sas_device *sas_device)
777 {
778 	unsigned long flags;
779 
780 	if (!sas_device)
781 		return;
782 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
783 		 sas_device->handle, (u64)sas_device->sas_address);
784 
785 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
786 
787 	/*
788 	 * The lock serializes access to the list, but we still need to verify
789 	 * that nobody removed the entry while we were waiting on the lock.
790 	 */
791 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
792 	if (!list_empty(&sas_device->list)) {
793 		list_del_init(&sas_device->list);
794 		sas_device_put(sas_device);
795 	}
796 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
797 }
798 
799 /**
800  * _scsih_device_remove_by_handle - removing device object by handle
801  * @ioc: per adapter object
802  * @handle: device handle
803  */
804 static void
805 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
806 {
807 	struct _sas_device *sas_device;
808 	unsigned long flags;
809 
810 	if (ioc->shost_recovery)
811 		return;
812 
813 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
814 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
815 	if (sas_device) {
816 		list_del_init(&sas_device->list);
817 		sas_device_put(sas_device);
818 	}
819 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
820 	if (sas_device) {
821 		_scsih_remove_device(ioc, sas_device);
822 		sas_device_put(sas_device);
823 	}
824 }
825 
826 /**
827  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
828  * @ioc: per adapter object
829  * @sas_address: device sas_address
830  */
831 void
832 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
833 	u64 sas_address)
834 {
835 	struct _sas_device *sas_device;
836 	unsigned long flags;
837 
838 	if (ioc->shost_recovery)
839 		return;
840 
841 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
842 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
843 	if (sas_device) {
844 		list_del_init(&sas_device->list);
845 		sas_device_put(sas_device);
846 	}
847 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
848 	if (sas_device) {
849 		_scsih_remove_device(ioc, sas_device);
850 		sas_device_put(sas_device);
851 	}
852 }
853 
854 /**
855  * _scsih_sas_device_add - insert sas_device to the list.
856  * @ioc: per adapter object
857  * @sas_device: the sas_device object
858  * Context: This function will acquire ioc->sas_device_lock.
859  *
860  * Adding new object to the ioc->sas_device_list.
861  */
862 static void
863 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
864 	struct _sas_device *sas_device)
865 {
866 	unsigned long flags;
867 
868 	dewtprintk(ioc,
869 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
870 			    __func__, sas_device->handle,
871 			    (u64)sas_device->sas_address));
872 
873 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
874 	    NULL, NULL));
875 
876 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
877 	sas_device_get(sas_device);
878 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
879 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
880 
881 	if (ioc->hide_drives) {
882 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
883 		return;
884 	}
885 
886 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
887 	     sas_device->sas_address_parent)) {
888 		_scsih_sas_device_remove(ioc, sas_device);
889 	} else if (!sas_device->starget) {
890 		/*
891 		 * When async scanning is enabled, it's not possible to remove
892 		 * devices while scanning is turned on due to an oops in
893 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
894 		 */
895 		if (!ioc->is_driver_loading) {
896 			mpt3sas_transport_port_remove(ioc,
897 			    sas_device->sas_address,
898 			    sas_device->sas_address_parent);
899 			_scsih_sas_device_remove(ioc, sas_device);
900 		}
901 	} else
902 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
903 }
904 
905 /**
906  * _scsih_sas_device_init_add - insert sas_device to the list.
907  * @ioc: per adapter object
908  * @sas_device: the sas_device object
909  * Context: This function will acquire ioc->sas_device_lock.
910  *
911  * Adding new object at driver load time to the ioc->sas_device_init_list.
912  */
913 static void
914 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
915 	struct _sas_device *sas_device)
916 {
917 	unsigned long flags;
918 
919 	dewtprintk(ioc,
920 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
921 			    __func__, sas_device->handle,
922 			    (u64)sas_device->sas_address));
923 
924 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
925 	    NULL, NULL));
926 
927 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
928 	sas_device_get(sas_device);
929 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
930 	_scsih_determine_boot_device(ioc, sas_device, 0);
931 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
932 }
933 
934 
935 static struct _pcie_device *
936 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
937 {
938 	struct _pcie_device *pcie_device;
939 
940 	assert_spin_locked(&ioc->pcie_device_lock);
941 
942 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
943 		if (pcie_device->wwid == wwid)
944 			goto found_device;
945 
946 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
947 		if (pcie_device->wwid == wwid)
948 			goto found_device;
949 
950 	return NULL;
951 
952 found_device:
953 	pcie_device_get(pcie_device);
954 	return pcie_device;
955 }
956 
957 
958 /**
959  * mpt3sas_get_pdev_by_wwid - pcie device search
960  * @ioc: per adapter object
961  * @wwid: wwid
962  *
963  * Context: This function will acquire ioc->pcie_device_lock and will release
964  * it before returning the pcie_device object.
965  *
966  * This searches for pcie_device based on wwid, then return pcie_device object.
967  */
968 static struct _pcie_device *
969 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
970 {
971 	struct _pcie_device *pcie_device;
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
975 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
976 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
977 
978 	return pcie_device;
979 }
980 
981 
982 static struct _pcie_device *
983 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
984 	int channel)
985 {
986 	struct _pcie_device *pcie_device;
987 
988 	assert_spin_locked(&ioc->pcie_device_lock);
989 
990 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
991 		if (pcie_device->id == id && pcie_device->channel == channel)
992 			goto found_device;
993 
994 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
995 		if (pcie_device->id == id && pcie_device->channel == channel)
996 			goto found_device;
997 
998 	return NULL;
999 
1000 found_device:
1001 	pcie_device_get(pcie_device);
1002 	return pcie_device;
1003 }
1004 
1005 static struct _pcie_device *
1006 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1007 {
1008 	struct _pcie_device *pcie_device;
1009 
1010 	assert_spin_locked(&ioc->pcie_device_lock);
1011 
1012 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1013 		if (pcie_device->handle == handle)
1014 			goto found_device;
1015 
1016 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1017 		if (pcie_device->handle == handle)
1018 			goto found_device;
1019 
1020 	return NULL;
1021 
1022 found_device:
1023 	pcie_device_get(pcie_device);
1024 	return pcie_device;
1025 }
1026 
1027 
1028 /**
1029  * mpt3sas_get_pdev_by_handle - pcie device search
1030  * @ioc: per adapter object
1031  * @handle: Firmware device handle
1032  *
1033  * Context: This function will acquire ioc->pcie_device_lock and will release
1034  * it before returning the pcie_device object.
1035  *
1036  * This searches for pcie_device based on handle, then return pcie_device
1037  * object.
1038  */
1039 struct _pcie_device *
1040 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1041 {
1042 	struct _pcie_device *pcie_device;
1043 	unsigned long flags;
1044 
1045 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1046 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1047 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1048 
1049 	return pcie_device;
1050 }
1051 
1052 /**
1053  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1054  * @ioc: per adapter object
1055  * Context: This function will acquire ioc->pcie_device_lock
1056  *
1057  * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
1058  * reported among all available NVMe drives.
1059  * The minimum max_shutdown_latency is six seconds.
1060  */
1061 static void
1062 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1063 {
1064 	struct _pcie_device *pcie_device;
1065 	unsigned long flags;
1066 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1067 
1068 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1069 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1070 		if (pcie_device->shutdown_latency) {
1071 			if (shutdown_latency < pcie_device->shutdown_latency)
1072 				shutdown_latency =
1073 					pcie_device->shutdown_latency;
1074 		}
1075 	}
1076 	ioc->max_shutdown_latency = shutdown_latency;
1077 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1078 }
1079 
1080 /**
1081  * _scsih_pcie_device_remove - remove pcie_device from list.
1082  * @ioc: per adapter object
1083  * @pcie_device: the pcie_device object
1084  * Context: This function will acquire ioc->pcie_device_lock.
1085  *
1086  * If pcie_device is on the list, remove it and decrement its reference count.
1087  */
1088 static void
1089 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1090 	struct _pcie_device *pcie_device)
1091 {
1092 	unsigned long flags;
1093 	int was_on_pcie_device_list = 0;
1094 	u8 update_latency = 0;
1095 
1096 	if (!pcie_device)
1097 		return;
1098 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1099 		 pcie_device->handle, (u64)pcie_device->wwid);
1100 	if (pcie_device->enclosure_handle != 0)
1101 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1102 			 (u64)pcie_device->enclosure_logical_id,
1103 			 pcie_device->slot);
1104 	if (pcie_device->connector_name[0] != '\0')
1105 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1106 			 pcie_device->enclosure_level,
1107 			 pcie_device->connector_name);
1108 
1109 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1110 	if (!list_empty(&pcie_device->list)) {
1111 		list_del_init(&pcie_device->list);
1112 		was_on_pcie_device_list = 1;
1113 	}
1114 	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1115 		update_latency = 1;
1116 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1117 	if (was_on_pcie_device_list) {
1118 		kfree(pcie_device->serial_number);
1119 		pcie_device_put(pcie_device);
1120 	}
1121 
1122 	/*
1123 	 * This device's RTD3 Entry Latency matches IOC's
1124 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1125 	 * from the available drives as current drive is getting removed.
1126 	 */
1127 	if (update_latency)
1128 		_scsih_set_nvme_max_shutdown_latency(ioc);
1129 }
1130 
1131 
1132 /**
1133  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1134  * @ioc: per adapter object
1135  * @handle: device handle
1136  */
1137 static void
1138 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1139 {
1140 	struct _pcie_device *pcie_device;
1141 	unsigned long flags;
1142 	int was_on_pcie_device_list = 0;
1143 	u8 update_latency = 0;
1144 
1145 	if (ioc->shost_recovery)
1146 		return;
1147 
1148 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1149 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1150 	if (pcie_device) {
1151 		if (!list_empty(&pcie_device->list)) {
1152 			list_del_init(&pcie_device->list);
1153 			was_on_pcie_device_list = 1;
1154 			pcie_device_put(pcie_device);
1155 		}
1156 		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1157 			update_latency = 1;
1158 	}
1159 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1160 	if (was_on_pcie_device_list) {
1161 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1162 		pcie_device_put(pcie_device);
1163 	}
1164 
1165 	/*
1166 	 * This device's RTD3 Entry Latency matches IOC's
1167 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1168 	 * from the available drives as current drive is getting removed.
1169 	 */
1170 	if (update_latency)
1171 		_scsih_set_nvme_max_shutdown_latency(ioc);
1172 }
1173 
1174 /**
1175  * _scsih_pcie_device_add - add pcie_device object
1176  * @ioc: per adapter object
1177  * @pcie_device: pcie_device object
1178  *
1179  * This is added to the pcie_device_list linked list.
1180  */
1181 static void
1182 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1183 	struct _pcie_device *pcie_device)
1184 {
1185 	unsigned long flags;
1186 
1187 	dewtprintk(ioc,
1188 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1189 			    __func__,
1190 			    pcie_device->handle, (u64)pcie_device->wwid));
1191 	if (pcie_device->enclosure_handle != 0)
1192 		dewtprintk(ioc,
1193 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1194 				    __func__,
1195 				    (u64)pcie_device->enclosure_logical_id,
1196 				    pcie_device->slot));
1197 	if (pcie_device->connector_name[0] != '\0')
1198 		dewtprintk(ioc,
1199 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1200 				    __func__, pcie_device->enclosure_level,
1201 				    pcie_device->connector_name));
1202 
1203 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1204 	pcie_device_get(pcie_device);
1205 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1206 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1207 
1208 	if (pcie_device->access_status ==
1209 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1210 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1211 		return;
1212 	}
1213 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1214 		_scsih_pcie_device_remove(ioc, pcie_device);
1215 	} else if (!pcie_device->starget) {
1216 		if (!ioc->is_driver_loading) {
1217 /* TODO: need to find out whether this condition will occur or not */
1218 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1219 		}
1220 	} else
1221 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1222 }
1223 
1224 /**
1225  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1226  * @ioc: per adapter object
1227  * @pcie_device: the pcie_device object
1228  * Context: This function will acquire ioc->pcie_device_lock.
1229  *
1230  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1231  */
1232 static void
1233 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1234 				struct _pcie_device *pcie_device)
1235 {
1236 	unsigned long flags;
1237 
1238 	dewtprintk(ioc,
1239 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1240 			    __func__,
1241 			    pcie_device->handle, (u64)pcie_device->wwid));
1242 	if (pcie_device->enclosure_handle != 0)
1243 		dewtprintk(ioc,
1244 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1245 				    __func__,
1246 				    (u64)pcie_device->enclosure_logical_id,
1247 				    pcie_device->slot));
1248 	if (pcie_device->connector_name[0] != '\0')
1249 		dewtprintk(ioc,
1250 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1251 				    __func__, pcie_device->enclosure_level,
1252 				    pcie_device->connector_name));
1253 
1254 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1255 	pcie_device_get(pcie_device);
1256 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1257 	if (pcie_device->access_status !=
1258 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1259 		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1260 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1261 }
1262 /**
1263  * _scsih_raid_device_find_by_id - raid device search
1264  * @ioc: per adapter object
1265  * @id: sas device target id
1266  * @channel: sas device channel
1267  * Context: Calling function should acquire ioc->raid_device_lock
1268  *
1269  * This searches for raid_device based on target id, then return raid_device
1270  * object.
1271  */
1272 static struct _raid_device *
1273 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1274 {
1275 	struct _raid_device *raid_device, *r;
1276 
1277 	r = NULL;
1278 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1279 		if (raid_device->id == id && raid_device->channel == channel) {
1280 			r = raid_device;
1281 			goto out;
1282 		}
1283 	}
1284 
1285  out:
1286 	return r;
1287 }
1288 
1289 /**
1290  * mpt3sas_raid_device_find_by_handle - raid device search
1291  * @ioc: per adapter object
1292  * @handle: sas device handle (assigned by firmware)
1293  * Context: Calling function should acquire ioc->raid_device_lock
1294  *
1295  * This searches for raid_device based on handle, then return raid_device
1296  * object.
1297  */
1298 struct _raid_device *
1299 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1300 {
1301 	struct _raid_device *raid_device, *r;
1302 
1303 	r = NULL;
1304 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1305 		if (raid_device->handle != handle)
1306 			continue;
1307 		r = raid_device;
1308 		goto out;
1309 	}
1310 
1311  out:
1312 	return r;
1313 }
1314 
1315 /**
1316  * _scsih_raid_device_find_by_wwid - raid device search
1317  * @ioc: per adapter object
1318  * @wwid: world wide identifier of the RAID volume
1319  * Context: Calling function should acquire ioc->raid_device_lock
1320  *
1321  * This searches for raid_device based on wwid, then return raid_device
1322  * object.
1323  */
1324 static struct _raid_device *
1325 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1326 {
1327 	struct _raid_device *raid_device, *r;
1328 
1329 	r = NULL;
1330 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1331 		if (raid_device->wwid != wwid)
1332 			continue;
1333 		r = raid_device;
1334 		goto out;
1335 	}
1336 
1337  out:
1338 	return r;
1339 }
1340 
1341 /**
1342  * _scsih_raid_device_add - add raid_device object
1343  * @ioc: per adapter object
1344  * @raid_device: raid_device object
1345  *
1346  * This is added to the raid_device_list linked list.
1347  */
1348 static void
1349 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1350 	struct _raid_device *raid_device)
1351 {
1352 	unsigned long flags;
1353 
1354 	dewtprintk(ioc,
1355 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1356 			    __func__,
1357 			    raid_device->handle, (u64)raid_device->wwid));
1358 
1359 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1360 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1361 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1362 }
1363 
1364 /**
1365  * _scsih_raid_device_remove - delete raid_device object
1366  * @ioc: per adapter object
1367  * @raid_device: raid_device object
1368  *
1369  */
1370 static void
1371 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1372 	struct _raid_device *raid_device)
1373 {
1374 	unsigned long flags;
1375 
1376 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1377 	list_del(&raid_device->list);
1378 	kfree(raid_device);
1379 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1380 }
1381 
1382 /**
1383  * mpt3sas_scsih_expander_find_by_handle - expander device search
1384  * @ioc: per adapter object
1385  * @handle: expander handle (assigned by firmware)
1386  * Context: Calling function should acquire ioc->sas_node_lock
1387  *
1388  * This searches for expander device based on handle, then returns the
1389  * sas_node object.
1390  */
1391 struct _sas_node *
1392 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1393 {
1394 	struct _sas_node *sas_expander, *r;
1395 
1396 	r = NULL;
1397 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1398 		if (sas_expander->handle != handle)
1399 			continue;
1400 		r = sas_expander;
1401 		goto out;
1402 	}
1403  out:
1404 	return r;
1405 }
1406 
1407 /**
1408  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1409  * @ioc: per adapter object
1410  * @handle: enclosure handle (assigned by firmware)
1411  * Context: Calling function should acquire ioc->sas_device_lock
1412  *
1413  * This searches for enclosure device based on handle, then returns the
1414  * enclosure object.
1415  */
1416 static struct _enclosure_node *
1417 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1418 {
1419 	struct _enclosure_node *enclosure_dev, *r;
1420 
1421 	r = NULL;
1422 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1423 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1424 			continue;
1425 		r = enclosure_dev;
1426 		goto out;
1427 	}
1428 out:
1429 	return r;
1430 }
1431 /**
1432  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1433  * @ioc: per adapter object
1434  * @sas_address: sas address
1435  * Context: Calling function should acquire ioc->sas_node_lock.
1436  *
1437  * This searches for expander device based on sas_address, then returns the
1438  * sas_node object.
1439  */
1440 struct _sas_node *
1441 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1442 	u64 sas_address)
1443 {
1444 	struct _sas_node *sas_expander, *r;
1445 
1446 	r = NULL;
1447 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1448 		if (sas_expander->sas_address != sas_address)
1449 			continue;
1450 		r = sas_expander;
1451 		goto out;
1452 	}
1453  out:
1454 	return r;
1455 }
1456 
1457 /**
1458  * _scsih_expander_node_add - insert expander device to the list.
1459  * @ioc: per adapter object
1460  * @sas_expander: the sas_device object
1461  * Context: This function will acquire ioc->sas_node_lock.
1462  *
1463  * Adding new object to the ioc->sas_expander_list.
1464  */
1465 static void
1466 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1467 	struct _sas_node *sas_expander)
1468 {
1469 	unsigned long flags;
1470 
1471 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1472 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1473 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1474 }
1475 
1476 /**
1477  * _scsih_is_end_device - determines if device is an end device
1478  * @device_info: bitfield providing information about the device.
1479  * Context: none
1480  *
1481  * Return: 1 if end device.
1482  */
1483 static int
1484 _scsih_is_end_device(u32 device_info)
1485 {
1486 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1487 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1488 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1489 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1490 		return 1;
1491 	else
1492 		return 0;
1493 }
1494 
1495 /**
1496  * _scsih_is_nvme_pciescsi_device - determines if
1497  *			device is a PCIe NVMe/SCSI device
1498  * @device_info: bitfield providing information about the device.
1499  * Context: none
1500  *
1501  * Return: 1 if the device is a PCIe NVMe or SCSI device.
1502  */
1503 static int
1504 _scsih_is_nvme_pciescsi_device(u32 device_info)
1505 {
1506 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1507 	    == MPI26_PCIE_DEVINFO_NVME) ||
1508 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1509 	    == MPI26_PCIE_DEVINFO_SCSI))
1510 		return 1;
1511 	else
1512 		return 0;
1513 }
1514 
1515 /**
1516  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1517  * @ioc: per adapter object
1518  * @smid: system request message index
1519  *
1520  * Return: the scmd pointer stored for the given smid, or NULL if the smid
1521  * is not associated with an outstanding SCSI IO.
1522  */
1523 struct scsi_cmnd *
1524 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1525 {
1526 	struct scsi_cmnd *scmd = NULL;
1527 	struct scsiio_tracker *st;
1528 	Mpi25SCSIIORequest_t *mpi_request;
1529 
1530 	if (smid > 0  &&
1531 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
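		/*
		 * smids are 1-based while the block layer tag passed to
		 * scsi_host_find_tag() is 0-based, hence the smid - 1 below.
		 */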
1532 		u32 unique_tag = smid - 1;
1533 
1534 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1535 
1536 		/*
1537 		 * If a SCSI IO request is outstanding at the driver level then
1538 		 * the DevHandle field must be non-zero. If DevHandle is zero
1539 		 * then it means that this smid is free at driver level,
1540 		 * so return NULL.
1541 		 */
1542 		if (!mpi_request->DevHandle)
1543 			return scmd;
1544 
1545 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1546 		if (scmd) {
1547 			st = scsi_cmd_priv(scmd);
1548 			if (st->cb_idx == 0xFF || st->smid == 0)
1549 				scmd = NULL;
1550 		}
1551 	}
1552 	return scmd;
1553 }
1554 
1555 /**
1556  * scsih_change_queue_depth - setting device queue depth
1557  * @sdev: scsi device struct
1558  * @qdepth: requested queue depth
1559  *
1560  * Return: queue depth.
1561  */
1562 static int
1563 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1564 {
1565 	struct Scsi_Host *shost = sdev->host;
1566 	int max_depth;
1567 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1568 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1569 	struct MPT3SAS_TARGET *sas_target_priv_data;
1570 	struct _sas_device *sas_device;
1571 	unsigned long flags;
1572 
1573 	max_depth = shost->can_queue;
1574 
1575 	/*
1576 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1577 	 * is disabled.
1578 	 */
1579 	if (ioc->enable_sdev_max_qd)
1580 		goto not_sata;
1581 
1582 	sas_device_priv_data = sdev->hostdata;
1583 	if (!sas_device_priv_data)
1584 		goto not_sata;
1585 	sas_target_priv_data = sas_device_priv_data->sas_target;
1586 	if (!sas_target_priv_data)
1587 		goto not_sata;
1588 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1589 		goto not_sata;
1590 
1591 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1592 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1593 	if (sas_device) {
1594 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1595 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1596 
1597 		sas_device_put(sas_device);
1598 	}
1599 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1600 
1601  not_sata:
1602 
1603 	if (!sdev->tagged_supported)
1604 		max_depth = 1;
1605 	if (qdepth > max_depth)
1606 		qdepth = max_depth;
1607 	scsi_change_queue_depth(sdev, qdepth);
1608 	sdev_printk(KERN_INFO, sdev,
1609 	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1610 	    sdev->queue_depth, sdev->tagged_supported,
1611 	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1612 	return sdev->queue_depth;
1613 }
1614 
1615 /**
1616  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1617  * @sdev: scsi device struct
1618  * @qdepth: requested queue depth
1619  *
1620  * Returns nothing.
1621  */
1622 void
1623 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1624 {
1625 	struct Scsi_Host *shost = sdev->host;
1626 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1627 
1628 	if (ioc->enable_sdev_max_qd)
1629 		qdepth = shost->can_queue;
1630 
1631 	scsih_change_queue_depth(sdev, qdepth);
1632 }
1633 
1634 /**
1635  * scsih_target_alloc - target add routine
1636  * @starget: scsi target struct
1637  *
1638  * Return: 0 if ok. Any other return is assumed to be an error and
1639  * the device is ignored.
1640  */
1641 static int
1642 scsih_target_alloc(struct scsi_target *starget)
1643 {
1644 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1645 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1646 	struct MPT3SAS_TARGET *sas_target_priv_data;
1647 	struct _sas_device *sas_device;
1648 	struct _raid_device *raid_device;
1649 	struct _pcie_device *pcie_device;
1650 	unsigned long flags;
1651 	struct sas_rphy *rphy;
1652 
1653 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1654 				       GFP_KERNEL);
1655 	if (!sas_target_priv_data)
1656 		return -ENOMEM;
1657 
1658 	starget->hostdata = sas_target_priv_data;
1659 	sas_target_priv_data->starget = starget;
1660 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1661 
1662 	/* RAID volumes */
1663 	if (starget->channel == RAID_CHANNEL) {
1664 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1665 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1666 		    starget->channel);
1667 		if (raid_device) {
1668 			sas_target_priv_data->handle = raid_device->handle;
1669 			sas_target_priv_data->sas_address = raid_device->wwid;
1670 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1671 			if (ioc->is_warpdrive)
1672 				sas_target_priv_data->raid_device = raid_device;
1673 			raid_device->starget = starget;
1674 		}
1675 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1676 		return 0;
1677 	}
1678 
1679 	/* PCIe devices */
1680 	if (starget->channel == PCIE_CHANNEL) {
1681 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1682 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1683 			starget->channel);
1684 		if (pcie_device) {
1685 			sas_target_priv_data->handle = pcie_device->handle;
1686 			sas_target_priv_data->sas_address = pcie_device->wwid;
1687 			sas_target_priv_data->pcie_dev = pcie_device;
1688 			pcie_device->starget = starget;
1689 			pcie_device->id = starget->id;
1690 			pcie_device->channel = starget->channel;
1691 			sas_target_priv_data->flags |=
1692 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1693 			if (pcie_device->fast_path)
1694 				sas_target_priv_data->flags |=
1695 					MPT_TARGET_FASTPATH_IO;
1696 		}
1697 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1698 		return 0;
1699 	}
1700 
1701 	/* sas/sata devices */
1702 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1703 	rphy = dev_to_rphy(starget->dev.parent);
1704 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1705 	   rphy->identify.sas_address);
1706 
1707 	if (sas_device) {
1708 		sas_target_priv_data->handle = sas_device->handle;
1709 		sas_target_priv_data->sas_address = sas_device->sas_address;
1710 		sas_target_priv_data->sas_dev = sas_device;
1711 		sas_device->starget = starget;
1712 		sas_device->id = starget->id;
1713 		sas_device->channel = starget->channel;
1714 		if (test_bit(sas_device->handle, ioc->pd_handles))
1715 			sas_target_priv_data->flags |=
1716 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1717 		if (sas_device->fast_path)
1718 			sas_target_priv_data->flags |=
1719 					MPT_TARGET_FASTPATH_IO;
1720 	}
1721 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1722 
1723 	return 0;
1724 }
1725 
1726 /**
1727  * scsih_target_destroy - target destroy routine
1728  * @starget: scsi target struct
1729  */
1730 static void
1731 scsih_target_destroy(struct scsi_target *starget)
1732 {
1733 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1734 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1735 	struct MPT3SAS_TARGET *sas_target_priv_data;
1736 	struct _sas_device *sas_device;
1737 	struct _raid_device *raid_device;
1738 	struct _pcie_device *pcie_device;
1739 	unsigned long flags;
1740 
1741 	sas_target_priv_data = starget->hostdata;
1742 	if (!sas_target_priv_data)
1743 		return;
1744 
1745 	if (starget->channel == RAID_CHANNEL) {
1746 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1747 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1748 		    starget->channel);
1749 		if (raid_device) {
1750 			raid_device->starget = NULL;
1751 			raid_device->sdev = NULL;
1752 		}
1753 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1754 		goto out;
1755 	}
1756 
1757 	if (starget->channel == PCIE_CHANNEL) {
1758 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1759 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1760 							sas_target_priv_data);
1761 		if (pcie_device && (pcie_device->starget == starget) &&
1762 			(pcie_device->id == starget->id) &&
1763 			(pcie_device->channel == starget->channel))
1764 			pcie_device->starget = NULL;
1765 
1766 		if (pcie_device) {
1767 			/*
1768 			 * Corresponding get() is in _scsih_target_alloc()
1769 			 */
1770 			sas_target_priv_data->pcie_dev = NULL;
1771 			pcie_device_put(pcie_device);
1772 			pcie_device_put(pcie_device);
1773 		}
1774 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1775 		goto out;
1776 	}
1777 
1778 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1779 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1780 	if (sas_device && (sas_device->starget == starget) &&
1781 	    (sas_device->id == starget->id) &&
1782 	    (sas_device->channel == starget->channel))
1783 		sas_device->starget = NULL;
1784 
1785 	if (sas_device) {
1786 		/*
1787 		 * Corresponding get() is in _scsih_target_alloc()
1788 		 */
1789 		sas_target_priv_data->sas_dev = NULL;
1790 		sas_device_put(sas_device);
1791 
1792 		sas_device_put(sas_device);
1793 	}
1794 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1795 
1796  out:
1797 	kfree(sas_target_priv_data);
1798 	starget->hostdata = NULL;
1799 }
1800 
1801 /**
1802  * scsih_slave_alloc - device add routine
1803  * @sdev: scsi device struct
1804  *
1805  * Return: 0 if ok. Any other return is assumed to be an error and
1806  * the device is ignored.
1807  */
1808 static int
1809 scsih_slave_alloc(struct scsi_device *sdev)
1810 {
1811 	struct Scsi_Host *shost;
1812 	struct MPT3SAS_ADAPTER *ioc;
1813 	struct MPT3SAS_TARGET *sas_target_priv_data;
1814 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1815 	struct scsi_target *starget;
1816 	struct _raid_device *raid_device;
1817 	struct _sas_device *sas_device;
1818 	struct _pcie_device *pcie_device;
1819 	unsigned long flags;
1820 
1821 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1822 				       GFP_KERNEL);
1823 	if (!sas_device_priv_data)
1824 		return -ENOMEM;
1825 
1826 	sas_device_priv_data->lun = sdev->lun;
1827 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1828 
1829 	starget = scsi_target(sdev);
1830 	sas_target_priv_data = starget->hostdata;
1831 	sas_target_priv_data->num_luns++;
1832 	sas_device_priv_data->sas_target = sas_target_priv_data;
1833 	sdev->hostdata = sas_device_priv_data;
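	/*
	 * Hidden RAID components should not have an upper-layer driver
	 * (e.g. sd) attached to them.
	 */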
1834 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1835 		sdev->no_uld_attach = 1;
1836 
1837 	shost = dev_to_shost(&starget->dev);
1838 	ioc = shost_priv(shost);
1839 	if (starget->channel == RAID_CHANNEL) {
1840 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1841 		raid_device = _scsih_raid_device_find_by_id(ioc,
1842 		    starget->id, starget->channel);
1843 		if (raid_device)
1844 			raid_device->sdev = sdev; /* raid is single lun */
1845 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1846 	}
1847 	if (starget->channel == PCIE_CHANNEL) {
1848 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1849 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1850 				sas_target_priv_data->sas_address);
1851 		if (pcie_device && (pcie_device->starget == NULL)) {
1852 			sdev_printk(KERN_INFO, sdev,
1853 			    "%s : pcie_device->starget set to starget @ %d\n",
1854 			    __func__, __LINE__);
1855 			pcie_device->starget = starget;
1856 		}
1857 
1858 		if (pcie_device)
1859 			pcie_device_put(pcie_device);
1860 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1861 
1862 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1863 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1864 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1865 					sas_target_priv_data->sas_address);
1866 		if (sas_device && (sas_device->starget == NULL)) {
1867 			sdev_printk(KERN_INFO, sdev,
1868 			"%s : sas_device->starget set to starget @ %d\n",
1869 			     __func__, __LINE__);
1870 			sas_device->starget = starget;
1871 		}
1872 
1873 		if (sas_device)
1874 			sas_device_put(sas_device);
1875 
1876 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 /**
1883  * scsih_slave_destroy - device destroy routine
1884  * @sdev: scsi device struct
1885  */
1886 static void
1887 scsih_slave_destroy(struct scsi_device *sdev)
1888 {
1889 	struct MPT3SAS_TARGET *sas_target_priv_data;
1890 	struct scsi_target *starget;
1891 	struct Scsi_Host *shost;
1892 	struct MPT3SAS_ADAPTER *ioc;
1893 	struct _sas_device *sas_device;
1894 	struct _pcie_device *pcie_device;
1895 	unsigned long flags;
1896 
1897 	if (!sdev->hostdata)
1898 		return;
1899 
1900 	starget = scsi_target(sdev);
1901 	sas_target_priv_data = starget->hostdata;
1902 	sas_target_priv_data->num_luns--;
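	/*
	 * The starget back-pointer in the matching pcie_device/sas_device is
	 * cleared below only once the last LUN on this target is destroyed.
	 */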
1903 
1904 	shost = dev_to_shost(&starget->dev);
1905 	ioc = shost_priv(shost);
1906 
1907 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1908 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1910 				sas_target_priv_data);
1911 		if (pcie_device && !sas_target_priv_data->num_luns)
1912 			pcie_device->starget = NULL;
1913 
1914 		if (pcie_device)
1915 			pcie_device_put(pcie_device);
1916 
1917 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1918 
1919 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1920 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1921 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1922 				sas_target_priv_data);
1923 		if (sas_device && !sas_target_priv_data->num_luns)
1924 			sas_device->starget = NULL;
1925 
1926 		if (sas_device)
1927 			sas_device_put(sas_device);
1928 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1929 	}
1930 
1931 	kfree(sdev->hostdata);
1932 	sdev->hostdata = NULL;
1933 }
1934 
1935 /**
1936  * _scsih_display_sata_capabilities - sata capabilities
1937  * @ioc: per adapter object
1938  * @handle: device handle
1939  * @sdev: scsi device struct
1940  */
1941 static void
1942 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1943 	u16 handle, struct scsi_device *sdev)
1944 {
1945 	Mpi2ConfigReply_t mpi_reply;
1946 	Mpi2SasDevicePage0_t sas_device_pg0;
1947 	u32 ioc_status;
1948 	u16 flags;
1949 	u32 device_info;
1950 
1951 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1952 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1953 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1954 			__FILE__, __LINE__, __func__);
1955 		return;
1956 	}
1957 
1958 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1959 	    MPI2_IOCSTATUS_MASK;
1960 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1961 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1962 			__FILE__, __LINE__, __func__);
1963 		return;
1964 	}
1965 
1966 	flags = le16_to_cpu(sas_device_pg0.Flags);
1967 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1968 
1969 	sdev_printk(KERN_INFO, sdev,
1970 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1971 	    "sw_preserve(%s)\n",
1972 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1973 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1974 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1975 	    "n",
1976 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1977 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1978 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1979 }
1980 
1981 /*
1982  * raid transport support -
 * Enabled for SLES11 and newer. In older kernels the driver will panic when
 * it is unloaded and then loaded again - the subroutine raid_class_release()
 * does not appear to clean up properly.
1986  */
1987 
1988 /**
1989  * scsih_is_raid - return boolean indicating device is raid volume
1990  * @dev: the device struct object
1991  */
1992 static int
1993 scsih_is_raid(struct device *dev)
1994 {
1995 	struct scsi_device *sdev = to_scsi_device(dev);
1996 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1997 
1998 	if (ioc->is_warpdrive)
1999 		return 0;
2000 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2001 }
2002 
2003 static int
2004 scsih_is_nvme(struct device *dev)
2005 {
2006 	struct scsi_device *sdev = to_scsi_device(dev);
2007 
2008 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2009 }
2010 
2011 /**
2012  * scsih_get_resync - get raid volume resync percent complete
2013  * @dev: the device struct object
2014  */
2015 static void
2016 scsih_get_resync(struct device *dev)
2017 {
2018 	struct scsi_device *sdev = to_scsi_device(dev);
2019 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
2021 	unsigned long flags;
2022 	Mpi2RaidVolPage0_t vol_pg0;
2023 	Mpi2ConfigReply_t mpi_reply;
2024 	u32 volume_status_flags;
2025 	u8 percent_complete;
2026 	u16 handle;
2027 
2028 	percent_complete = 0;
2029 	handle = 0;
2030 	if (ioc->is_warpdrive)
2031 		goto out;
2032 
2033 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2034 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2035 	    sdev->channel);
2036 	if (raid_device) {
2037 		handle = raid_device->handle;
2038 		percent_complete = raid_device->percent_complete;
2039 	}
2040 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2041 
2042 	if (!handle)
2043 		goto out;
2044 
2045 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2046 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2047 	     sizeof(Mpi2RaidVolPage0_t))) {
2048 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2049 			__FILE__, __LINE__, __func__);
2050 		percent_complete = 0;
2051 		goto out;
2052 	}
2053 
2054 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2055 	if (!(volume_status_flags &
2056 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2057 		percent_complete = 0;
2058 
2059  out:
2060 
2061 	switch (ioc->hba_mpi_version_belonged) {
2062 	case MPI2_VERSION:
2063 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2064 		break;
2065 	case MPI25_VERSION:
2066 	case MPI26_VERSION:
2067 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2068 		break;
2069 	}
2070 }
2071 
2072 /**
2073  * scsih_get_state - get raid volume level
2074  * @dev: the device struct object
2075  */
2076 static void
2077 scsih_get_state(struct device *dev)
2078 {
2079 	struct scsi_device *sdev = to_scsi_device(dev);
2080 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
2082 	unsigned long flags;
2083 	Mpi2RaidVolPage0_t vol_pg0;
2084 	Mpi2ConfigReply_t mpi_reply;
2085 	u32 volstate;
2086 	enum raid_state state = RAID_STATE_UNKNOWN;
2087 	u16 handle = 0;
2088 
2089 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2090 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2091 	    sdev->channel);
2092 	if (raid_device)
2093 		handle = raid_device->handle;
2094 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2095 
2096 	if (!raid_device)
2097 		goto out;
2098 
2099 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2100 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2101 	     sizeof(Mpi2RaidVolPage0_t))) {
2102 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2103 			__FILE__, __LINE__, __func__);
2104 		goto out;
2105 	}
2106 
2107 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2108 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2109 		state = RAID_STATE_RESYNCING;
2110 		goto out;
2111 	}
2112 
2113 	switch (vol_pg0.VolumeState) {
2114 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2115 	case MPI2_RAID_VOL_STATE_ONLINE:
2116 		state = RAID_STATE_ACTIVE;
2117 		break;
2118 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2119 		state = RAID_STATE_DEGRADED;
2120 		break;
2121 	case MPI2_RAID_VOL_STATE_FAILED:
2122 	case MPI2_RAID_VOL_STATE_MISSING:
2123 		state = RAID_STATE_OFFLINE;
2124 		break;
2125 	}
2126  out:
2127 	switch (ioc->hba_mpi_version_belonged) {
2128 	case MPI2_VERSION:
2129 		raid_set_state(mpt2sas_raid_template, dev, state);
2130 		break;
2131 	case MPI25_VERSION:
2132 	case MPI26_VERSION:
2133 		raid_set_state(mpt3sas_raid_template, dev, state);
2134 		break;
2135 	}
2136 }
2137 
2138 /**
2139  * _scsih_set_level - set raid level
 * @ioc: per adapter object
2141  * @sdev: scsi device struct
2142  * @volume_type: volume type
2143  */
2144 static void
2145 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2146 	struct scsi_device *sdev, u8 volume_type)
2147 {
2148 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2149 
2150 	switch (volume_type) {
2151 	case MPI2_RAID_VOL_TYPE_RAID0:
2152 		level = RAID_LEVEL_0;
2153 		break;
2154 	case MPI2_RAID_VOL_TYPE_RAID10:
2155 		level = RAID_LEVEL_10;
2156 		break;
2157 	case MPI2_RAID_VOL_TYPE_RAID1E:
2158 		level = RAID_LEVEL_1E;
2159 		break;
2160 	case MPI2_RAID_VOL_TYPE_RAID1:
2161 		level = RAID_LEVEL_1;
2162 		break;
2163 	}
2164 
2165 	switch (ioc->hba_mpi_version_belonged) {
2166 	case MPI2_VERSION:
2167 		raid_set_level(mpt2sas_raid_template,
2168 			&sdev->sdev_gendev, level);
2169 		break;
2170 	case MPI25_VERSION:
2171 	case MPI26_VERSION:
2172 		raid_set_level(mpt3sas_raid_template,
2173 			&sdev->sdev_gendev, level);
2174 		break;
2175 	}
2176 }
2177 
2178 
2179 /**
2180  * _scsih_get_volume_capabilities - volume capabilities
2181  * @ioc: per adapter object
2182  * @raid_device: the raid_device object
2183  *
2184  * Return: 0 for success, else 1
2185  */
2186 static int
2187 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2188 	struct _raid_device *raid_device)
2189 {
2190 	Mpi2RaidVolPage0_t *vol_pg0;
2191 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2192 	Mpi2SasDevicePage0_t sas_device_pg0;
2193 	Mpi2ConfigReply_t mpi_reply;
2194 	u16 sz;
2195 	u8 num_pds;
2196 
2197 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2198 	    &num_pds)) || !num_pds) {
2199 		dfailprintk(ioc,
2200 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2201 				     __FILE__, __LINE__, __func__));
2202 		return 1;
2203 	}
2204 
2205 	raid_device->num_pds = num_pds;
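	/*
	 * RAID volume page 0 is variable length: the fixed header plus one
	 * Mpi2RaidVol0PhysDisk_t entry per physical disk.
	 */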
2206 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2207 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2208 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2209 	if (!vol_pg0) {
2210 		dfailprintk(ioc,
2211 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2212 				     __FILE__, __LINE__, __func__));
2213 		return 1;
2214 	}
2215 
2216 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2217 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2218 		dfailprintk(ioc,
2219 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2220 				     __FILE__, __LINE__, __func__));
2221 		kfree(vol_pg0);
2222 		return 1;
2223 	}
2224 
2225 	raid_device->volume_type = vol_pg0->VolumeType;
2226 
2227 	/* figure out what the underlying devices are by
2228 	 * obtaining the device_info bits for the 1st device
2229 	 */
2230 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2231 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2232 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2233 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2234 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2235 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2236 			raid_device->device_info =
2237 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2238 		}
2239 	}
2240 
2241 	kfree(vol_pg0);
2242 	return 0;
2243 }
2244 
2245 /**
2246  * _scsih_enable_tlr - setting TLR flags
2247  * @ioc: per adapter object
2248  * @sdev: scsi device struct
2249  *
 * Enable Transaction Layer Retries for tape devices when
 * VPD page 0x90 is present.
 */
2254 static void
2255 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2256 {
2257 
2258 	/* only for TAPE */
2259 	if (sdev->type != TYPE_TAPE)
2260 		return;
2261 
2262 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2263 		return;
2264 
2265 	sas_enable_tlr(sdev);
2266 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2267 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2268 	return;
2269 
2270 }
2271 
2272 /**
2273  * scsih_slave_configure - device configure routine.
2274  * @sdev: scsi device struct
2275  *
2276  * Return: 0 if ok. Any other return is assumed to be an error and
2277  * the device is ignored.
2278  */
2279 static int
2280 scsih_slave_configure(struct scsi_device *sdev)
2281 {
2282 	struct Scsi_Host *shost = sdev->host;
2283 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2284 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2285 	struct MPT3SAS_TARGET *sas_target_priv_data;
2286 	struct _sas_device *sas_device;
2287 	struct _pcie_device *pcie_device;
2288 	struct _raid_device *raid_device;
2289 	unsigned long flags;
2290 	int qdepth;
2291 	u8 ssp_target = 0;
2292 	char *ds = "";
2293 	char *r_level = "";
2294 	u16 handle, volume_handle = 0;
2295 	u64 volume_wwid = 0;
2296 
2297 	qdepth = 1;
2298 	sas_device_priv_data = sdev->hostdata;
2299 	sas_device_priv_data->configured_lun = 1;
2300 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2301 	sas_target_priv_data = sas_device_priv_data->sas_target;
2302 	handle = sas_target_priv_data->handle;
2303 
2304 	/* raid volume handling */
2305 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2306 
2307 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2308 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2309 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2310 		if (!raid_device) {
2311 			dfailprintk(ioc,
2312 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2313 					     __FILE__, __LINE__, __func__));
2314 			return 1;
2315 		}
2316 
2317 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2318 			dfailprintk(ioc,
2319 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2320 					     __FILE__, __LINE__, __func__));
2321 			return 1;
2322 		}
2323 
2324 		/*
2325 		 * WARPDRIVE: Initialize the required data for Direct IO
2326 		 */
2327 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2328 
2329 		/* RAID Queue Depth Support
2330 		 * IS volume = underlying qdepth of drive type, either
2331 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2332 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2333 		 */
2334 		if (raid_device->device_info &
2335 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2336 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2337 			ds = "SSP";
2338 		} else {
2339 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2340 			if (raid_device->device_info &
2341 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2342 				ds = "SATA";
2343 			else
2344 				ds = "STP";
2345 		}
2346 
2347 		switch (raid_device->volume_type) {
2348 		case MPI2_RAID_VOL_TYPE_RAID0:
2349 			r_level = "RAID0";
2350 			break;
2351 		case MPI2_RAID_VOL_TYPE_RAID1E:
2352 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2353 			if (ioc->manu_pg10.OEMIdentifier &&
2354 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2355 			    MFG10_GF0_R10_DISPLAY) &&
2356 			    !(raid_device->num_pds % 2))
2357 				r_level = "RAID10";
2358 			else
2359 				r_level = "RAID1E";
2360 			break;
2361 		case MPI2_RAID_VOL_TYPE_RAID1:
2362 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2363 			r_level = "RAID1";
2364 			break;
2365 		case MPI2_RAID_VOL_TYPE_RAID10:
2366 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2367 			r_level = "RAID10";
2368 			break;
2369 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2370 		default:
2371 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2372 			r_level = "RAIDX";
2373 			break;
2374 		}
2375 
2376 		if (!ioc->hide_ir_msg)
2377 			sdev_printk(KERN_INFO, sdev,
2378 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2379 			    " pd_count(%d), type(%s)\n",
2380 			    r_level, raid_device->handle,
2381 			    (unsigned long long)raid_device->wwid,
2382 			    raid_device->num_pds, ds);
2383 
2384 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2385 			blk_queue_max_hw_sectors(sdev->request_queue,
2386 						MPT3SAS_RAID_MAX_SECTORS);
2387 			sdev_printk(KERN_INFO, sdev,
2388 					"Set queue's max_sector to: %u\n",
2389 						MPT3SAS_RAID_MAX_SECTORS);
2390 		}
2391 
2392 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2393 
2394 		/* raid transport support */
2395 		if (!ioc->is_warpdrive)
2396 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2397 		return 0;
2398 	}
2399 
2400 	/* non-raid handling */
2401 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2402 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2403 		    &volume_handle)) {
2404 			dfailprintk(ioc,
2405 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2406 					     __FILE__, __LINE__, __func__));
2407 			return 1;
2408 		}
2409 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2410 		    volume_handle, &volume_wwid)) {
2411 			dfailprintk(ioc,
2412 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2413 					     __FILE__, __LINE__, __func__));
2414 			return 1;
2415 		}
2416 	}
2417 
2418 	/* PCIe handling */
2419 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2420 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2421 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2422 				sas_device_priv_data->sas_target->sas_address);
2423 		if (!pcie_device) {
2424 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2425 			dfailprintk(ioc,
2426 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2427 					     __FILE__, __LINE__, __func__));
2428 			return 1;
2429 		}
2430 
2431 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2432 		ds = "NVMe";
2433 		sdev_printk(KERN_INFO, sdev,
2434 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2435 			ds, handle, (unsigned long long)pcie_device->wwid,
2436 			pcie_device->port_num);
2437 		if (pcie_device->enclosure_handle != 0)
2438 			sdev_printk(KERN_INFO, sdev,
2439 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2440 			ds,
2441 			(unsigned long long)pcie_device->enclosure_logical_id,
2442 			pcie_device->slot);
2443 		if (pcie_device->connector_name[0] != '\0')
2444 			sdev_printk(KERN_INFO, sdev,
2445 				"%s: enclosure level(0x%04x),"
2446 				"connector name( %s)\n", ds,
2447 				pcie_device->enclosure_level,
2448 				pcie_device->connector_name);
2449 
		/* nvme_mdts is in bytes; blk_queue_max_hw_sectors() takes
		 * 512-byte sectors, hence the division by 512.
		 */
		if (pcie_device->nvme_mdts)
			blk_queue_max_hw_sectors(sdev->request_queue,
					pcie_device->nvme_mdts/512);
2453 
2454 		pcie_device_put(pcie_device);
2455 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2456 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		/*
		 * Enable the QUEUE_FLAG_NOMERGES flag so that IOs won't be
		 * merged; this eliminates holes created during the merging
		 * operation.
		 */
2461 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2462 				sdev->request_queue);
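		/*
		 * The IOC builds NVMe PRPs from the scatter list; keep the
		 * segments aligned to the controller page size so they stay
		 * PRP-friendly.
		 */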
2463 		blk_queue_virt_boundary(sdev->request_queue,
2464 				ioc->page_size - 1);
2465 		return 0;
2466 	}
2467 
2468 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2469 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2470 	   sas_device_priv_data->sas_target->sas_address);
2471 	if (!sas_device) {
2472 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2473 		dfailprintk(ioc,
2474 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2475 				     __FILE__, __LINE__, __func__));
2476 		return 1;
2477 	}
2478 
2479 	sas_device->volume_handle = volume_handle;
2480 	sas_device->volume_wwid = volume_wwid;
2481 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2482 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2483 		ssp_target = 1;
2484 		if (sas_device->device_info &
2485 				MPI2_SAS_DEVICE_INFO_SEP) {
2486 			sdev_printk(KERN_WARNING, sdev,
2487 			"set ignore_delay_remove for handle(0x%04x)\n",
2488 			sas_device_priv_data->sas_target->handle);
2489 			sas_device_priv_data->ignore_delay_remove = 1;
2490 			ds = "SES";
2491 		} else
2492 			ds = "SSP";
2493 	} else {
2494 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2495 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2496 			ds = "STP";
2497 		else if (sas_device->device_info &
2498 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2499 			ds = "SATA";
2500 	}
2501 
2502 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2503 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2504 	    ds, handle, (unsigned long long)sas_device->sas_address,
2505 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2506 
2507 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2508 
2509 	sas_device_put(sas_device);
2510 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2511 
2512 	if (!ssp_target)
2513 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2514 
2515 
2516 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2517 
2518 	if (ssp_target) {
2519 		sas_read_port_mode_page(sdev);
2520 		_scsih_enable_tlr(ioc, sdev);
2521 	}
2522 
2523 	return 0;
2524 }
2525 
2526 /**
2527  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2528  * @sdev: scsi device struct
2529  * @bdev: pointer to block device context
2530  * @capacity: device size (in 512 byte sectors)
2531  * @params: three element array to place output:
2532  *              params[0] number of heads (max 255)
2533  *              params[1] number of sectors (max 63)
2534  *              params[2] number of cylinders
2535  */
2536 static int
2537 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2538 	sector_t capacity, int params[])
2539 {
2540 	int		heads;
2541 	int		sectors;
2542 	sector_t	cylinders;
2543 	ulong		dummy;
2544 
2545 	heads = 64;
2546 	sectors = 32;
2547 
2548 	dummy = heads * sectors;
2549 	cylinders = capacity;
2550 	sector_div(cylinders, dummy);
2551 
2552 	/*
	 * Handle extended translation size for logical drives
	 * larger than 1 GB
2555 	 */
2556 	if ((ulong)capacity >= 0x200000) {
2557 		heads = 255;
2558 		sectors = 63;
2559 		dummy = heads * sectors;
2560 		cylinders = capacity;
2561 		sector_div(cylinders, dummy);
2562 	}
2563 
2564 	/* return result */
2565 	params[0] = heads;
2566 	params[1] = sectors;
2567 	params[2] = cylinders;
2568 
2569 	return 0;
2570 }
2571 
2572 /**
2573  * _scsih_response_code - translation of device response code
2574  * @ioc: per adapter object
2575  * @response_code: response code returned by the device
2576  */
2577 static void
2578 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2579 {
2580 	char *desc;
2581 
2582 	switch (response_code) {
2583 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2584 		desc = "task management request completed";
2585 		break;
2586 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2587 		desc = "invalid frame";
2588 		break;
2589 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2590 		desc = "task management request not supported";
2591 		break;
2592 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2593 		desc = "task management request failed";
2594 		break;
2595 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2596 		desc = "task management request succeeded";
2597 		break;
2598 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2599 		desc = "invalid lun";
2600 		break;
2601 	case 0xA:
2602 		desc = "overlapped tag attempted";
2603 		break;
2604 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2605 		desc = "task queued, however not sent to target";
2606 		break;
2607 	default:
2608 		desc = "unknown";
2609 		break;
2610 	}
2611 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2612 }
2613 
2614 /**
2615  * _scsih_tm_done - tm completion routine
2616  * @ioc: per adapter object
2617  * @smid: system request message index
2618  * @msix_index: MSIX table index supplied by the OS
2619  * @reply: reply message frame(lower 32bit addr)
2620  * Context: none.
2621  *
2622  * The callback handler when using scsih_issue_tm.
2623  *
2624  * Return: 1 meaning mf should be freed from _base_interrupt
2625  *         0 means the mf is freed from this function.
2626  */
2627 static u8
2628 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2629 {
2630 	MPI2DefaultReply_t *mpi_reply;
2631 
2632 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2633 		return 1;
2634 	if (ioc->tm_cmds.smid != smid)
2635 		return 1;
2636 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2637 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2638 	if (mpi_reply) {
2639 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2640 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2641 	}
2642 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2643 	complete(&ioc->tm_cmds.done);
2644 	return 1;
2645 }
2646 
2647 /**
2648  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2649  * @ioc: per adapter object
2650  * @handle: device handle
2651  *
 * During a task management request, we need to freeze the device queue.
2653  */
2654 void
2655 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2656 {
2657 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2658 	struct scsi_device *sdev;
2659 	u8 skip = 0;
2660 
2661 	shost_for_each_device(sdev, ioc->shost) {
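		/*
		 * tm_busy is per target, so once one LUN belonging to the
		 * handle has been flagged the remaining devices are skipped
		 * rather than breaking out of the iterator.
		 */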
2662 		if (skip)
2663 			continue;
2664 		sas_device_priv_data = sdev->hostdata;
2665 		if (!sas_device_priv_data)
2666 			continue;
2667 		if (sas_device_priv_data->sas_target->handle == handle) {
2668 			sas_device_priv_data->sas_target->tm_busy = 1;
2669 			skip = 1;
2670 			ioc->ignore_loginfos = 1;
2671 		}
2672 	}
2673 }
2674 
2675 /**
2676  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2677  * @ioc: per adapter object
2678  * @handle: device handle
2679  *
 * Unfreeze the device queue that was frozen for the task management request.
2681  */
2682 void
2683 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2684 {
2685 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2686 	struct scsi_device *sdev;
2687 	u8 skip = 0;
2688 
2689 	shost_for_each_device(sdev, ioc->shost) {
2690 		if (skip)
2691 			continue;
2692 		sas_device_priv_data = sdev->hostdata;
2693 		if (!sas_device_priv_data)
2694 			continue;
2695 		if (sas_device_priv_data->sas_target->handle == handle) {
2696 			sas_device_priv_data->sas_target->tm_busy = 0;
2697 			skip = 1;
2698 			ioc->ignore_loginfos = 0;
2699 		}
2700 	}
2701 }
2702 
2703 /**
2704  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2705  * @ioc: per adapter struct
2706  * @handle: device handle
2707  * @lun: lun number
2708  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2709  * @smid_task: smid assigned to the task
2710  * @msix_task: MSIX table index supplied by the OS
2711  * @timeout: timeout in seconds
2712  * @tr_method: Target Reset Method
2713  * Context: user
2714  *
2715  * A generic API for sending task management requests to firmware.
2716  *
2717  * The callback index is set inside `ioc->tm_cb_idx`.
2718  * The caller is responsible to check for outstanding commands.
2719  *
2720  * Return: SUCCESS or FAILED.
2721  */
2722 int
2723 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2724 	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2725 {
2726 	Mpi2SCSITaskManagementRequest_t *mpi_request;
2727 	Mpi2SCSITaskManagementReply_t *mpi_reply;
2728 	u16 smid = 0;
2729 	u32 ioc_state;
2730 	int rc;
2731 	u8 issue_reset = 0;
2732 
2733 	lockdep_assert_held(&ioc->tm_cmds.mutex);
2734 
2735 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2736 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2737 		return FAILED;
2738 	}
2739 
2740 	if (ioc->shost_recovery || ioc->remove_host ||
2741 	    ioc->pci_error_recovery) {
2742 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2743 		return FAILED;
2744 	}
2745 
2746 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2747 	if (ioc_state & MPI2_DOORBELL_USED) {
2748 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2749 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2750 		return (!rc) ? SUCCESS : FAILED;
2751 	}
2752 
2753 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2754 		mpt3sas_print_fault_code(ioc, ioc_state &
2755 		    MPI2_DOORBELL_DATA_MASK);
2756 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2757 		return (!rc) ? SUCCESS : FAILED;
2758 	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
2759 	    MPI2_IOC_STATE_COREDUMP) {
2760 		mpt3sas_print_coredump_info(ioc, ioc_state &
2761 		    MPI2_DOORBELL_DATA_MASK);
2762 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2763 		return (!rc) ? SUCCESS : FAILED;
2764 	}
2765 
2766 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2767 	if (!smid) {
2768 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2769 		return FAILED;
2770 	}
2771 
2772 	dtmprintk(ioc,
2773 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2774 			   handle, type, smid_task, timeout, tr_method));
2775 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
2776 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2777 	ioc->tm_cmds.smid = smid;
2778 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2779 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2780 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2781 	mpi_request->DevHandle = cpu_to_le16(handle);
2782 	mpi_request->TaskType = type;
2783 	mpi_request->MsgFlags = tr_method;
2784 	mpi_request->TaskMID = cpu_to_le16(smid_task);
2785 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2786 	mpt3sas_scsih_set_tm_flag(ioc, handle);
2787 	init_completion(&ioc->tm_cmds.done);
2788 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
2789 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2790 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2791 		mpt3sas_check_cmd_timeout(ioc,
2792 		    ioc->tm_cmds.status, mpi_request,
2793 		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
2794 		if (issue_reset) {
2795 			rc = mpt3sas_base_hard_reset_handler(ioc,
2796 					FORCE_BIG_HAMMER);
2797 			rc = (!rc) ? SUCCESS : FAILED;
2798 			goto out;
2799 		}
2800 	}
2801 
2802 	/* sync IRQs in case those were busy during flush. */
2803 	mpt3sas_base_sync_reply_irqs(ioc);
2804 
2805 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2806 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2807 		mpi_reply = ioc->tm_cmds.reply;
2808 		dtmprintk(ioc,
2809 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2810 				   le16_to_cpu(mpi_reply->IOCStatus),
2811 				   le32_to_cpu(mpi_reply->IOCLogInfo),
2812 				   le32_to_cpu(mpi_reply->TerminationCount)));
2813 		if (ioc->logging_level & MPT_DEBUG_TM) {
2814 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
2815 			if (mpi_reply->IOCStatus)
2816 				_debug_dump_mf(mpi_request,
2817 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2818 		}
2819 	}
2820 	rc = SUCCESS;
2821 
2822 out:
2823 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
2824 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2825 	return rc;
2826 }
2827 
2828 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2829 		u64 lun, u8 type, u16 smid_task, u16 msix_task,
2830 		u8 timeout, u8 tr_method)
2831 {
2832 	int ret;
2833 
2834 	mutex_lock(&ioc->tm_cmds.mutex);
2835 	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2836 			msix_task, timeout, tr_method);
2837 	mutex_unlock(&ioc->tm_cmds.mutex);
2838 
2839 	return ret;
2840 }
2841 
2842 /**
2843  * _scsih_tm_display_info - displays info about the device
2844  * @ioc: per adapter struct
2845  * @scmd: pointer to scsi command object
2846  *
2847  * Called by task management callback handlers.
2848  */
2849 static void
2850 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2851 {
2852 	struct scsi_target *starget = scmd->device->sdev_target;
2853 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2854 	struct _sas_device *sas_device = NULL;
2855 	struct _pcie_device *pcie_device = NULL;
2856 	unsigned long flags;
2857 	char *device_str = NULL;
2858 
2859 	if (!priv_target)
2860 		return;
2861 	if (ioc->hide_ir_msg)
2862 		device_str = "WarpDrive";
2863 	else
2864 		device_str = "volume";
2865 
2866 	scsi_print_command(scmd);
2867 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2868 		starget_printk(KERN_INFO, starget,
2869 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
2870 			device_str, priv_target->handle,
2871 		    device_str, (unsigned long long)priv_target->sas_address);
2872 
2873 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2874 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2875 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2876 		if (pcie_device) {
2877 			starget_printk(KERN_INFO, starget,
2878 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2879 				pcie_device->handle,
2880 				(unsigned long long)pcie_device->wwid,
2881 				pcie_device->port_num);
2882 			if (pcie_device->enclosure_handle != 0)
2883 				starget_printk(KERN_INFO, starget,
2884 					"enclosure logical id(0x%016llx), slot(%d)\n",
2885 					(unsigned long long)
2886 					pcie_device->enclosure_logical_id,
2887 					pcie_device->slot);
2888 			if (pcie_device->connector_name[0] != '\0')
2889 				starget_printk(KERN_INFO, starget,
2890 					"enclosure level(0x%04x), connector name( %s)\n",
2891 					pcie_device->enclosure_level,
2892 					pcie_device->connector_name);
2893 			pcie_device_put(pcie_device);
2894 		}
2895 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2896 
2897 	} else {
2898 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2899 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2900 		if (sas_device) {
2901 			if (priv_target->flags &
2902 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2903 				starget_printk(KERN_INFO, starget,
2904 				    "volume handle(0x%04x), "
2905 				    "volume wwid(0x%016llx)\n",
2906 				    sas_device->volume_handle,
2907 				   (unsigned long long)sas_device->volume_wwid);
2908 			}
2909 			starget_printk(KERN_INFO, starget,
2910 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2911 			    sas_device->handle,
2912 			    (unsigned long long)sas_device->sas_address,
2913 			    sas_device->phy);
2914 
2915 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
2916 			    NULL, starget);
2917 
2918 			sas_device_put(sas_device);
2919 		}
2920 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2921 	}
2922 }
2923 
2924 /**
2925  * scsih_abort - eh threads main abort routine
2926  * @scmd: pointer to scsi command object
2927  *
2928  * Return: SUCCESS if command aborted else FAILED
2929  */
2930 static int
2931 scsih_abort(struct scsi_cmnd *scmd)
2932 {
2933 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2934 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2935 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;
	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;

	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
2942 	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
2943 	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
2944 	    (scmd->request->timeout / HZ) * 1000);
2945 	_scsih_tm_display_info(ioc, scmd);
2946 
2947 	sas_device_priv_data = scmd->device->hostdata;
2948 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2949 	    ioc->remove_host) {
2950 		sdev_printk(KERN_INFO, scmd->device,
2951 		    "device been deleted! scmd(0x%p)\n", scmd);
2952 		scmd->result = DID_NO_CONNECT << 16;
2953 		scmd->scsi_done(scmd);
2954 		r = SUCCESS;
2955 		goto out;
2956 	}
2957 
2958 	/* check for completed command */
2959 	if (st == NULL || st->cb_idx == 0xFF) {
2960 		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
2961 		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
2962 		scmd->result = DID_RESET << 16;
2963 		r = SUCCESS;
2964 		goto out;
2965 	}
2966 
2967 	/* for hidden raid components and volumes this is not supported */
2968 	if (sas_device_priv_data->sas_target->flags &
2969 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
2970 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2971 		scmd->result = DID_RESET << 16;
2972 		r = FAILED;
2973 		goto out;
2974 	}
2975 
2976 	mpt3sas_halt_firmware(ioc);
2977 
2978 	handle = sas_device_priv_data->sas_target->handle;
2979 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
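	/*
	 * NVMe devices (PCIe devices that are not PCIe-SCSI) use the
	 * per-IOC nvme_abort_timeout instead of the default 30 seconds.
	 */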
2980 	if (pcie_device && (!ioc->tm_custom_handling) &&
2981 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
2982 		timeout = ioc->nvme_abort_timeout;
2983 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2984 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2985 		st->smid, st->msix_io, timeout, 0);
2986 	/* Command must be cleared after abort */
2987 	if (r == SUCCESS && st->cb_idx != 0xFF)
2988 		r = FAILED;
2989  out:
2990 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
2991 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2992 	if (pcie_device)
2993 		pcie_device_put(pcie_device);
2994 	return r;
2995 }
2996 
2997 /**
2998  * scsih_dev_reset - eh threads main device reset routine
2999  * @scmd: pointer to scsi command object
3000  *
 * Return: SUCCESS if the device was reset, else FAILED
3002  */
3003 static int
3004 scsih_dev_reset(struct scsi_cmnd *scmd)
3005 {
3006 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3007 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3008 	struct _sas_device *sas_device = NULL;
3009 	struct _pcie_device *pcie_device = NULL;
3010 	u16	handle;
3011 	u8	tr_method = 0;
3012 	u8	tr_timeout = 30;
3013 	int r;
3014 
3015 	struct scsi_target *starget = scmd->device->sdev_target;
3016 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3017 
3018 	sdev_printk(KERN_INFO, scmd->device,
3019 	    "attempting device reset! scmd(0x%p)\n", scmd);
3020 	_scsih_tm_display_info(ioc, scmd);
3021 
3022 	sas_device_priv_data = scmd->device->hostdata;
3023 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3024 	    ioc->remove_host) {
3025 		sdev_printk(KERN_INFO, scmd->device,
3026 		    "device been deleted! scmd(0x%p)\n", scmd);
3027 		scmd->result = DID_NO_CONNECT << 16;
3028 		scmd->scsi_done(scmd);
3029 		r = SUCCESS;
3030 		goto out;
3031 	}
3032 
3033 	/* for hidden raid components obtain the volume_handle */
3034 	handle = 0;
3035 	if (sas_device_priv_data->sas_target->flags &
3036 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3037 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3038 				target_priv_data);
3039 		if (sas_device)
3040 			handle = sas_device->volume_handle;
3041 	} else
3042 		handle = sas_device_priv_data->sas_target->handle;
3043 
3044 	if (!handle) {
3045 		scmd->result = DID_RESET << 16;
3046 		r = FAILED;
3047 		goto out;
3048 	}
3049 
3050 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3051 
3052 	if (pcie_device && (!ioc->tm_custom_handling) &&
3053 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3054 		tr_timeout = pcie_device->reset_timeout;
3055 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3056 	} else
3057 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3058 
3059 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
3060 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3061 		tr_timeout, tr_method);
3062 	/* Check for busy commands after reset */
3063 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
3064 		r = FAILED;
3065  out:
3066 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3067 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3068 
3069 	if (sas_device)
3070 		sas_device_put(sas_device);
3071 	if (pcie_device)
3072 		pcie_device_put(pcie_device);
3073 
3074 	return r;
3075 }
3076 
3077 /**
3078  * scsih_target_reset - eh threads main target reset routine
3079  * @scmd: pointer to scsi command object
3080  *
 * Return: SUCCESS if the target was reset, else FAILED
3082  */
3083 static int
3084 scsih_target_reset(struct scsi_cmnd *scmd)
3085 {
3086 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3087 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3088 	struct _sas_device *sas_device = NULL;
3089 	struct _pcie_device *pcie_device = NULL;
3090 	u16	handle;
3091 	u8	tr_method = 0;
3092 	u8	tr_timeout = 30;
3093 	int r;
3094 	struct scsi_target *starget = scmd->device->sdev_target;
3095 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3096 
3097 	starget_printk(KERN_INFO, starget,
3098 	    "attempting target reset! scmd(0x%p)\n", scmd);
3099 	_scsih_tm_display_info(ioc, scmd);
3100 
3101 	sas_device_priv_data = scmd->device->hostdata;
3102 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3103 	    ioc->remove_host) {
3104 		starget_printk(KERN_INFO, starget,
3105 		    "target been deleted! scmd(0x%p)\n", scmd);
3106 		scmd->result = DID_NO_CONNECT << 16;
3107 		scmd->scsi_done(scmd);
3108 		r = SUCCESS;
3109 		goto out;
3110 	}
3111 
3112 	/* for hidden raid components obtain the volume_handle */
3113 	handle = 0;
3114 	if (sas_device_priv_data->sas_target->flags &
3115 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3116 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3117 				target_priv_data);
3118 		if (sas_device)
3119 			handle = sas_device->volume_handle;
3120 	} else
3121 		handle = sas_device_priv_data->sas_target->handle;
3122 
3123 	if (!handle) {
3124 		scmd->result = DID_RESET << 16;
3125 		r = FAILED;
3126 		goto out;
3127 	}
3128 
3129 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3130 
3131 	if (pcie_device && (!ioc->tm_custom_handling) &&
3132 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3133 		tr_timeout = pcie_device->reset_timeout;
3134 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3135 	} else
3136 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3137 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3138 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3139 	    tr_timeout, tr_method);
3140 	/* Check for busy commands after reset */
3141 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3142 		r = FAILED;
3143  out:
3144 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3145 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3146 
3147 	if (sas_device)
3148 		sas_device_put(sas_device);
3149 	if (pcie_device)
3150 		pcie_device_put(pcie_device);
3151 	return r;
3152 }
3153 
3154 
3155 /**
3156  * scsih_host_reset - eh threads main host reset routine
3157  * @scmd: pointer to scsi command object
3158  *
 * Return: SUCCESS if the host was reset, else FAILED
3160  */
3161 static int
3162 scsih_host_reset(struct scsi_cmnd *scmd)
3163 {
3164 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3165 	int r, retval;
3166 
3167 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3168 	scsi_print_command(scmd);
3169 
3170 	if (ioc->is_driver_loading || ioc->remove_host) {
3171 		ioc_info(ioc, "Blocking the host reset\n");
3172 		r = FAILED;
3173 		goto out;
3174 	}
3175 
3176 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3177 	r = (retval < 0) ? FAILED : SUCCESS;
3178 out:
3179 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3180 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3181 
3182 	return r;
3183 }
3184 
3185 /**
3186  * _scsih_fw_event_add - insert and queue up fw_event
3187  * @ioc: per adapter object
3188  * @fw_event: object describing the event
3189  * Context: This function will acquire ioc->fw_event_lock.
3190  *
 * This adds the firmware event object to the linked list, then queues it up
 * to be processed from user context.
3193  */
3194 static void
3195 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3196 {
3197 	unsigned long flags;
3198 
3199 	if (ioc->firmware_event_thread == NULL)
3200 		return;
3201 
3202 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
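	/*
	 * Two references are taken: one for the entry on fw_event_list
	 * (dropped when the event is removed from the list) and one for the
	 * queued work (dropped once the work has run or been cancelled).
	 */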
3203 	fw_event_work_get(fw_event);
3204 	INIT_LIST_HEAD(&fw_event->list);
3205 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3206 	INIT_WORK(&fw_event->work, _firmware_event_work);
3207 	fw_event_work_get(fw_event);
3208 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3209 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3210 }
3211 
3212 /**
3213  * _scsih_fw_event_del_from_list - delete fw_event from the list
3214  * @ioc: per adapter object
3215  * @fw_event: object describing the event
3216  * Context: This function will acquire ioc->fw_event_lock.
3217  *
3218  * If the fw_event is on the fw_event_list, remove it and do a put.
3219  */
3220 static void
3221 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3222 	*fw_event)
3223 {
3224 	unsigned long flags;
3225 
3226 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3227 	if (!list_empty(&fw_event->list)) {
3228 		list_del_init(&fw_event->list);
3229 		fw_event_work_put(fw_event);
3230 	}
3231 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3232 }
3233 
3234 
3235  /**
3236  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3237  * @ioc: per adapter object
3238  * @event_data: trigger event data
3239  */
3240 void
3241 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3242 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3243 {
3244 	struct fw_event_work *fw_event;
3245 	u16 sz;
3246 
3247 	if (ioc->is_driver_loading)
3248 		return;
3249 	sz = sizeof(*event_data);
3250 	fw_event = alloc_fw_event_work(sz);
3251 	if (!fw_event)
3252 		return;
3253 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3254 	fw_event->ioc = ioc;
3255 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3256 	_scsih_fw_event_add(ioc, fw_event);
3257 	fw_event_work_put(fw_event);
3258 }
3259 
3260 /**
3261  * _scsih_error_recovery_delete_devices - remove devices not responding
3262  * @ioc: per adapter object
3263  */
3264 static void
3265 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3266 {
3267 	struct fw_event_work *fw_event;
3268 
3269 	if (ioc->is_driver_loading)
3270 		return;
3271 	fw_event = alloc_fw_event_work(0);
3272 	if (!fw_event)
3273 		return;
3274 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3275 	fw_event->ioc = ioc;
3276 	_scsih_fw_event_add(ioc, fw_event);
3277 	fw_event_work_put(fw_event);
3278 }
3279 
3280 /**
3281  * mpt3sas_port_enable_complete - port enable completed (fake event)
3282  * @ioc: per adapter object
3283  */
3284 void
3285 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3286 {
3287 	struct fw_event_work *fw_event;
3288 
3289 	fw_event = alloc_fw_event_work(0);
3290 	if (!fw_event)
3291 		return;
3292 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3293 	fw_event->ioc = ioc;
3294 	_scsih_fw_event_add(ioc, fw_event);
3295 	fw_event_work_put(fw_event);
3296 }
3297 
3298 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3299 {
3300 	unsigned long flags;
3301 	struct fw_event_work *fw_event = NULL;
3302 
3303 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3304 	if (!list_empty(&ioc->fw_event_list)) {
3305 		fw_event = list_first_entry(&ioc->fw_event_list,
3306 				struct fw_event_work, list);
3307 		list_del_init(&fw_event->list);
3308 	}
3309 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3310 
3311 	return fw_event;
3312 }
3313 
3314 /**
3315  * _scsih_fw_event_cleanup_queue - cleanup event queue
3316  * @ioc: per adapter object
3317  *
 * Walk the firmware event queue, cancelling queued work or waiting
 * for outstanding events to complete.
3320  */
3321 static void
3322 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3323 {
3324 	struct fw_event_work *fw_event;
3325 
3326 	if (list_empty(&ioc->fw_event_list) ||
3327 	     !ioc->firmware_event_thread || in_interrupt())
3328 		return;
3329 
3330 	while ((fw_event = dequeue_next_fw_event(ioc))) {
3331 		/*
3332 		 * Wait on the fw_event to complete. If this returns 1, then
3333 		 * the event was never executed, and we need a put for the
3334 		 * reference the work had on the fw_event.
3335 		 *
3336 		 * If it did execute, we wait for it to finish, and the put will
3337 		 * happen from _firmware_event_work()
3338 		 */
3339 		if (cancel_work_sync(&fw_event->work))
3340 			fw_event_work_put(fw_event);
3341 
3342 		fw_event_work_put(fw_event);
3343 	}
3344 }
3345 
3346 /**
3347  * _scsih_internal_device_block - block the sdev device
3348  * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Make sure the device is blocked without error; if not, print an error.
3353  */
3354 static void
3355 _scsih_internal_device_block(struct scsi_device *sdev,
3356 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3357 {
3358 	int r = 0;
3359 
3360 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3361 	    sas_device_priv_data->sas_target->handle);
3362 	sas_device_priv_data->block = 1;
3363 
3364 	r = scsi_internal_device_block_nowait(sdev);
3365 	if (r == -EINVAL)
3366 		sdev_printk(KERN_WARNING, sdev,
3367 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3368 		    r, sas_device_priv_data->sas_target->handle);
3369 }
3370 
3371 /**
3372  * _scsih_internal_device_unblock - unblock the sdev device
3373  * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Make sure the device is unblocked without error; if not, retry by
 * blocking and then unblocking it again.
 */
3379 static void
3380 _scsih_internal_device_unblock(struct scsi_device *sdev,
3381 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3382 {
3383 	int r = 0;
3384 
3385 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3386 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3387 	sas_device_priv_data->block = 0;
3388 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3389 	if (r == -EINVAL) {
3390 		/* The device has been set to SDEV_RUNNING by SD layer during
3391 		 * device addition but the request queue is still stopped by
3392 		 * our earlier block call. We need to perform a block again
3393 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3394 
3395 		sdev_printk(KERN_WARNING, sdev,
3396 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3397 		    "performing a block followed by an unblock\n",
3398 		    r, sas_device_priv_data->sas_target->handle);
3399 		sas_device_priv_data->block = 1;
3400 		r = scsi_internal_device_block_nowait(sdev);
3401 		if (r)
3402 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3403 			    "failed with return(%d) for handle(0x%04x)\n",
3404 			    r, sas_device_priv_data->sas_target->handle);
3405 
3406 		sas_device_priv_data->block = 0;
3407 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3408 		if (r)
3409 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3410 			    " failed with return(%d) for handle(0x%04x)\n",
3411 			    r, sas_device_priv_data->sas_target->handle);
3412 	}
3413 }
3414 
3415 /**
3416  * _scsih_ublock_io_all_device - unblock every device
3417  * @ioc: per adapter object
3418  *
 * Change the device state from SDEV_BLOCK back to SDEV_RUNNING.
3420  */
3421 static void
3422 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3423 {
3424 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3425 	struct scsi_device *sdev;
3426 
3427 	shost_for_each_device(sdev, ioc->shost) {
3428 		sas_device_priv_data = sdev->hostdata;
3429 		if (!sas_device_priv_data)
3430 			continue;
3431 		if (!sas_device_priv_data->block)
3432 			continue;
3433 
3434 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3435 			"device_running, handle(0x%04x)\n",
3436 		    sas_device_priv_data->sas_target->handle));
3437 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3438 	}
3439 }
3440 
3441 
3442 /**
 * _scsih_ublock_io_device - prepare device to be deleted
 * @ioc: per adapter object
 * @sas_address: sas address
 *
 * Unblock every sdev matching @sas_address (moving it back to
 * SDEV_RUNNING) so outstanding IO can complete before the device
 * is removed.
3448  */
3449 static void
3450 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3451 {
3452 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3453 	struct scsi_device *sdev;
3454 
3455 	shost_for_each_device(sdev, ioc->shost) {
3456 		sas_device_priv_data = sdev->hostdata;
3457 		if (!sas_device_priv_data)
3458 			continue;
3459 		if (sas_device_priv_data->sas_target->sas_address
3460 		    != sas_address)
3461 			continue;
3462 		if (sas_device_priv_data->block)
3463 			_scsih_internal_device_unblock(sdev,
3464 				sas_device_priv_data);
3465 	}
3466 }
3467 
3468 /**
3469  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3470  * @ioc: per adapter object
3471  *
3472  * During device pull we need to appropriately set the sdev state.
3473  */
3474 static void
3475 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3476 {
3477 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3478 	struct scsi_device *sdev;
3479 
3480 	shost_for_each_device(sdev, ioc->shost) {
3481 		sas_device_priv_data = sdev->hostdata;
3482 		if (!sas_device_priv_data)
3483 			continue;
3484 		if (sas_device_priv_data->block)
3485 			continue;
3486 		if (sas_device_priv_data->ignore_delay_remove) {
3487 			sdev_printk(KERN_INFO, sdev,
3488 			"%s skip device_block for SES handle(0x%04x)\n",
3489 			__func__, sas_device_priv_data->sas_target->handle);
3490 			continue;
3491 		}
3492 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3493 	}
3494 }
3495 
3496 /**
3497  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3498  * @ioc: per adapter object
3499  * @handle: device handle
3500  *
3501  * During device pull we need to appropriately set the sdev state.
3502  */
3503 static void
3504 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3505 {
3506 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3507 	struct scsi_device *sdev;
3508 	struct _sas_device *sas_device;
3509 
3510 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3511 
3512 	shost_for_each_device(sdev, ioc->shost) {
3513 		sas_device_priv_data = sdev->hostdata;
3514 		if (!sas_device_priv_data)
3515 			continue;
3516 		if (sas_device_priv_data->sas_target->handle != handle)
3517 			continue;
3518 		if (sas_device_priv_data->block)
3519 			continue;
3520 		if (sas_device && sas_device->pend_sas_rphy_add)
3521 			continue;
3522 		if (sas_device_priv_data->ignore_delay_remove) {
3523 			sdev_printk(KERN_INFO, sdev,
3524 			"%s skip device_block for SES handle(0x%04x)\n",
3525 			__func__, sas_device_priv_data->sas_target->handle);
3526 			continue;
3527 		}
3528 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3529 	}
3530 
3531 	if (sas_device)
3532 		sas_device_put(sas_device);
3533 }
3534 
3535 /**
 * _scsih_block_io_to_children_attached_to_ex - block IO to expander children
 * @ioc: per adapter object
 * @sas_expander: the sas_node object
 *
 * This routine sets the sdev state to SDEV_BLOCK for all devices
 * attached to this expander. It is called when the expander is
 * pulled.
3543  */
3544 static void
3545 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3546 	struct _sas_node *sas_expander)
3547 {
3548 	struct _sas_port *mpt3sas_port;
3549 	struct _sas_device *sas_device;
3550 	struct _sas_node *expander_sibling;
3551 	unsigned long flags;
3552 
3553 	if (!sas_expander)
3554 		return;
3555 
3556 	list_for_each_entry(mpt3sas_port,
3557 	   &sas_expander->sas_port_list, port_list) {
3558 		if (mpt3sas_port->remote_identify.device_type ==
3559 		    SAS_END_DEVICE) {
3560 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3561 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3562 			    mpt3sas_port->remote_identify.sas_address);
3563 			if (sas_device) {
3564 				set_bit(sas_device->handle,
3565 						ioc->blocking_handles);
3566 				sas_device_put(sas_device);
3567 			}
3568 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3569 		}
3570 	}
3571 
3572 	list_for_each_entry(mpt3sas_port,
3573 	   &sas_expander->sas_port_list, port_list) {
3574 
3575 		if (mpt3sas_port->remote_identify.device_type ==
3576 		    SAS_EDGE_EXPANDER_DEVICE ||
3577 		    mpt3sas_port->remote_identify.device_type ==
3578 		    SAS_FANOUT_EXPANDER_DEVICE) {
3579 			expander_sibling =
3580 			    mpt3sas_scsih_expander_find_by_sas_address(
3581 			    ioc, mpt3sas_port->remote_identify.sas_address);
3582 			_scsih_block_io_to_children_attached_to_ex(ioc,
3583 			    expander_sibling);
3584 		}
3585 	}
3586 }
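
/*
 * Note (explanatory sketch, not driver code): the walk above does not
 * block the expander-attached end devices itself - it only marks their
 * handles in ioc->blocking_handles and recurses into sibling expanders.
 * The caller (_scsih_check_topo_delete_events()) then drains the bitmap:
 *
 *   do {
 *       handle = find_first_bit(ioc->blocking_handles, MaxDevHandle);
 *       if (handle < MaxDevHandle)
 *           _scsih_block_io_device(ioc, handle);
 *   } while (test_and_clear_bit(handle, ioc->blocking_handles));
 */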
3587 
3588 /**
 * _scsih_block_io_to_children_attached_directly - block IO to directly attached devices
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine sets the sdev state to SDEV_BLOCK for all directly
 * attached devices during device pull.
3595  */
3596 static void
3597 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3598 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3599 {
3600 	int i;
3601 	u16 handle;
3602 	u16 reason_code;
3603 
3604 	for (i = 0; i < event_data->NumEntries; i++) {
3605 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3606 		if (!handle)
3607 			continue;
3608 		reason_code = event_data->PHY[i].PhyStatus &
3609 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3610 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3611 			_scsih_block_io_device(ioc, handle);
3612 	}
3613 }
3614 
3615 /**
 * _scsih_block_io_to_pcie_children_attached_directly - block IO to directly attached PCIe devices
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine sets the sdev state to SDEV_BLOCK for all directly
 * attached PCIe devices during device pull/reconnect.
3622  */
3623 static void
3624 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3625 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3626 {
3627 	int i;
3628 	u16 handle;
3629 	u16 reason_code;
3630 
3631 	for (i = 0; i < event_data->NumEntries; i++) {
3632 		handle =
3633 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3634 		if (!handle)
3635 			continue;
3636 		reason_code = event_data->PortEntry[i].PortStatus;
3637 		if (reason_code ==
3638 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3639 			_scsih_block_io_device(ioc, handle);
3640 	}
}

/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code initiates the device removal handshake protocol with the
 * controller firmware.  It issues a target reset using the high
 * priority request queue; a sas iounit control request
 * (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from the completion routine.
 *
 * This is designed to send multiple task management requests at the
 * same time to the fifo. If the fifo is full, the request is appended
 * to a delayed list and processed from a future completion.
 */
3657 static void
3658 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3659 {
3660 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3661 	u16 smid;
3662 	struct _sas_device *sas_device = NULL;
3663 	struct _pcie_device *pcie_device = NULL;
3664 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3665 	u64 sas_address = 0;
3666 	unsigned long flags;
3667 	struct _tr_list *delayed_tr;
3668 	u32 ioc_state;
3669 	u8 tr_method = 0;
3670 
3671 	if (ioc->pci_error_recovery) {
3672 		dewtprintk(ioc,
3673 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3674 				    __func__, handle));
3675 		return;
3676 	}
3677 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3678 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3679 		dewtprintk(ioc,
3680 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3681 				    __func__, handle));
3682 		return;
3683 	}
3684 
3685 	/* if PD, then return */
3686 	if (test_bit(handle, ioc->pd_handles))
3687 		return;
3688 
3689 	clear_bit(handle, ioc->pend_os_device_add);
3690 
3691 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
3692 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3693 	if (sas_device && sas_device->starget &&
3694 	    sas_device->starget->hostdata) {
3695 		sas_target_priv_data = sas_device->starget->hostdata;
3696 		sas_target_priv_data->deleted = 1;
3697 		sas_address = sas_device->sas_address;
3698 	}
3699 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3700 	if (!sas_device) {
3701 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3702 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3703 		if (pcie_device && pcie_device->starget &&
3704 			pcie_device->starget->hostdata) {
3705 			sas_target_priv_data = pcie_device->starget->hostdata;
3706 			sas_target_priv_data->deleted = 1;
3707 			sas_address = pcie_device->wwid;
3708 		}
3709 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3710 		if (pcie_device && (!ioc->tm_custom_handling) &&
3711 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
3712 		    pcie_device->device_info))))
3713 			tr_method =
3714 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3715 		else
3716 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3717 	}
3718 	if (sas_target_priv_data) {
3719 		dewtprintk(ioc,
3720 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3721 				    handle, (u64)sas_address));
3722 		if (sas_device) {
3723 			if (sas_device->enclosure_handle != 0)
3724 				dewtprintk(ioc,
3725 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3726 						    (u64)sas_device->enclosure_logical_id,
3727 						    sas_device->slot));
3728 			if (sas_device->connector_name[0] != '\0')
3729 				dewtprintk(ioc,
3730 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3731 						    sas_device->enclosure_level,
3732 						    sas_device->connector_name));
3733 		} else if (pcie_device) {
3734 			if (pcie_device->enclosure_handle != 0)
3735 				dewtprintk(ioc,
3736 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3737 						    (u64)pcie_device->enclosure_logical_id,
3738 						    pcie_device->slot));
3739 			if (pcie_device->connector_name[0] != '\0')
3740 				dewtprintk(ioc,
3741 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3742 						    pcie_device->enclosure_level,
3743 						    pcie_device->connector_name));
3744 		}
3745 		_scsih_ublock_io_device(ioc, sas_address);
3746 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3747 	}
3748 
3749 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3750 	if (!smid) {
3751 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3752 		if (!delayed_tr)
3753 			goto out;
3754 		INIT_LIST_HEAD(&delayed_tr->list);
3755 		delayed_tr->handle = handle;
3756 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3757 		dewtprintk(ioc,
3758 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3759 				    handle));
3760 		goto out;
3761 	}
3762 
3763 	dewtprintk(ioc,
3764 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3765 			    handle, smid, ioc->tm_tr_cb_idx));
3766 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3767 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3768 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3769 	mpi_request->DevHandle = cpu_to_le16(handle);
3770 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3771 	mpi_request->MsgFlags = tr_method;
3772 	set_bit(handle, ioc->device_remove_in_progress);
3773 	ioc->put_smid_hi_priority(ioc, smid, 0);
3774 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3775 
3776 out:
3777 	if (sas_device)
3778 		sas_device_put(sas_device);
3779 	if (pcie_device)
3780 		pcie_device_put(pcie_device);
3781 }
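
/*
 * Illustrative sketch (not driver code): the device removal handshake
 * started by _scsih_tm_tr_send().  Only the happy path is shown; when no
 * high-priority smid is available the handle is parked on
 * ioc->delayed_tr_list and replayed from _scsih_check_for_pending_tm().
 *
 *   _scsih_tm_tr_send(handle)
 *       -> SCSI_TASK_MGMT / TARGET_RESET     (high priority queue)
 *   _scsih_tm_tr_complete()                  (firmware reply)
 *       -> SAS_IO_UNIT_CONTROL / MPI2_SAS_OP_REMOVE_DEVICE
 *   _scsih_sas_control_complete()            (firmware reply)
 *       -> clear handle in ioc->device_remove_in_progress
 */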
3782 
3783 /**
 * _scsih_tm_tr_complete - target reset completion routine
3785  * @ioc: per adapter object
3786  * @smid: system request message index
3787  * @msix_index: MSIX table index supplied by the OS
3788  * @reply: reply message frame(lower 32bit addr)
3789  * Context: interrupt time.
3790  *
3791  * This is the target reset completion routine.
3792  * This code is part of the code to initiate the device removal
3793  * handshake protocol with controller firmware.
3794  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3795  *
3796  * Return: 1 meaning mf should be freed from _base_interrupt
3797  *         0 means the mf is freed from this function.
3798  */
3799 static u8
3800 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3801 	u32 reply)
3802 {
3803 	u16 handle;
3804 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3805 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3806 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3807 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3808 	u16 smid_sas_ctrl;
3809 	u32 ioc_state;
3810 	struct _sc_list *delayed_sc;
3811 
3812 	if (ioc->pci_error_recovery) {
3813 		dewtprintk(ioc,
3814 			   ioc_info(ioc, "%s: host in pci error recovery\n",
3815 				    __func__));
3816 		return 1;
3817 	}
3818 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3819 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3820 		dewtprintk(ioc,
3821 			   ioc_info(ioc, "%s: host is not operational\n",
3822 				    __func__));
3823 		return 1;
3824 	}
3825 	if (unlikely(!mpi_reply)) {
3826 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3827 			__FILE__, __LINE__, __func__);
3828 		return 1;
3829 	}
3830 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3831 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3832 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3833 		dewtprintk(ioc,
3834 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3835 				   handle,
3836 				   le16_to_cpu(mpi_reply->DevHandle), smid));
3837 		return 0;
3838 	}
3839 
3840 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3841 	dewtprintk(ioc,
3842 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3843 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3844 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3845 			    le32_to_cpu(mpi_reply->TerminationCount)));
3846 
3847 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3848 	if (!smid_sas_ctrl) {
3849 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3850 		if (!delayed_sc)
3851 			return _scsih_check_for_pending_tm(ioc, smid);
3852 		INIT_LIST_HEAD(&delayed_sc->list);
3853 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3854 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3855 		dewtprintk(ioc,
3856 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3857 				    handle));
3858 		return _scsih_check_for_pending_tm(ioc, smid);
3859 	}
3860 
3861 	dewtprintk(ioc,
3862 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3863 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3864 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3865 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3866 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3867 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3868 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
3869 	ioc->put_smid_default(ioc, smid_sas_ctrl);
3870 
3871 	return _scsih_check_for_pending_tm(ioc, smid);
3872 }
3873 
/**
 * _scsih_allow_scmd_to_device - check whether the scmd should be
 *				 issued to the IOC or not
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Return: true if the scmd can be issued to the IOC, otherwise false.
 */
3881 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3882 	struct scsi_cmnd *scmd)
3883 {
3884 
3885 	if (ioc->pci_error_recovery)
3886 		return false;
3887 
3888 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3889 		if (ioc->remove_host)
3890 			return false;
3891 
3892 		return true;
3893 	}
3894 
3895 	if (ioc->remove_host) {
3896 
3897 		switch (scmd->cmnd[0]) {
3898 		case SYNCHRONIZE_CACHE:
3899 		case START_STOP:
3900 			return true;
3901 		default:
3902 			return false;
3903 		}
3904 	}
3905 
3906 	return true;
3907 }
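
/*
 * Illustrative example (not driver code): behaviour of
 * _scsih_allow_scmd_to_device() during host removal on a non-MPI2
 * (gen3/gen3.5) controller - cache flush and spin-down commands are
 * still allowed so devices can be shut down cleanly:
 *
 *   scmd->cmnd[0] == SYNCHRONIZE_CACHE -> true  (issued to the IOC)
 *   scmd->cmnd[0] == START_STOP        -> true  (issued to the IOC)
 *   any other opcode                   -> false (completed with
 *                                                DID_NO_CONNECT by qcmd)
 */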
3908 
3909 /**
3910  * _scsih_sas_control_complete - completion routine
3911  * @ioc: per adapter object
3912  * @smid: system request message index
3913  * @msix_index: MSIX table index supplied by the OS
3914  * @reply: reply message frame(lower 32bit addr)
3915  * Context: interrupt time.
3916  *
3917  * This is the sas iounit control completion routine.
3918  * This code is part of the code to initiate the device removal
3919  * handshake protocol with controller firmware.
3920  *
3921  * Return: 1 meaning mf should be freed from _base_interrupt
3922  *         0 means the mf is freed from this function.
3923  */
3924 static u8
3925 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3926 	u8 msix_index, u32 reply)
3927 {
3928 	Mpi2SasIoUnitControlReply_t *mpi_reply =
3929 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3930 
3931 	if (likely(mpi_reply)) {
3932 		dewtprintk(ioc,
3933 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3934 				    le16_to_cpu(mpi_reply->DevHandle), smid,
3935 				    le16_to_cpu(mpi_reply->IOCStatus),
3936 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
3937 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
3938 		     MPI2_IOCSTATUS_SUCCESS) {
3939 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3940 			    ioc->device_remove_in_progress);
3941 		}
3942 	} else {
3943 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3944 			__FILE__, __LINE__, __func__);
3945 	}
3946 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3947 }
3948 
3949 /**
3950  * _scsih_tm_tr_volume_send - send target reset request for volumes
3951  * @ioc: per adapter object
3952  * @handle: device handle
3953  * Context: interrupt time.
3954  *
 * This is designed to send multiple task management requests at the
 * same time to the fifo. If the fifo is full, the request is appended
 * to a delayed list and processed from a future completion.
3958  */
3959 static void
3960 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3961 {
3962 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3963 	u16 smid;
3964 	struct _tr_list *delayed_tr;
3965 
3966 	if (ioc->pci_error_recovery) {
3967 		dewtprintk(ioc,
3968 			   ioc_info(ioc, "%s: host reset in progress!\n",
3969 				    __func__));
3970 		return;
3971 	}
3972 
3973 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3974 	if (!smid) {
3975 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3976 		if (!delayed_tr)
3977 			return;
3978 		INIT_LIST_HEAD(&delayed_tr->list);
3979 		delayed_tr->handle = handle;
3980 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3981 		dewtprintk(ioc,
3982 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3983 				    handle));
3984 		return;
3985 	}
3986 
3987 	dewtprintk(ioc,
3988 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3989 			    handle, smid, ioc->tm_tr_volume_cb_idx));
3990 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3991 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3992 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3993 	mpi_request->DevHandle = cpu_to_le16(handle);
3994 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3995 	ioc->put_smid_hi_priority(ioc, smid, 0);
3996 }
3997 
3998 /**
3999  * _scsih_tm_volume_tr_complete - target reset completion
4000  * @ioc: per adapter object
4001  * @smid: system request message index
4002  * @msix_index: MSIX table index supplied by the OS
4003  * @reply: reply message frame(lower 32bit addr)
4004  * Context: interrupt time.
4005  *
4006  * Return: 1 meaning mf should be freed from _base_interrupt
4007  *         0 means the mf is freed from this function.
4008  */
4009 static u8
4010 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4011 	u8 msix_index, u32 reply)
4012 {
4013 	u16 handle;
4014 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4015 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4016 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4017 
4018 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
4019 		dewtprintk(ioc,
4020 			   ioc_info(ioc, "%s: host reset in progress!\n",
4021 				    __func__));
4022 		return 1;
4023 	}
4024 	if (unlikely(!mpi_reply)) {
4025 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4026 			__FILE__, __LINE__, __func__);
4027 		return 1;
4028 	}
4029 
4030 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4031 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4032 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4033 		dewtprintk(ioc,
4034 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4035 				   handle, le16_to_cpu(mpi_reply->DevHandle),
4036 				   smid));
4037 		return 0;
4038 	}
4039 
4040 	dewtprintk(ioc,
4041 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4042 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4043 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4044 			    le32_to_cpu(mpi_reply->TerminationCount)));
4045 
4046 	return _scsih_check_for_pending_tm(ioc, smid);
4047 }
4048 
4049 /**
4050  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4051  * @ioc: per adapter object
4052  * @smid: system request message index
4053  * @event: Event ID
4054  * @event_context: used to track events uniquely
4055  *
 * Context: processed in interrupt context.
4057  */
4058 static void
4059 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4060 				U32 event_context)
4061 {
4062 	Mpi2EventAckRequest_t *ack_request;
4063 	int i = smid - ioc->internal_smid;
4064 	unsigned long flags;
4065 
4066 	/* Without releasing the smid just update the
4067 	 * call back index and reuse the same smid for
4068 	 * processing this delayed request
4069 	 */
4070 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4071 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4072 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4073 
4074 	dewtprintk(ioc,
4075 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4076 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4077 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4078 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4079 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4080 	ack_request->Event = event;
4081 	ack_request->EventContext = event_context;
4082 	ack_request->VF_ID = 0;  /* TODO */
4083 	ack_request->VP_ID = 0;
4084 	ioc->put_smid_default(ioc, smid);
4085 }
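
/*
 * Note (explanatory sketch, not driver code) on the smid reuse in the two
 * delayed-issue helpers above and below: instead of freeing the internal
 * smid and allocating a new one, the lookup entry is repointed at the
 * appropriate callback and the already-owned message frame is rewritten
 * in place and re-posted:
 *
 *   ioc->internal_lookup[smid - ioc->internal_smid].cb_idx = <new cb_idx>;
 *   frame = mpt3sas_base_get_msg_frame(ioc, smid);
 *   memset(frame, 0, sizeof(*frame));      build the new request in place
 *   ioc->put_smid_default(ioc, smid);      fire it without a new smid
 */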
4086 
4087 /**
4088  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4089  *				sas_io_unit_ctrl messages
4090  * @ioc: per adapter object
4091  * @smid: system request message index
4092  * @handle: device handle
4093  *
 * Context: processed in interrupt context.
4095  */
4096 static void
4097 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4098 					u16 smid, u16 handle)
4099 {
4100 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4101 	u32 ioc_state;
4102 	int i = smid - ioc->internal_smid;
4103 	unsigned long flags;
4104 
4105 	if (ioc->remove_host) {
4106 		dewtprintk(ioc,
4107 			   ioc_info(ioc, "%s: host has been removed\n",
4108 				    __func__));
4109 		return;
4110 	} else if (ioc->pci_error_recovery) {
4111 		dewtprintk(ioc,
4112 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4113 				    __func__));
4114 		return;
4115 	}
4116 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4117 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4118 		dewtprintk(ioc,
4119 			   ioc_info(ioc, "%s: host is not operational\n",
4120 				    __func__));
4121 		return;
4122 	}
4123 
4124 	/* Without releasing the smid just update the
4125 	 * call back index and reuse the same smid for
4126 	 * processing this delayed request
4127 	 */
4128 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4129 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4130 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4131 
4132 	dewtprintk(ioc,
4133 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4134 			    handle, smid, ioc->tm_sas_control_cb_idx));
4135 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4136 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4137 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4138 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4139 	mpi_request->DevHandle = cpu_to_le16(handle);
4140 	ioc->put_smid_default(ioc, smid);
4141 }
4142 
4143 /**
 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4145  * @ioc: per adapter object
4146  * @smid: system request message index
4147  *
4148  * Context: Executed in interrupt context
4149  *
4150  * This will check delayed internal messages list, and process the
4151  * next request.
4152  *
4153  * Return: 1 meaning mf should be freed from _base_interrupt
4154  *         0 means the mf is freed from this function.
4155  */
4156 u8
4157 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4158 {
4159 	struct _sc_list *delayed_sc;
4160 	struct _event_ack_list *delayed_event_ack;
4161 
4162 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4163 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4164 						struct _event_ack_list, list);
4165 		_scsih_issue_delayed_event_ack(ioc, smid,
4166 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4167 		list_del(&delayed_event_ack->list);
4168 		kfree(delayed_event_ack);
4169 		return 0;
4170 	}
4171 
4172 	if (!list_empty(&ioc->delayed_sc_list)) {
4173 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4174 						struct _sc_list, list);
4175 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4176 						 delayed_sc->handle);
4177 		list_del(&delayed_sc->list);
4178 		kfree(delayed_sc);
4179 		return 0;
4180 	}
4181 	return 1;
4182 }
4183 
4184 /**
4185  * _scsih_check_for_pending_tm - check for pending task management
4186  * @ioc: per adapter object
4187  * @smid: system request message index
4188  *
 * This will check the delayed target reset list, and issue the
 * next request.
4191  *
4192  * Return: 1 meaning mf should be freed from _base_interrupt
4193  *         0 means the mf is freed from this function.
4194  */
4195 static u8
4196 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4197 {
4198 	struct _tr_list *delayed_tr;
4199 
4200 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4201 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4202 		    struct _tr_list, list);
4203 		mpt3sas_base_free_smid(ioc, smid);
4204 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4205 		list_del(&delayed_tr->list);
4206 		kfree(delayed_tr);
4207 		return 0;
4208 	}
4209 
4210 	if (!list_empty(&ioc->delayed_tr_list)) {
4211 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4212 		    struct _tr_list, list);
4213 		mpt3sas_base_free_smid(ioc, smid);
4214 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4215 		list_del(&delayed_tr->list);
4216 		kfree(delayed_tr);
4217 		return 0;
4218 	}
4219 
4220 	return 1;
4221 }
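
/*
 * Illustrative flow (not driver code): how a completing target reset
 * recycles or frees its smid via _scsih_check_for_pending_tm().
 *
 *   tr_complete(smid):
 *       if delayed_tr_volume_list is not empty:
 *           free smid, send volume TR for the queued handle, return 0
 *       else if delayed_tr_list is not empty:
 *           free smid, send device TR for the queued handle, return 0
 *       else:
 *           return 1   (caller _base_interrupt frees the frame)
 */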
4222 
4223 /**
4224  * _scsih_check_topo_delete_events - sanity check on topo events
4225  * @ioc: per adapter object
4226  * @event_data: the event data payload
4227  *
 * This routine was added to better handle cable breakage.
 *
 * This handles the case where the driver receives multiple expander
 * add and delete events in a single shot.  When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
4233  */
4234 static void
4235 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4236 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4237 {
4238 	struct fw_event_work *fw_event;
4239 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4240 	u16 expander_handle;
4241 	struct _sas_node *sas_expander;
4242 	unsigned long flags;
4243 	int i, reason_code;
4244 	u16 handle;
4245 
4246 	for (i = 0 ; i < event_data->NumEntries; i++) {
4247 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4248 		if (!handle)
4249 			continue;
4250 		reason_code = event_data->PHY[i].PhyStatus &
4251 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4252 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4253 			_scsih_tm_tr_send(ioc, handle);
4254 	}
4255 
4256 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4257 	if (expander_handle < ioc->sas_hba.num_phys) {
4258 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4259 		return;
4260 	}
4261 	if (event_data->ExpStatus ==
4262 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4263 		/* put expander attached devices into blocking state */
4264 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4265 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4266 		    expander_handle);
4267 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4268 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4269 		do {
4270 			handle = find_first_bit(ioc->blocking_handles,
4271 			    ioc->facts.MaxDevHandle);
4272 			if (handle < ioc->facts.MaxDevHandle)
4273 				_scsih_block_io_device(ioc, handle);
4274 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4275 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4276 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4277 
4278 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4279 		return;
4280 
4281 	/* mark ignore flag for pending events */
4282 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4283 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4284 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4285 		    fw_event->ignore)
4286 			continue;
4287 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4288 				   fw_event->event_data;
4289 		if (local_event_data->ExpStatus ==
4290 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4291 		    local_event_data->ExpStatus ==
4292 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4293 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4294 			    expander_handle) {
4295 				dewtprintk(ioc,
4296 					   ioc_info(ioc, "setting ignoring flag\n"));
4297 				fw_event->ignore = 1;
4298 			}
4299 		}
4300 	}
4301 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4302 }
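
/*
 * Illustrative timeline (not driver code) for an expander pull handled by
 * the routine above:
 *
 *   1. PHY entries with TARG_NOT_RESPONDING -> _scsih_tm_tr_send(handle)
 *   2. ExpStatus DELAY_NOT_RESPONDING       -> children collected into
 *      ioc->blocking_handles and blocked via _scsih_block_io_device()
 *   3. ExpStatus NOT_RESPONDING             -> queued "expander added /
 *      responding" events for the same ExpanderDevHandle are marked
 *      fw_event->ignore = 1 so stale adds are not processed
 */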
4303 
4304 /**
4305  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4306  * events
4307  * @ioc: per adapter object
4308  * @event_data: the event data payload
4309  *
 * This handles the case where the driver receives multiple switch
 * or device add and delete events in a single shot.  When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
4314  */
4315 static void
4316 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4317 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4318 {
4319 	struct fw_event_work *fw_event;
4320 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4321 	unsigned long flags;
4322 	int i, reason_code;
4323 	u16 handle, switch_handle;
4324 
4325 	for (i = 0; i < event_data->NumEntries; i++) {
4326 		handle =
4327 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4328 		if (!handle)
4329 			continue;
4330 		reason_code = event_data->PortEntry[i].PortStatus;
4331 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4332 			_scsih_tm_tr_send(ioc, handle);
4333 	}
4334 
4335 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4336 	if (!switch_handle) {
4337 		_scsih_block_io_to_pcie_children_attached_directly(
4338 							ioc, event_data);
4339 		return;
4340 	}
	/* TODO: cascaded PCIe switch removal is not supported yet */
4342 	if ((event_data->SwitchStatus
4343 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4344 		(event_data->SwitchStatus ==
4345 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4346 		_scsih_block_io_to_pcie_children_attached_directly(
4347 							ioc, event_data);
4348 
4349 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4350 		return;
4351 
4352 	/* mark ignore flag for pending events */
4353 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4354 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4355 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4356 			fw_event->ignore)
4357 			continue;
4358 		local_event_data =
4359 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4360 			fw_event->event_data;
4361 		if (local_event_data->SwitchStatus ==
4362 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4363 		    local_event_data->SwitchStatus ==
4364 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4365 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4366 				switch_handle) {
4367 				dewtprintk(ioc,
4368 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4369 				fw_event->ignore = 1;
4370 			}
4371 		}
4372 	}
4373 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4374 }
4375 
4376 /**
4377  * _scsih_set_volume_delete_flag - setting volume delete flag
4378  * @ioc: per adapter object
4379  * @handle: device handle
4380  *
4381  * This returns nothing.
4382  */
4383 static void
4384 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4385 {
4386 	struct _raid_device *raid_device;
4387 	struct MPT3SAS_TARGET *sas_target_priv_data;
4388 	unsigned long flags;
4389 
4390 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4391 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4392 	if (raid_device && raid_device->starget &&
4393 	    raid_device->starget->hostdata) {
4394 		sas_target_priv_data =
4395 		    raid_device->starget->hostdata;
4396 		sas_target_priv_data->deleted = 1;
4397 		dewtprintk(ioc,
4398 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4399 				    handle, (u64)raid_device->wwid));
4400 	}
4401 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4402 }
4403 
4404 /**
4405  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4406  * @handle: input handle
4407  * @a: handle for volume a
4408  * @b: handle for volume b
4409  *
 * IR firmware only supports two raid volumes.  The purpose of this
 * routine is to store the volume handle in either @a or @b, provided
 * the given handle is non-zero and has not already been recorded.
4413  */
4414 static void
4415 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4416 {
4417 	if (!handle || handle == *a || handle == *b)
4418 		return;
4419 	if (!*a)
4420 		*a = handle;
4421 	else if (!*b)
4422 		*b = handle;
4423 }
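
/*
 * Worked example (hypothetical handles, not driver code): with a = b = 0,
 *
 *   _scsih_set_volume_handle_for_tr(0x011a, &a, &b);  a = 0x011a
 *   _scsih_set_volume_handle_for_tr(0x011a, &a, &b);  duplicate, ignored
 *   _scsih_set_volume_handle_for_tr(0x011b, &a, &b);  b = 0x011b
 *   _scsih_set_volume_handle_for_tr(0x011c, &a, &b);  dropped, both slots used
 */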
4424 
4425 /**
4426  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4427  * @ioc: per adapter object
4428  * @event_data: the event data payload
4429  * Context: interrupt time.
4430  *
 * This routine will send a target reset to the volume, followed by
 * target resets to the PDs. This is called when a PD has been removed,
 * or a volume has been deleted or removed. When the target reset is
 * sent to the volume, the PD target resets need to be queued to start
 * upon completion of the volume target reset.
4436  */
4437 static void
4438 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4439 	Mpi2EventDataIrConfigChangeList_t *event_data)
4440 {
4441 	Mpi2EventIrConfigElement_t *element;
4442 	int i;
4443 	u16 handle, volume_handle, a, b;
4444 	struct _tr_list *delayed_tr;
4445 
4446 	a = 0;
4447 	b = 0;
4448 
4449 	if (ioc->is_warpdrive)
4450 		return;
4451 
4452 	/* Volume Resets for Deleted or Removed */
4453 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4454 	for (i = 0; i < event_data->NumElements; i++, element++) {
4455 		if (le32_to_cpu(event_data->Flags) &
4456 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4457 			continue;
4458 		if (element->ReasonCode ==
4459 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4460 		    element->ReasonCode ==
4461 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4462 			volume_handle = le16_to_cpu(element->VolDevHandle);
4463 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4464 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4465 		}
4466 	}
4467 
4468 	/* Volume Resets for UNHIDE events */
4469 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4470 	for (i = 0; i < event_data->NumElements; i++, element++) {
4471 		if (le32_to_cpu(event_data->Flags) &
4472 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4473 			continue;
4474 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4475 			volume_handle = le16_to_cpu(element->VolDevHandle);
4476 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4477 		}
4478 	}
4479 
4480 	if (a)
4481 		_scsih_tm_tr_volume_send(ioc, a);
4482 	if (b)
4483 		_scsih_tm_tr_volume_send(ioc, b);
4484 
4485 	/* PD target resets */
4486 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4487 	for (i = 0; i < event_data->NumElements; i++, element++) {
4488 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4489 			continue;
4490 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4491 		volume_handle = le16_to_cpu(element->VolDevHandle);
4492 		clear_bit(handle, ioc->pd_handles);
4493 		if (!volume_handle)
4494 			_scsih_tm_tr_send(ioc, handle);
4495 		else if (volume_handle == a || volume_handle == b) {
4496 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4497 			BUG_ON(!delayed_tr);
4498 			INIT_LIST_HEAD(&delayed_tr->list);
4499 			delayed_tr->handle = handle;
4500 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4501 			dewtprintk(ioc,
4502 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4503 					    handle));
4504 		} else
4505 			_scsih_tm_tr_send(ioc, handle);
4506 	}
4507 }
4508 
4509 
4510 /**
4511  * _scsih_check_volume_delete_events - set delete flag for volumes
4512  * @ioc: per adapter object
4513  * @event_data: the event data payload
4514  * Context: interrupt time.
4515  *
 * This handles the case when the cable connected to an entire volume is
 * pulled. The deleted flag is set so that normal IO will not be sent to
 * the volume.
4519  */
4520 static void
4521 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4522 	Mpi2EventDataIrVolume_t *event_data)
4523 {
4524 	u32 state;
4525 
4526 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4527 		return;
4528 	state = le32_to_cpu(event_data->NewValue);
4529 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4530 	    MPI2_RAID_VOL_STATE_FAILED)
4531 		_scsih_set_volume_delete_flag(ioc,
4532 		    le16_to_cpu(event_data->VolDevHandle));
4533 }
4534 
4535 /**
4536  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4537  * @ioc: per adapter object
4538  * @event_data: the temp threshold event data
4539  * Context: interrupt time.
4540  */
4541 static void
4542 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4543 	Mpi2EventDataTemperature_t *event_data)
4544 {
	u32 doorbell;

	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4547 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4548 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4549 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4550 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4551 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4552 			event_data->SensorNum);
4553 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4554 			event_data->CurrentTemperature);
4555 		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4556 			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4557 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
4558 			    MPI2_IOC_STATE_FAULT) {
4559 				mpt3sas_print_fault_code(ioc,
4560 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4561 			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4562 			    MPI2_IOC_STATE_COREDUMP) {
4563 				mpt3sas_print_coredump_info(ioc,
4564 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4565 			}
4566 		}
4567 	}
4568 }
4569 
4570 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4571 {
4572 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4573 
4574 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4575 		return 0;
4576 
4577 	if (pending)
4578 		return test_and_set_bit(0, &priv->ata_command_pending);
4579 
4580 	clear_bit(0, &priv->ata_command_pending);
4581 	return 0;
4582 }
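
/*
 * Illustrative usage (mirrors scsih_qcmd() below, not new driver code):
 * only one ATA_12/ATA_16 passthrough may be outstanding per device, so
 * submission spins on the atomic flag and completion/flush clears it.
 *
 *   do {
 *       if (test_bit(0, &sas_device_priv_data->ata_command_pending))
 *           return SCSI_MLQUEUE_DEVICE_BUSY;
 *   } while (_scsih_set_satl_pending(scmd, true));
 *   ...
 *   _scsih_set_satl_pending(scmd, false);   on completion or flush
 */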
4583 
4584 /**
 * _scsih_flush_running_cmds - complete outstanding commands
 * @ioc: per adapter object
 *
 * Flush out all pending scmd commands following a host reset; each
 * outstanding IO is completed back to the SCSI midlayer with either
 * DID_RESET or DID_NO_CONNECT.
4590  */
4591 static void
4592 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4593 {
4594 	struct scsi_cmnd *scmd;
4595 	struct scsiio_tracker *st;
4596 	u16 smid;
4597 	int count = 0;
4598 
4599 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4600 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4601 		if (!scmd)
4602 			continue;
4603 		count++;
4604 		_scsih_set_satl_pending(scmd, false);
4605 		st = scsi_cmd_priv(scmd);
4606 		mpt3sas_base_clear_st(ioc, st);
4607 		scsi_dma_unmap(scmd);
4608 		if (ioc->pci_error_recovery || ioc->remove_host)
4609 			scmd->result = DID_NO_CONNECT << 16;
4610 		else
4611 			scmd->result = DID_RESET << 16;
4612 		scmd->scsi_done(scmd);
4613 	}
4614 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4615 }
4616 
4617 /**
4618  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4619  * @ioc: per adapter object
4620  * @scmd: pointer to scsi command object
4621  * @mpi_request: pointer to the SCSI_IO request message frame
4622  *
 * Supports protection types 1, 2 and 3.
4624  */
4625 static void
4626 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4627 	Mpi25SCSIIORequest_t *mpi_request)
4628 {
4629 	u16 eedp_flags;
4630 	unsigned char prot_op = scsi_get_prot_op(scmd);
4631 	unsigned char prot_type = scsi_get_prot_type(scmd);
4632 	Mpi25SCSIIORequest_t *mpi_request_3v =
4633 	   (Mpi25SCSIIORequest_t *)mpi_request;
4634 
4635 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4636 		return;
4637 
	if (prot_op == SCSI_PROT_READ_STRIP)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
	else if (prot_op == SCSI_PROT_WRITE_INSERT)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4642 	else
4643 		return;
4644 
4645 	switch (prot_type) {
4646 	case SCSI_PROT_DIF_TYPE1:
4647 	case SCSI_PROT_DIF_TYPE2:
4648 
		/*
		 * enable ref/guard checking
		 * auto increment ref tag
		 */
4653 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4654 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4655 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4656 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4657 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4658 		break;
4659 
4660 	case SCSI_PROT_DIF_TYPE3:
4661 
		/*
		 * enable guard checking
		 */
4665 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4666 
4667 		break;
4668 	}
4669 
4670 	mpi_request_3v->EEDPBlockSize =
4671 	    cpu_to_le16(scmd->device->sector_size);
4672 
4673 	if (ioc->is_gen35_ioc)
4674 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4675 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4676 }
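
/*
 * Illustrative example (values follow the code above, not new driver
 * code): a READ from a DIF Type 1 device where the HBA strips the
 * protection information ends up with
 *
 *   eedp_flags = CHECK_REMOVE_OP | INC_PRI_REFTAG |
 *                CHECK_REFTAG | CHECK_GUARD
 *                (plus APPTAG_DISABLE_MODE on gen3.5 IOCs)
 *   CDB.EEDP32.PrimaryReferenceTag = t10_pi_ref_tag(scmd->request)
 *   EEDPBlockSize = scmd->device->sector_size
 */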
4677 
4678 /**
4679  * _scsih_eedp_error_handling - return sense code for EEDP errors
4680  * @scmd: pointer to scsi command object
4681  * @ioc_status: ioc status
4682  */
4683 static void
4684 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4685 {
4686 	u8 ascq;
4687 
4688 	switch (ioc_status) {
4689 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4690 		ascq = 0x01;
4691 		break;
4692 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4693 		ascq = 0x02;
4694 		break;
4695 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4696 		ascq = 0x03;
4697 		break;
4698 	default:
4699 		ascq = 0x00;
4700 		break;
4701 	}
4702 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4703 	    ascq);
4704 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4705 	    SAM_STAT_CHECK_CONDITION;
4706 }
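
/*
 * The resulting sense data maps the EEDP failure onto the standard T10
 * additional sense codes (ILLEGAL REQUEST, ASC 0x10):
 *
 *   ascq 0x01 - logical block guard check failed
 *   ascq 0x02 - logical block application tag check failed
 *   ascq 0x03 - logical block reference tag check failed
 */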
4707 
4708 /**
4709  * scsih_qcmd - main scsi request entry point
4710  * @shost: SCSI host pointer
4711  * @scmd: pointer to scsi command object
4712  *
4713  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4714  *
 * Return: 0 on success.  If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full.
4718  */
4719 static int
4720 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4721 {
4722 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4723 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4724 	struct MPT3SAS_TARGET *sas_target_priv_data;
4725 	struct _raid_device *raid_device;
4726 	struct request *rq = scmd->request;
4727 	int class;
4728 	Mpi25SCSIIORequest_t *mpi_request;
4729 	struct _pcie_device *pcie_device = NULL;
4730 	u32 mpi_control;
4731 	u16 smid;
4732 	u16 handle;
4733 
4734 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4735 		scsi_print_command(scmd);
4736 
4737 	sas_device_priv_data = scmd->device->hostdata;
4738 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4739 		scmd->result = DID_NO_CONNECT << 16;
4740 		scmd->scsi_done(scmd);
4741 		return 0;
4742 	}
4743 
4744 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4745 		scmd->result = DID_NO_CONNECT << 16;
4746 		scmd->scsi_done(scmd);
4747 		return 0;
4748 	}
4749 
4750 	sas_target_priv_data = sas_device_priv_data->sas_target;
4751 
4752 	/* invalid device handle */
4753 	handle = sas_target_priv_data->handle;
4754 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4755 		scmd->result = DID_NO_CONNECT << 16;
4756 		scmd->scsi_done(scmd);
4757 		return 0;
4758 	}
4759 
4760 
4761 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4762 		/* host recovery or link resets sent via IOCTLs */
4763 		return SCSI_MLQUEUE_HOST_BUSY;
4764 	} else if (sas_target_priv_data->deleted) {
4765 		/* device has been deleted */
4766 		scmd->result = DID_NO_CONNECT << 16;
4767 		scmd->scsi_done(scmd);
4768 		return 0;
4769 	} else if (sas_target_priv_data->tm_busy ||
4770 		   sas_device_priv_data->block) {
4771 		/* device busy with task management */
4772 		return SCSI_MLQUEUE_DEVICE_BUSY;
4773 	}
4774 
4775 	/*
	 * Bug workaround for firmware SATL handling.  The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point.
4779 	 */
4780 	do {
4781 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4782 			return SCSI_MLQUEUE_DEVICE_BUSY;
4783 	} while (_scsih_set_satl_pending(scmd, true));
4784 
4785 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4786 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
4787 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4788 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4789 	else
4790 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4791 
4792 	/* set tags */
4793 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* When NCQ priority is enabled, indicate high priority in the control field */
4795 	if (sas_device_priv_data->ncq_prio_enable) {
4796 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4797 		if (class == IOPRIO_CLASS_RT)
4798 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4799 	}
	/* Make sure the device is not a raid volume.
	 * We do not expose raid functionality to the upper layer for warpdrive.
4802 	 */
4803 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4804 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
4805 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4806 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4807 
4808 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4809 	if (!smid) {
4810 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4811 		_scsih_set_satl_pending(scmd, false);
4812 		goto out;
4813 	}
4814 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4815 	memset(mpi_request, 0, ioc->request_sz);
4816 	_scsih_setup_eedp(ioc, scmd, mpi_request);
4817 
4818 	if (scmd->cmd_len == 32)
4819 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4820 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4821 	if (sas_device_priv_data->sas_target->flags &
4822 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
4823 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4824 	else
4825 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4826 	mpi_request->DevHandle = cpu_to_le16(handle);
4827 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4828 	mpi_request->Control = cpu_to_le32(mpi_control);
4829 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4830 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4831 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4832 	mpi_request->SenseBufferLowAddress =
4833 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4834 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4835 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4836 	    mpi_request->LUN);
4837 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4838 
4839 	if (mpi_request->DataLength) {
4840 		pcie_device = sas_target_priv_data->pcie_dev;
4841 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4842 			mpt3sas_base_free_smid(ioc, smid);
4843 			_scsih_set_satl_pending(scmd, false);
4844 			goto out;
4845 		}
4846 	} else
4847 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4848 
4849 	raid_device = sas_target_priv_data->raid_device;
4850 	if (raid_device && raid_device->direct_io_enabled)
4851 		mpt3sas_setup_direct_io(ioc, scmd,
4852 			raid_device, mpi_request);
4853 
4854 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4855 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4856 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4857 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4858 			ioc->put_smid_fast_path(ioc, smid, handle);
4859 		} else
4860 			ioc->put_smid_scsi_io(ioc, smid,
4861 			    le16_to_cpu(mpi_request->DevHandle));
4862 	} else
4863 		ioc->put_smid_default(ioc, smid);
4864 	return 0;
4865 
4866  out:
4867 	return SCSI_MLQUEUE_HOST_BUSY;
4868 }
4869 
4870 /**
4871  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4872  * @sense_buffer: sense data returned by target
4873  * @data: normalized skey/asc/ascq
4874  */
4875 static void
4876 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4877 {
4878 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
4879 		/* descriptor format */
4880 		data->skey = sense_buffer[1] & 0x0F;
4881 		data->asc = sense_buffer[2];
4882 		data->ascq = sense_buffer[3];
4883 	} else {
4884 		/* fixed format */
4885 		data->skey = sense_buffer[2] & 0x0F;
4886 		data->asc = sense_buffer[12];
4887 		data->ascq = sense_buffer[13];
4888 	}
4889 }
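
/*
 * Illustrative example (not driver code): the two sense formats the
 * helper above normalizes; byte 0 selects the layout.
 *
 *   fixed (0x70/0x71):      skey = buf[2] & 0x0F, asc = buf[12], ascq = buf[13]
 *   descriptor (0x72/0x73): skey = buf[1] & 0x0F, asc = buf[2],  ascq = buf[3]
 *
 * e.g. a MEDIUM ERROR reported as 0x11/0x00 in either format normalizes
 * to { .skey = 0x03, .asc = 0x11, .ascq = 0x00 }.
 */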
4890 
4891 /**
 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
4897  *
4898  * scsi_status - SCSI Status code returned from target device
4899  * scsi_state - state info associated with SCSI_IO determined by ioc
4900  * ioc_status - ioc supplied status info
4901  */
4902 static void
4903 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4904 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4905 {
4906 	u32 response_info;
4907 	u8 *response_bytes;
4908 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4909 	    MPI2_IOCSTATUS_MASK;
4910 	u8 scsi_state = mpi_reply->SCSIState;
4911 	u8 scsi_status = mpi_reply->SCSIStatus;
4912 	char *desc_ioc_state = NULL;
4913 	char *desc_scsi_status = NULL;
4914 	char *desc_scsi_state = ioc->tmp_string;
4915 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4916 	struct _sas_device *sas_device = NULL;
4917 	struct _pcie_device *pcie_device = NULL;
4918 	struct scsi_target *starget = scmd->device->sdev_target;
4919 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4920 	char *device_str = NULL;
4921 
4922 	if (!priv_target)
4923 		return;
4924 	if (ioc->hide_ir_msg)
4925 		device_str = "WarpDrive";
4926 	else
4927 		device_str = "volume";
4928 
4929 	if (log_info == 0x31170000)
4930 		return;
4931 
4932 	switch (ioc_status) {
4933 	case MPI2_IOCSTATUS_SUCCESS:
4934 		desc_ioc_state = "success";
4935 		break;
4936 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
4937 		desc_ioc_state = "invalid function";
4938 		break;
4939 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4940 		desc_ioc_state = "scsi recovered error";
4941 		break;
4942 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4943 		desc_ioc_state = "scsi invalid dev handle";
4944 		break;
4945 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4946 		desc_ioc_state = "scsi device not there";
4947 		break;
4948 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4949 		desc_ioc_state = "scsi data overrun";
4950 		break;
4951 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4952 		desc_ioc_state = "scsi data underrun";
4953 		break;
4954 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4955 		desc_ioc_state = "scsi io data error";
4956 		break;
4957 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4958 		desc_ioc_state = "scsi protocol error";
4959 		break;
4960 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4961 		desc_ioc_state = "scsi task terminated";
4962 		break;
4963 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4964 		desc_ioc_state = "scsi residual mismatch";
4965 		break;
4966 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4967 		desc_ioc_state = "scsi task mgmt failed";
4968 		break;
4969 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4970 		desc_ioc_state = "scsi ioc terminated";
4971 		break;
4972 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4973 		desc_ioc_state = "scsi ext terminated";
4974 		break;
4975 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4976 		desc_ioc_state = "eedp guard error";
4977 		break;
4978 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4979 		desc_ioc_state = "eedp ref tag error";
4980 		break;
4981 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4982 		desc_ioc_state = "eedp app tag error";
4983 		break;
4984 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4985 		desc_ioc_state = "insufficient power";
4986 		break;
4987 	default:
4988 		desc_ioc_state = "unknown";
4989 		break;
4990 	}
4991 
4992 	switch (scsi_status) {
4993 	case MPI2_SCSI_STATUS_GOOD:
4994 		desc_scsi_status = "good";
4995 		break;
4996 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
4997 		desc_scsi_status = "check condition";
4998 		break;
4999 	case MPI2_SCSI_STATUS_CONDITION_MET:
5000 		desc_scsi_status = "condition met";
5001 		break;
5002 	case MPI2_SCSI_STATUS_BUSY:
5003 		desc_scsi_status = "busy";
5004 		break;
5005 	case MPI2_SCSI_STATUS_INTERMEDIATE:
5006 		desc_scsi_status = "intermediate";
5007 		break;
5008 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5009 		desc_scsi_status = "intermediate condmet";
5010 		break;
5011 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5012 		desc_scsi_status = "reservation conflict";
5013 		break;
5014 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5015 		desc_scsi_status = "command terminated";
5016 		break;
5017 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5018 		desc_scsi_status = "task set full";
5019 		break;
5020 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
5021 		desc_scsi_status = "aca active";
5022 		break;
5023 	case MPI2_SCSI_STATUS_TASK_ABORTED:
5024 		desc_scsi_status = "task aborted";
5025 		break;
5026 	default:
5027 		desc_scsi_status = "unknown";
5028 		break;
5029 	}
5030 
5031 	desc_scsi_state[0] = '\0';
5032 	if (!scsi_state)
5033 		desc_scsi_state = " ";
5034 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5035 		strcat(desc_scsi_state, "response info ");
5036 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5037 		strcat(desc_scsi_state, "state terminated ");
5038 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5039 		strcat(desc_scsi_state, "no status ");
5040 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5041 		strcat(desc_scsi_state, "autosense failed ");
5042 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5043 		strcat(desc_scsi_state, "autosense valid ");
5044 
5045 	scsi_print_command(scmd);
5046 
5047 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5048 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5049 			 device_str, (u64)priv_target->sas_address);
5050 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5051 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5052 		if (pcie_device) {
5053 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5054 				 (u64)pcie_device->wwid, pcie_device->port_num);
5055 			if (pcie_device->enclosure_handle != 0)
5056 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5057 					 (u64)pcie_device->enclosure_logical_id,
5058 					 pcie_device->slot);
5059 			if (pcie_device->connector_name[0])
5060 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5061 					 pcie_device->enclosure_level,
5062 					 pcie_device->connector_name);
5063 			pcie_device_put(pcie_device);
5064 		}
5065 	} else {
5066 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5067 		if (sas_device) {
5068 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5069 				 (u64)sas_device->sas_address, sas_device->phy);
5070 
5071 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
5072 			    NULL, NULL);
5073 
5074 			sas_device_put(sas_device);
5075 		}
5076 	}
5077 
5078 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5079 		 le16_to_cpu(mpi_reply->DevHandle),
5080 		 desc_ioc_state, ioc_status, smid);
5081 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5082 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5083 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5084 		 le16_to_cpu(mpi_reply->TaskTag),
5085 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5086 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5087 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5088 
5089 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5090 		struct sense_info data;
5091 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5092 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5093 			 data.skey, data.asc, data.ascq,
5094 			 le32_to_cpu(mpi_reply->SenseCount));
5095 	}
5096 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5097 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5098 		response_bytes = (u8 *)&response_info;
5099 		_scsih_response_code(ioc, response_bytes[0]);
5100 	}
5101 }
5102 
5103 /**
5104  * _scsih_turn_on_pfa_led - illuminate PFA LED
5105  * @ioc: per adapter object
5106  * @handle: device handle
5107  * Context: process
5108  */
5109 static void
5110 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5111 {
5112 	Mpi2SepReply_t mpi_reply;
5113 	Mpi2SepRequest_t mpi_request;
5114 	struct _sas_device *sas_device;
5115 
5116 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5117 	if (!sas_device)
5118 		return;
5119 
5120 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5121 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5122 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5123 	mpi_request.SlotStatus =
5124 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5125 	mpi_request.DevHandle = cpu_to_le16(handle);
5126 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5127 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5128 	    &mpi_request)) != 0) {
5129 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5130 			__FILE__, __LINE__, __func__);
5131 		goto out;
5132 	}
5133 	sas_device->pfa_led_on = 1;
5134 
5135 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5136 		dewtprintk(ioc,
5137 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5138 				    le16_to_cpu(mpi_reply.IOCStatus),
5139 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5140 		goto out;
5141 	}
5142 out:
5143 	sas_device_put(sas_device);
5144 }
5145 
5146 /**
5147  * _scsih_turn_off_pfa_led - turn off Fault LED
5148  * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to be turned off
5150  * Context: process
5151  */
5152 static void
5153 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5154 	struct _sas_device *sas_device)
5155 {
5156 	Mpi2SepReply_t mpi_reply;
5157 	Mpi2SepRequest_t mpi_request;
5158 
5159 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
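	/*
	 * Address the SEP WRITE_STATUS request by enclosure handle and slot
	 * (DevHandle left at zero), since the device handle may no longer be
	 * valid by the time the device is being removed.
	 */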
5160 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5161 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5162 	mpi_request.SlotStatus = 0;
5163 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5164 	mpi_request.DevHandle = 0;
5165 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5166 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5167 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5168 		&mpi_request)) != 0) {
5169 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5170 			__FILE__, __LINE__, __func__);
5171 		return;
5172 	}
5173 
5174 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5175 		dewtprintk(ioc,
5176 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5177 				    le16_to_cpu(mpi_reply.IOCStatus),
5178 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5179 		return;
5180 	}
5181 }
5182 
5183 /**
5184  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5185  * @ioc: per adapter object
5186  * @handle: device handle
5187  * Context: interrupt.
5188  */
5189 static void
5190 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5191 {
5192 	struct fw_event_work *fw_event;
5193 
5194 	fw_event = alloc_fw_event_work(0);
5195 	if (!fw_event)
5196 		return;
5197 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5198 	fw_event->device_handle = handle;
5199 	fw_event->ioc = ioc;
5200 	_scsih_fw_event_add(ioc, fw_event);
5201 	fw_event_work_put(fw_event);
5202 }
5203 
5204 /**
5205  * _scsih_smart_predicted_fault - process smart errors
5206  * @ioc: per adapter object
5207  * @handle: device handle
5208  * Context: interrupt.
5209  */
5210 static void
5211 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5212 {
5213 	struct scsi_target *starget;
5214 	struct MPT3SAS_TARGET *sas_target_priv_data;
5215 	Mpi2EventNotificationReply_t *event_reply;
5216 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5217 	struct _sas_device *sas_device;
5218 	ssize_t sz;
5219 	unsigned long flags;
5220 
5221 	/* only handle non-raid devices */
5222 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5223 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5224 	if (!sas_device)
5225 		goto out_unlock;
5226 
5227 	starget = sas_device->starget;
5228 	sas_target_priv_data = starget->hostdata;
5229 
5230 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5231 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5232 		goto out_unlock;
5233 
5234 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5235 
5236 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5237 
5238 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5239 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5240 
5241 	/* insert into event log */
5242 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5243 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5244 	event_reply = kzalloc(sz, GFP_ATOMIC);
5245 	if (!event_reply) {
5246 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5247 			__FILE__, __LINE__, __func__);
5248 		goto out;
5249 	}
5250 
5251 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5252 	event_reply->Event =
5253 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5254 	event_reply->MsgLength = sz/4;
5255 	event_reply->EventDataLength =
5256 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5257 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5258 	    event_reply->EventData;
5259 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5260 	event_data->ASC = 0x5D;
5261 	event_data->DevHandle = cpu_to_le16(handle);
5262 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5263 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5264 	kfree(event_reply);
5265 out:
5266 	if (sas_device)
5267 		sas_device_put(sas_device);
5268 	return;
5269 
5270 out_unlock:
5271 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5272 	goto out;
5273 }
5274 
5275 /**
5276  * _scsih_io_done - scsi request callback
5277  * @ioc: per adapter object
5278  * @smid: system request message index
5279  * @msix_index: MSIX table index supplied by the OS
5280  * @reply: reply message frame(lower 32bit addr)
5281  *
5282  * Callback handler when using _scsih_qcmd.
5283  *
5284  * Return: 1 meaning mf should be freed from _base_interrupt
5285  *         0 means the mf is freed from this function.
5286  */
5287 static u8
5288 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5289 {
5290 	Mpi25SCSIIORequest_t *mpi_request;
5291 	Mpi2SCSIIOReply_t *mpi_reply;
5292 	struct scsi_cmnd *scmd;
5293 	struct scsiio_tracker *st;
5294 	u16 ioc_status;
5295 	u32 xfer_cnt;
5296 	u8 scsi_state;
5297 	u8 scsi_status;
5298 	u32 log_info;
5299 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5300 	u32 response_code = 0;
5301 
5302 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5303 
5304 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5305 	if (scmd == NULL)
5306 		return 1;
5307 
5308 	_scsih_set_satl_pending(scmd, false);
5309 
5310 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5311 
5312 	if (mpi_reply == NULL) {
5313 		scmd->result = DID_OK << 16;
5314 		goto out;
5315 	}
5316 
5317 	sas_device_priv_data = scmd->device->hostdata;
5318 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5319 	     sas_device_priv_data->sas_target->deleted) {
5320 		scmd->result = DID_NO_CONNECT << 16;
5321 		goto out;
5322 	}
5323 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5324 
5325 	/*
5326 	 * WARPDRIVE: If direct_io is set then it is directIO,
5327 	 * the failed direct I/O should be redirected to volume
5328 	 */
5329 	st = scsi_cmd_priv(scmd);
5330 	if (st->direct_io &&
5331 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5332 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5333 		st->direct_io = 0;
5334 		st->scmd = scmd;
5335 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5336 		mpi_request->DevHandle =
5337 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5338 		ioc->put_smid_scsi_io(ioc, smid,
5339 		    sas_device_priv_data->sas_target->handle);
5340 		return 0;
5341 	}
	/* turn off TLR if the target rejected the TLR-enabled frame */
5343 	scsi_state = mpi_reply->SCSIState;
5344 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5345 		response_code =
5346 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5347 	if (!sas_device_priv_data->tlr_snoop_check) {
5348 		sas_device_priv_data->tlr_snoop_check++;
5349 		if ((!ioc->is_warpdrive &&
5350 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5351 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5352 		    && sas_is_tlr_enabled(scmd->device) &&
5353 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5354 			sas_disable_tlr(scmd->device);
5355 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5356 		}
5357 	}
5358 
5359 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5360 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5361 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5362 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5363 	else
5364 		log_info = 0;
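	/* strip the log-info-available flag, leaving just the status code */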
5365 	ioc_status &= MPI2_IOCSTATUS_MASK;
5366 	scsi_status = mpi_reply->SCSIStatus;
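	/*
	 * A data underrun that transferred nothing but completed with BUSY,
	 * RESERVATION CONFLICT or TASK SET FULL is treated as SUCCESS so the
	 * raw SCSI status is returned to the midlayer below.
	 */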
5367 
5368 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5369 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5370 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5371 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5372 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5373 	}
5374 
5375 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5376 		struct sense_info data;
5377 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5378 		    smid);
5379 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5380 		    le32_to_cpu(mpi_reply->SenseCount));
5381 		memcpy(scmd->sense_buffer, sense_data, sz);
5382 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5383 		/* failure prediction threshold exceeded */
5384 		if (data.asc == 0x5D)
5385 			_scsih_smart_predicted_fault(ioc,
5386 			    le16_to_cpu(mpi_reply->DevHandle));
5387 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5388 
5389 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5390 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5391 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5392 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5393 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5394 	}
5395 	switch (ioc_status) {
5396 	case MPI2_IOCSTATUS_BUSY:
5397 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5398 		scmd->result = SAM_STAT_BUSY;
5399 		break;
5400 
5401 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5402 		scmd->result = DID_NO_CONNECT << 16;
5403 		break;
5404 
5405 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5406 		if (sas_device_priv_data->block) {
5407 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5408 			goto out;
5409 		}
5410 		if (log_info == 0x31110630) {
5411 			if (scmd->retries > 2) {
5412 				scmd->result = DID_NO_CONNECT << 16;
5413 				scsi_device_set_state(scmd->device,
5414 				    SDEV_OFFLINE);
5415 			} else {
5416 				scmd->result = DID_SOFT_ERROR << 16;
5417 				scmd->device->expecting_cc_ua = 1;
5418 			}
5419 			break;
5420 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5421 			scmd->result = DID_RESET << 16;
5422 			break;
5423 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5424 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5425 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5426 			scmd->result = DID_RESET << 16;
5427 			break;
5428 		}
5429 		scmd->result = DID_SOFT_ERROR << 16;
5430 		break;
5431 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5432 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5433 		scmd->result = DID_RESET << 16;
5434 		break;
5435 
5436 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5437 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5438 			scmd->result = DID_SOFT_ERROR << 16;
5439 		else
5440 			scmd->result = (DID_OK << 16) | scsi_status;
5441 		break;
5442 
5443 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5444 		scmd->result = (DID_OK << 16) | scsi_status;
5445 
5446 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5447 			break;
5448 
5449 		if (xfer_cnt < scmd->underflow) {
5450 			if (scsi_status == SAM_STAT_BUSY)
5451 				scmd->result = SAM_STAT_BUSY;
5452 			else
5453 				scmd->result = DID_SOFT_ERROR << 16;
5454 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5455 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5456 			scmd->result = DID_SOFT_ERROR << 16;
5457 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5458 			scmd->result = DID_RESET << 16;
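		/*
		 * A REPORT LUNS that transferred no data is converted into a
		 * synthetic CHECK CONDITION with ILLEGAL REQUEST sense
		 * (ASC 0x20, ASCQ 0x00), presumably so the midlayer does not
		 * treat the empty response as a successful scan.
		 */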
5459 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5460 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5461 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5462 			scmd->result = (DRIVER_SENSE << 24) |
5463 			    SAM_STAT_CHECK_CONDITION;
5464 			scmd->sense_buffer[0] = 0x70;
5465 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5466 			scmd->sense_buffer[12] = 0x20;
5467 			scmd->sense_buffer[13] = 0;
5468 		}
5469 		break;
5470 
5471 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5472 		scsi_set_resid(scmd, 0);
5473 		/* fall through */
5474 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5475 	case MPI2_IOCSTATUS_SUCCESS:
5476 		scmd->result = (DID_OK << 16) | scsi_status;
5477 		if (response_code ==
5478 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5479 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5480 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5481 			scmd->result = DID_SOFT_ERROR << 16;
5482 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5483 			scmd->result = DID_RESET << 16;
5484 		break;
5485 
5486 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5487 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5488 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5489 		_scsih_eedp_error_handling(scmd, ioc_status);
5490 		break;
5491 
5492 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5493 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5494 	case MPI2_IOCSTATUS_INVALID_SGL:
5495 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5496 	case MPI2_IOCSTATUS_INVALID_FIELD:
5497 	case MPI2_IOCSTATUS_INVALID_STATE:
5498 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5499 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5500 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5501 	default:
5502 		scmd->result = DID_SOFT_ERROR << 16;
5503 		break;
5504 
5505 	}
5506 
5507 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5508 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5509 
5510  out:
5511 
5512 	scsi_dma_unmap(scmd);
5513 	mpt3sas_base_free_smid(ioc, smid);
5514 	scmd->scsi_done(scmd);
5515 	return 0;
5516 }
5517 
5518 /**
5519  * _scsih_sas_host_refresh - refreshing sas host object contents
5520  * @ioc: per adapter object
5521  * Context: user
5522  *
 * During port enable, the firmware sends topology events for every device.
 * It is possible that the handles have changed since the previous setting,
 * so this routine keeps them updated.
5526  */
5527 static void
5528 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5529 {
5530 	u16 sz;
5531 	u16 ioc_status;
5532 	int i;
5533 	Mpi2ConfigReply_t mpi_reply;
5534 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5535 	u16 attached_handle;
5536 	u8 link_rate;
5537 
5538 	dtmprintk(ioc,
5539 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5540 			   (u64)ioc->sas_hba.sas_address));
5541 
5542 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5543 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5544 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5545 	if (!sas_iounit_pg0) {
5546 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5547 			__FILE__, __LINE__, __func__);
5548 		return;
5549 	}
5550 
5551 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5552 	    sas_iounit_pg0, sz)) != 0)
5553 		goto out;
5554 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5555 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5556 		goto out;
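	/*
	 * Walk every host phy, refresh the cached controller device handle
	 * and report the attached handle and negotiated link rate to the
	 * transport layer (clamped to a minimum of 1.5 Gb/s when a device
	 * is attached).
	 */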
5557 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5558 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5559 		if (i == 0)
5560 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5561 			    PhyData[0].ControllerDevHandle);
5562 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5563 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5564 		    AttachedDevHandle);
5565 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5566 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5567 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5568 		    attached_handle, i, link_rate);
5569 	}
5570  out:
5571 	kfree(sas_iounit_pg0);
5572 }
5573 
5574 /**
5575  * _scsih_sas_host_add - create sas host object
5576  * @ioc: per adapter object
5577  *
5578  * Creating host side data object, stored in ioc->sas_hba
5579  */
5580 static void
5581 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5582 {
5583 	int i;
5584 	Mpi2ConfigReply_t mpi_reply;
5585 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5586 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5587 	Mpi2SasPhyPage0_t phy_pg0;
5588 	Mpi2SasDevicePage0_t sas_device_pg0;
5589 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5590 	u16 ioc_status;
5591 	u16 sz;
5592 	u8 device_missing_delay;
5593 	u8 num_phys;
5594 
5595 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5596 	if (!num_phys) {
5597 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5598 			__FILE__, __LINE__, __func__);
5599 		return;
5600 	}
5601 	ioc->sas_hba.phy = kcalloc(num_phys,
5602 	    sizeof(struct _sas_phy), GFP_KERNEL);
5603 	if (!ioc->sas_hba.phy) {
5604 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5605 			__FILE__, __LINE__, __func__);
5606 		goto out;
5607 	}
5608 	ioc->sas_hba.num_phys = num_phys;
5609 
5610 	/* sas_iounit page 0 */
5611 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5612 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5613 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5614 	if (!sas_iounit_pg0) {
5615 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5616 			__FILE__, __LINE__, __func__);
5617 		return;
5618 	}
5619 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5620 	    sas_iounit_pg0, sz))) {
5621 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5622 			__FILE__, __LINE__, __func__);
5623 		goto out;
5624 	}
5625 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5626 	    MPI2_IOCSTATUS_MASK;
5627 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5628 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5629 			__FILE__, __LINE__, __func__);
5630 		goto out;
5631 	}
5632 
5633 	/* sas_iounit page 1 */
5634 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5635 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5636 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5637 	if (!sas_iounit_pg1) {
5638 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5639 			__FILE__, __LINE__, __func__);
5640 		goto out;
5641 	}
5642 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5643 	    sas_iounit_pg1, sz))) {
5644 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5645 			__FILE__, __LINE__, __func__);
5646 		goto out;
5647 	}
5648 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5649 	    MPI2_IOCSTATUS_MASK;
5650 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5651 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5652 			__FILE__, __LINE__, __func__);
5653 		goto out;
5654 	}
5655 
5656 	ioc->io_missing_delay =
5657 	    sas_iounit_pg1->IODeviceMissingDelay;
5658 	device_missing_delay =
5659 	    sas_iounit_pg1->ReportDeviceMissingDelay;
5660 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5661 		ioc->device_missing_delay = (device_missing_delay &
5662 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5663 	else
5664 		ioc->device_missing_delay = device_missing_delay &
5665 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5666 
5667 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
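	/*
	 * Read phy page 0 for each host phy and register it with the SAS
	 * transport class.
	 */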
5668 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5669 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5670 		    i))) {
5671 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5672 				__FILE__, __LINE__, __func__);
5673 			goto out;
5674 		}
5675 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5676 		    MPI2_IOCSTATUS_MASK;
5677 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5678 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5679 				__FILE__, __LINE__, __func__);
5680 			goto out;
5681 		}
5682 
5683 		if (i == 0)
5684 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5685 			    PhyData[0].ControllerDevHandle);
5686 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5687 		ioc->sas_hba.phy[i].phy_id = i;
5688 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5689 		    phy_pg0, ioc->sas_hba.parent_dev);
5690 	}
5691 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5692 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5693 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5694 			__FILE__, __LINE__, __func__);
5695 		goto out;
5696 	}
5697 	ioc->sas_hba.enclosure_handle =
5698 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5699 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5700 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5701 		 ioc->sas_hba.handle,
5702 		 (u64)ioc->sas_hba.sas_address,
5703 		 ioc->sas_hba.num_phys);
5704 
5705 	if (ioc->sas_hba.enclosure_handle) {
5706 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5707 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5708 		   ioc->sas_hba.enclosure_handle)))
5709 			ioc->sas_hba.enclosure_logical_id =
5710 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5711 	}
5712 
5713  out:
5714 	kfree(sas_iounit_pg1);
5715 	kfree(sas_iounit_pg0);
5716 }
5717 
5718 /**
5719  * _scsih_expander_add -  creating expander object
5720  * @ioc: per adapter object
5721  * @handle: expander handle
5722  *
5723  * Creating expander object, stored in ioc->sas_expander_list.
5724  *
5725  * Return: 0 for success, else error.
5726  */
5727 static int
5728 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5729 {
5730 	struct _sas_node *sas_expander;
5731 	struct _enclosure_node *enclosure_dev;
5732 	Mpi2ConfigReply_t mpi_reply;
5733 	Mpi2ExpanderPage0_t expander_pg0;
5734 	Mpi2ExpanderPage1_t expander_pg1;
5735 	u32 ioc_status;
5736 	u16 parent_handle;
5737 	u64 sas_address, sas_address_parent = 0;
5738 	int i;
5739 	unsigned long flags;
5740 	struct _sas_port *mpt3sas_port = NULL;
5741 
5742 	int rc = 0;
5743 
5744 	if (!handle)
5745 		return -1;
5746 
5747 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5748 		return -1;
5749 
5750 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5751 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5752 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5753 			__FILE__, __LINE__, __func__);
5754 		return -1;
5755 	}
5756 
5757 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5758 	    MPI2_IOCSTATUS_MASK;
5759 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5760 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5761 			__FILE__, __LINE__, __func__);
5762 		return -1;
5763 	}
5764 
5765 	/* handle out of order topology events */
5766 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5767 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5768 	    != 0) {
5769 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5770 			__FILE__, __LINE__, __func__);
5771 		return -1;
5772 	}
5773 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5774 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5775 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5776 		    sas_address_parent);
5777 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5778 		if (!sas_expander) {
5779 			rc = _scsih_expander_add(ioc, parent_handle);
5780 			if (rc != 0)
5781 				return rc;
5782 		}
5783 	}
5784 
5785 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5786 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
5787 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5788 	    sas_address);
5789 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5790 
5791 	if (sas_expander)
5792 		return 0;
5793 
5794 	sas_expander = kzalloc(sizeof(struct _sas_node),
5795 	    GFP_KERNEL);
5796 	if (!sas_expander) {
5797 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5798 			__FILE__, __LINE__, __func__);
5799 		return -1;
5800 	}
5801 
5802 	sas_expander->handle = handle;
5803 	sas_expander->num_phys = expander_pg0.NumPhys;
5804 	sas_expander->sas_address_parent = sas_address_parent;
5805 	sas_expander->sas_address = sas_address;
5806 
5807 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5808 		 handle, parent_handle,
5809 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
5810 
5811 	if (!sas_expander->num_phys)
5812 		goto out_fail;
5813 	sas_expander->phy = kcalloc(sas_expander->num_phys,
5814 	    sizeof(struct _sas_phy), GFP_KERNEL);
5815 	if (!sas_expander->phy) {
5816 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5817 			__FILE__, __LINE__, __func__);
5818 		rc = -1;
5819 		goto out_fail;
5820 	}
5821 
5822 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
5823 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5824 	    sas_address_parent);
5825 	if (!mpt3sas_port) {
5826 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5827 			__FILE__, __LINE__, __func__);
5828 		rc = -1;
5829 		goto out_fail;
5830 	}
5831 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5832 
5833 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
5834 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5835 		    &expander_pg1, i, handle))) {
5836 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5837 				__FILE__, __LINE__, __func__);
5838 			rc = -1;
5839 			goto out_fail;
5840 		}
5841 		sas_expander->phy[i].handle = handle;
5842 		sas_expander->phy[i].phy_id = i;
5843 
5844 		if ((mpt3sas_transport_add_expander_phy(ioc,
5845 		    &sas_expander->phy[i], expander_pg1,
5846 		    sas_expander->parent_dev))) {
5847 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5848 				__FILE__, __LINE__, __func__);
5849 			rc = -1;
5850 			goto out_fail;
5851 		}
5852 	}
5853 
5854 	if (sas_expander->enclosure_handle) {
5855 		enclosure_dev =
5856 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
5857 						sas_expander->enclosure_handle);
5858 		if (enclosure_dev)
5859 			sas_expander->enclosure_logical_id =
5860 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5861 	}
5862 
5863 	_scsih_expander_node_add(ioc, sas_expander);
5864 	return 0;
5865 
5866  out_fail:
5867 
5868 	if (mpt3sas_port)
5869 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5870 		    sas_address_parent);
5871 	kfree(sas_expander);
5872 	return rc;
5873 }
5874 
5875 /**
5876  * mpt3sas_expander_remove - removing expander object
5877  * @ioc: per adapter object
5878  * @sas_address: expander sas_address
5879  */
5880 void
5881 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5882 {
5883 	struct _sas_node *sas_expander;
5884 	unsigned long flags;
5885 
5886 	if (ioc->shost_recovery)
5887 		return;
5888 
5889 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5890 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5891 	    sas_address);
5892 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5893 	if (sas_expander)
5894 		_scsih_expander_node_remove(ioc, sas_expander);
5895 }
5896 
5897 /**
5898  * _scsih_done -  internal SCSI_IO callback handler.
5899  * @ioc: per adapter object
5900  * @smid: system request message index
5901  * @msix_index: MSIX table index supplied by the OS
5902  * @reply: reply message frame(lower 32bit addr)
5903  *
5904  * Callback handler when sending internal generated SCSI_IO.
 * The callback index passed is ioc->scsih_cb_idx.
5906  *
5907  * Return: 1 meaning mf should be freed from _base_interrupt
5908  *         0 means the mf is freed from this function.
5909  */
5910 static u8
5911 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5912 {
5913 	MPI2DefaultReply_t *mpi_reply;
5914 
5915 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
5916 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5917 		return 1;
5918 	if (ioc->scsih_cmds.smid != smid)
5919 		return 1;
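	/*
	 * Record the reply (if any), mark the internal command complete and
	 * wake up the waiter blocked on scsih_cmds.done.
	 */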
5920 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5921 	if (mpi_reply) {
5922 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
5923 		    mpi_reply->MsgLength*4);
5924 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5925 	}
5926 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5927 	complete(&ioc->scsih_cmds.done);
5928 	return 1;
5929 }
5930 
5931 
5932 
5933 
5934 #define MPT3_MAX_LUNS (255)
5935 
5936 
5937 /**
5938  * _scsih_check_access_status - check access flags
5939  * @ioc: per adapter object
5940  * @sas_address: sas address
5941  * @handle: sas device handle
5942  * @access_status: errors returned during discovery of the device
5943  *
5944  * Return: 0 for success, else failure
5945  */
5946 static u8
5947 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5948 	u16 handle, u8 access_status)
5949 {
5950 	u8 rc = 1;
5951 	char *desc = NULL;
5952 
5953 	switch (access_status) {
5954 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5955 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5956 		rc = 0;
5957 		break;
5958 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5959 		desc = "sata capability failed";
5960 		break;
5961 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5962 		desc = "sata affiliation conflict";
5963 		break;
5964 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5965 		desc = "route not addressable";
5966 		break;
5967 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5968 		desc = "smp error not addressable";
5969 		break;
5970 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5971 		desc = "device blocked";
5972 		break;
5973 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5974 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5975 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5976 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5977 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5978 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5979 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5980 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5981 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5982 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5983 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5984 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5985 		desc = "sata initialization failed";
5986 		break;
5987 	default:
5988 		desc = "unknown";
5989 		break;
5990 	}
5991 
5992 	if (!rc)
5993 		return 0;
5994 
5995 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5996 		desc, (u64)sas_address, handle);
5997 	return rc;
5998 }
5999 
6000 /**
6001  * _scsih_check_device - checking device responsiveness
6002  * @ioc: per adapter object
6003  * @parent_sas_address: sas address of parent expander or sas host
6004  * @handle: attached device handle
6005  * @phy_number: phy number
6006  * @link_rate: new link rate
6007  */
6008 static void
6009 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
6010 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
6011 {
6012 	Mpi2ConfigReply_t mpi_reply;
6013 	Mpi2SasDevicePage0_t sas_device_pg0;
6014 	struct _sas_device *sas_device;
6015 	struct _enclosure_node *enclosure_dev = NULL;
6016 	u32 ioc_status;
6017 	unsigned long flags;
6018 	u64 sas_address;
6019 	struct scsi_target *starget;
6020 	struct MPT3SAS_TARGET *sas_target_priv_data;
6021 	u32 device_info;
6022 
6023 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6024 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
6025 		return;
6026 
6027 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6028 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6029 		return;
6030 
	/* wide port handling: we only need to handle the device once, for the
	 * phy that matches the one reported in sas device page zero
	 */
6034 	if (phy_number != sas_device_pg0.PhyNum)
6035 		return;
6036 
6037 	/* check if this is end device */
6038 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6039 	if (!(_scsih_is_end_device(device_info)))
6040 		return;
6041 
6042 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6043 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6044 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6045 	    sas_address);
6046 
6047 	if (!sas_device)
6048 		goto out_unlock;
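	/*
	 * The firmware may hand out a new device handle for the same SAS
	 * address (e.g. after a reset); refresh the cached handle and
	 * enclosure information when that happens.
	 */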
6049 
6050 	if (unlikely(sas_device->handle != handle)) {
6051 		starget = sas_device->starget;
6052 		sas_target_priv_data = starget->hostdata;
6053 		starget_printk(KERN_INFO, starget,
6054 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
6055 			sas_device->handle, handle);
6056 		sas_target_priv_data->handle = handle;
6057 		sas_device->handle = handle;
6058 		if (le16_to_cpu(sas_device_pg0.Flags) &
6059 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6060 			sas_device->enclosure_level =
6061 				sas_device_pg0.EnclosureLevel;
6062 			memcpy(sas_device->connector_name,
6063 				sas_device_pg0.ConnectorName, 4);
6064 			sas_device->connector_name[4] = '\0';
6065 		} else {
6066 			sas_device->enclosure_level = 0;
6067 			sas_device->connector_name[0] = '\0';
6068 		}
6069 
6070 		sas_device->enclosure_handle =
6071 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
6072 		sas_device->is_chassis_slot_valid = 0;
6073 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
6074 						sas_device->enclosure_handle);
6075 		if (enclosure_dev) {
6076 			sas_device->enclosure_logical_id =
6077 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6078 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6079 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6080 				sas_device->is_chassis_slot_valid = 1;
6081 				sas_device->chassis_slot =
6082 					enclosure_dev->pg0.ChassisSlot;
6083 			}
6084 		}
6085 	}
6086 
6087 	/* check if device is present */
6088 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6089 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6090 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
6091 			handle);
6092 		goto out_unlock;
6093 	}
6094 
6095 	/* check if there were any issues with discovery */
6096 	if (_scsih_check_access_status(ioc, sas_address, handle,
6097 	    sas_device_pg0.AccessStatus))
6098 		goto out_unlock;
6099 
6100 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6101 	_scsih_ublock_io_device(ioc, sas_address);
6102 
6103 	if (sas_device)
6104 		sas_device_put(sas_device);
6105 	return;
6106 
6107 out_unlock:
6108 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6109 	if (sas_device)
6110 		sas_device_put(sas_device);
6111 }
6112 
6113 /**
6114  * _scsih_add_device -  creating sas device object
6115  * @ioc: per adapter object
6116  * @handle: sas device handle
 * @phy_num: phy number the end device is attached to
 * @is_pd: is this a hidden raid component
6119  *
6120  * Creating end device object, stored in ioc->sas_device_list.
6121  *
6122  * Return: 0 for success, non-zero for failure.
6123  */
6124 static int
6125 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6126 	u8 is_pd)
6127 {
6128 	Mpi2ConfigReply_t mpi_reply;
6129 	Mpi2SasDevicePage0_t sas_device_pg0;
6130 	struct _sas_device *sas_device;
6131 	struct _enclosure_node *enclosure_dev = NULL;
6132 	u32 ioc_status;
6133 	u64 sas_address;
6134 	u32 device_info;
6135 
6136 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6137 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6138 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6139 			__FILE__, __LINE__, __func__);
6140 		return -1;
6141 	}
6142 
6143 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6144 	    MPI2_IOCSTATUS_MASK;
6145 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6146 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6147 			__FILE__, __LINE__, __func__);
6148 		return -1;
6149 	}
6150 
6151 	/* check if this is end device */
6152 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6153 	if (!(_scsih_is_end_device(device_info)))
6154 		return -1;
6155 	set_bit(handle, ioc->pend_os_device_add);
6156 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6157 
6158 	/* check if device is present */
6159 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6160 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6161 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
6162 			handle);
6163 		return -1;
6164 	}
6165 
6166 	/* check if there were any issues with discovery */
6167 	if (_scsih_check_access_status(ioc, sas_address, handle,
6168 	    sas_device_pg0.AccessStatus))
6169 		return -1;
6170 
6171 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6172 					sas_address);
6173 	if (sas_device) {
6174 		clear_bit(handle, ioc->pend_os_device_add);
6175 		sas_device_put(sas_device);
6176 		return -1;
6177 	}
6178 
6179 	if (sas_device_pg0.EnclosureHandle) {
6180 		enclosure_dev =
6181 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6182 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6183 		if (enclosure_dev == NULL)
6184 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6185 				 sas_device_pg0.EnclosureHandle);
6186 	}
6187 
6188 	sas_device = kzalloc(sizeof(struct _sas_device),
6189 	    GFP_KERNEL);
6190 	if (!sas_device) {
6191 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6192 			__FILE__, __LINE__, __func__);
		return -1;
6194 	}
6195 
6196 	kref_init(&sas_device->refcount);
6197 	sas_device->handle = handle;
6198 	if (_scsih_get_sas_address(ioc,
6199 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6200 	    &sas_device->sas_address_parent) != 0)
6201 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6202 			__FILE__, __LINE__, __func__);
6203 	sas_device->enclosure_handle =
6204 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6205 	if (sas_device->enclosure_handle != 0)
6206 		sas_device->slot =
6207 		    le16_to_cpu(sas_device_pg0.Slot);
6208 	sas_device->device_info = device_info;
6209 	sas_device->sas_address = sas_address;
6210 	sas_device->phy = sas_device_pg0.PhyNum;
6211 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6212 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6213 
6214 	if (le16_to_cpu(sas_device_pg0.Flags)
6215 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6216 		sas_device->enclosure_level =
6217 			sas_device_pg0.EnclosureLevel;
6218 		memcpy(sas_device->connector_name,
6219 			sas_device_pg0.ConnectorName, 4);
6220 		sas_device->connector_name[4] = '\0';
6221 	} else {
6222 		sas_device->enclosure_level = 0;
6223 		sas_device->connector_name[0] = '\0';
6224 	}
6225 	/* get enclosure_logical_id & chassis_slot*/
6226 	sas_device->is_chassis_slot_valid = 0;
6227 	if (enclosure_dev) {
6228 		sas_device->enclosure_logical_id =
6229 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6230 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6231 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6232 			sas_device->is_chassis_slot_valid = 1;
6233 			sas_device->chassis_slot =
6234 					enclosure_dev->pg0.ChassisSlot;
6235 		}
6236 	}
6237 
6238 	/* get device name */
6239 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6240 
6241 	if (ioc->wait_for_discovery_to_complete)
6242 		_scsih_sas_device_init_add(ioc, sas_device);
6243 	else
6244 		_scsih_sas_device_add(ioc, sas_device);
6245 
6246 	sas_device_put(sas_device);
6247 	return 0;
6248 }
6249 
6250 /**
6251  * _scsih_remove_device -  removing sas device object
6252  * @ioc: per adapter object
6253  * @sas_device: the sas_device object
6254  */
6255 static void
6256 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6257 	struct _sas_device *sas_device)
6258 {
6259 	struct MPT3SAS_TARGET *sas_target_priv_data;
6260 
6261 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6262 	     (sas_device->pfa_led_on)) {
6263 		_scsih_turn_off_pfa_led(ioc, sas_device);
6264 		sas_device->pfa_led_on = 0;
6265 	}
6266 
6267 	dewtprintk(ioc,
6268 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6269 			    __func__,
6270 			    sas_device->handle, (u64)sas_device->sas_address));
6271 
6272 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6273 	    NULL, NULL));
6274 
6275 	if (sas_device->starget && sas_device->starget->hostdata) {
6276 		sas_target_priv_data = sas_device->starget->hostdata;
6277 		sas_target_priv_data->deleted = 1;
6278 		_scsih_ublock_io_device(ioc, sas_device->sas_address);
6279 		sas_target_priv_data->handle =
6280 		     MPT3SAS_INVALID_DEVICE_HANDLE;
6281 	}
6282 
6283 	if (!ioc->hide_drives)
6284 		mpt3sas_transport_port_remove(ioc,
6285 		    sas_device->sas_address,
6286 		    sas_device->sas_address_parent);
6287 
6288 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6289 		 sas_device->handle, (u64)sas_device->sas_address);
6290 
6291 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6292 
6293 	dewtprintk(ioc,
6294 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6295 			    __func__,
6296 			    sas_device->handle, (u64)sas_device->sas_address));
6297 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6298 	    NULL, NULL));
6299 }
6300 
6301 /**
6302  * _scsih_sas_topology_change_event_debug - debug for topology event
6303  * @ioc: per adapter object
6304  * @event_data: event data payload
6305  * Context: user.
6306  */
6307 static void
6308 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6309 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6310 {
6311 	int i;
6312 	u16 handle;
6313 	u16 reason_code;
6314 	u8 phy_number;
6315 	char *status_str = NULL;
6316 	u8 link_rate, prev_link_rate;
6317 
6318 	switch (event_data->ExpStatus) {
6319 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6320 		status_str = "add";
6321 		break;
6322 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6323 		status_str = "remove";
6324 		break;
6325 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6326 	case 0:
6327 		status_str =  "responding";
6328 		break;
6329 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6330 		status_str = "remove delay";
6331 		break;
6332 	default:
6333 		status_str = "unknown status";
6334 		break;
6335 	}
6336 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6337 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6338 	    "start_phy(%02d), count(%d)\n",
6339 	    le16_to_cpu(event_data->ExpanderDevHandle),
6340 	    le16_to_cpu(event_data->EnclosureHandle),
6341 	    event_data->StartPhyNum, event_data->NumEntries);
6342 	for (i = 0; i < event_data->NumEntries; i++) {
6343 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6344 		if (!handle)
6345 			continue;
6346 		phy_number = event_data->StartPhyNum + i;
6347 		reason_code = event_data->PHY[i].PhyStatus &
6348 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6349 		switch (reason_code) {
6350 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6351 			status_str = "target add";
6352 			break;
6353 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6354 			status_str = "target remove";
6355 			break;
6356 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6357 			status_str = "delay target remove";
6358 			break;
6359 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6360 			status_str = "link rate change";
6361 			break;
6362 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6363 			status_str = "target responding";
6364 			break;
6365 		default:
6366 			status_str = "unknown";
6367 			break;
6368 		}
6369 		link_rate = event_data->PHY[i].LinkRate >> 4;
6370 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6371 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6372 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6373 		    handle, status_str, link_rate, prev_link_rate);
6374 
6375 	}
6376 }
6377 
6378 /**
6379  * _scsih_sas_topology_change_event - handle topology changes
6380  * @ioc: per adapter object
6381  * @fw_event: The fw_event_work object
6382  * Context: user.
6383  *
6384  */
6385 static int
6386 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6387 	struct fw_event_work *fw_event)
6388 {
6389 	int i;
6390 	u16 parent_handle, handle;
6391 	u16 reason_code;
6392 	u8 phy_number, max_phys;
6393 	struct _sas_node *sas_expander;
6394 	u64 sas_address;
6395 	unsigned long flags;
6396 	u8 link_rate, prev_link_rate;
6397 	Mpi2EventDataSasTopologyChangeList_t *event_data =
6398 		(Mpi2EventDataSasTopologyChangeList_t *)
6399 		fw_event->event_data;
6400 
6401 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6402 		_scsih_sas_topology_change_event_debug(ioc, event_data);
6403 
6404 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6405 		return 0;
6406 
6407 	if (!ioc->sas_hba.num_phys)
6408 		_scsih_sas_host_add(ioc);
6409 	else
6410 		_scsih_sas_host_refresh(ioc);
6411 
6412 	if (fw_event->ignore) {
6413 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6414 		return 0;
6415 	}
6416 
6417 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6418 
6419 	/* handle expander add */
6420 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6421 		if (_scsih_expander_add(ioc, parent_handle) != 0)
6422 			return 0;
6423 
6424 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6425 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6426 	    parent_handle);
6427 	if (sas_expander) {
6428 		sas_address = sas_expander->sas_address;
6429 		max_phys = sas_expander->num_phys;
6430 	} else if (parent_handle < ioc->sas_hba.num_phys) {
6431 		sas_address = ioc->sas_hba.sas_address;
6432 		max_phys = ioc->sas_hba.num_phys;
6433 	} else {
6434 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6435 		return 0;
6436 	}
6437 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6438 
6439 	/* handle siblings events */
6440 	for (i = 0; i < event_data->NumEntries; i++) {
6441 		if (fw_event->ignore) {
6442 			dewtprintk(ioc,
6443 				   ioc_info(ioc, "ignoring expander event\n"));
6444 			return 0;
6445 		}
6446 		if (ioc->remove_host || ioc->pci_error_recovery)
6447 			return 0;
6448 		phy_number = event_data->StartPhyNum + i;
6449 		if (phy_number >= max_phys)
6450 			continue;
6451 		reason_code = event_data->PHY[i].PhyStatus &
6452 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
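		/*
		 * Skip vacant phys unless the attached device is being
		 * removed, which still has to be processed.
		 */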
6453 		if ((event_data->PHY[i].PhyStatus &
6454 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6455 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6456 				continue;
6457 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6458 		if (!handle)
6459 			continue;
6460 		link_rate = event_data->PHY[i].LinkRate >> 4;
6461 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
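		/*
		 * On a link rate change the transport links are refreshed
		 * first; if the device is still waiting to be added to the
		 * OS (pend_os_device_add), fall through and treat it as a
		 * target add.
		 */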
6462 		switch (reason_code) {
6463 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6464 
6465 			if (ioc->shost_recovery)
6466 				break;
6467 
6468 			if (link_rate == prev_link_rate)
6469 				break;
6470 
6471 			mpt3sas_transport_update_links(ioc, sas_address,
6472 			    handle, phy_number, link_rate);
6473 
6474 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6475 				break;
6476 
6477 			_scsih_check_device(ioc, sas_address, handle,
6478 			    phy_number, link_rate);
6479 
6480 			if (!test_bit(handle, ioc->pend_os_device_add))
6481 				break;
6482 
6483 			/* fall through */
6484 
6485 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6486 
6487 			if (ioc->shost_recovery)
6488 				break;
6489 
6490 			mpt3sas_transport_update_links(ioc, sas_address,
6491 			    handle, phy_number, link_rate);
6492 
6493 			_scsih_add_device(ioc, handle, phy_number, 0);
6494 
6495 			break;
6496 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6497 
6498 			_scsih_device_remove_by_handle(ioc, handle);
6499 			break;
6500 		}
6501 	}
6502 
6503 	/* handle expander removal */
6504 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6505 	    sas_expander)
6506 		mpt3sas_expander_remove(ioc, sas_address);
6507 
6508 	return 0;
6509 }
6510 
6511 /**
6512  * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
6514  * @event_data: event data payload
6515  * Context: user.
6516  */
6517 static void
6518 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6519 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6520 {
6521 	char *reason_str = NULL;
6522 
6523 	switch (event_data->ReasonCode) {
6524 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6525 		reason_str = "smart data";
6526 		break;
6527 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6528 		reason_str = "unsupported device discovered";
6529 		break;
6530 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6531 		reason_str = "internal device reset";
6532 		break;
6533 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6534 		reason_str = "internal task abort";
6535 		break;
6536 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6537 		reason_str = "internal task abort set";
6538 		break;
6539 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6540 		reason_str = "internal clear task set";
6541 		break;
6542 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6543 		reason_str = "internal query task";
6544 		break;
6545 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6546 		reason_str = "sata init failure";
6547 		break;
6548 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6549 		reason_str = "internal device reset complete";
6550 		break;
6551 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6552 		reason_str = "internal task abort complete";
6553 		break;
6554 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6555 		reason_str = "internal async notification";
6556 		break;
6557 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6558 		reason_str = "expander reduced functionality";
6559 		break;
6560 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6561 		reason_str = "expander reduced functionality complete";
6562 		break;
6563 	default:
6564 		reason_str = "unknown reason";
6565 		break;
6566 	}
6567 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6568 		 reason_str, le16_to_cpu(event_data->DevHandle),
6569 		 (u64)le64_to_cpu(event_data->SASAddress),
6570 		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	else
		pr_cont("\n");
6575 }
6576 
6577 /**
6578  * _scsih_sas_device_status_change_event - handle device status change
6579  * @ioc: per adapter object
 * @event_data: event data payload
6581  * Context: user.
6582  */
6583 static void
6584 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6585 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6586 {
6587 	struct MPT3SAS_TARGET *target_priv_data;
6588 	struct _sas_device *sas_device;
6589 	u64 sas_address;
6590 	unsigned long flags;
6591 
6592 	/* In MPI Revision K (0xC), the internal device reset complete was
6593 	 * implemented, so avoid setting tm_busy flag for older firmware.
6594 	 */
6595 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6596 		return;
6597 
6598 	if (event_data->ReasonCode !=
6599 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6600 	   event_data->ReasonCode !=
6601 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6602 		return;
6603 
6604 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6605 	sas_address = le64_to_cpu(event_data->SASAddress);
6606 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6607 	    sas_address);
6608 
6609 	if (!sas_device || !sas_device->starget)
6610 		goto out;
6611 
6612 	target_priv_data = sas_device->starget->hostdata;
6613 	if (!target_priv_data)
6614 		goto out;
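	/*
	 * Set tm_busy while the firmware runs an internal device reset and
	 * clear it again when the matching completion event arrives.
	 */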
6615 
6616 	if (event_data->ReasonCode ==
6617 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6618 		target_priv_data->tm_busy = 1;
6619 	else
6620 		target_priv_data->tm_busy = 0;
6621 
6622 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6623 		ioc_info(ioc,
6624 		    "%s tm_busy flag for handle(0x%04x)\n",
6625 		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6626 		    target_priv_data->handle);
6627 
6628 out:
6629 	if (sas_device)
6630 		sas_device_put(sas_device);
6631 
6632 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6633 }
6634 
6635 
6636 /**
6637  * _scsih_check_pcie_access_status - check access flags
6638  * @ioc: per adapter object
6639  * @wwid: wwid
6640  * @handle: sas device handle
6641  * @access_status: errors returned during discovery of the device
6642  *
6643  * Return: 0 for success, else failure
6644  */
6645 static u8
6646 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6647 	u16 handle, u8 access_status)
6648 {
6649 	u8 rc = 1;
6650 	char *desc = NULL;
6651 
6652 	switch (access_status) {
6653 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6654 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6655 		rc = 0;
6656 		break;
6657 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6658 		desc = "PCIe device capability failed";
6659 		break;
6660 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6661 		desc = "PCIe device blocked";
6662 		ioc_info(ioc,
6663 		    "Device with Access Status (%s): wwid(0x%016llx), "
6664 		    "handle(0x%04x)\n ll only be added to the internal list",
6665 		    desc, (u64)wwid, handle);
6666 		rc = 0;
6667 		break;
6668 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6669 		desc = "PCIe device mem space access failed";
6670 		break;
6671 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6672 		desc = "PCIe device unsupported";
6673 		break;
6674 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6675 		desc = "PCIe device MSIx Required";
6676 		break;
6677 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6678 		desc = "PCIe device init fail max";
6679 		break;
6680 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6681 		desc = "PCIe device status unknown";
6682 		break;
6683 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6684 		desc = "nvme ready timeout";
6685 		break;
6686 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6687 		desc = "nvme device configuration unsupported";
6688 		break;
6689 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6690 		desc = "nvme identify failed";
6691 		break;
6692 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6693 		desc = "nvme qconfig failed";
6694 		break;
6695 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6696 		desc = "nvme qcreation failed";
6697 		break;
6698 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6699 		desc = "nvme eventcfg failed";
6700 		break;
6701 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6702 		desc = "nvme get feature stat failed";
6703 		break;
6704 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6705 		desc = "nvme idle timeout";
6706 		break;
6707 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6708 		desc = "nvme failure status";
6709 		break;
6710 	default:
6711 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6712 			access_status, (u64)wwid, handle);
6713 		return rc;
6714 	}
6715 
6716 	if (!rc)
6717 		return rc;
6718 
6719 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6720 		 desc, (u64)wwid, handle);
6721 	return rc;
6722 }
6723 
6724 /**
6725  * _scsih_pcie_device_remove_from_sml - remove pcie device
6726  * from SML and free up associated memory
6727  * @ioc: per adapter object
6728  * @pcie_device: the pcie_device object
6729  */
6730 static void
6731 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6732 	struct _pcie_device *pcie_device)
6733 {
6734 	struct MPT3SAS_TARGET *sas_target_priv_data;
6735 
6736 	dewtprintk(ioc,
6737 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6738 			    __func__,
6739 			    pcie_device->handle, (u64)pcie_device->wwid));
6740 	if (pcie_device->enclosure_handle != 0)
6741 		dewtprintk(ioc,
6742 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6743 				    __func__,
6744 				    (u64)pcie_device->enclosure_logical_id,
6745 				    pcie_device->slot));
6746 	if (pcie_device->connector_name[0] != '\0')
6747 		dewtprintk(ioc,
6748 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6749 				    __func__,
6750 				    pcie_device->enclosure_level,
6751 				    pcie_device->connector_name));
6752 
6753 	if (pcie_device->starget && pcie_device->starget->hostdata) {
6754 		sas_target_priv_data = pcie_device->starget->hostdata;
6755 		sas_target_priv_data->deleted = 1;
6756 		_scsih_ublock_io_device(ioc, pcie_device->wwid);
6757 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6758 	}
6759 
6760 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6761 		 pcie_device->handle, (u64)pcie_device->wwid);
6762 	if (pcie_device->enclosure_handle != 0)
6763 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6764 			 (u64)pcie_device->enclosure_logical_id,
6765 			 pcie_device->slot);
6766 	if (pcie_device->connector_name[0] != '\0')
6767 		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name(%s)\n",
6768 			 pcie_device->enclosure_level,
6769 			 pcie_device->connector_name);
6770 
6771 	if (pcie_device->starget && (pcie_device->access_status !=
6772 				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6773 		scsi_remove_target(&pcie_device->starget->dev);
6774 	dewtprintk(ioc,
6775 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6776 			    __func__,
6777 			    pcie_device->handle, (u64)pcie_device->wwid));
6778 	if (pcie_device->enclosure_handle != 0)
6779 		dewtprintk(ioc,
6780 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6781 				    __func__,
6782 				    (u64)pcie_device->enclosure_logical_id,
6783 				    pcie_device->slot));
6784 	if (pcie_device->connector_name[0] != '\0')
6785 		dewtprintk(ioc,
6786 			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
6787 				    __func__,
6788 				    pcie_device->enclosure_level,
6789 				    pcie_device->connector_name));
6790 
6791 	kfree(pcie_device->serial_number);
6792 }
6793 
6794 
6795 /**
6796  * _scsih_pcie_check_device - checking device responsiveness
6797  * @ioc: per adapter object
6798  * @handle: attached device handle
6799  */
6800 static void
6801 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6802 {
6803 	Mpi2ConfigReply_t mpi_reply;
6804 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6805 	u32 ioc_status;
6806 	struct _pcie_device *pcie_device;
6807 	u64 wwid;
6808 	unsigned long flags;
6809 	struct scsi_target *starget;
6810 	struct MPT3SAS_TARGET *sas_target_priv_data;
6811 	u32 device_info;
6812 
6813 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6814 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6815 		return;
6816 
6817 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6818 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6819 		return;
6820 
6821 	/* check if this is end device */
6822 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6823 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
6824 		return;
6825 
6826 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6827 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6828 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6829 
6830 	if (!pcie_device) {
6831 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6832 		return;
6833 	}
6834 
6835 	if (unlikely(pcie_device->handle != handle)) {
6836 		starget = pcie_device->starget;
6837 		sas_target_priv_data = starget->hostdata;
6838 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
6839 		starget_printk(KERN_INFO, starget,
6840 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
6841 		    pcie_device->handle, handle);
6842 		sas_target_priv_data->handle = handle;
6843 		pcie_device->handle = handle;
6844 
6845 		if (le32_to_cpu(pcie_device_pg0.Flags) &
6846 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6847 			pcie_device->enclosure_level =
6848 			    pcie_device_pg0.EnclosureLevel;
6849 			memcpy(&pcie_device->connector_name[0],
6850 			    &pcie_device_pg0.ConnectorName[0], 4);
6851 		} else {
6852 			pcie_device->enclosure_level = 0;
6853 			pcie_device->connector_name[0] = '\0';
6854 		}
6855 	}
6856 
6857 	/* check if device is present */
6858 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6859 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6860 		ioc_info(ioc, "device is not present: handle(0x%04x)!!!\n",
6861 			 handle);
6862 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6863 		pcie_device_put(pcie_device);
6864 		return;
6865 	}
6866 
6867 	/* check if there were any issues with discovery */
6868 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6869 	    pcie_device_pg0.AccessStatus)) {
6870 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6871 		pcie_device_put(pcie_device);
6872 		return;
6873 	}
6874 
6875 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6876 	pcie_device_put(pcie_device);
6877 
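	/*
	 * The device answered Page 0 and passed the access status check,
	 * so any I/O previously blocked for this wwid can be resumed.
	 */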
6878 	_scsih_ublock_io_device(ioc, wwid);
6879 
6880 	return;
6881 }
6882 
6883 /**
6884  * _scsih_pcie_add_device - create pcie device object
6885  * @ioc: per adapter object
6886  * @handle: pcie device handle
6887  *
6888  * Create an end device object and store it in ioc->pcie_device_list.
6889  *
6890  * Return: 1 means queue the event later, 0 means complete the event
6891  */
6892 static int
6893 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6894 {
6895 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6896 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
6897 	Mpi2ConfigReply_t mpi_reply;
6898 	struct _pcie_device *pcie_device;
6899 	struct _enclosure_node *enclosure_dev;
6900 	u32 ioc_status;
6901 	u64 wwid;
6902 
6903 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6904 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6905 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6906 			__FILE__, __LINE__, __func__);
6907 		return 0;
6908 	}
6909 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6910 	    MPI2_IOCSTATUS_MASK;
6911 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6912 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6913 			__FILE__, __LINE__, __func__);
6914 		return 0;
6915 	}
6916 
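	/*
	 * Note that an OS-level device add is pending for this handle.  The
	 * PCIe topology change handler checks this bit so it can convert a
	 * missed link event into a device add, and it is cleared below if
	 * the device turns out to be known already.
	 */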
6917 	set_bit(handle, ioc->pend_os_device_add);
6918 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6919 
6920 	/* check if device is present */
6921 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6922 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6923 		ioc_err(ioc, "device is not present: handle(0x%04x)!!!\n",
6924 			handle);
6925 		return 0;
6926 	}
6927 
6928 	/* check if there were any issues with discovery */
6929 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6930 	    pcie_device_pg0.AccessStatus))
6931 		return 0;
6932 
6933 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
6934 	    (pcie_device_pg0.DeviceInfo))))
6935 		return 0;
6936 
6937 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6938 	if (pcie_device) {
6939 		clear_bit(handle, ioc->pend_os_device_add);
6940 		pcie_device_put(pcie_device);
6941 		return 0;
6942 	}
6943 
6944 	/* PCIe Device Page 2 contains read-only information about a
6945 	 * specific NVMe device; therefore, this page is valid only for
6946 	 * NVMe devices and is skipped for PCIe devices of type SCSI.
6947 	 */
6948 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
6949 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6950 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6951 		    &pcie_device_pg2, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE,
6952 		    handle)) {
6953 			ioc_err(ioc,
6954 			    "failure at %s:%d/%s()!\n", __FILE__,
6955 			    __LINE__, __func__);
6956 			return 0;
6957 		}
6958 
6959 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6960 					MPI2_IOCSTATUS_MASK;
6961 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6962 			ioc_err(ioc,
6963 			    "failure at %s:%d/%s()!\n", __FILE__,
6964 			    __LINE__, __func__);
6965 			return 0;
6966 		}
6967 	}
6968 
6969 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6970 	if (!pcie_device) {
6971 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6972 			__FILE__, __LINE__, __func__);
6973 		return 0;
6974 	}
6975 
6976 	kref_init(&pcie_device->refcount);
6977 	pcie_device->id = ioc->pcie_target_id++;
6978 	pcie_device->channel = PCIE_CHANNEL;
6979 	pcie_device->handle = handle;
6980 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
6981 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6982 	pcie_device->wwid = wwid;
6983 	pcie_device->port_num = pcie_device_pg0.PortNum;
6984 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6985 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6986 
6987 	pcie_device->enclosure_handle =
6988 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6989 	if (pcie_device->enclosure_handle != 0)
6990 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6991 
6992 	if (le32_to_cpu(pcie_device_pg0.Flags) &
6993 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6994 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6995 		memcpy(&pcie_device->connector_name[0],
6996 		    &pcie_device_pg0.ConnectorName[0], 4);
6997 	} else {
6998 		pcie_device->enclosure_level = 0;
6999 		pcie_device->connector_name[0] = '\0';
7000 	}
7001 
7002 	/* get enclosure_logical_id */
7003 	if (pcie_device->enclosure_handle) {
7004 		enclosure_dev =
7005 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7006 						pcie_device->enclosure_handle);
7007 		if (enclosure_dev)
7008 			pcie_device->enclosure_logical_id =
7009 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7010 	}
7011 	/* TODO -- Add device name once FW supports it */
7012 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
7013 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7014 		pcie_device->nvme_mdts =
7015 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
7016 		pcie_device->shutdown_latency =
7017 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
7018 		/*
7019 		 * Set the IOC's max_shutdown_latency to the drive's RTD3 Entry
7020 		 * Latency if the drive's RTD3 Entry Latency is greater than
7021 		 * the IOC's current max_shutdown_latency.
7022 		 */
7023 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
7024 			ioc->max_shutdown_latency =
7025 				pcie_device->shutdown_latency;
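		/*
		 * Use the ControllerResetTO from Page 2 when the firmware
		 * provides one; otherwise fall back to a 30 second default.
		 */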
7026 		if (pcie_device_pg2.ControllerResetTO)
7027 			pcie_device->reset_timeout =
7028 			    pcie_device_pg2.ControllerResetTO;
7029 		else
7030 			pcie_device->reset_timeout = 30;
7031 	} else
7032 		pcie_device->reset_timeout = 30;
7033 
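	/*
	 * While initial discovery is still in progress the device is queued
	 * through the init-add path and reported to the SCSI midlayer
	 * later; otherwise it is added (and exposed) right away.
	 */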
7034 	if (ioc->wait_for_discovery_to_complete)
7035 		_scsih_pcie_device_init_add(ioc, pcie_device);
7036 	else
7037 		_scsih_pcie_device_add(ioc, pcie_device);
7038 
7039 	pcie_device_put(pcie_device);
7040 	return 0;
7041 }
7042 
7043 /**
7044  * _scsih_pcie_topology_change_event_debug - debug for topology
7045  * event
7046  * @ioc: per adapter object
7047  * @event_data: event data payload
7048  * Context: user.
7049  */
7050 static void
7051 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7052 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
7053 {
7054 	int i;
7055 	u16 handle;
7056 	u16 reason_code;
7057 	u8 port_number;
7058 	char *status_str = NULL;
7059 	u8 link_rate, prev_link_rate;
7060 
7061 	switch (event_data->SwitchStatus) {
7062 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
7063 		status_str = "add";
7064 		break;
7065 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
7066 		status_str = "remove";
7067 		break;
7068 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
7069 	case 0:
7070 		status_str =  "responding";
7071 		break;
7072 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
7073 		status_str = "remove delay";
7074 		break;
7075 	default:
7076 		status_str = "unknown status";
7077 		break;
7078 	}
7079 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
7080 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
7081 		"start_port(%02d), count(%d)\n",
7082 		le16_to_cpu(event_data->SwitchDevHandle),
7083 		le16_to_cpu(event_data->EnclosureHandle),
7084 		event_data->StartPortNum, event_data->NumEntries);
7085 	for (i = 0; i < event_data->NumEntries; i++) {
7086 		handle =
7087 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7088 		if (!handle)
7089 			continue;
7090 		port_number = event_data->StartPortNum + i;
7091 		reason_code = event_data->PortEntry[i].PortStatus;
7092 		switch (reason_code) {
7093 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7094 			status_str = "target add";
7095 			break;
7096 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7097 			status_str = "target remove";
7098 			break;
7099 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7100 			status_str = "delay target remove";
7101 			break;
7102 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7103 			status_str = "link rate change";
7104 			break;
7105 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7106 			status_str = "target responding";
7107 			break;
7108 		default:
7109 			status_str = "unknown";
7110 			break;
7111 		}
7112 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
7113 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7114 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7115 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7116 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7117 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
7118 			handle, status_str, link_rate, prev_link_rate);
7119 	}
7120 }
7121 
7122 /**
7123  * _scsih_pcie_topology_change_event - handle PCIe topology
7124  *  changes
7125  * @ioc: per adapter object
7126  * @fw_event: The fw_event_work object
7127  * Context: user.
7128  *
7129  */
7130 static void
7131 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7132 	struct fw_event_work *fw_event)
7133 {
7134 	int i;
7135 	u16 handle;
7136 	u16 reason_code;
7137 	u8 link_rate, prev_link_rate;
7138 	unsigned long flags;
7139 	int rc;
7140 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7141 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7142 	struct _pcie_device *pcie_device;
7143 
7144 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7145 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
7146 
7147 	if (ioc->shost_recovery || ioc->remove_host ||
7148 		ioc->pci_error_recovery)
7149 		return;
7150 
7151 	if (fw_event->ignore) {
7152 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7153 		return;
7154 	}
7155 
7156 	/* handle siblings events */
7157 	for (i = 0; i < event_data->NumEntries; i++) {
7158 		if (fw_event->ignore) {
7159 			dewtprintk(ioc,
7160 				   ioc_info(ioc, "ignoring switch event\n"));
7161 			return;
7162 		}
7163 		if (ioc->remove_host || ioc->pci_error_recovery)
7164 			return;
7165 		reason_code = event_data->PortEntry[i].PortStatus;
7166 		handle =
7167 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7168 		if (!handle)
7169 			continue;
7170 
7171 		link_rate = event_data->PortEntry[i].CurrentPortInfo
7172 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7173 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7174 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7175 
7176 		switch (reason_code) {
7177 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7178 			if (ioc->shost_recovery)
7179 				break;
7180 			if (link_rate == prev_link_rate)
7181 				break;
7182 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7183 				break;
7184 
7185 			_scsih_pcie_check_device(ioc, handle);
7186 
7187 			/* The code after this point handles the case where a
7188 			 * device has been added but keeps returning BUSY for
7189 			 * some time.  Then, before the Device Missing Delay
7190 			 * expires and the device becomes READY, the device is
7191 			 * removed and added back.
7192 			 */
7193 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7194 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7195 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7196 
7197 			if (pcie_device) {
7198 				pcie_device_put(pcie_device);
7199 				break;
7200 			}
7201 
7202 			if (!test_bit(handle, ioc->pend_os_device_add))
7203 				break;
7204 
7205 			dewtprintk(ioc,
7206 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7207 					    handle));
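			/*
			 * Rewrite the reason code in place so the DEV_ADDED
			 * case below picks this entry up on the fall-through.
			 */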
7208 			event_data->PortEntry[i].PortStatus &= 0xF0;
7209 			event_data->PortEntry[i].PortStatus |=
7210 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7211 			/* fall through */
7212 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7213 			if (ioc->shost_recovery)
7214 				break;
7215 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7216 				break;
7217 
7218 			rc = _scsih_pcie_add_device(ioc, handle);
7219 			if (!rc) {
7220 				/* mark entry vacant */
7221 				/* TODO This needs to be reviewed and fixed,
7222 				 * we dont have an entry
7223 				 * to make an event void like vacant
7224 				 */
7225 				event_data->PortEntry[i].PortStatus |=
7226 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7227 			}
7228 			break;
7229 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7230 			_scsih_pcie_device_remove_by_handle(ioc, handle);
7231 			break;
7232 		}
7233 	}
7234 }
7235 
7236 /**
7237  * _scsih_pcie_device_status_change_event_debug - debug for device event
7238  * @ioc: per adapter object
7239  * @event_data: event data payload
7240  * Context: user.
7241  */
7242 static void
7243 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7244 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7245 {
7246 	char *reason_str = NULL;
7247 
7248 	switch (event_data->ReasonCode) {
7249 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7250 		reason_str = "smart data";
7251 		break;
7252 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7253 		reason_str = "unsupported device discovered";
7254 		break;
7255 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7256 		reason_str = "internal device reset";
7257 		break;
7258 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7259 		reason_str = "internal task abort";
7260 		break;
7261 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7262 		reason_str = "internal abort task set";
7263 		break;
7264 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7265 		reason_str = "internal clear task set";
7266 		break;
7267 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7268 		reason_str = "internal query task";
7269 		break;
7270 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7271 		reason_str = "device init failure";
7272 		break;
7273 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7274 		reason_str = "internal device reset complete";
7275 		break;
7276 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7277 		reason_str = "internal task abort complete";
7278 		break;
7279 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7280 		reason_str = "internal async notification";
7281 		break;
7282 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7283 		reason_str = "pcie hot reset failed";
7284 		break;
7285 	default:
7286 		reason_str = "unknown reason";
7287 		break;
7288 	}
7289 
7290 	ioc_info(ioc, "PCIe device status change: (%s)\n"
7291 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7292 		 reason_str, le16_to_cpu(event_data->DevHandle),
7293 		 (u64)le64_to_cpu(event_data->WWID),
7294 		 le16_to_cpu(event_data->TaskTag));
7295 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7296 		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
7297 			event_data->ASC, event_data->ASCQ);
7298 	pr_cont("\n");
7299 }
7300 
7301 /**
7302  * _scsih_pcie_device_status_change_event - handle device status
7303  * change
7304  * @ioc: per adapter object
7305  * @fw_event: The fw_event_work object
7306  * Context: user.
7307  */
7308 static void
7309 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7310 	struct fw_event_work *fw_event)
7311 {
7312 	struct MPT3SAS_TARGET *target_priv_data;
7313 	struct _pcie_device *pcie_device;
7314 	u64 wwid;
7315 	unsigned long flags;
7316 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7317 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7318 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7319 		_scsih_pcie_device_status_change_event_debug(ioc,
7320 			event_data);
7321 
7322 	if (event_data->ReasonCode !=
7323 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7324 		event_data->ReasonCode !=
7325 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7326 		return;
7327 
7328 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7329 	wwid = le64_to_cpu(event_data->WWID);
7330 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7331 
7332 	if (!pcie_device || !pcie_device->starget)
7333 		goto out;
7334 
7335 	target_priv_data = pcie_device->starget->hostdata;
7336 	if (!target_priv_data)
7337 		goto out;
7338 
7339 	if (event_data->ReasonCode ==
7340 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7341 		target_priv_data->tm_busy = 1;
7342 	else
7343 		target_priv_data->tm_busy = 0;
7344 out:
7345 	if (pcie_device)
7346 		pcie_device_put(pcie_device);
7347 
7348 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7349 }
7350 
7351 /**
7352  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7353  * event
7354  * @ioc: per adapter object
7355  * @event_data: event data payload
7356  * Context: user.
7357  */
7358 static void
7359 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7360 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7361 {
7362 	char *reason_str = NULL;
7363 
7364 	switch (event_data->ReasonCode) {
7365 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7366 		reason_str = "enclosure add";
7367 		break;
7368 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7369 		reason_str = "enclosure remove";
7370 		break;
7371 	default:
7372 		reason_str = "unknown reason";
7373 		break;
7374 	}
7375 
7376 	ioc_info(ioc, "enclosure status change: (%s)\n"
7377 		 "\thandle(0x%04x), enclosure logical id(0x%016llx), start slot(%d)\n",
7378 		 reason_str,
7379 		 le16_to_cpu(event_data->EnclosureHandle),
7380 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7381 		 le16_to_cpu(event_data->StartSlot));
7382 }
7383 
7384 /**
7385  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7386  * @ioc: per adapter object
7387  * @fw_event: The fw_event_work object
7388  * Context: user.
7389  */
7390 static void
7391 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7392 	struct fw_event_work *fw_event)
7393 {
7394 	Mpi2ConfigReply_t mpi_reply;
7395 	struct _enclosure_node *enclosure_dev = NULL;
7396 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7397 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7398 	int rc;
7399 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7400 
7401 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7402 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7403 		     event_data);
7405 	if (ioc->shost_recovery)
7406 		return;
7407 
7408 	if (enclosure_handle)
7409 		enclosure_dev =
7410 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7411 						enclosure_handle);
7412 	switch (event_data->ReasonCode) {
7413 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7414 		if (!enclosure_dev) {
7415 			enclosure_dev =
7416 				kzalloc(sizeof(struct _enclosure_node),
7417 					GFP_KERNEL);
7418 			if (!enclosure_dev) {
7419 				ioc_err(ioc, "failure at %s:%d/%s()!\n",
7420 					 __FILE__, __LINE__, __func__);
7421 				return;
7422 			}
7423 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7424 				&enclosure_dev->pg0,
7425 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7426 				enclosure_handle);
7427 
7428 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7429 						MPI2_IOCSTATUS_MASK)) {
7430 				kfree(enclosure_dev);
7431 				return;
7432 			}
7433 
7434 			list_add_tail(&enclosure_dev->list,
7435 							&ioc->enclosure_list);
7436 		}
7437 		break;
7438 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7439 		if (enclosure_dev) {
7440 			list_del(&enclosure_dev->list);
7441 			kfree(enclosure_dev);
7442 		}
7443 		break;
7444 	default:
7445 		break;
7446 	}
7447 }
7448 
7449 /**
7450  * _scsih_sas_broadcast_primitive_event - handle broadcast events
7451  * @ioc: per adapter object
7452  * @fw_event: The fw_event_work object
7453  * Context: user.
7454  */
7455 static void
7456 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7457 	struct fw_event_work *fw_event)
7458 {
7459 	struct scsi_cmnd *scmd;
7460 	struct scsi_device *sdev;
7461 	struct scsiio_tracker *st;
7462 	u16 smid, handle;
7463 	u32 lun;
7464 	struct MPT3SAS_DEVICE *sas_device_priv_data;
7465 	u32 termination_count;
7466 	u32 query_count;
7467 	Mpi2SCSITaskManagementReply_t *mpi_reply;
7468 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7469 		(Mpi2EventDataSasBroadcastPrimitive_t *)
7470 		fw_event->event_data;
7471 	u16 ioc_status;
7472 	unsigned long flags;
7473 	int r;
7474 	u8 max_retries = 0;
7475 	u8 task_abort_retries;
7476 
7477 	mutex_lock(&ioc->tm_cmds.mutex);
7478 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7479 		 __func__, event_data->PhyNum, event_data->PortWidth);
7480 
7481 	_scsih_block_io_all_device(ioc);
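	/*
	 * I/O to all devices is blocked while outstanding commands are
	 * queried and, where needed, aborted; it is unblocked again on the
	 * way out unless a host reset has started.
	 */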
7482 
7483 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7484 	mpi_reply = ioc->tm_cmds.reply;
7485  broadcast_aen_retry:
7486 
7487 	/* sanity checks for retrying this loop */
7488 	if (max_retries++ == 5) {
7489 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7490 		goto out;
7491 	} else if (max_retries > 1)
7492 		dewtprintk(ioc,
7493 			   ioc_info(ioc, "%s: %d retry\n",
7494 				    __func__, max_retries - 1));
7495 
7496 	termination_count = 0;
7497 	query_count = 0;
7498 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7499 		if (ioc->shost_recovery)
7500 			goto out;
7501 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7502 		if (!scmd)
7503 			continue;
7504 		st = scsi_cmd_priv(scmd);
7505 		sdev = scmd->device;
7506 		sas_device_priv_data = sdev->hostdata;
7507 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7508 			continue;
7509 		 /* skip hidden raid components */
7510 		if (sas_device_priv_data->sas_target->flags &
7511 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
7512 			continue;
7513 		 /* skip volumes */
7514 		if (sas_device_priv_data->sas_target->flags &
7515 		    MPT_TARGET_FLAGS_VOLUME)
7516 			continue;
7517 		 /* skip PCIe devices */
7518 		if (sas_device_priv_data->sas_target->flags &
7519 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
7520 			continue;
7521 
7522 		handle = sas_device_priv_data->sas_target->handle;
7523 		lun = sas_device_priv_data->lun;
7524 		query_count++;
7525 
7526 		if (ioc->shost_recovery)
7527 			goto out;
7528 
7529 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7530 		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7531 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7532 			st->msix_io, 30, 0);
7533 		if (r == FAILED) {
7534 			sdev_printk(KERN_WARNING, sdev,
7535 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
7536 			    "QUERY_TASK: scmd(%p)\n", scmd);
7537 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7538 			goto broadcast_aen_retry;
7539 		}
7540 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7541 		    & MPI2_IOCSTATUS_MASK;
7542 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7543 			sdev_printk(KERN_WARNING, sdev,
7544 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7545 				ioc_status, scmd);
7546 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7547 			goto broadcast_aen_retry;
7548 		}
7549 
7550 		/* see if IO is still owned by IOC and target */
7551 		if (mpi_reply->ResponseCode ==
7552 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7553 		     mpi_reply->ResponseCode ==
7554 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7555 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7556 			continue;
7557 		}
7558 		task_abort_retries = 0;
7559  tm_retry:
7560 		if (task_abort_retries++ == 60) {
7561 			dewtprintk(ioc,
7562 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7563 					    __func__));
7564 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7565 			goto broadcast_aen_retry;
7566 		}
7567 
7568 		if (ioc->shost_recovery)
7569 			goto out_no_lock;
7570 
7571 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7572 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7573 			st->msix_io, 30, 0);
7574 		if (r == FAILED || st->cb_idx != 0xFF) {
7575 			sdev_printk(KERN_WARNING, sdev,
7576 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7577 			    "scmd(%p)\n", scmd);
7578 			goto tm_retry;
7579 		}
7580 
7581 		if (task_abort_retries > 1)
7582 			sdev_printk(KERN_WARNING, sdev,
7583 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7584 			    " scmd(%p)\n",
7585 			    task_abort_retries - 1, scmd);
7586 
7587 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7588 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7589 	}
7590 
7591 	if (ioc->broadcast_aen_pending) {
7592 		dewtprintk(ioc,
7593 			   ioc_info(ioc,
7594 				    "%s: loop back due to pending AEN\n",
7595 				    __func__));
7596 		ioc->broadcast_aen_pending = 0;
7597 		goto broadcast_aen_retry;
7598 	}
7599 
7600  out:
7601 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7602  out_no_lock:
7603 
7604 	dewtprintk(ioc,
7605 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7606 			    __func__, query_count, termination_count));
7607 
7608 	ioc->broadcast_aen_busy = 0;
7609 	if (!ioc->shost_recovery)
7610 		_scsih_ublock_io_all_device(ioc);
7611 	mutex_unlock(&ioc->tm_cmds.mutex);
7612 }
7613 
7614 /**
7615  * _scsih_sas_discovery_event - handle discovery events
7616  * @ioc: per adapter object
7617  * @fw_event: The fw_event_work object
7618  * Context: user.
7619  */
7620 static void
7621 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7622 	struct fw_event_work *fw_event)
7623 {
7624 	Mpi2EventDataSasDiscovery_t *event_data =
7625 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7626 
7627 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7628 		ioc_info(ioc, "discovery event: (%s)",
7629 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7630 			 "start" : "stop");
7631 		if (event_data->DiscoveryStatus)
7632 			pr_cont(" discovery_status(0x%08x)",
7633 				le32_to_cpu(event_data->DiscoveryStatus));
7634 		pr_cont("\n");
7635 	}
7636 
7637 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7638 	    !ioc->sas_hba.num_phys) {
7639 		if (disable_discovery > 0 && ioc->shost_recovery) {
7640 			/* Wait for the reset to complete */
7641 			while (ioc->shost_recovery)
7642 				ssleep(1);
7643 		}
7644 		_scsih_sas_host_add(ioc);
7645 	}
7646 }
7647 
7648 /**
7649  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7650  *						events
7651  * @ioc: per adapter object
7652  * @fw_event: The fw_event_work object
7653  * Context: user.
7654  */
7655 static void
7656 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7657 	struct fw_event_work *fw_event)
7658 {
7659 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7660 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7661 
7662 	switch (event_data->ReasonCode) {
7663 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7664 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7665 			 le16_to_cpu(event_data->DevHandle),
7666 			 (u64)le64_to_cpu(event_data->SASAddress),
7667 			 event_data->PhysicalPort);
7668 		break;
7669 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7670 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7671 			 le16_to_cpu(event_data->DevHandle),
7672 			 (u64)le64_to_cpu(event_data->SASAddress),
7673 			 event_data->PhysicalPort);
7674 		break;
7675 	default:
7676 		break;
7677 	}
7678 }
7679 
7680 /**
7681  * _scsih_pcie_enumeration_event - handle enumeration events
7682  * @ioc: per adapter object
7683  * @fw_event: The fw_event_work object
7684  * Context: user.
7685  */
7686 static void
7687 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7688 	struct fw_event_work *fw_event)
7689 {
7690 	Mpi26EventDataPCIeEnumeration_t *event_data =
7691 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7692 
7693 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7694 		return;
7695 
7696 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7697 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7698 		 "started" : "completed",
7699 		 event_data->Flags);
7700 	if (event_data->EnumerationStatus)
7701 		pr_cont(" enumeration_status(0x%08x)",
7702 			le32_to_cpu(event_data->EnumerationStatus));
7703 	pr_cont("\n");
7704 }
7705 
7706 /**
7707  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7708  * @ioc: per adapter object
7709  * @handle: device handle for physical disk
7710  * @phys_disk_num: physical disk number
7711  *
7712  * Return: 0 for success, else failure.
7713  */
7714 static int
7715 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7716 {
7717 	Mpi2RaidActionRequest_t *mpi_request;
7718 	Mpi2RaidActionReply_t *mpi_reply;
7719 	u16 smid;
7720 	u8 issue_reset = 0;
7721 	int rc = 0;
7722 	u16 ioc_status;
7723 	u32 log_info;
7724 
7725 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7726 		return rc;
7727 
7728 	mutex_lock(&ioc->scsih_cmds.mutex);
7729 
7730 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7731 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7732 		rc = -EAGAIN;
7733 		goto out;
7734 	}
7735 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7736 
7737 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7738 	if (!smid) {
7739 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7740 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7741 		rc = -EAGAIN;
7742 		goto out;
7743 	}
7744 
7745 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7746 	ioc->scsih_cmds.smid = smid;
7747 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7748 
7749 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7750 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7751 	mpi_request->PhysDiskNum = phys_disk_num;
7752 
7753 	dewtprintk(ioc,
7754 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7755 			    handle, phys_disk_num));
7756 
7757 	init_completion(&ioc->scsih_cmds.done);
7758 	ioc->put_smid_default(ioc, smid);
7759 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7760 
7761 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7762 		mpt3sas_check_cmd_timeout(ioc,
7763 		    ioc->scsih_cmds.status, mpi_request,
7764 		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
7765 		rc = -EFAULT;
7766 		goto out;
7767 	}
7768 
7769 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7770 
7771 		mpi_reply = ioc->scsih_cmds.reply;
7772 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7773 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7774 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
7775 		else
7776 			log_info = 0;
7777 		ioc_status &= MPI2_IOCSTATUS_MASK;
7778 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7779 			dewtprintk(ioc,
7780 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7781 					    ioc_status, log_info));
7782 			rc = -EFAULT;
7783 		} else
7784 			dewtprintk(ioc,
7785 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7786 	}
7787 
7788  out:
7789 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7790 	mutex_unlock(&ioc->scsih_cmds.mutex);
7791 
7792 	if (issue_reset)
7793 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7794 	return rc;
7795 }
7796 
7797 /**
7798  * _scsih_reprobe_lun - reprobing lun
7799  * @sdev: scsi device struct
7800  * @no_uld_attach: sdev->no_uld_attach flag setting
7801  *
7802  */
7803 static void
7804 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7805 {
7806 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7807 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7808 	    sdev->no_uld_attach ? "hiding" : "exposing");
7809 	WARN_ON(scsi_device_reprobe(sdev));
7810 }
7811 
7812 /**
7813  * _scsih_sas_volume_add - add new volume
7814  * @ioc: per adapter object
7815  * @element: IR config element data
7816  * Context: user.
7817  */
7818 static void
7819 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7820 	Mpi2EventIrConfigElement_t *element)
7821 {
7822 	struct _raid_device *raid_device;
7823 	unsigned long flags;
7824 	u64 wwid;
7825 	u16 handle = le16_to_cpu(element->VolDevHandle);
7826 	int rc;
7827 
7828 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7829 	if (!wwid) {
7830 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7831 			__FILE__, __LINE__, __func__);
7832 		return;
7833 	}
7834 
7835 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7836 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7837 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7838 
7839 	if (raid_device)
7840 		return;
7841 
7842 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7843 	if (!raid_device) {
7844 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7845 			__FILE__, __LINE__, __func__);
7846 		return;
7847 	}
7848 
7849 	raid_device->id = ioc->sas_id++;
7850 	raid_device->channel = RAID_CHANNEL;
7851 	raid_device->handle = handle;
7852 	raid_device->wwid = wwid;
7853 	_scsih_raid_device_add(ioc, raid_device);
7854 	if (!ioc->wait_for_discovery_to_complete) {
7855 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7856 		    raid_device->id, 0);
7857 		if (rc)
7858 			_scsih_raid_device_remove(ioc, raid_device);
7859 	} else {
7860 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
7861 		_scsih_determine_boot_device(ioc, raid_device, 1);
7862 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7863 	}
7864 }
7865 
7866 /**
7867  * _scsih_sas_volume_delete - delete volume
7868  * @ioc: per adapter object
7869  * @handle: volume device handle
7870  * Context: user.
7871  */
7872 static void
7873 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7874 {
7875 	struct _raid_device *raid_device;
7876 	unsigned long flags;
7877 	struct MPT3SAS_TARGET *sas_target_priv_data;
7878 	struct scsi_target *starget = NULL;
7879 
7880 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7881 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7882 	if (raid_device) {
7883 		if (raid_device->starget) {
7884 			starget = raid_device->starget;
7885 			sas_target_priv_data = starget->hostdata;
7886 			sas_target_priv_data->deleted = 1;
7887 		}
7888 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7889 			 raid_device->handle, (u64)raid_device->wwid);
7890 		list_del(&raid_device->list);
7891 		kfree(raid_device);
7892 	}
7893 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7894 	if (starget)
7895 		scsi_remove_target(&starget->dev);
7896 }
7897 
7898 /**
7899  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7900  * @ioc: per adapter object
7901  * @element: IR config element data
7902  * Context: user.
7903  */
7904 static void
7905 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7906 	Mpi2EventIrConfigElement_t *element)
7907 {
7908 	struct _sas_device *sas_device;
7909 	struct scsi_target *starget = NULL;
7910 	struct MPT3SAS_TARGET *sas_target_priv_data;
7911 	unsigned long flags;
7912 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7913 
7914 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7915 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7916 	if (sas_device) {
7917 		sas_device->volume_handle = 0;
7918 		sas_device->volume_wwid = 0;
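		/*
		 * Clearing the pd_handles bit stops this device from being
		 * treated as a hidden RAID component from now on.
		 */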
7919 		clear_bit(handle, ioc->pd_handles);
7920 		if (sas_device->starget && sas_device->starget->hostdata) {
7921 			starget = sas_device->starget;
7922 			sas_target_priv_data = starget->hostdata;
7923 			sas_target_priv_data->flags &=
7924 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7925 		}
7926 	}
7927 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7928 	if (!sas_device)
7929 		return;
7930 
7931 	/* exposing raid component */
7932 	if (starget)
7933 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7934 
7935 	sas_device_put(sas_device);
7936 }
7937 
7938 /**
7939  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7940  * @ioc: per adapter object
7941  * @element: IR config element data
7942  * Context: user.
7943  */
7944 static void
7945 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7946 	Mpi2EventIrConfigElement_t *element)
7947 {
7948 	struct _sas_device *sas_device;
7949 	struct scsi_target *starget = NULL;
7950 	struct MPT3SAS_TARGET *sas_target_priv_data;
7951 	unsigned long flags;
7952 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7953 	u16 volume_handle = 0;
7954 	u64 volume_wwid = 0;
7955 
7956 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7957 	if (volume_handle)
7958 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7959 		    &volume_wwid);
7960 
7961 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7962 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7963 	if (sas_device) {
7964 		set_bit(handle, ioc->pd_handles);
7965 		if (sas_device->starget && sas_device->starget->hostdata) {
7966 			starget = sas_device->starget;
7967 			sas_target_priv_data = starget->hostdata;
7968 			sas_target_priv_data->flags |=
7969 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
7970 			sas_device->volume_handle = volume_handle;
7971 			sas_device->volume_wwid = volume_wwid;
7972 		}
7973 	}
7974 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7975 	if (!sas_device)
7976 		return;
7977 
7978 	/* hiding raid component */
7979 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7980 
7981 	if (starget)
7982 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7983 
7984 	sas_device_put(sas_device);
7985 }
7986 
7987 /**
7988  * _scsih_sas_pd_delete - delete pd component
7989  * @ioc: per adapter object
7990  * @element: IR config element data
7991  * Context: user.
7992  */
7993 static void
7994 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7995 	Mpi2EventIrConfigElement_t *element)
7996 {
7997 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7998 
7999 	_scsih_device_remove_by_handle(ioc, handle);
8000 }
8001 
8002 /**
8003  * _scsih_sas_pd_add - add pd component
8004  * @ioc: per adapter object
8005  * @element: IR config element data
8006  * Context: user.
8007  */
8008 static void
8009 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
8010 	Mpi2EventIrConfigElement_t *element)
8011 {
8012 	struct _sas_device *sas_device;
8013 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8014 	Mpi2ConfigReply_t mpi_reply;
8015 	Mpi2SasDevicePage0_t sas_device_pg0;
8016 	u32 ioc_status;
8017 	u64 sas_address;
8018 	u16 parent_handle;
8019 
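	/*
	 * Mark the handle as a RAID physical disk up front so the device is
	 * treated as a hidden component when it is added below.
	 */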
8020 	set_bit(handle, ioc->pd_handles);
8021 
8022 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8023 	if (sas_device) {
8024 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8025 		sas_device_put(sas_device);
8026 		return;
8027 	}
8028 
8029 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
8030 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
8031 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8032 			__FILE__, __LINE__, __func__);
8033 		return;
8034 	}
8035 
8036 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8037 	    MPI2_IOCSTATUS_MASK;
8038 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8039 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8040 			__FILE__, __LINE__, __func__);
8041 		return;
8042 	}
8043 
8044 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8045 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8046 		mpt3sas_transport_update_links(ioc, sas_address, handle,
8047 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8048 
8049 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8050 	_scsih_add_device(ioc, handle, 0, 1);
8051 }
8052 
8053 /**
8054  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
8055  * @ioc: per adapter object
8056  * @event_data: event data payload
8057  * Context: user.
8058  */
8059 static void
8060 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8061 	Mpi2EventDataIrConfigChangeList_t *event_data)
8062 {
8063 	Mpi2EventIrConfigElement_t *element;
8064 	u8 element_type;
8065 	int i;
8066 	char *reason_str = NULL, *element_str = NULL;
8067 
8068 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8069 
8070 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
8071 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
8072 		 "foreign" : "native",
8073 		 event_data->NumElements);
8074 	for (i = 0; i < event_data->NumElements; i++, element++) {
8075 		switch (element->ReasonCode) {
8076 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8077 			reason_str = "add";
8078 			break;
8079 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8080 			reason_str = "remove";
8081 			break;
8082 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
8083 			reason_str = "no change";
8084 			break;
8085 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8086 			reason_str = "hide";
8087 			break;
8088 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8089 			reason_str = "unhide";
8090 			break;
8091 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8092 			reason_str = "volume_created";
8093 			break;
8094 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8095 			reason_str = "volume_deleted";
8096 			break;
8097 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8098 			reason_str = "pd_created";
8099 			break;
8100 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8101 			reason_str = "pd_deleted";
8102 			break;
8103 		default:
8104 			reason_str = "unknown reason";
8105 			break;
8106 		}
8107 		element_type = le16_to_cpu(element->ElementFlags) &
8108 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8109 		switch (element_type) {
8110 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8111 			element_str = "volume";
8112 			break;
8113 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8114 			element_str = "phys disk";
8115 			break;
8116 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8117 			element_str = "hot spare";
8118 			break;
8119 		default:
8120 			element_str = "unknown element";
8121 			break;
8122 		}
8123 		pr_info("\t(%s:%s), vol handle(0x%04x), "
8124 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8125 		    reason_str, le16_to_cpu(element->VolDevHandle),
8126 		    le16_to_cpu(element->PhysDiskDevHandle),
8127 		    element->PhysDiskNum);
8128 	}
8129 }
8130 
8131 /**
8132  * _scsih_sas_ir_config_change_event - handle ir configuration change events
8133  * @ioc: per adapter object
8134  * @fw_event: The fw_event_work object
8135  * Context: user.
8136  */
8137 static void
8138 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8139 	struct fw_event_work *fw_event)
8140 {
8141 	Mpi2EventIrConfigElement_t *element;
8142 	int i;
8143 	u8 foreign_config;
8144 	Mpi2EventDataIrConfigChangeList_t *event_data =
8145 		(Mpi2EventDataIrConfigChangeList_t *)
8146 		fw_event->event_data;
8147 
8148 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8149 	     (!ioc->hide_ir_msg))
8150 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
8151 
8152 	foreign_config = (le32_to_cpu(event_data->Flags) &
8153 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
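	/*
	 * Volume add/delete is only acted upon for native configurations;
	 * elements belonging to a foreign configuration are skipped below.
	 */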
8154 
8155 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8156 	if (ioc->shost_recovery &&
8157 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8158 		for (i = 0; i < event_data->NumElements; i++, element++) {
8159 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8160 				_scsih_ir_fastpath(ioc,
8161 					le16_to_cpu(element->PhysDiskDevHandle),
8162 					element->PhysDiskNum);
8163 		}
8164 		return;
8165 	}
8166 
8167 	for (i = 0; i < event_data->NumElements; i++, element++) {
8168 
8169 		switch (element->ReasonCode) {
8170 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8171 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8172 			if (!foreign_config)
8173 				_scsih_sas_volume_add(ioc, element);
8174 			break;
8175 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8176 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8177 			if (!foreign_config)
8178 				_scsih_sas_volume_delete(ioc,
8179 				    le16_to_cpu(element->VolDevHandle));
8180 			break;
8181 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8182 			if (!ioc->is_warpdrive)
8183 				_scsih_sas_pd_hide(ioc, element);
8184 			break;
8185 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8186 			if (!ioc->is_warpdrive)
8187 				_scsih_sas_pd_expose(ioc, element);
8188 			break;
8189 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8190 			if (!ioc->is_warpdrive)
8191 				_scsih_sas_pd_add(ioc, element);
8192 			break;
8193 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8194 			if (!ioc->is_warpdrive)
8195 				_scsih_sas_pd_delete(ioc, element);
8196 			break;
8197 		}
8198 	}
8199 }
8200 
8201 /**
8202  * _scsih_sas_ir_volume_event - IR volume event
8203  * @ioc: per adapter object
8204  * @fw_event: The fw_event_work object
8205  * Context: user.
8206  */
8207 static void
8208 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8209 	struct fw_event_work *fw_event)
8210 {
8211 	u64 wwid;
8212 	unsigned long flags;
8213 	struct _raid_device *raid_device;
8214 	u16 handle;
8215 	u32 state;
8216 	int rc;
8217 	Mpi2EventDataIrVolume_t *event_data =
8218 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8219 
8220 	if (ioc->shost_recovery)
8221 		return;
8222 
8223 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8224 		return;
8225 
8226 	handle = le16_to_cpu(event_data->VolDevHandle);
8227 	state = le32_to_cpu(event_data->NewValue);
8228 	if (!ioc->hide_ir_msg)
8229 		dewtprintk(ioc,
8230 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8231 				    __func__, handle,
8232 				    le32_to_cpu(event_data->PreviousValue),
8233 				    state));
8234 	switch (state) {
8235 	case MPI2_RAID_VOL_STATE_MISSING:
8236 	case MPI2_RAID_VOL_STATE_FAILED:
8237 		_scsih_sas_volume_delete(ioc, handle);
8238 		break;
8239 
8240 	case MPI2_RAID_VOL_STATE_ONLINE:
8241 	case MPI2_RAID_VOL_STATE_DEGRADED:
8242 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8243 
8244 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8245 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8246 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8247 
8248 		if (raid_device)
8249 			break;
8250 
8251 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8252 		if (!wwid) {
8253 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8254 				__FILE__, __LINE__, __func__);
8255 			break;
8256 		}
8257 
8258 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8259 		if (!raid_device) {
8260 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8261 				__FILE__, __LINE__, __func__);
8262 			break;
8263 		}
8264 
8265 		raid_device->id = ioc->sas_id++;
8266 		raid_device->channel = RAID_CHANNEL;
8267 		raid_device->handle = handle;
8268 		raid_device->wwid = wwid;
8269 		_scsih_raid_device_add(ioc, raid_device);
8270 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8271 		    raid_device->id, 0);
8272 		if (rc)
8273 			_scsih_raid_device_remove(ioc, raid_device);
8274 		break;
8275 
8276 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8277 	default:
8278 		break;
8279 	}
8280 }
8281 
8282 /**
8283  * _scsih_sas_ir_physical_disk_event - PD event
8284  * @ioc: per adapter object
8285  * @fw_event: The fw_event_work object
8286  * Context: user.
8287  */
8288 static void
8289 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8290 	struct fw_event_work *fw_event)
8291 {
8292 	u16 handle, parent_handle;
8293 	u32 state;
8294 	struct _sas_device *sas_device;
8295 	Mpi2ConfigReply_t mpi_reply;
8296 	Mpi2SasDevicePage0_t sas_device_pg0;
8297 	u32 ioc_status;
8298 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8299 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8300 	u64 sas_address;
8301 
8302 	if (ioc->shost_recovery)
8303 		return;
8304 
8305 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8306 		return;
8307 
8308 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8309 	state = le32_to_cpu(event_data->NewValue);
8310 
8311 	if (!ioc->hide_ir_msg)
8312 		dewtprintk(ioc,
8313 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8314 				    __func__, handle,
8315 				    le32_to_cpu(event_data->PreviousValue),
8316 				    state));
8317 
8318 	switch (state) {
8319 	case MPI2_RAID_PD_STATE_ONLINE:
8320 	case MPI2_RAID_PD_STATE_DEGRADED:
8321 	case MPI2_RAID_PD_STATE_REBUILDING:
8322 	case MPI2_RAID_PD_STATE_OPTIMAL:
8323 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8324 
8325 		if (!ioc->is_warpdrive)
8326 			set_bit(handle, ioc->pd_handles);
8327 
8328 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8329 		if (sas_device) {
8330 			sas_device_put(sas_device);
8331 			return;
8332 		}
8333 
8334 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8335 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8336 		    handle))) {
8337 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8338 				__FILE__, __LINE__, __func__);
8339 			return;
8340 		}
8341 
8342 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8343 		    MPI2_IOCSTATUS_MASK;
8344 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8345 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8346 				__FILE__, __LINE__, __func__);
8347 			return;
8348 		}
8349 
8350 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8351 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8352 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8353 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8354 
8355 		_scsih_add_device(ioc, handle, 0, 1);
8356 
8357 		break;
8358 
8359 	case MPI2_RAID_PD_STATE_OFFLINE:
8360 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8361 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8362 	default:
8363 		break;
8364 	}
8365 }
8366 
8367 /**
8368  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8369  * @ioc: per adapter object
8370  * @event_data: event data payload
8371  * Context: user.
8372  */
8373 static void
8374 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8375 	Mpi2EventDataIrOperationStatus_t *event_data)
8376 {
8377 	char *reason_str = NULL;
8378 
8379 	switch (event_data->RAIDOperation) {
8380 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8381 		reason_str = "resync";
8382 		break;
8383 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8384 		reason_str = "online capacity expansion";
8385 		break;
8386 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8387 		reason_str = "consistency check";
8388 		break;
8389 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8390 		reason_str = "background init";
8391 		break;
8392 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8393 		reason_str = "make data consistent";
8394 		break;
8395 	}
8396 
8397 	if (!reason_str)
8398 		return;
8399 
8400 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8401 		 reason_str,
8402 		 le16_to_cpu(event_data->VolDevHandle),
8403 		 event_data->PercentComplete);
8404 }
8405 
8406 /**
8407  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8408  * @ioc: per adapter object
8409  * @fw_event: The fw_event_work object
8410  * Context: user.
8411  */
8412 static void
8413 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8414 	struct fw_event_work *fw_event)
8415 {
8416 	Mpi2EventDataIrOperationStatus_t *event_data =
8417 		(Mpi2EventDataIrOperationStatus_t *)
8418 		fw_event->event_data;
8419 	static struct _raid_device *raid_device;
8420 	unsigned long flags;
8421 	u16 handle;
8422 
8423 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8424 	    (!ioc->hide_ir_msg))
8425 		_scsih_sas_ir_operation_status_event_debug(ioc,
8426 		     event_data);
8427 
8428 	/* code added for raid transport support */
8429 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8430 
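		/*
		 * Cache the resync progress on the matching raid_device so
		 * the raid transport support noted above can report it.
		 */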
8431 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8432 		handle = le16_to_cpu(event_data->VolDevHandle);
8433 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8434 		if (raid_device)
8435 			raid_device->percent_complete =
8436 			    event_data->PercentComplete;
8437 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8438 	}
8439 }
8440 
8441 /**
8442  * _scsih_prep_device_scan - initialize parameters prior to device scan
8443  * @ioc: per adapter object
8444  *
8445  * Set the deleted flag prior to device scan.  If the device is found during
8446  * the scan, then we clear the deleted flag.
8447  */
8448 static void
8449 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8450 {
8451 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8452 	struct scsi_device *sdev;
8453 
8454 	shost_for_each_device(sdev, ioc->shost) {
8455 		sas_device_priv_data = sdev->hostdata;
8456 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8457 			sas_device_priv_data->sas_target->deleted = 1;
8458 	}
8459 }
8460 
8461 /**
8462  * _scsih_mark_responding_sas_device - mark a sas_device as responding
8463  * @ioc: per adapter object
8464  * @sas_device_pg0: SAS Device page 0
8465  *
8466  * After host reset, find out whether devices are still responding.
8467  * Used in _scsih_remove_unresponding_devices.
8468  */
8469 static void
8470 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8471 Mpi2SasDevicePage0_t *sas_device_pg0)
8472 {
8473 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8474 	struct scsi_target *starget;
8475 	struct _sas_device *sas_device = NULL;
8476 	struct _enclosure_node *enclosure_dev = NULL;
8477 	unsigned long flags;
8478 
8479 	if (sas_device_pg0->EnclosureHandle) {
8480 		enclosure_dev =
8481 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8482 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8483 		if (enclosure_dev == NULL)
8484 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8485 				 sas_device_pg0->EnclosureHandle);
8486 	}
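	/*
	 * Match on SAS address + slot rather than on the device handle,
	 * since the handle may have changed across the reset (handled
	 * further below).
	 */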
8487 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8488 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8489 		if ((sas_device->sas_address == le64_to_cpu(
8490 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8491 		    le16_to_cpu(sas_device_pg0->Slot))) {
8492 			sas_device->responding = 1;
8493 			starget = sas_device->starget;
8494 			if (starget && starget->hostdata) {
8495 				sas_target_priv_data = starget->hostdata;
8496 				sas_target_priv_data->tm_busy = 0;
8497 				sas_target_priv_data->deleted = 0;
8498 			} else
8499 				sas_target_priv_data = NULL;
8500 			if (starget) {
8501 				starget_printk(KERN_INFO, starget,
8502 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8503 				    le16_to_cpu(sas_device_pg0->DevHandle),
8504 				    (unsigned long long)
8505 				    sas_device->sas_address);
8506 
8507 				if (sas_device->enclosure_handle != 0)
8508 					starget_printk(KERN_INFO, starget,
8509 					 "enclosure logical id(0x%016llx),"
8510 					 " slot(%d)\n",
8511 					 (unsigned long long)
8512 					 sas_device->enclosure_logical_id,
8513 					 sas_device->slot);
8514 			}
8515 			if (le16_to_cpu(sas_device_pg0->Flags) &
8516 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8517 				sas_device->enclosure_level =
8518 				   sas_device_pg0->EnclosureLevel;
8519 				memcpy(&sas_device->connector_name[0],
8520 					&sas_device_pg0->ConnectorName[0], 4);
8521 			} else {
8522 				sas_device->enclosure_level = 0;
8523 				sas_device->connector_name[0] = '\0';
8524 			}
8525 
8526 			sas_device->enclosure_handle =
8527 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8528 			sas_device->is_chassis_slot_valid = 0;
8529 			if (enclosure_dev) {
8530 				sas_device->enclosure_logical_id = le64_to_cpu(
8531 					enclosure_dev->pg0.EnclosureLogicalID);
8532 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8533 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8534 					sas_device->is_chassis_slot_valid = 1;
8535 					sas_device->chassis_slot =
8536 						enclosure_dev->pg0.ChassisSlot;
8537 				}
8538 			}
8539 
8540 			if (sas_device->handle == le16_to_cpu(
8541 			    sas_device_pg0->DevHandle))
8542 				goto out;
8543 			pr_info("\thandle changed from(0x%04x)!!!\n",
8544 			    sas_device->handle);
8545 			sas_device->handle = le16_to_cpu(
8546 			    sas_device_pg0->DevHandle);
8547 			if (sas_target_priv_data)
8548 				sas_target_priv_data->handle =
8549 				    le16_to_cpu(sas_device_pg0->DevHandle);
8550 			goto out;
8551 		}
8552 	}
8553  out:
8554 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8555 }
8556 
8557 /**
8558  * _scsih_create_enclosure_list_after_reset - free the existing enclosure
8559  *	list and rebuild it by scanning all Enclosure Page 0s
8560  * @ioc: per adapter object
8561  */
8562 static void
8563 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8564 {
8565 	struct _enclosure_node *enclosure_dev;
8566 	Mpi2ConfigReply_t mpi_reply;
8567 	u16 enclosure_handle;
8568 	int rc;
8569 
8570 	/* Free existing enclosure list */
8571 	mpt3sas_free_enclosure_list(ioc);
8572 
8573 	/* Reconstruct the enclosure list after reset */
8574 	enclosure_handle = 0xFFFF;
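	/*
	 * Walk Enclosure Page 0 with the GET_NEXT_HANDLE form, starting
	 * from 0xFFFF, until the IOC stops returning entries.
	 */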
8575 	do {
8576 		enclosure_dev =
8577 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8578 		if (!enclosure_dev) {
8579 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8580 				__FILE__, __LINE__, __func__);
8581 			return;
8582 		}
8583 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8584 				&enclosure_dev->pg0,
8585 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8586 				enclosure_handle);
8587 
8588 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8589 						MPI2_IOCSTATUS_MASK)) {
8590 			kfree(enclosure_dev);
8591 			return;
8592 		}
8593 		list_add_tail(&enclosure_dev->list,
8594 						&ioc->enclosure_list);
8595 		enclosure_handle =
8596 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8597 	} while (1);
8598 }
8599 
8600 /**
8601  * _scsih_search_responding_sas_devices - search for responding SAS end devices
8602  * @ioc: per adapter object
8603  *
8604  * After host reset, find out whether devices are still responding.
8605  * Devices that do not respond are removed afterwards.
8606  */
8607 static void
8608 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8609 {
8610 	Mpi2SasDevicePage0_t sas_device_pg0;
8611 	Mpi2ConfigReply_t mpi_reply;
8612 	u16 ioc_status;
8613 	u16 handle;
8614 	u32 device_info;
8615 
8616 	ioc_info(ioc, "search for end-devices: start\n");
8617 
8618 	if (list_empty(&ioc->sas_device_list))
8619 		goto out;
8620 
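	/*
	 * Walk SAS Device Page 0 with the GET_NEXT_HANDLE form and mark
	 * every end device that is still present as responding.
	 */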
8621 	handle = 0xFFFF;
8622 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8623 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8624 	    handle))) {
8625 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8626 		    MPI2_IOCSTATUS_MASK;
8627 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8628 			break;
8629 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8630 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8631 		if (!(_scsih_is_end_device(device_info)))
8632 			continue;
8633 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8634 	}
8635 
8636  out:
8637 	ioc_info(ioc, "search for end-devices: complete\n");
8638 }
8639 
8640 /**
8641  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8642  * @ioc: per adapter object
8643  * @pcie_device_pg0: PCIe Device page 0
8644  *
8645  * After host reset, find out whether devices are still responding.
8646  * Used in _scsih_remove_unresponding_devices.
8647  */
8648 static void
8649 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8650 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8651 {
8652 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8653 	struct scsi_target *starget;
8654 	struct _pcie_device *pcie_device;
8655 	unsigned long flags;
8656 
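	/*
	 * Match on WWID + slot; the device handle may have changed across
	 * the reset and is refreshed further below if so.
	 */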
8657 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8658 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8659 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8660 		    && (pcie_device->slot == le16_to_cpu(
8661 		    pcie_device_pg0->Slot))) {
8662 			pcie_device->access_status =
8663 					pcie_device_pg0->AccessStatus;
8664 			pcie_device->responding = 1;
8665 			starget = pcie_device->starget;
8666 			if (starget && starget->hostdata) {
8667 				sas_target_priv_data = starget->hostdata;
8668 				sas_target_priv_data->tm_busy = 0;
8669 				sas_target_priv_data->deleted = 0;
8670 			} else
8671 				sas_target_priv_data = NULL;
8672 			if (starget) {
8673 				starget_printk(KERN_INFO, starget,
8674 				    "handle(0x%04x), wwid(0x%016llx) ",
8675 				    pcie_device->handle,
8676 				    (unsigned long long)pcie_device->wwid);
8677 				if (pcie_device->enclosure_handle != 0)
8678 					starget_printk(KERN_INFO, starget,
8679 					    "enclosure logical id(0x%016llx), "
8680 					    "slot(%d)\n",
8681 					    (unsigned long long)
8682 					    pcie_device->enclosure_logical_id,
8683 					    pcie_device->slot);
8684 			}
8685 
8686 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8687 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8688 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8689 				pcie_device->enclosure_level =
8690 				    pcie_device_pg0->EnclosureLevel;
8691 				memcpy(&pcie_device->connector_name[0],
8692 				    &pcie_device_pg0->ConnectorName[0], 4);
8693 			} else {
8694 				pcie_device->enclosure_level = 0;
8695 				pcie_device->connector_name[0] = '\0';
8696 			}
8697 
8698 			if (pcie_device->handle == le16_to_cpu(
8699 			    pcie_device_pg0->DevHandle))
8700 				goto out;
8701 			pr_info("\thandle changed from(0x%04x)!!!\n",
8702 			    pcie_device->handle);
8703 			pcie_device->handle = le16_to_cpu(
8704 			    pcie_device_pg0->DevHandle);
8705 			if (sas_target_priv_data)
8706 				sas_target_priv_data->handle =
8707 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8708 			goto out;
8709 		}
8710 	}
8711 
8712  out:
8713 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8714 }
8715 
8716 /**
8717  * _scsih_search_responding_pcie_devices - search for responding PCIe devices
8718  * @ioc: per adapter object
8719  *
8720  * After host reset, find out whether devices are still responding.
8721  * Devices that do not respond are removed afterwards.
8722  */
8723 static void
8724 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8725 {
8726 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8727 	Mpi2ConfigReply_t mpi_reply;
8728 	u16 ioc_status;
8729 	u16 handle;
8730 	u32 device_info;
8731 
8732 	ioc_info(ioc, "search for PCIe end-devices: start\n");
8733 
8734 	if (list_empty(&ioc->pcie_device_list))
8735 		goto out;
8736 
8737 	handle = 0xFFFF;
8738 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8739 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8740 		handle))) {
8741 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8742 		    MPI2_IOCSTATUS_MASK;
8743 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8744 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8745 				 __func__, ioc_status,
8746 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8747 			break;
8748 		}
8749 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8750 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8751 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8752 			continue;
8753 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8754 	}
8755 out:
8756 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8757 }
8758 
8759 /**
8760  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8761  * @ioc: per adapter object
8762  * @wwid: world wide identifier for raid volume
8763  * @handle: device handle
8764  *
8765  * After host reset, find out whether devices are still responding.
8766  * Used in _scsih_remove_unresponding_devices.
8767  */
8768 static void
8769 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8770 	u16 handle)
8771 {
8772 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8773 	struct scsi_target *starget;
8774 	struct _raid_device *raid_device;
8775 	unsigned long flags;
8776 
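	/*
	 * Volumes are matched by WWID; if the firmware assigned a new
	 * handle across the reset it is refreshed below.
	 */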
8777 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8778 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8779 		if (raid_device->wwid == wwid && raid_device->starget) {
8780 			starget = raid_device->starget;
8781 			if (starget && starget->hostdata) {
8782 				sas_target_priv_data = starget->hostdata;
8783 				sas_target_priv_data->deleted = 0;
8784 			} else
8785 				sas_target_priv_data = NULL;
8786 			raid_device->responding = 1;
8787 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8788 			starget_printk(KERN_INFO, raid_device->starget,
8789 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
8790 			    (unsigned long long)raid_device->wwid);
8791 
8792 			/*
8793 			 * WARPDRIVE: The handles of the PDs might have changed
8794 			 * across the host reset so re-initialize the
8795 			 * required data for Direct IO
8796 			 */
8797 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
8798 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
8799 			if (raid_device->handle == handle) {
8800 				spin_unlock_irqrestore(&ioc->raid_device_lock,
8801 				    flags);
8802 				return;
8803 			}
8804 			pr_info("\thandle changed from(0x%04x)!!!\n",
8805 			    raid_device->handle);
8806 			raid_device->handle = handle;
8807 			if (sas_target_priv_data)
8808 				sas_target_priv_data->handle = handle;
8809 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8810 			return;
8811 		}
8812 	}
8813 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8814 }
8815 
8816 /**
8817  * _scsih_search_responding_raid_devices - search for responding RAID volumes
8818  * @ioc: per adapter object
8819  *
8820  * After host reset, find out whether devices are still responding.
8821  * Volumes that do not respond are removed afterwards.
8822  */
8823 static void
8824 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8825 {
8826 	Mpi2RaidVolPage1_t volume_pg1;
8827 	Mpi2RaidVolPage0_t volume_pg0;
8828 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8829 	Mpi2ConfigReply_t mpi_reply;
8830 	u16 ioc_status;
8831 	u16 handle;
8832 	u8 phys_disk_num;
8833 
8834 	if (!ioc->ir_firmware)
8835 		return;
8836 
8837 	ioc_info(ioc, "search for raid volumes: start\n");
8838 
8839 	if (list_empty(&ioc->raid_device_list))
8840 		goto out;
8841 
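	/*
	 * Walk RAID Volume Page 1 for the WWID and Page 0 for the state;
	 * only ONLINE, OPTIMAL or DEGRADED volumes are marked responding.
	 */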
8842 	handle = 0xFFFF;
8843 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8844 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8845 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8846 		    MPI2_IOCSTATUS_MASK;
8847 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8848 			break;
8849 		handle = le16_to_cpu(volume_pg1.DevHandle);
8850 
8851 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8852 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8853 		     sizeof(Mpi2RaidVolPage0_t)))
8854 			continue;
8855 
8856 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8857 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8858 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8859 			_scsih_mark_responding_raid_device(ioc,
8860 			    le64_to_cpu(volume_pg1.WWID), handle);
8861 	}
8862 
8863 	/* refresh the pd_handles */
8864 	if (!ioc->is_warpdrive) {
8865 		phys_disk_num = 0xFF;
8866 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8867 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8868 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8869 		    phys_disk_num))) {
8870 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8871 			    MPI2_IOCSTATUS_MASK;
8872 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8873 				break;
8874 			phys_disk_num = pd_pg0.PhysDiskNum;
8875 			handle = le16_to_cpu(pd_pg0.DevHandle);
8876 			set_bit(handle, ioc->pd_handles);
8877 		}
8878 	}
8879  out:
8880 	ioc_info(ioc, "search for responding raid volumes: complete\n");
8881 }
8882 
8883 /**
8884  * _scsih_mark_responding_expander - mark an expander as responding
8885  * @ioc: per adapter object
8886  * @expander_pg0: SAS Expander Config Page 0
8887  *
8888  * After host reset, find out whether devices are still responding.
8889  * Used in _scsih_remove_unresponding_devices.
8890  */
8891 static void
8892 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8893 	Mpi2ExpanderPage0_t *expander_pg0)
8894 {
8895 	struct _sas_node *sas_expander = NULL;
8896 	unsigned long flags;
8897 	int i;
8898 	struct _enclosure_node *enclosure_dev = NULL;
8899 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8900 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8901 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8902 
8903 	if (enclosure_handle)
8904 		enclosure_dev =
8905 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8906 							enclosure_handle);
8907 
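	/*
	 * Expanders are matched by SAS address; on a handle change all of
	 * the per-phy handles are updated as well.
	 */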
8908 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8909 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8910 		if (sas_expander->sas_address != sas_address)
8911 			continue;
8912 		sas_expander->responding = 1;
8913 
8914 		if (enclosure_dev) {
8915 			sas_expander->enclosure_logical_id =
8916 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8917 			sas_expander->enclosure_handle =
8918 			    le16_to_cpu(expander_pg0->EnclosureHandle);
8919 		}
8920 
8921 		if (sas_expander->handle == handle)
8922 			goto out;
8923 		pr_info("\texpander(0x%016llx): handle changed from(0x%04x) to (0x%04x)!!!\n",
8924 		    (unsigned long long)sas_expander->sas_address,
8925 		    sas_expander->handle, handle);
8927 		sas_expander->handle = handle;
8928 		for (i = 0 ; i < sas_expander->num_phys ; i++)
8929 			sas_expander->phy[i].handle = handle;
8930 		goto out;
8931 	}
8932  out:
8933 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8934 }
8935 
8936 /**
8937  * _scsih_search_responding_expanders - search for responding expanders
8938  * @ioc: per adapter object
8939  *
8940  * After host reset, find out whether devices are still responding.
8941  * Expanders that do not respond are removed afterwards.
8942  */
8943 static void
8944 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8945 {
8946 	Mpi2ExpanderPage0_t expander_pg0;
8947 	Mpi2ConfigReply_t mpi_reply;
8948 	u16 ioc_status;
8949 	u64 sas_address;
8950 	u16 handle;
8951 
8952 	ioc_info(ioc, "search for expanders: start\n");
8953 
8954 	if (list_empty(&ioc->sas_expander_list))
8955 		goto out;
8956 
8957 	handle = 0xFFFF;
8958 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8959 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8960 
8961 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8962 		    MPI2_IOCSTATUS_MASK;
8963 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8964 			break;
8965 
8966 		handle = le16_to_cpu(expander_pg0.DevHandle);
8967 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
8968 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8969 			handle,
8970 		    (unsigned long long)sas_address);
8971 		_scsih_mark_responding_expander(ioc, &expander_pg0);
8972 	}
8973 
8974  out:
8975 	ioc_info(ioc, "search for expanders: complete\n");
8976 }
8977 
8978 /**
8979  * _scsih_remove_unresponding_devices - removing unresponding devices
8980  * @ioc: per adapter object
8981  */
8982 static void
8983 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8984 {
8985 	struct _sas_device *sas_device, *sas_device_next;
8986 	struct _sas_node *sas_expander, *sas_expander_next;
8987 	struct _raid_device *raid_device, *raid_device_next;
8988 	struct _pcie_device *pcie_device, *pcie_device_next;
8989 	struct list_head tmp_list;
8990 	unsigned long flags;
8991 	LIST_HEAD(head);
8992 
8993 	ioc_info(ioc, "removing unresponding devices: start\n");
8994 
8995 	/* removing unresponding end devices */
8996 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
8997 	/*
8998 	 * Iterate, pulling off devices marked as non-responding. We become the
8999 	 * owner for the reference the list had on any object we prune.
9000 	 */
9001 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9002 	list_for_each_entry_safe(sas_device, sas_device_next,
9003 	    &ioc->sas_device_list, list) {
9004 		if (!sas_device->responding)
9005 			list_move_tail(&sas_device->list, &head);
9006 		else
9007 			sas_device->responding = 0;
9008 	}
9009 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9010 
9011 	/*
9012 	 * Now, uninitialize and remove the unresponding devices we pruned.
9013 	 */
9014 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
9015 		_scsih_remove_device(ioc, sas_device);
9016 		list_del_init(&sas_device->list);
9017 		sas_device_put(sas_device);
9018 	}
9019 
9020 	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
9021 	INIT_LIST_HEAD(&head);
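	/*
	 * Same two-pass scheme for PCIe end devices: prune unresponding
	 * entries under the lock, then remove them from the SCSI midlayer
	 * without the lock held.
	 */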
9022 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9023 	list_for_each_entry_safe(pcie_device, pcie_device_next,
9024 	    &ioc->pcie_device_list, list) {
9025 		if (!pcie_device->responding)
9026 			list_move_tail(&pcie_device->list, &head);
9027 		else
9028 			pcie_device->responding = 0;
9029 	}
9030 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9031 
9032 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
9033 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9034 		list_del_init(&pcie_device->list);
9035 		pcie_device_put(pcie_device);
9036 	}
9037 
9038 	/* removing unresponding volumes */
9039 	if (ioc->ir_firmware) {
9040 		ioc_info(ioc, "removing unresponding devices: volumes\n");
9041 		list_for_each_entry_safe(raid_device, raid_device_next,
9042 		    &ioc->raid_device_list, list) {
9043 			if (!raid_device->responding)
9044 				_scsih_sas_volume_delete(ioc,
9045 				    raid_device->handle);
9046 			else
9047 				raid_device->responding = 0;
9048 		}
9049 	}
9050 
9051 	/* removing unresponding expanders */
9052 	ioc_info(ioc, "removing unresponding devices: expanders\n");
9053 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9054 	INIT_LIST_HEAD(&tmp_list);
9055 	list_for_each_entry_safe(sas_expander, sas_expander_next,
9056 	    &ioc->sas_expander_list, list) {
9057 		if (!sas_expander->responding)
9058 			list_move_tail(&sas_expander->list, &tmp_list);
9059 		else
9060 			sas_expander->responding = 0;
9061 	}
9062 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9063 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
9064 	    list) {
9065 		_scsih_expander_node_remove(ioc, sas_expander);
9066 	}
9067 
9068 	ioc_info(ioc, "removing unresponding devices: complete\n");
9069 
9070 	/* unblock devices */
9071 	_scsih_ublock_io_all_device(ioc);
9072 }
9073 
9074 static void
9075 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
9076 	struct _sas_node *sas_expander, u16 handle)
9077 {
9078 	Mpi2ExpanderPage1_t expander_pg1;
9079 	Mpi2ConfigReply_t mpi_reply;
9080 	int i;
9081 
9082 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
9083 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
9084 		    &expander_pg1, i, handle))) {
9085 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9086 				__FILE__, __LINE__, __func__);
9087 			return;
9088 		}
9089 
9090 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9091 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9092 		    expander_pg1.NegotiatedLinkRate >> 4);
9093 	}
9094 }
9095 
9096 /**
9097  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9098  * @ioc: per adapter object
9099  */
9100 static void
9101 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9102 {
9103 	Mpi2ExpanderPage0_t expander_pg0;
9104 	Mpi2SasDevicePage0_t sas_device_pg0;
9105 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9106 	Mpi2RaidVolPage1_t volume_pg1;
9107 	Mpi2RaidVolPage0_t volume_pg0;
9108 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9109 	Mpi2EventIrConfigElement_t element;
9110 	Mpi2ConfigReply_t mpi_reply;
9111 	u8 phys_disk_num;
9112 	u16 ioc_status;
9113 	u16 handle, parent_handle;
9114 	u64 sas_address;
9115 	struct _sas_device *sas_device;
9116 	struct _pcie_device *pcie_device;
9117 	struct _sas_node *expander_device;
9118 	static struct _raid_device *raid_device;
9119 	u8 retry_count;
9120 	unsigned long flags;
9121 
9122 	ioc_info(ioc, "scan devices: start\n");
9123 
9124 	_scsih_sas_host_refresh(ioc);
9125 
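	/*
	 * Rediscovery order: expanders first, then (when IR firmware is
	 * present) physical disks and volumes, and finally SAS and PCIe
	 * end devices.
	 */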
9126 	ioc_info(ioc, "\tscan devices: expanders start\n");
9127 
9128 	/* expanders */
9129 	handle = 0xFFFF;
9130 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9131 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9132 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9133 		    MPI2_IOCSTATUS_MASK;
9134 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9135 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9136 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9137 			break;
9138 		}
9139 		handle = le16_to_cpu(expander_pg0.DevHandle);
9140 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
9141 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9142 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
9143 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9144 		if (expander_device)
9145 			_scsih_refresh_expander_links(ioc, expander_device,
9146 			    handle);
9147 		else {
9148 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9149 				 handle,
9150 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9151 			_scsih_expander_add(ioc, handle);
9152 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9153 				 handle,
9154 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9155 		}
9156 	}
9157 
9158 	ioc_info(ioc, "\tscan devices: expanders complete\n");
9159 
9160 	if (!ioc->ir_firmware)
9161 		goto skip_to_sas;
9162 
9163 	ioc_info(ioc, "\tscan devices: phys disk start\n");
9164 
9165 	/* phys disk */
9166 	phys_disk_num = 0xFF;
9167 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9168 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9169 	    phys_disk_num))) {
9170 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9171 		    MPI2_IOCSTATUS_MASK;
9172 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9173 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9174 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9175 			break;
9176 		}
9177 		phys_disk_num = pd_pg0.PhysDiskNum;
9178 		handle = le16_to_cpu(pd_pg0.DevHandle);
9179 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9180 		if (sas_device) {
9181 			sas_device_put(sas_device);
9182 			continue;
9183 		}
9184 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9185 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9186 		    handle) != 0)
9187 			continue;
9188 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9189 		    MPI2_IOCSTATUS_MASK;
9190 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9191 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9192 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9193 			break;
9194 		}
9195 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9196 		if (!_scsih_get_sas_address(ioc, parent_handle,
9197 		    &sas_address)) {
9198 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9199 				 handle,
9200 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9201 			mpt3sas_transport_update_links(ioc, sas_address,
9202 			    handle, sas_device_pg0.PhyNum,
9203 			    MPI2_SAS_NEG_LINK_RATE_1_5);
9204 			set_bit(handle, ioc->pd_handles);
9205 			retry_count = 0;
9206 			/* This will retry adding the end device.
9207 			 * _scsih_add_device() will decide on retries and
9208 			 * return "1" when it should be retried
9209 			 */
9210 			while (_scsih_add_device(ioc, handle, retry_count++,
9211 			    1)) {
9212 				ssleep(1);
9213 			}
9214 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9215 				 handle,
9216 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9217 		}
9218 	}
9219 
9220 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
9221 
9222 	ioc_info(ioc, "\tscan devices: volumes start\n");
9223 
9224 	/* volumes */
9225 	handle = 0xFFFF;
9226 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9227 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9228 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9229 		    MPI2_IOCSTATUS_MASK;
9230 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9231 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9232 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9233 			break;
9234 		}
9235 		handle = le16_to_cpu(volume_pg1.DevHandle);
9236 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9237 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
9238 		    le64_to_cpu(volume_pg1.WWID));
9239 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9240 		if (raid_device)
9241 			continue;
9242 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9243 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9244 		     sizeof(Mpi2RaidVolPage0_t)))
9245 			continue;
9246 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9247 		    MPI2_IOCSTATUS_MASK;
9248 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9249 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9250 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9251 			break;
9252 		}
9253 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9254 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9255 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9256 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9257 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9258 			element.VolDevHandle = volume_pg1.DevHandle;
9259 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9260 				 volume_pg1.DevHandle);
9261 			_scsih_sas_volume_add(ioc, &element);
9262 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9263 				 volume_pg1.DevHandle);
9264 		}
9265 	}
9266 
9267 	ioc_info(ioc, "\tscan devices: volumes complete\n");
9268 
9269  skip_to_sas:
9270 
9271 	ioc_info(ioc, "\tscan devices: end devices start\n");
9272 
9273 	/* sas devices */
9274 	handle = 0xFFFF;
9275 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9276 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9277 	    handle))) {
9278 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9279 		    MPI2_IOCSTATUS_MASK;
9280 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9281 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9282 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9283 			break;
9284 		}
9285 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9286 		if (!(_scsih_is_end_device(
9287 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
9288 			continue;
9289 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
9290 		    le64_to_cpu(sas_device_pg0.SASAddress));
9291 		if (sas_device) {
9292 			sas_device_put(sas_device);
9293 			continue;
9294 		}
9295 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9296 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9297 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9298 				 handle,
9299 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9300 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9301 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9302 			retry_count = 0;
9303 			/* This will retry adding the end device.
9304 			 * _scsih_add_device() will decide on retries and
9305 			 * return "1" when it should be retried
9306 			 */
9307 			while (_scsih_add_device(ioc, handle, retry_count++,
9308 			    0)) {
9309 				ssleep(1);
9310 			}
9311 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9312 				 handle,
9313 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9314 		}
9315 	}
9316 	ioc_info(ioc, "\tscan devices: end devices complete\n");
9317 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9318 
9319 	/* pcie devices */
9320 	handle = 0xFFFF;
9321 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9322 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9323 		handle))) {
9324 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9325 				& MPI2_IOCSTATUS_MASK;
9326 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9327 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9328 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9329 			break;
9330 		}
9331 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9332 		if (!(_scsih_is_nvme_pciescsi_device(
9333 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9334 			continue;
9335 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9336 				le64_to_cpu(pcie_device_pg0.WWID));
9337 		if (pcie_device) {
9338 			pcie_device_put(pcie_device);
9339 			continue;
9340 		}
9341 		retry_count = 0;
9342 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9343 		_scsih_pcie_add_device(ioc, handle);
9344 
9345 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9346 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9347 	}
9348 	ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
9349 	ioc_info(ioc, "scan devices: complete\n");
9350 }
9351 
9352 /**
9353  * mpt3sas_scsih_pre_reset_handler - pre reset callback handler (for scsih)
9354  * @ioc: per adapter object
9355  *
9356  * The handler for doing any required cleanup or initialization.
9357  */
9358 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9359 {
9360 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9361 }
9362 
9363 /**
9364  * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
9365  *							scsi & tm cmds.
9366  * @ioc: per adapter object
9367  *
9368  * The handler for doing any required cleanup or initialization.
9369  */
9370 void
9371 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
9372 {
9373 	dtmprintk(ioc,
9374 	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
9375 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9376 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9377 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9378 		complete(&ioc->scsih_cmds.done);
9379 	}
9380 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9381 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
9382 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9383 		complete(&ioc->tm_cmds.done);
9384 	}
9385 
9386 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9387 	memset(ioc->device_remove_in_progress, 0,
9388 	       ioc->device_remove_in_progress_sz);
9389 	_scsih_fw_event_cleanup_queue(ioc);
9390 	_scsih_flush_running_cmds(ioc);
9391 }
9392 
9393 /**
9394  * mpt3sas_scsih_reset_done_handler - reset done callback handler (for scsih)
9395  * @ioc: per adapter object
9396  *
9397  * The handler for doing any required cleanup or initialization.
9398  */
9399 void
9400 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9401 {
9402 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9403 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9404 					   !ioc->sas_hba.num_phys)) {
9405 		_scsih_prep_device_scan(ioc);
9406 		_scsih_create_enclosure_list_after_reset(ioc);
9407 		_scsih_search_responding_sas_devices(ioc);
9408 		_scsih_search_responding_pcie_devices(ioc);
9409 		_scsih_search_responding_raid_devices(ioc);
9410 		_scsih_search_responding_expanders(ioc);
9411 		_scsih_error_recovery_delete_devices(ioc);
9412 	}
9413 }
9414 
9415 /**
9416  * _mpt3sas_fw_work - delayed task for processing firmware events
9417  * @ioc: per adapter object
9418  * @fw_event: The fw_event_work object
9419  * Context: user.
9420  */
9421 static void
9422 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9423 {
9424 	_scsih_fw_event_del_from_list(ioc, fw_event);
9425 
9426 	/* the queue is being flushed so ignore this event */
9427 	if (ioc->remove_host || ioc->pci_error_recovery) {
9428 		fw_event_work_put(fw_event);
9429 		return;
9430 	}
9431 
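	/*
	 * Dispatch on the firmware event type; all handlers below run in
	 * workqueue (user) context and may sleep.
	 */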
9432 	switch (fw_event->event) {
9433 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9434 		mpt3sas_process_trigger_data(ioc,
9435 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9436 			fw_event->event_data);
9437 		break;
9438 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9439 		while (scsi_host_in_recovery(ioc->shost) ||
9440 					 ioc->shost_recovery) {
9441 			/*
9442 			 * If we're unloading, bail. Otherwise, this can become
9443 			 * an infinite loop.
9444 			 */
9445 			if (ioc->remove_host)
9446 				goto out;
9447 			ssleep(1);
9448 		}
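		/*
		 * Host reset has completed: prune anything that did not
		 * respond, rescan the topology and set the NVMe shutdown
		 * latency.
		 */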
9449 		_scsih_remove_unresponding_devices(ioc);
9450 		_scsih_scan_for_devices_after_reset(ioc);
9451 		_scsih_set_nvme_max_shutdown_latency(ioc);
9452 		break;
9453 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9454 		ioc->start_scan = 0;
9455 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9456 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9457 			    missing_delay[1]);
9458 		dewtprintk(ioc,
9459 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9460 		break;
9461 	case MPT3SAS_TURN_ON_PFA_LED:
9462 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9463 		break;
9464 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9465 		_scsih_sas_topology_change_event(ioc, fw_event);
9466 		break;
9467 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9468 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9469 			_scsih_sas_device_status_change_event_debug(ioc,
9470 			    (Mpi2EventDataSasDeviceStatusChange_t *)
9471 			    fw_event->event_data);
9472 		break;
9473 	case MPI2_EVENT_SAS_DISCOVERY:
9474 		_scsih_sas_discovery_event(ioc, fw_event);
9475 		break;
9476 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9477 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9478 		break;
9479 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9480 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9481 		break;
9482 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9483 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9484 		    fw_event);
9485 		break;
9486 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9487 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9488 		break;
9489 	case MPI2_EVENT_IR_VOLUME:
9490 		_scsih_sas_ir_volume_event(ioc, fw_event);
9491 		break;
9492 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9493 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9494 		break;
9495 	case MPI2_EVENT_IR_OPERATION_STATUS:
9496 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9497 		break;
9498 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9499 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9500 		break;
9501 	case MPI2_EVENT_PCIE_ENUMERATION:
9502 		_scsih_pcie_enumeration_event(ioc, fw_event);
9503 		break;
9504 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9505 		_scsih_pcie_topology_change_event(ioc, fw_event);
9506 		return;
9508 	}
9509 out:
9510 	fw_event_work_put(fw_event);
9511 }
9512 
9513 /**
9514  * _firmware_event_work - work routine for processing firmware events
9515  * @work: The fw_event_work object
9516  * Context: user.
9517  *
9518  * Wrapper for the work thread handling firmware events.
9519  */
9520 
9521 static void
9522 _firmware_event_work(struct work_struct *work)
9523 {
9524 	struct fw_event_work *fw_event = container_of(work,
9525 	    struct fw_event_work, work);
9526 
9527 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
9528 }
9529 
9530 /**
9531  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9532  * @ioc: per adapter object
9533  * @msix_index: MSIX table index supplied by the OS
9534  * @reply: reply message frame(lower 32bit addr)
9535  * Context: interrupt.
9536  *
9537  * This function merely adds a new work task into ioc->firmware_event_thread.
9538  * The tasks are worked from _firmware_event_work in user context.
9539  *
9540  * Return: 1 meaning mf should be freed from _base_interrupt
9541  *         0 means the mf is freed from this function.
9542  */
9543 u8
9544 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9545 	u32 reply)
9546 {
9547 	struct fw_event_work *fw_event;
9548 	Mpi2EventNotificationReply_t *mpi_reply;
9549 	u16 event;
9550 	u16 sz;
9551 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9552 
9553 	/* events turned off due to host reset */
9554 	if (ioc->pci_error_recovery)
9555 		return 1;
9556 
9557 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9558 
9559 	if (unlikely(!mpi_reply)) {
9560 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9561 			__FILE__, __LINE__, __func__);
9562 		return 1;
9563 	}
9564 
9565 	event = le16_to_cpu(mpi_reply->Event);
9566 
9567 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9568 		mpt3sas_trigger_event(ioc, event, 0);
9569 
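	/*
	 * Most events are simply queued to the firmware event work thread
	 * below; a few are pre-processed here in interrupt context (for
	 * example the topology delete checks) before being queued.
	 */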
9570 	switch (event) {
9571 	/* handle these */
9572 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9573 	{
9574 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9575 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9576 		    mpi_reply->EventData;
9577 
9578 		if (baen_data->Primitive !=
9579 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9580 			return 1;
9581 
9582 		if (ioc->broadcast_aen_busy) {
9583 			ioc->broadcast_aen_pending++;
9584 			return 1;
9585 		} else
9586 			ioc->broadcast_aen_busy = 1;
9587 		break;
9588 	}
9589 
9590 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9591 		_scsih_check_topo_delete_events(ioc,
9592 		    (Mpi2EventDataSasTopologyChangeList_t *)
9593 		    mpi_reply->EventData);
9594 		break;
9595 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9596 	_scsih_check_pcie_topo_remove_events(ioc,
9597 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9598 		    mpi_reply->EventData);
9599 		break;
9600 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9601 		_scsih_check_ir_config_unhide_events(ioc,
9602 		    (Mpi2EventDataIrConfigChangeList_t *)
9603 		    mpi_reply->EventData);
9604 		break;
9605 	case MPI2_EVENT_IR_VOLUME:
9606 		_scsih_check_volume_delete_events(ioc,
9607 		    (Mpi2EventDataIrVolume_t *)
9608 		    mpi_reply->EventData);
9609 		break;
9610 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9611 	{
9612 		Mpi2EventDataLogEntryAdded_t *log_entry;
9613 		u32 *log_code;
9614 
9615 		if (!ioc->is_warpdrive)
9616 			break;
9617 
9618 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9619 		    mpi_reply->EventData;
9620 		log_code = (u32 *)log_entry->LogData;
9621 
9622 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9623 		    != MPT2_WARPDRIVE_LOGENTRY)
9624 			break;
9625 
9626 		switch (le32_to_cpu(*log_code)) {
9627 		case MPT2_WARPDRIVE_LC_SSDT:
9628 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9629 			break;
9630 		case MPT2_WARPDRIVE_LC_SSDLW:
9631 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9632 			break;
9633 		case MPT2_WARPDRIVE_LC_SSDLF:
9634 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9635 			break;
9636 		case MPT2_WARPDRIVE_LC_BRMF:
9637 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9638 			break;
9639 		}
9640 
9641 		break;
9642 	}
9643 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9644 		_scsih_sas_device_status_change_event(ioc,
9645 		    (Mpi2EventDataSasDeviceStatusChange_t *)
9646 		    mpi_reply->EventData);
9647 		break;
9648 	case MPI2_EVENT_IR_OPERATION_STATUS:
9649 	case MPI2_EVENT_SAS_DISCOVERY:
9650 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9651 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9652 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9653 	case MPI2_EVENT_PCIE_ENUMERATION:
9654 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9655 		break;
9656 
9657 	case MPI2_EVENT_TEMP_THRESHOLD:
9658 		_scsih_temp_threshold_events(ioc,
9659 			(Mpi2EventDataTemperature_t *)
9660 			mpi_reply->EventData);
9661 		break;
9662 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9663 		ActiveCableEventData =
9664 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9665 		switch (ActiveCableEventData->ReasonCode) {
9666 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9667 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9668 				   ActiveCableEventData->ReceptacleID);
9669 			pr_notice("cannot be powered and devices connected\n");
9670 			pr_notice("to this active cable will not be seen\n");
9671 			pr_notice("This active cable requires %d mW of power\n",
9672 			     ActiveCableEventData->ActiveCablePowerRequirement);
9673 			break;
9674 
9675 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9676 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9677 				   ActiveCableEventData->ReceptacleID);
9678 			pr_notice(
9679 			    "is not running at optimal speed(12 Gb/s rate)\n");
9680 			break;
9681 		}
9682 
9683 		break;
9684 
9685 	default: /* ignore the rest */
9686 		return 1;
9687 	}
9688 
9689 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9690 	fw_event = alloc_fw_event_work(sz);
9691 	if (!fw_event) {
9692 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9693 			__FILE__, __LINE__, __func__);
9694 		return 1;
9695 	}
9696 
9697 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9698 	fw_event->ioc = ioc;
9699 	fw_event->VF_ID = mpi_reply->VF_ID;
9700 	fw_event->VP_ID = mpi_reply->VP_ID;
9701 	fw_event->event = event;
9702 	_scsih_fw_event_add(ioc, fw_event);
9703 	fw_event_work_put(fw_event);
9704 	return 1;
9705 }
9706 
9707 /**
9708  * _scsih_expander_node_remove - removing expander device from list.
9709  * @ioc: per adapter object
9710  * @sas_expander: the sas_device object
9711  *
9712  * Removing object and freeing associated memory from the
9713  * ioc->sas_expander_list.
9714  */
9715 static void
9716 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9717 	struct _sas_node *sas_expander)
9718 {
9719 	struct _sas_port *mpt3sas_port, *next;
9720 	unsigned long flags;
9721 
9722 	/* remove sibling ports attached to this expander */
9723 	list_for_each_entry_safe(mpt3sas_port, next,
9724 	   &sas_expander->sas_port_list, port_list) {
9725 		if (ioc->shost_recovery)
9726 			return;
9727 		if (mpt3sas_port->remote_identify.device_type ==
9728 		    SAS_END_DEVICE)
9729 			mpt3sas_device_remove_by_sas_address(ioc,
9730 			    mpt3sas_port->remote_identify.sas_address);
9731 		else if (mpt3sas_port->remote_identify.device_type ==
9732 		    SAS_EDGE_EXPANDER_DEVICE ||
9733 		    mpt3sas_port->remote_identify.device_type ==
9734 		    SAS_FANOUT_EXPANDER_DEVICE)
9735 			mpt3sas_expander_remove(ioc,
9736 			    mpt3sas_port->remote_identify.sas_address);
9737 	}
9738 
9739 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9740 	    sas_expander->sas_address_parent);
9741 
9742 	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9743 		 sas_expander->handle, (unsigned long long)
9744 		 sas_expander->sas_address);
9745 
9746 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9747 	list_del(&sas_expander->list);
9748 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9749 
9750 	kfree(sas_expander->phy);
9751 	kfree(sas_expander);
9752 }
9753 
9754 /**
9755  * _scsih_nvme_shutdown - NVMe shutdown notification
9756  * @ioc: per adapter object
9757  *
9758  * Sending IoUnitControl request with shutdown operation code to alert IOC that
9759  * the host system is shutting down so that IOC can issue NVMe shutdown to
9760  * NVMe drives attached to it.
9761  */
9762 static void
9763 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
9764 {
9765 	Mpi26IoUnitControlRequest_t *mpi_request;
9766 	Mpi26IoUnitControlReply_t *mpi_reply;
9767 	u16 smid;
9768 
9769 	/* are there any NVMe devices ? */
9770 	if (list_empty(&ioc->pcie_device_list))
9771 		return;
9772 
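	/*
	 * scsih_cmds allows a single internally generated request at a
	 * time, so serialize on its mutex before issuing the shutdown
	 * IoUnitControl request.
	 */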
9773 	mutex_lock(&ioc->scsih_cmds.mutex);
9774 
9775 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9776 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9777 		goto out;
9778 	}
9779 
9780 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9781 
9782 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9783 	if (!smid) {
9784 		ioc_err(ioc,
9785 		    "%s: failed obtaining a smid\n", __func__);
9786 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9787 		goto out;
9788 	}
9789 
9790 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9791 	ioc->scsih_cmds.smid = smid;
9792 	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
9793 	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
9794 	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
9795 
9796 	init_completion(&ioc->scsih_cmds.done);
9797 	ioc->put_smid_default(ioc, smid);
9798 	/* Wait for max_shutdown_latency seconds */
9799 	ioc_info(ioc,
9800 		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
9801 		ioc->max_shutdown_latency);
9802 	wait_for_completion_timeout(&ioc->scsih_cmds.done,
9803 			ioc->max_shutdown_latency*HZ);
9804 
9805 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9806 		ioc_err(ioc, "%s: timeout\n", __func__);
9807 		goto out;
9808 	}
9809 
9810 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9811 		mpi_reply = ioc->scsih_cmds.reply;
9812 		ioc_info(ioc, "Io Unit Control shutdown (complete): "
9813 			"ioc_status(0x%04x), loginfo(0x%08x)\n",
9814 			le16_to_cpu(mpi_reply->IOCStatus),
9815 			le32_to_cpu(mpi_reply->IOCLogInfo));
9816 	}
9817  out:
9818 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9819 	mutex_unlock(&ioc->scsih_cmds.mutex);
9820 }
9821 
9822 
9823 /**
9824  * _scsih_ir_shutdown - IR shutdown notification
9825  * @ioc: per adapter object
9826  *
9827  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9828  * the host system is shutting down.
9829  */
9830 static void
9831 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9832 {
9833 	Mpi2RaidActionRequest_t *mpi_request;
9834 	Mpi2RaidActionReply_t *mpi_reply;
9835 	u16 smid;
9836 
9837 	/* is IR firmware build loaded ? */
9838 	if (!ioc->ir_firmware)
9839 		return;
9840 
9841 	/* are there any volumes ? */
9842 	if (list_empty(&ioc->raid_device_list))
9843 		return;
9844 
9845 	mutex_lock(&ioc->scsih_cmds.mutex);
9846 
9847 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9848 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9849 		goto out;
9850 	}
9851 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9852 
9853 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9854 	if (!smid) {
9855 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9856 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9857 		goto out;
9858 	}
9859 
9860 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9861 	ioc->scsih_cmds.smid = smid;
9862 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9863 
9864 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9865 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9866 
9867 	if (!ioc->hide_ir_msg)
9868 		ioc_info(ioc, "IR shutdown (sending)\n");
9869 	init_completion(&ioc->scsih_cmds.done);
9870 	ioc->put_smid_default(ioc, smid);
9871 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9872 
9873 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9874 		ioc_err(ioc, "%s: timeout\n", __func__);
9875 		goto out;
9876 	}
9877 
9878 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9879 		mpi_reply = ioc->scsih_cmds.reply;
9880 		if (!ioc->hide_ir_msg)
9881 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9882 				 le16_to_cpu(mpi_reply->IOCStatus),
9883 				 le32_to_cpu(mpi_reply->IOCLogInfo));
9884 	}
9885 
9886  out:
9887 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9888 	mutex_unlock(&ioc->scsih_cmds.mutex);
9889 }
9890 
9891 /**
9892  * scsih_remove - detach and remove the SCSI host
9893  * @pdev: PCI device struct
9894  *
9895  * Routine called when unloading the driver.
9896  */
9897 static void scsih_remove(struct pci_dev *pdev)
9898 {
9899 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9900 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9901 	struct _sas_port *mpt3sas_port, *next_port;
9902 	struct _raid_device *raid_device, *next;
9903 	struct MPT3SAS_TARGET *sas_target_priv_data;
9904 	struct _pcie_device *pcie_device, *pcienext;
9905 	struct workqueue_struct	*wq;
9906 	unsigned long flags;
9907 	Mpi2ConfigReply_t mpi_reply;
9908 
9909 	ioc->remove_host = 1;
9910 
9911 	if (!pci_device_is_present(pdev))
9912 		_scsih_flush_running_cmds(ioc);
9913 
9914 	_scsih_fw_event_cleanup_queue(ioc);
9915 
9916 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9917 	wq = ioc->firmware_event_thread;
9918 	ioc->firmware_event_thread = NULL;
9919 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9920 	if (wq)
9921 		destroy_workqueue(wq);
9922 	/*
9923 	 * Copy back the unmodified ioc page1 so that on next driver load,
9924 	 * current modified changes on ioc page1 won't take effect.
9925 	 */
9926 	if (ioc->is_aero_ioc)
9927 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9928 				&ioc->ioc_pg1_copy);
9929 	/* release all the volumes */
9930 	_scsih_ir_shutdown(ioc);
9931 	mpt3sas_destroy_debugfs(ioc);
9932 	sas_remove_host(shost);
9933 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9934 	    list) {
9935 		if (raid_device->starget) {
9936 			sas_target_priv_data =
9937 			    raid_device->starget->hostdata;
9938 			sas_target_priv_data->deleted = 1;
9939 			scsi_remove_target(&raid_device->starget->dev);
9940 		}
9941 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9942 			 raid_device->handle, (u64)raid_device->wwid);
9943 		_scsih_raid_device_remove(ioc, raid_device);
9944 	}
9945 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9946 		list) {
9947 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9948 		list_del_init(&pcie_device->list);
9949 		pcie_device_put(pcie_device);
9950 	}
9951 
9952 	/* free ports attached to the sas_host */
9953 	list_for_each_entry_safe(mpt3sas_port, next_port,
9954 	   &ioc->sas_hba.sas_port_list, port_list) {
9955 		if (mpt3sas_port->remote_identify.device_type ==
9956 		    SAS_END_DEVICE)
9957 			mpt3sas_device_remove_by_sas_address(ioc,
9958 			    mpt3sas_port->remote_identify.sas_address);
9959 		else if (mpt3sas_port->remote_identify.device_type ==
9960 		    SAS_EDGE_EXPANDER_DEVICE ||
9961 		    mpt3sas_port->remote_identify.device_type ==
9962 		    SAS_FANOUT_EXPANDER_DEVICE)
9963 			mpt3sas_expander_remove(ioc,
9964 			    mpt3sas_port->remote_identify.sas_address);
9965 	}
9966 
9967 	/* free phys attached to the sas_host */
9968 	if (ioc->sas_hba.num_phys) {
9969 		kfree(ioc->sas_hba.phy);
9970 		ioc->sas_hba.phy = NULL;
9971 		ioc->sas_hba.num_phys = 0;
9972 	}
9973 
9974 	mpt3sas_base_detach(ioc);
9975 	spin_lock(&gioc_lock);
9976 	list_del(&ioc->list);
9977 	spin_unlock(&gioc_lock);
9978 	scsi_host_put(shost);
9979 }
9980 
9981 /**
9982  * scsih_shutdown - routine called during system shutdown
9983  * @pdev: PCI device struct
9984  */
9985 static void
9986 scsih_shutdown(struct pci_dev *pdev)
9987 {
9988 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9989 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9990 	struct workqueue_struct	*wq;
9991 	unsigned long flags;
9992 	Mpi2ConfigReply_t mpi_reply;
9993 
9994 	ioc->remove_host = 1;
9995 
9996 	if (!pci_device_is_present(pdev))
9997 		_scsih_flush_running_cmds(ioc);
9998 
9999 	_scsih_fw_event_cleanup_queue(ioc);
10000 
10001 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
10002 	wq = ioc->firmware_event_thread;
10003 	ioc->firmware_event_thread = NULL;
10004 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
10005 	if (wq)
10006 		destroy_workqueue(wq);
10007 	/*
10008 	 * Copy back the unmodified IOC page1 so that, on the next driver
10009 	 * load, the currently modified changes to IOC page1 won't take effect.
10010 	 */
10011 	if (ioc->is_aero_ioc)
10012 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
10013 				&ioc->ioc_pg1_copy);
10014 
10015 	_scsih_ir_shutdown(ioc);
10016 	_scsih_nvme_shutdown(ioc);
10017 	mpt3sas_base_detach(ioc);
10018 }
10019 
10020 
10021 /**
10022  * _scsih_probe_boot_devices - reports the 1st device
10023  * @ioc: per adapter object
10024  *
10025  * If specified in BIOS page 2, this routine reports the 1st
10026  * device to scsi-ml or the sas transport for persistent boot device
10027  * purposes.  Please refer to _scsih_determine_boot_device().
10028  */
10029 static void
10030 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
10031 {
10032 	u32 channel;
10033 	void *device;
10034 	struct _sas_device *sas_device;
10035 	struct _raid_device *raid_device;
10036 	struct _pcie_device *pcie_device;
10037 	u16 handle;
10038 	u64 sas_address_parent;
10039 	u64 sas_address;
10040 	unsigned long flags;
10041 	int rc;
10042 	int tid;
10043 
10044 	/* no BIOS, return immediately */
10045 	if (!ioc->bios_pg3.BiosVersion)
10046 		return;
10047 
10048 	device = NULL;
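	/*
	 * Boot device selection priority: the BIOS-requested boot device
	 * first, then the requested alternate, then the current boot device.
	 */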
10049 	if (ioc->req_boot_device.device) {
10050 		device =  ioc->req_boot_device.device;
10051 		channel = ioc->req_boot_device.channel;
10052 	} else if (ioc->req_alt_boot_device.device) {
10053 		device =  ioc->req_alt_boot_device.device;
10054 		channel = ioc->req_alt_boot_device.channel;
10055 	} else if (ioc->current_boot_device.device) {
10056 		device =  ioc->current_boot_device.device;
10057 		channel = ioc->current_boot_device.channel;
10058 	}
10059 
10060 	if (!device)
10061 		return;
10062 
10063 	if (channel == RAID_CHANNEL) {
10064 		raid_device = device;
10065 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10066 		    raid_device->id, 0);
10067 		if (rc)
10068 			_scsih_raid_device_remove(ioc, raid_device);
10069 	} else if (channel == PCIE_CHANNEL) {
10070 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10071 		pcie_device = device;
10072 		tid = pcie_device->id;
10073 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
10074 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10075 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
10076 		if (rc)
10077 			_scsih_pcie_device_remove(ioc, pcie_device);
10078 	} else {
10079 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
10080 		sas_device = device;
10081 		handle = sas_device->handle;
10082 		sas_address_parent = sas_device->sas_address_parent;
10083 		sas_address = sas_device->sas_address;
10084 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
10085 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10086 
10087 		if (ioc->hide_drives)
10088 			return;
10089 		if (!mpt3sas_transport_port_add(ioc, handle,
10090 		    sas_address_parent)) {
10091 			_scsih_sas_device_remove(ioc, sas_device);
10092 		} else if (!sas_device->starget) {
10093 			if (!ioc->is_driver_loading) {
10094 				mpt3sas_transport_port_remove(ioc,
10095 				    sas_address,
10096 				    sas_address_parent);
10097 				_scsih_sas_device_remove(ioc, sas_device);
10098 			}
10099 		}
10100 	}
10101 }
10102 
10103 /**
10104  * _scsih_probe_raid - reporting raid volumes to scsi-ml
10105  * @ioc: per adapter object
10106  *
10107  * Called during initial loading of the driver.
10108  */
10109 static void
10110 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
10111 {
10112 	struct _raid_device *raid_device, *raid_next;
10113 	int rc;
10114 
10115 	list_for_each_entry_safe(raid_device, raid_next,
10116 	    &ioc->raid_device_list, list) {
10117 		if (raid_device->starget)
10118 			continue;
10119 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10120 		    raid_device->id, 0);
10121 		if (rc)
10122 			_scsih_raid_device_remove(ioc, raid_device);
10123 	}
10124 }
10125 
10126 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
10127 {
10128 	struct _sas_device *sas_device = NULL;
10129 	unsigned long flags;
10130 
10131 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
10132 	if (!list_empty(&ioc->sas_device_init_list)) {
10133 		sas_device = list_first_entry(&ioc->sas_device_init_list,
10134 				struct _sas_device, list);
10135 		sas_device_get(sas_device);
10136 	}
10137 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10138 
10139 	return sas_device;
10140 }
10141 
10142 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10143 		struct _sas_device *sas_device)
10144 {
10145 	unsigned long flags;
10146 
10147 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
10148 
10149 	/*
10150 	 * Since we dropped the lock during the call to port_add(), we need to
10151 	 * be careful here that somebody else didn't move or delete this item
10152 	 * while we were busy with other things.
10153 	 *
10154 	 * If it was on the list, we need a put() for the reference the list
10155 	 * had. Either way, we need a get() for the destination list.
10156 	 */
10157 	if (!list_empty(&sas_device->list)) {
10158 		list_del_init(&sas_device->list);
10159 		sas_device_put(sas_device);
10160 	}
10161 
10162 	sas_device_get(sas_device);
10163 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
10164 
10165 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10166 }
10167 
10168 /**
10169  * _scsih_probe_sas - reporting sas devices to sas transport
10170  * @ioc: per adapter object
10171  *
10172  * Called during initial loading of the driver.
10173  */
10174 static void
10175 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10176 {
10177 	struct _sas_device *sas_device;
10178 
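	/* hide_drives is set for WarpDrive configurations that hide their
	 * member drives from the OS, so nothing is reported here.
	 */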
10179 	if (ioc->hide_drives)
10180 		return;
10181 
10182 	while ((sas_device = get_next_sas_device(ioc))) {
10183 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10184 		    sas_device->sas_address_parent)) {
10185 			_scsih_sas_device_remove(ioc, sas_device);
10186 			sas_device_put(sas_device);
10187 			continue;
10188 		} else if (!sas_device->starget) {
10189 			/*
10190 			 * When async scanning is enabled, it's not possible to
10191 			 * remove devices while scanning is turned on, due to an
10192 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10193 			 * sysfs_addrm_start().
10194 			 */
10195 			if (!ioc->is_driver_loading) {
10196 				mpt3sas_transport_port_remove(ioc,
10197 				    sas_device->sas_address,
10198 				    sas_device->sas_address_parent);
10199 				_scsih_sas_device_remove(ioc, sas_device);
10200 				sas_device_put(sas_device);
10201 				continue;
10202 			}
10203 		}
10204 		sas_device_make_active(ioc, sas_device);
10205 		sas_device_put(sas_device);
10206 	}
10207 }
10208 
10209 /**
10210  * get_next_pcie_device - Get the next pcie device
10211  * @ioc: per adapter object
10212  *
10213  * Get the next pcie device from the pcie_device_init_list.
10214  *
10215  * Return: pcie device structure if pcie_device_init_list is not empty,
10216  * otherwise returns NULL
10217  */
10218 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10219 {
10220 	struct _pcie_device *pcie_device = NULL;
10221 	unsigned long flags;
10222 
10223 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10224 	if (!list_empty(&ioc->pcie_device_init_list)) {
10225 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10226 				struct _pcie_device, list);
10227 		pcie_device_get(pcie_device);
10228 	}
10229 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10230 
10231 	return pcie_device;
10232 }
10233 
10234 /**
10235  * pcie_device_make_active - Add pcie device to the pcie_device_list
10236  * @ioc: per adapter object
10237  * @pcie_device: pcie device object
10238  *
10239  * Add the pcie device, which has been registered with the SCSI
10240  * Transport Layer, to the pcie_device_list.
10241  */
10242 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10243 		struct _pcie_device *pcie_device)
10244 {
10245 	unsigned long flags;
10246 
10247 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10248 
10249 	if (!list_empty(&pcie_device->list)) {
10250 		list_del_init(&pcie_device->list);
10251 		pcie_device_put(pcie_device);
10252 	}
10253 	pcie_device_get(pcie_device);
10254 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10255 
10256 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10257 }
10258 
10259 /**
10260  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10261  * @ioc: per adapter object
10262  *
10263  * Called during initial loading of the driver.
10264  */
10265 static void
10266 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10267 {
10268 	struct _pcie_device *pcie_device;
10269 	int rc;
10270 
10271 	/* PCIe Device List */
10272 	while ((pcie_device = get_next_pcie_device(ioc))) {
10273 		if (pcie_device->starget) {
10274 			pcie_device_put(pcie_device);
10275 			continue;
10276 		}
10277 		if (pcie_device->access_status ==
10278 		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10279 			pcie_device_make_active(ioc, pcie_device);
10280 			pcie_device_put(pcie_device);
10281 			continue;
10282 		}
10283 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10284 			pcie_device->id, 0);
10285 		if (rc) {
10286 			_scsih_pcie_device_remove(ioc, pcie_device);
10287 			pcie_device_put(pcie_device);
10288 			continue;
10289 		} else if (!pcie_device->starget) {
10290 			/*
10291 			 * When async scanning is enabled, it's not possible to
10292 			 * remove devices while scanning is turned on due to an
10293 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10294 			 * sysfs_addrm_start()
10295 			 */
10296 			if (!ioc->is_driver_loading) {
10297 			/* TODO-- Need to find out whether this condition will
10298 			 * occur or not
10299 			 */
10300 				_scsih_pcie_device_remove(ioc, pcie_device);
10301 				pcie_device_put(pcie_device);
10302 				continue;
10303 			}
10304 		}
10305 		pcie_device_make_active(ioc, pcie_device);
10306 		pcie_device_put(pcie_device);
10307 	}
10308 }
10309 
10310 /**
10311  * _scsih_probe_devices - probing for devices
10312  * @ioc: per adapter object
10313  *
10314  * Called during initial loading of the driver.
10315  */
10316 static void
10317 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10318 {
10319 	u16 volume_mapping_flags;
10320 
10321 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10322 		return;  /* return when IOC doesn't support initiator mode */
10323 
10324 	_scsih_probe_boot_devices(ioc);
10325 
10326 	if (ioc->ir_firmware) {
10327 		volume_mapping_flags =
10328 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10329 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
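		/*
		 * Probe order follows the volume mapping mode: with low
		 * volume mapping the RAID volumes are reported to scsi-ml
		 * before the bare SAS devices, otherwise after them.
		 */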
10330 		if (volume_mapping_flags ==
10331 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10332 			_scsih_probe_raid(ioc);
10333 			_scsih_probe_sas(ioc);
10334 		} else {
10335 			_scsih_probe_sas(ioc);
10336 			_scsih_probe_raid(ioc);
10337 		}
10338 	} else {
10339 		_scsih_probe_sas(ioc);
10340 		_scsih_probe_pcie(ioc);
10341 	}
10342 }
10343 
10344 /**
10345  * scsih_scan_start - scsi lld callback for .scan_start
10346  * @shost: SCSI host pointer
10347  *
10348  * The shost has the ability to discover targets on its own instead
10349  * of scanning the entire bus.  In our implementation, we will kick off
10350  * firmware discovery.
10351  */
10352 static void
10353 scsih_scan_start(struct Scsi_Host *shost)
10354 {
10355 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10356 	int rc;
10357 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10358 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10359 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
10360 		mpt3sas_enable_diag_buffer(ioc, 1);
10361 
10362 	if (disable_discovery > 0)
10363 		return;
10364 
10365 	ioc->start_scan = 1;
10366 	rc = mpt3sas_port_enable(ioc);
10367 
10368 	if (rc != 0)
10369 		ioc_info(ioc, "port enable: FAILED\n");
10370 }
10371 
10372 /**
10373  * scsih_scan_finished - scsi lld callback for .scan_finished
10374  * @shost: SCSI host pointer
10375  * @time: elapsed time of the scan in jiffies
10376  *
10377  * This function will be called periodically until it returns 1 with the
10378  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10379  * we wait for firmware discovery to complete, then return 1.
10380  */
10381 static int
10382 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10383 {
10384 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10385 
10386 	if (disable_discovery > 0) {
10387 		ioc->is_driver_loading = 0;
10388 		ioc->wait_for_discovery_to_complete = 0;
10389 		return 1;
10390 	}
10391 
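	/*
	 * Give firmware discovery up to 300 seconds; after that, treat
	 * port enable as failed and stop waiting.
	 */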
10392 	if (time >= (300 * HZ)) {
10393 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10394 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10395 		ioc->is_driver_loading = 0;
10396 		return 1;
10397 	}
10398 
10399 	if (ioc->start_scan)
10400 		return 0;
10401 
10402 	if (ioc->start_scan_failed) {
10403 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10404 			 ioc->start_scan_failed);
10405 		ioc->is_driver_loading = 0;
10406 		ioc->wait_for_discovery_to_complete = 0;
10407 		ioc->remove_host = 1;
10408 		return 1;
10409 	}
10410 
10411 	ioc_info(ioc, "port enable: SUCCESS\n");
10412 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10413 
10414 	if (ioc->wait_for_discovery_to_complete) {
10415 		ioc->wait_for_discovery_to_complete = 0;
10416 		_scsih_probe_devices(ioc);
10417 	}
10418 	mpt3sas_base_start_watchdog(ioc);
10419 	ioc->is_driver_loading = 0;
10420 	return 1;
10421 }
10422 
10423 /* shost template for SAS 2.0 HBA devices */
10424 static struct scsi_host_template mpt2sas_driver_template = {
10425 	.module				= THIS_MODULE,
10426 	.name				= "Fusion MPT SAS Host",
10427 	.proc_name			= MPT2SAS_DRIVER_NAME,
10428 	.queuecommand			= scsih_qcmd,
10429 	.target_alloc			= scsih_target_alloc,
10430 	.slave_alloc			= scsih_slave_alloc,
10431 	.slave_configure		= scsih_slave_configure,
10432 	.target_destroy			= scsih_target_destroy,
10433 	.slave_destroy			= scsih_slave_destroy,
10434 	.scan_finished			= scsih_scan_finished,
10435 	.scan_start			= scsih_scan_start,
10436 	.change_queue_depth		= scsih_change_queue_depth,
10437 	.eh_abort_handler		= scsih_abort,
10438 	.eh_device_reset_handler	= scsih_dev_reset,
10439 	.eh_target_reset_handler	= scsih_target_reset,
10440 	.eh_host_reset_handler		= scsih_host_reset,
10441 	.bios_param			= scsih_bios_param,
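	/*
	 * can_queue starts at 1 as a placeholder; the driver is expected to
	 * raise it from IOC Facts during base attach.
	 */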
10442 	.can_queue			= 1,
10443 	.this_id			= -1,
10444 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
10445 	.max_sectors			= 32767,
10446 	.cmd_per_lun			= 7,
10447 	.shost_attrs			= mpt3sas_host_attrs,
10448 	.sdev_attrs			= mpt3sas_dev_attrs,
10449 	.track_queue_depth		= 1,
10450 	.cmd_size			= sizeof(struct scsiio_tracker),
10451 };
10452 
10453 /* raid transport support for SAS 2.0 HBA devices */
10454 static struct raid_function_template mpt2sas_raid_functions = {
10455 	.cookie		= &mpt2sas_driver_template,
10456 	.is_raid	= scsih_is_raid,
10457 	.get_resync	= scsih_get_resync,
10458 	.get_state	= scsih_get_state,
10459 };
10460 
10461 /* shost template for SAS 3.0 HBA devices */
10462 static struct scsi_host_template mpt3sas_driver_template = {
10463 	.module				= THIS_MODULE,
10464 	.name				= "Fusion MPT SAS Host",
10465 	.proc_name			= MPT3SAS_DRIVER_NAME,
10466 	.queuecommand			= scsih_qcmd,
10467 	.target_alloc			= scsih_target_alloc,
10468 	.slave_alloc			= scsih_slave_alloc,
10469 	.slave_configure		= scsih_slave_configure,
10470 	.target_destroy			= scsih_target_destroy,
10471 	.slave_destroy			= scsih_slave_destroy,
10472 	.scan_finished			= scsih_scan_finished,
10473 	.scan_start			= scsih_scan_start,
10474 	.change_queue_depth		= scsih_change_queue_depth,
10475 	.eh_abort_handler		= scsih_abort,
10476 	.eh_device_reset_handler	= scsih_dev_reset,
10477 	.eh_target_reset_handler	= scsih_target_reset,
10478 	.eh_host_reset_handler		= scsih_host_reset,
10479 	.bios_param			= scsih_bios_param,
10480 	.can_queue			= 1,
10481 	.this_id			= -1,
10482 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
10483 	.max_sectors			= 32767,
10484 	.max_segment_size		= 0xffffffff,
10485 	.cmd_per_lun			= 7,
10486 	.shost_attrs			= mpt3sas_host_attrs,
10487 	.sdev_attrs			= mpt3sas_dev_attrs,
10488 	.track_queue_depth		= 1,
10489 	.cmd_size			= sizeof(struct scsiio_tracker),
10490 };
10491 
10492 /* raid transport support for SAS 3.0 HBA devices */
10493 static struct raid_function_template mpt3sas_raid_functions = {
10494 	.cookie		= &mpt3sas_driver_template,
10495 	.is_raid	= scsih_is_raid,
10496 	.get_resync	= scsih_get_resync,
10497 	.get_state	= scsih_get_state,
10498 };
10499 
10500 /**
10501  * _scsih_determine_hba_mpi_version - determine which MPI version class
10502  *					this device belongs to.
10503  * @pdev: PCI device struct
10504  *
10505  * return MPI2_VERSION for SAS 2.0 HBA devices,
10506  *	MPI25_VERSION for SAS 3.0 HBA devices, and
10507  *	MPI26_VERSION for Cutlass & Intruder SAS 3.0 HBA devices and later
10508  */
10509 static u16
10510 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10511 {
10512 
10513 	switch (pdev->device) {
10514 	case MPI2_MFGPAGE_DEVID_SSS6200:
10515 	case MPI2_MFGPAGE_DEVID_SAS2004:
10516 	case MPI2_MFGPAGE_DEVID_SAS2008:
10517 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
10518 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
10519 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
10520 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
10521 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
10522 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
10523 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
10524 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
10525 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
10526 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
10527 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
10528 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
10529 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
10530 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
10531 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10532 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10533 		return MPI2_VERSION;
10534 	case MPI25_MFGPAGE_DEVID_SAS3004:
10535 	case MPI25_MFGPAGE_DEVID_SAS3008:
10536 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
10537 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
10538 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
10539 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
10540 		return MPI25_VERSION;
10541 	case MPI26_MFGPAGE_DEVID_SAS3216:
10542 	case MPI26_MFGPAGE_DEVID_SAS3224:
10543 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
10544 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
10545 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
10546 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
10547 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
10548 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
10549 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
10550 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
10551 	case MPI26_MFGPAGE_DEVID_SAS3508:
10552 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
10553 	case MPI26_MFGPAGE_DEVID_SAS3408:
10554 	case MPI26_MFGPAGE_DEVID_SAS3516:
10555 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
10556 	case MPI26_MFGPAGE_DEVID_SAS3416:
10557 	case MPI26_MFGPAGE_DEVID_SAS3616:
10558 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10559 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10560 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10561 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10562 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10563 		return MPI26_VERSION;
10564 	}
10565 	return 0;
10566 }
10567 
10568 /**
10569  * _scsih_probe - attach and add scsi host
10570  * @pdev: PCI device struct
10571  * @id: pci device id
10572  *
10573  * Return: 0 success, anything else error.
10574  */
10575 static int
10576 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10577 {
10578 	struct MPT3SAS_ADAPTER *ioc;
10579 	struct Scsi_Host *shost = NULL;
10580 	int rv;
10581 	u16 hba_mpi_version;
10582 
10583 	/* Determine in which MPI version class this pci device belongs */
10584 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10585 	if (hba_mpi_version == 0)
10586 		return -ENODEV;
10587 
10588 	/* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
10589 	 * for other generation HBAs return -ENODEV
10590 	 */
10591 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10592 		return -ENODEV;
10593 
10594 	/* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
10595 	 * for other generation HBAs return -ENODEV
10596 	 */
10597 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10598 		|| hba_mpi_version ==  MPI26_VERSION)))
10599 		return -ENODEV;
10600 
10601 	switch (hba_mpi_version) {
10602 	case MPI2_VERSION:
10603 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10604 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10605 		/* Use mpt2sas driver host template for SAS 2.0 HBAs */
10606 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10607 		  sizeof(struct MPT3SAS_ADAPTER));
10608 		if (!shost)
10609 			return -ENODEV;
10610 		ioc = shost_priv(shost);
10611 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10612 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10613 		ioc->id = mpt2_ids++;
10614 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10615 		switch (pdev->device) {
10616 		case MPI2_MFGPAGE_DEVID_SSS6200:
10617 			ioc->is_warpdrive = 1;
10618 			ioc->hide_ir_msg = 1;
10619 			break;
10620 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10621 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10622 			ioc->is_mcpu_endpoint = 1;
10623 			break;
10624 		default:
10625 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10626 			break;
10627 		}
10628 		break;
10629 	case MPI25_VERSION:
10630 	case MPI26_VERSION:
10631 		/* Use mpt3sas driver host template for SAS 3.0 and newer HBAs */
10632 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10633 		  sizeof(struct MPT3SAS_ADAPTER));
10634 		if (!shost)
10635 			return -ENODEV;
10636 		ioc = shost_priv(shost);
10637 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10638 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10639 		ioc->id = mpt3_ids++;
10640 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10641 		switch (pdev->device) {
10642 		case MPI26_MFGPAGE_DEVID_SAS3508:
10643 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10644 		case MPI26_MFGPAGE_DEVID_SAS3408:
10645 		case MPI26_MFGPAGE_DEVID_SAS3516:
10646 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10647 		case MPI26_MFGPAGE_DEVID_SAS3416:
10648 		case MPI26_MFGPAGE_DEVID_SAS3616:
10649 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10650 			ioc->is_gen35_ioc = 1;
10651 			break;
10652 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10653 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10654 			dev_info(&pdev->dev,
10655 			    "HBA is in Configurable Secure mode\n");
10656 			/* fall through */
10657 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10658 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10659 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10660 			break;
10661 		default:
10662 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10663 		}
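		/*
		 * SAS 3.0 C0-revision and newer parts, and all MPI26-class
		 * parts, support combined reply queues; gen3.5 IOCs provide
		 * a larger set of reply-post host index registers.
		 */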
10664 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10665 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10666 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10667 			ioc->combined_reply_queue = 1;
10668 			if (ioc->is_gen35_ioc)
10669 				ioc->combined_reply_index_count =
10670 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10671 			else
10672 				ioc->combined_reply_index_count =
10673 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10674 		}
10675 		break;
10676 	default:
10677 		return -ENODEV;
10678 	}
10679 
10680 	INIT_LIST_HEAD(&ioc->list);
10681 	spin_lock(&gioc_lock);
10682 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10683 	spin_unlock(&gioc_lock);
10684 	ioc->shost = shost;
10685 	ioc->pdev = pdev;
10686 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10687 	ioc->tm_cb_idx = tm_cb_idx;
10688 	ioc->ctl_cb_idx = ctl_cb_idx;
10689 	ioc->base_cb_idx = base_cb_idx;
10690 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10691 	ioc->transport_cb_idx = transport_cb_idx;
10692 	ioc->scsih_cb_idx = scsih_cb_idx;
10693 	ioc->config_cb_idx = config_cb_idx;
10694 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10695 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10696 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10697 	ioc->logging_level = logging_level;
10698 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10699 	/* Host waits for a minimum of six seconds */
10700 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
10701 	/*
10702 	 * Enable MEMORY MOVE support flag.
10703 	 */
10704 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10705 
10706 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10707 
10708 	/* misc semaphores and spin locks */
10709 	mutex_init(&ioc->reset_in_progress_mutex);
10710 	/* initializing pci_access_mutex lock */
10711 	mutex_init(&ioc->pci_access_mutex);
10712 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10713 	spin_lock_init(&ioc->scsi_lookup_lock);
10714 	spin_lock_init(&ioc->sas_device_lock);
10715 	spin_lock_init(&ioc->sas_node_lock);
10716 	spin_lock_init(&ioc->fw_event_lock);
10717 	spin_lock_init(&ioc->raid_device_lock);
10718 	spin_lock_init(&ioc->pcie_device_lock);
10719 	spin_lock_init(&ioc->diag_trigger_lock);
10720 
10721 	INIT_LIST_HEAD(&ioc->sas_device_list);
10722 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
10723 	INIT_LIST_HEAD(&ioc->sas_expander_list);
10724 	INIT_LIST_HEAD(&ioc->enclosure_list);
10725 	INIT_LIST_HEAD(&ioc->pcie_device_list);
10726 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10727 	INIT_LIST_HEAD(&ioc->fw_event_list);
10728 	INIT_LIST_HEAD(&ioc->raid_device_list);
10729 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10730 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
10731 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
10732 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10733 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10734 	INIT_LIST_HEAD(&ioc->reply_queue_list);
10735 
10736 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10737 
10738 	/* init shost parameters */
10739 	shost->max_cmd_len = 32;
10740 	shost->max_lun = max_lun;
10741 	shost->transportt = mpt3sas_transport_template;
10742 	shost->unique_id = ioc->id;
10743 
10744 	if (ioc->is_mcpu_endpoint) {
10745 		/* mCPU MPI supports 64K max IO */
10746 		shost->max_sectors = 128;
10747 		ioc_info(ioc, "The max_sectors value is set to %d\n",
10748 			 shost->max_sectors);
10749 	} else {
10750 		if (max_sectors != 0xFFFF) {
10751 			if (max_sectors < 64) {
10752 				shost->max_sectors = 64;
10753 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10754 					 max_sectors);
10755 			} else if (max_sectors > 32767) {
10756 				shost->max_sectors = 32767;
10757 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
10758 					 max_sectors);
10759 			} else {
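				/* clear bit 0 to keep max_sectors even */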
10760 				shost->max_sectors = max_sectors & 0xFFFE;
10761 				ioc_info(ioc, "The max_sectors value is set to %d\n",
10762 					 shost->max_sectors);
10763 			}
10764 		}
10765 	}
10766 	/* register EEDP capabilities with SCSI layer */
10767 	if (prot_mask >= 0)
10768 		scsi_host_set_prot(shost, (prot_mask & 0x07));
10769 	else
10770 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10771 				   | SHOST_DIF_TYPE2_PROTECTION
10772 				   | SHOST_DIF_TYPE3_PROTECTION);
10773 
10774 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10775 
10776 	/* event thread */
10777 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10778 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
10779 	ioc->firmware_event_thread = alloc_ordered_workqueue(
10780 	    ioc->firmware_event_name, 0);
10781 	if (!ioc->firmware_event_thread) {
10782 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10783 			__FILE__, __LINE__, __func__);
10784 		rv = -ENODEV;
10785 		goto out_thread_fail;
10786 	}
10787 
10788 	ioc->is_driver_loading = 1;
10789 	if ((mpt3sas_base_attach(ioc))) {
10790 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10791 			__FILE__, __LINE__, __func__);
10792 		rv = -ENODEV;
10793 		goto out_attach_fail;
10794 	}
10795 
10796 	if (ioc->is_warpdrive) {
10797 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
10798 			ioc->hide_drives = 0;
10799 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
10800 			ioc->hide_drives = 1;
10801 		else {
10802 			if (mpt3sas_get_num_volumes(ioc))
10803 				ioc->hide_drives = 1;
10804 			else
10805 				ioc->hide_drives = 0;
10806 		}
10807 	} else
10808 		ioc->hide_drives = 0;
10809 
10810 	rv = scsi_add_host(shost, &pdev->dev);
10811 	if (rv) {
10812 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10813 			__FILE__, __LINE__, __func__);
10814 		goto out_add_shost_fail;
10815 	}
10816 
10817 	scsi_scan_host(shost);
10818 	mpt3sas_setup_debugfs(ioc);
10819 	return 0;
10820 out_add_shost_fail:
10821 	mpt3sas_base_detach(ioc);
10822  out_attach_fail:
10823 	destroy_workqueue(ioc->firmware_event_thread);
10824  out_thread_fail:
10825 	spin_lock(&gioc_lock);
10826 	list_del(&ioc->list);
10827 	spin_unlock(&gioc_lock);
10828 	scsi_host_put(shost);
10829 	return rv;
10830 }
10831 
10832 #ifdef CONFIG_PM
10833 /**
10834  * scsih_suspend - power management suspend main entry point
10835  * @pdev: PCI device struct
10836  * @state: PM state change to (usually PCI_D3)
10837  *
10838  * Return: 0 success, anything else error.
10839  */
10840 static int
10841 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10842 {
10843 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10844 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10845 	pci_power_t device_state;
10846 
10847 	mpt3sas_base_stop_watchdog(ioc);
10848 	flush_scheduled_work();
10849 	scsi_block_requests(shost);
10850 	_scsih_nvme_shutdown(ioc);
10851 	device_state = pci_choose_state(pdev, state);
10852 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10853 		 pdev, pci_name(pdev), device_state);
10854 
10855 	pci_save_state(pdev);
10856 	mpt3sas_base_free_resources(ioc);
10857 	pci_set_power_state(pdev, device_state);
10858 	return 0;
10859 }
10860 
10861 /**
10862  * scsih_resume - power management resume main entry point
10863  * @pdev: PCI device struct
10864  *
10865  * Return: 0 success, anything else error.
10866  */
10867 static int
10868 scsih_resume(struct pci_dev *pdev)
10869 {
10870 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10871 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10872 	pci_power_t device_state = pdev->current_state;
10873 	int r;
10874 
10875 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10876 		 pdev, pci_name(pdev), device_state);
10877 
10878 	pci_set_power_state(pdev, PCI_D0);
10879 	pci_enable_wake(pdev, PCI_D0, 0);
10880 	pci_restore_state(pdev);
10881 	ioc->pdev = pdev;
10882 	r = mpt3sas_base_map_resources(ioc);
10883 	if (r)
10884 		return r;
10885 	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
10886 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10887 	scsi_unblock_requests(shost);
10888 	mpt3sas_base_start_watchdog(ioc);
10889 	return 0;
10890 }
10891 #endif /* CONFIG_PM */
10892 
10893 /**
10894  * scsih_pci_error_detected - Called when a PCI error is detected.
10895  * @pdev: PCI device struct
10896  * @state: PCI channel state
10897  *
10898  * Description: Called when a PCI error is detected.
10899  *
10900  * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
10900  * PCI_ERS_RESULT_DISCONNECT.
10901  */
10902 static pci_ers_result_t
10903 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10904 {
10905 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10906 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10907 
10908 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10909 
10910 	switch (state) {
10911 	case pci_channel_io_normal:
10912 		return PCI_ERS_RESULT_CAN_RECOVER;
10913 	case pci_channel_io_frozen:
10914 		/* Fatal error, prepare for slot reset */
10915 		ioc->pci_error_recovery = 1;
10916 		scsi_block_requests(ioc->shost);
10917 		mpt3sas_base_stop_watchdog(ioc);
10918 		mpt3sas_base_free_resources(ioc);
10919 		return PCI_ERS_RESULT_NEED_RESET;
10920 	case pci_channel_io_perm_failure:
10921 		/* Permanent error, prepare for device removal */
10922 		ioc->pci_error_recovery = 1;
10923 		mpt3sas_base_stop_watchdog(ioc);
10924 		_scsih_flush_running_cmds(ioc);
10925 		return PCI_ERS_RESULT_DISCONNECT;
10926 	}
10927 	return PCI_ERS_RESULT_NEED_RESET;
10928 }
10929 
10930 /**
10931  * scsih_pci_slot_reset - Called when PCI slot has been reset.
10932  * @pdev: PCI device struct
10933  *
10934  * Description: This routine is called by the pci error recovery
10935  * code after the PCI slot has been reset, just before we
10936  * should resume normal operations.
10937  */
10938 static pci_ers_result_t
10939 scsih_pci_slot_reset(struct pci_dev *pdev)
10940 {
10941 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10942 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10943 	int rc;
10944 
10945 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
10946 
10947 	ioc->pci_error_recovery = 0;
10948 	ioc->pdev = pdev;
10949 	pci_restore_state(pdev);
10950 	rc = mpt3sas_base_map_resources(ioc);
10951 	if (rc)
10952 		return PCI_ERS_RESULT_DISCONNECT;
10953 
10954 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
10955 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10956 
10957 	ioc_warn(ioc, "hard reset: %s\n",
10958 		 (rc == 0) ? "success" : "failed");
10959 
10960 	if (!rc)
10961 		return PCI_ERS_RESULT_RECOVERED;
10962 	else
10963 		return PCI_ERS_RESULT_DISCONNECT;
10964 }
10965 
10966 /**
10967  * scsih_pci_resume() - resume normal ops after PCI reset
10968  * @pdev: pointer to PCI device
10969  *
10970  * Called when the error recovery driver tells us that it's
10971  * OK to resume normal operation. Use completion to allow
10972  * halted scsi ops to resume.
10973  */
10974 static void
10975 scsih_pci_resume(struct pci_dev *pdev)
10976 {
10977 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10978 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10979 
10980 	ioc_info(ioc, "PCI error: resume callback!!\n");
10981 
10982 	mpt3sas_base_start_watchdog(ioc);
10983 	scsi_unblock_requests(ioc->shost);
10984 }
10985 
10986 /**
10987  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10988  * @pdev: pointer to PCI device
10989  */
10990 static pci_ers_result_t
10991 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10992 {
10993 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10994 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10995 
10996 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10997 
10998 	/* TODO - dump whatever for debugging purposes */
10999 
11000 	/* This is called only if scsih_pci_error_detected returns
11001 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
11002 	 * works, no need to reset slot.
11003 	 */
11004 	return PCI_ERS_RESULT_RECOVERED;
11005 }
11006 
11007 /**
11008  * scsih_ncq_prio_supp - Check for NCQ command priority support
11009  * @sdev: scsi device struct
11010  *
11011  * This is called when a user indicates they would like to enable
11012  * ncq command priorities. This works only on SATA devices.
11013  */
11014 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
11015 {
11016 	unsigned char *buf;
11017 	bool ncq_prio_supp = false;
11018 
11019 	if (!scsi_device_supports_vpd(sdev))
11020 		return ncq_prio_supp;
11021 
11022 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
11023 	if (!buf)
11024 		return ncq_prio_supp;
11025 
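	/*
	 * The ATA Information VPD page (0x89) embeds the drive's IDENTIFY
	 * data; the byte 213 check below picks out the NCQ priority
	 * capability bit from it.
	 */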
11026 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
11027 		ncq_prio_supp = (buf[213] >> 4) & 1;
11028 
11029 	kfree(buf);
11030 	return ncq_prio_supp;
11031 }
11032 /*
11033  * The pci device ids are defined in mpi/mpi2_cnfg.h.
11034  */
11035 static const struct pci_device_id mpt3sas_pci_table[] = {
11036 	/* Spitfire ~ 2004 */
11037 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
11038 		PCI_ANY_ID, PCI_ANY_ID },
11039 	/* Falcon ~ 2008 */
11040 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
11041 		PCI_ANY_ID, PCI_ANY_ID },
11042 	/* Liberator ~ 2108 */
11043 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
11044 		PCI_ANY_ID, PCI_ANY_ID },
11045 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
11046 		PCI_ANY_ID, PCI_ANY_ID },
11047 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
11048 		PCI_ANY_ID, PCI_ANY_ID },
11049 	/* Meteor ~ 2116 */
11050 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
11051 		PCI_ANY_ID, PCI_ANY_ID },
11052 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
11053 		PCI_ANY_ID, PCI_ANY_ID },
11054 	/* Thunderbolt ~ 2208 */
11055 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
11056 		PCI_ANY_ID, PCI_ANY_ID },
11057 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
11058 		PCI_ANY_ID, PCI_ANY_ID },
11059 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
11060 		PCI_ANY_ID, PCI_ANY_ID },
11061 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
11062 		PCI_ANY_ID, PCI_ANY_ID },
11063 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
11064 		PCI_ANY_ID, PCI_ANY_ID },
11065 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
11066 		PCI_ANY_ID, PCI_ANY_ID },
11067 	/* Mustang ~ 2308 */
11068 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
11069 		PCI_ANY_ID, PCI_ANY_ID },
11070 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
11071 		PCI_ANY_ID, PCI_ANY_ID },
11072 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
11073 		PCI_ANY_ID, PCI_ANY_ID },
11074 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
11075 		PCI_ANY_ID, PCI_ANY_ID },
11076 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
11077 		PCI_ANY_ID, PCI_ANY_ID },
11078 	/* SSS6200 */
11079 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
11080 		PCI_ANY_ID, PCI_ANY_ID },
11081 	/* Fury ~ 3004 and 3008 */
11082 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
11083 		PCI_ANY_ID, PCI_ANY_ID },
11084 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
11085 		PCI_ANY_ID, PCI_ANY_ID },
11086 	/* Invader ~ 3108 */
11087 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
11088 		PCI_ANY_ID, PCI_ANY_ID },
11089 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
11090 		PCI_ANY_ID, PCI_ANY_ID },
11091 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
11092 		PCI_ANY_ID, PCI_ANY_ID },
11093 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
11094 		PCI_ANY_ID, PCI_ANY_ID },
11095 	/* Cutlass ~ 3216 and 3224 */
11096 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
11097 		PCI_ANY_ID, PCI_ANY_ID },
11098 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
11099 		PCI_ANY_ID, PCI_ANY_ID },
11100 	/* Intruder ~ 3316 and 3324 */
11101 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
11102 		PCI_ANY_ID, PCI_ANY_ID },
11103 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
11104 		PCI_ANY_ID, PCI_ANY_ID },
11105 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
11106 		PCI_ANY_ID, PCI_ANY_ID },
11107 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
11108 		PCI_ANY_ID, PCI_ANY_ID },
11109 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
11110 		PCI_ANY_ID, PCI_ANY_ID },
11111 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
11112 		PCI_ANY_ID, PCI_ANY_ID },
11113 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
11114 		PCI_ANY_ID, PCI_ANY_ID },
11115 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
11116 		PCI_ANY_ID, PCI_ANY_ID },
11117 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
11118 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
11119 		PCI_ANY_ID, PCI_ANY_ID },
11120 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
11121 		PCI_ANY_ID, PCI_ANY_ID },
11122 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
11123 		PCI_ANY_ID, PCI_ANY_ID },
11124 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
11125 		PCI_ANY_ID, PCI_ANY_ID },
11126 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
11127 		PCI_ANY_ID, PCI_ANY_ID },
11128 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
11129 		PCI_ANY_ID, PCI_ANY_ID },
11130 	/* Mercator ~ 3616*/
11131 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
11132 		PCI_ANY_ID, PCI_ANY_ID },
11133 
11134 	/* Aero SI 0x00E1 Configurable Secure
11135 	 * 0x00E2 Hard Secure
11136 	 */
11137 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
11138 		PCI_ANY_ID, PCI_ANY_ID },
11139 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
11140 		PCI_ANY_ID, PCI_ANY_ID },
11141 
11142 	/* Atlas PCIe Switch Management Port */
11143 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
11144 		PCI_ANY_ID, PCI_ANY_ID },
11145 
11146 	/* Sea SI 0x00E5 Configurable Secure
11147 	 * 0x00E6 Hard Secure
11148 	 */
11149 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
11150 		PCI_ANY_ID, PCI_ANY_ID },
11151 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
11152 		PCI_ANY_ID, PCI_ANY_ID },
11153 
11154 	{0}     /* Terminating entry */
11155 };
11156 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
11157 
11158 static struct pci_error_handlers _mpt3sas_err_handler = {
11159 	.error_detected	= scsih_pci_error_detected,
11160 	.mmio_enabled	= scsih_pci_mmio_enabled,
11161 	.slot_reset	= scsih_pci_slot_reset,
11162 	.resume		= scsih_pci_resume,
11163 };
11164 
11165 static struct pci_driver mpt3sas_driver = {
11166 	.name		= MPT3SAS_DRIVER_NAME,
11167 	.id_table	= mpt3sas_pci_table,
11168 	.probe		= _scsih_probe,
11169 	.remove		= scsih_remove,
11170 	.shutdown	= scsih_shutdown,
11171 	.err_handler	= &_mpt3sas_err_handler,
11172 #ifdef CONFIG_PM
11173 	.suspend	= scsih_suspend,
11174 	.resume		= scsih_resume,
11175 #endif
11176 };
11177 
11178 /**
11179  * scsih_init - main entry point for this driver.
11180  *
11181  * Return: 0 success, anything else error.
11182  */
11183 static int
11184 scsih_init(void)
11185 {
11186 	mpt2_ids = 0;
11187 	mpt3_ids = 0;
11188 
11189 	mpt3sas_base_initialize_callback_handler();
11190 
11191 	/* queuecommand callback handler */
11192 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11193 
11194 	/* task management callback handler */
11195 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11196 
11197 	/* base internal commands callback handler */
11198 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11199 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11200 	    mpt3sas_port_enable_done);
11201 
11202 	/* transport internal commands callback handler */
11203 	transport_cb_idx = mpt3sas_base_register_callback_handler(
11204 	    mpt3sas_transport_done);
11205 
11206 	/* scsih internal commands callback handler */
11207 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11208 
11209 	/* configuration page API internal commands callback handler */
11210 	config_cb_idx = mpt3sas_base_register_callback_handler(
11211 	    mpt3sas_config_done);
11212 
11213 	/* ctl module callback handler */
11214 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11215 
11216 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11217 	    _scsih_tm_tr_complete);
11218 
11219 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11220 	    _scsih_tm_volume_tr_complete);
11221 
11222 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11223 	    _scsih_sas_control_complete);
11224 
11225 	mpt3sas_init_debugfs();
11226 	return 0;
11227 }
11228 
11229 /**
11230  * scsih_exit - exit point for this driver (when it is a module).
11233  */
11234 static void
11235 scsih_exit(void)
11236 {
11237 
11238 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11239 	mpt3sas_base_release_callback_handler(tm_cb_idx);
11240 	mpt3sas_base_release_callback_handler(base_cb_idx);
11241 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11242 	mpt3sas_base_release_callback_handler(transport_cb_idx);
11243 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
11244 	mpt3sas_base_release_callback_handler(config_cb_idx);
11245 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
11246 
11247 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11248 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11249 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11250 
11251 	/* raid transport support */
11252 	if (hbas_to_enumerate != 1)
11253 		raid_class_release(mpt3sas_raid_template);
11254 	if (hbas_to_enumerate != 2)
11255 		raid_class_release(mpt2sas_raid_template);
11256 	sas_release_transport(mpt3sas_transport_template);
11257 	mpt3sas_exit_debugfs();
11258 }
11259 
11260 /**
11261  * _mpt3sas_init - main entry point for this driver.
11262  *
11263  * Return: 0 success, anything else error.
11264  */
11265 static int __init
11266 _mpt3sas_init(void)
11267 {
11268 	int error;
11269 
11270 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11271 					MPT3SAS_DRIVER_VERSION);
11272 
11273 	mpt3sas_transport_template =
11274 	    sas_attach_transport(&mpt3sas_transport_functions);
11275 	if (!mpt3sas_transport_template)
11276 		return -ENODEV;
11277 
11278 	/* No need to attach the mpt3sas raid functions template
11279 	 * if the hbas_to_enumerate value is one.
11280 	 */
11281 	if (hbas_to_enumerate != 1) {
11282 		mpt3sas_raid_template =
11283 				raid_class_attach(&mpt3sas_raid_functions);
11284 		if (!mpt3sas_raid_template) {
11285 			sas_release_transport(mpt3sas_transport_template);
11286 			return -ENODEV;
11287 		}
11288 	}
11289 
11290 	/* No need to attach the mpt2sas raid functions template
11291 	 * if the hbas_to_enumerate value is two.
11292 	 */
11293 	if (hbas_to_enumerate != 2) {
11294 		mpt2sas_raid_template =
11295 				raid_class_attach(&mpt2sas_raid_functions);
11296 		if (!mpt2sas_raid_template) {
11297 			sas_release_transport(mpt3sas_transport_template);
11298 			return -ENODEV;
11299 		}
11300 	}
11301 
11302 	error = scsih_init();
11303 	if (error) {
11304 		scsih_exit();
11305 		return error;
11306 	}
11307 
11308 	mpt3sas_ctl_init(hbas_to_enumerate);
11309 
11310 	error = pci_register_driver(&mpt3sas_driver);
11311 	if (error)
11312 		scsih_exit();
11313 
11314 	return error;
11315 }
11316 
11317 /**
11318  * _mpt3sas_exit - exit point for this driver (when it is a module).
11319  *
11320  */
11321 static void __exit
11322 _mpt3sas_exit(void)
11323 {
11324 	pr_info("mpt3sas version %s unloading\n",
11325 				MPT3SAS_DRIVER_VERSION);
11326 
11327 	mpt3sas_ctl_exit(hbas_to_enumerate);
11328 
11329 	pci_unregister_driver(&mpt3sas_driver);
11330 
11331 	scsih_exit();
11332 }
11333 
11334 module_init(_mpt3sas_init);
11335 module_exit(_mpt3sas_exit);
11336