1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/pci-aspm.h>
55 #include <linux/interrupt.h>
56 #include <linux/aer.h>
57 #include <linux/raid_class.h>
58 #include <asm/unaligned.h>
59 
60 #include "mpt3sas_base.h"
61 
62 #define RAID_CHANNEL 1
63 
64 #define PCIE_CHANNEL 2
65 
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 	struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70 
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 	struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 	u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 	struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
86 
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
92 
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
104 
105 static u8 tm_tr_cb_idx = -1;
106 static u8 tm_tr_volume_cb_idx = -1;
107 static u8 tm_sas_control_cb_idx = -1;
108 
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 	" bits for enabling additional logging info (default=0)");
113 
114 
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767, default=32767");
118 
119 
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
123 
124 /* scsi mid-layer global parameter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129 
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 		  1 - enumerates only SAS 2.0 generation HBAs\n \
135 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
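
/*
 * Illustrative usage only (not code from this file): hbas_to_enumerate is a
 * load-time choice, e.g. "modprobe mpt3sas hbas_to_enumerate=2" to expose
 * only SAS 3.0 generation HBAs while hiding SAS 2.0 ones. The parameter name
 * is real; the example command line is just a sketch.
 */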
136 
137 /* diag_buffer_enable is bitwise
138  * bit 0 set = TRACE
139  * bit 1 set = SNAPSHOT
140  * bit 2 set = EXTENDED
141  *
142  * Any combination of these bits can be set
143  */
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
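
/*
 * The TRACE/SNAPSHOT/EXTENDED values above combine bitwise, so for example
 * loading with diag_buffer_enable=3 requests both TRACE (bit 0) and SNAPSHOT
 * (bit 1) buffers. The example value is illustrative only.
 */
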
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151 
152 
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 	"Enable sdev max qd as can_queue, def=disabled(0)");
162 
163 /* raid transport support */
164 static struct raid_template *mpt3sas_raid_template;
165 static struct raid_template *mpt2sas_raid_template;
166 
167 
168 /**
169  * struct sense_info - common structure for obtaining sense keys
170  * @skey: sense key
171  * @asc: additional sense code
172  * @ascq: additional sense code qualifier
173  */
174 struct sense_info {
175 	u8 skey;
176 	u8 asc;
177 	u8 ascq;
178 };
179 
180 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
181 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
182 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
183 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
184 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
185 /**
186  * struct fw_event_work - firmware event struct
187  * @list: linked list member (ioc->fw_event_list)
188  * @work: work object (queued on ioc->firmware_event_thread)
189  * @ioc: per adapter object
190  * @device_handle: device handle
191  * @VF_ID: virtual function id
192  * @VP_ID: virtual port id
193  * @ignore: flag meaning this event has been marked to ignore
194  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
195  * @refcount: kref for this event
196  * @event_data: reply event data payload follows
197  *
198  * This object is stored on ioc->fw_event_list.
199  */
200 struct fw_event_work {
201 	struct list_head	list;
202 	struct work_struct	work;
203 
204 	struct MPT3SAS_ADAPTER *ioc;
205 	u16			device_handle;
206 	u8			VF_ID;
207 	u8			VP_ID;
208 	u8			ignore;
209 	u16			event;
210 	struct kref		refcount;
211 	char			event_data[] __aligned(4);
212 };
213 
214 static void fw_event_work_free(struct kref *r)
215 {
216 	kfree(container_of(r, struct fw_event_work, refcount));
217 }
218 
219 static void fw_event_work_get(struct fw_event_work *fw_work)
220 {
221 	kref_get(&fw_work->refcount);
222 }
223 
224 static void fw_event_work_put(struct fw_event_work *fw_work)
225 {
226 	kref_put(&fw_work->refcount, fw_event_work_free);
227 }
228 
229 static struct fw_event_work *alloc_fw_event_work(int len)
230 {
231 	struct fw_event_work *fw_event;
232 
233 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
234 	if (!fw_event)
235 		return NULL;
236 
237 	kref_init(&fw_event->refcount);
238 	return fw_event;
239 }
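
/*
 * Reference-counting sketch for fw_event_work (a generic illustration of the
 * kref pattern above, not a literal call sequence from this file):
 *
 *	fw_event = alloc_fw_event_work(len);	// refcount starts at 1
 *	fw_event_work_get(fw_event);		// extra reference for another user
 *	...
 *	fw_event_work_put(fw_event);		// that user drops its reference
 *	fw_event_work_put(fw_event);		// final put frees via fw_event_work_free()
 */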
240 
241 /**
242  * struct _scsi_io_transfer - scsi io transfer
243  * @handle: sas device handle (assigned by firmware)
244  * @is_raid: flag set for hidden raid components
245  * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
246  * @data_length: data transfer length
247  * @data_dma: dma pointer to data
248  * @sense: sense data
249  * @lun: lun number
250  * @cdb_length: cdb length
251  * @cdb: cdb contents
252  * @timeout: timeout for this command
253  * @VF_ID: virtual function id
254  * @VP_ID: virtual port id
255  * @valid_reply: flag set for reply message
256  * @sense_length: sense length
257  * @ioc_status: ioc status
258  * @scsi_state: scsi state
259  * @scsi_status: scsi status
260  * @log_info: log information
261  * @transfer_length: data length transfer when there is a reply message
262  *
263  * Used for sending internal scsi commands to devices within this module.
264  * Refer to _scsi_send_scsi_io().
265  */
266 struct _scsi_io_transfer {
267 	u16	handle;
268 	u8	is_raid;
269 	enum dma_data_direction dir;
270 	u32	data_length;
271 	dma_addr_t data_dma;
272 	u8	sense[SCSI_SENSE_BUFFERSIZE];
273 	u32	lun;
274 	u8	cdb_length;
275 	u8	cdb[32];
276 	u8	timeout;
277 	u8	VF_ID;
278 	u8	VP_ID;
279 	u8	valid_reply;
280 	/* the following fields are only valid when 'valid_reply = 1' */
281 	u32	sense_length;
282 	u16	ioc_status;
283 	u8	scsi_state;
284 	u8	scsi_status;
285 	u32	log_info;
286 	u32	transfer_length;
287 };
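
/*
 * Minimal sketch (illustrative only) of filling struct _scsi_io_transfer for
 * an internal 6-byte INQUIRY; the surrounding buffer allocation, the device
 * handle and the actual send call are assumptions, not code from this file:
 *
 *	struct _scsi_io_transfer xfer;
 *
 *	memset(&xfer, 0, sizeof(xfer));
 *	xfer.handle = handle;		// firmware-assigned device handle
 *	xfer.dir = DMA_FROM_DEVICE;
 *	xfer.data_length = 36;
 *	xfer.data_dma = data_dma;	// DMA address of a 36-byte buffer
 *	xfer.cdb_length = 6;
 *	xfer.cdb[0] = INQUIRY;		// 0x12
 *	xfer.cdb[4] = 36;		// allocation length
 *	xfer.timeout = 10;		// seconds
 *	// on completion, check xfer.valid_reply, xfer.ioc_status and
 *	// xfer.scsi_status before trusting xfer.transfer_length
 */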
288 
289 /**
290  * _scsih_set_debug_level - global setting of ioc->logging_level.
291  * @val: new value for the logging_level module parameter
292  * @kp: kernel parameter descriptor
293  *
294  * Note: The logging levels are defined in mpt3sas_debug.h.
295  */
296 static int
297 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
298 {
299 	int ret = param_set_int(val, kp);
300 	struct MPT3SAS_ADAPTER *ioc;
301 
302 	if (ret)
303 		return ret;
304 
305 	pr_info("setting logging_level(0x%08x)\n", logging_level);
306 	spin_lock(&gioc_lock);
307 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
308 		ioc->logging_level = logging_level;
309 	spin_unlock(&gioc_lock);
310 	return 0;
311 }
312 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
313 	&logging_level, 0644);
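
/*
 * Because logging_level is registered above with mode 0644 via
 * module_param_call(), it can also be changed at runtime; an illustrative
 * (not from this file) shell example:
 *
 *	echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 *
 * which runs _scsih_set_debug_level() and applies the value to every adapter
 * on mpt3sas_ioc_list.
 */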
314 
315 /**
316  * _scsih_srch_boot_sas_address - search based on sas_address
317  * @sas_address: sas address
318  * @boot_device: boot device object from bios page 2
319  *
320  * Return: 1 when there's a match, 0 means no match.
321  */
322 static inline int
323 _scsih_srch_boot_sas_address(u64 sas_address,
324 	Mpi2BootDeviceSasWwid_t *boot_device)
325 {
326 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
327 }
328 
329 /**
330  * _scsih_srch_boot_device_name - search based on device name
331  * @device_name: device name specified in the IDENTIFY frame
332  * @boot_device: boot device object from bios page 2
333  *
334  * Return: 1 when there's a match, 0 means no match.
335  */
336 static inline int
337 _scsih_srch_boot_device_name(u64 device_name,
338 	Mpi2BootDeviceDeviceName_t *boot_device)
339 {
340 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
341 }
342 
343 /**
344  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
345  * @enclosure_logical_id: enclosure logical id
346  * @slot_number: slot number
347  * @boot_device: boot device object from bios page 2
348  *
349  * Return: 1 when there's a match, 0 means no match.
350  */
351 static inline int
352 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
353 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
354 {
355 	return (enclosure_logical_id == le64_to_cpu(boot_device->
356 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
357 	    SlotNumber)) ? 1 : 0;
358 }
359 
360 /**
361  * _scsih_is_boot_device - search for matching boot device.
362  * @sas_address: sas address
363  * @device_name: device name specified in the IDENTIFY frame
364  * @enclosure_logical_id: enclosure logical id
365  * @slot: slot number
366  * @form: specifies boot device form
367  * @boot_device: boot device object from bios page 2
368  *
369  * Return: 1 when there's a match, 0 means no match.
370  */
371 static int
372 _scsih_is_boot_device(u64 sas_address, u64 device_name,
373 	u64 enclosure_logical_id, u16 slot, u8 form,
374 	Mpi2BiosPage2BootDevice_t *boot_device)
375 {
376 	int rc = 0;
377 
378 	switch (form) {
379 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
380 		if (!sas_address)
381 			break;
382 		rc = _scsih_srch_boot_sas_address(
383 		    sas_address, &boot_device->SasWwid);
384 		break;
385 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
386 		if (!enclosure_logical_id)
387 			break;
388 		rc = _scsih_srch_boot_encl_slot(
389 		    enclosure_logical_id,
390 		    slot, &boot_device->EnclosureSlot);
391 		break;
392 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
393 		if (!device_name)
394 			break;
395 		rc = _scsih_srch_boot_device_name(
396 		    device_name, &boot_device->DeviceName);
397 		break;
398 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
399 		break;
400 	}
401 
402 	return rc;
403 }
404 
405 /**
406  * _scsih_get_sas_address - get the sas_address for the given device handle
407  * @ioc: per adapter object
408  * @handle: device handle
409  * @sas_address: sas address
410  *
411  * Return: 0 success, non-zero when failure
412  */
413 static int
414 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
415 	u64 *sas_address)
416 {
417 	Mpi2SasDevicePage0_t sas_device_pg0;
418 	Mpi2ConfigReply_t mpi_reply;
419 	u32 ioc_status;
420 
421 	*sas_address = 0;
422 
423 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
424 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
425 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
426 			__FILE__, __LINE__, __func__);
427 		return -ENXIO;
428 	}
429 
430 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
431 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
432 		/* Devices directly attached to the HBA report the HBA's SAS
433 		 * address; the vSES device instead reports its own SAS address.
434 		 */
435 		if ((handle <= ioc->sas_hba.num_phys) &&
436 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
437 		   MPI2_SAS_DEVICE_INFO_SEP)))
438 			*sas_address = ioc->sas_hba.sas_address;
439 		else
440 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
441 		return 0;
442 	}
443 
444 	/* we hit this because the given parent handle doesn't exist */
445 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
446 		return -ENXIO;
447 
448 	/* else error case */
449 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
450 		handle, ioc_status, __FILE__, __LINE__, __func__);
451 	return -EIO;
452 }
453 
454 /**
455  * _scsih_determine_boot_device - determine boot device.
456  * @ioc: per adapter object
457  * @device: sas_device or pcie_device object
458  * @channel: SAS or PCIe channel
459  *
460  * Determines whether this device should be the first device reported to
461  * scsi-ml or the sas transport; this is used for persistent boot device
462  * support. There are primary, alternate, and current entries in bios page 2.
463  * The order of priority is primary, alternate, then current. This routine
464  * saves the corresponding device object; the saved data is used later in
465  * _scsih_probe_boot_devices().
466  */
467 static void
468 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
469 	u32 channel)
470 {
471 	struct _sas_device *sas_device;
472 	struct _pcie_device *pcie_device;
473 	struct _raid_device *raid_device;
474 	u64 sas_address;
475 	u64 device_name;
476 	u64 enclosure_logical_id;
477 	u16 slot;
478 
479 	 /* only process this function when driver loads */
480 	if (!ioc->is_driver_loading)
481 		return;
482 
483 	 /* no Bios, return immediately */
484 	if (!ioc->bios_pg3.BiosVersion)
485 		return;
486 
487 	if (channel == RAID_CHANNEL) {
488 		raid_device = device;
489 		sas_address = raid_device->wwid;
490 		device_name = 0;
491 		enclosure_logical_id = 0;
492 		slot = 0;
493 	} else if (channel == PCIE_CHANNEL) {
494 		pcie_device = device;
495 		sas_address = pcie_device->wwid;
496 		device_name = 0;
497 		enclosure_logical_id = 0;
498 		slot = 0;
499 	} else {
500 		sas_device = device;
501 		sas_address = sas_device->sas_address;
502 		device_name = sas_device->device_name;
503 		enclosure_logical_id = sas_device->enclosure_logical_id;
504 		slot = sas_device->slot;
505 	}
506 
507 	if (!ioc->req_boot_device.device) {
508 		if (_scsih_is_boot_device(sas_address, device_name,
509 		    enclosure_logical_id, slot,
510 		    (ioc->bios_pg2.ReqBootDeviceForm &
511 		    MPI2_BIOSPAGE2_FORM_MASK),
512 		    &ioc->bios_pg2.RequestedBootDevice)) {
513 			dinitprintk(ioc,
514 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
515 					     __func__, (u64)sas_address));
516 			ioc->req_boot_device.device = device;
517 			ioc->req_boot_device.channel = channel;
518 		}
519 	}
520 
521 	if (!ioc->req_alt_boot_device.device) {
522 		if (_scsih_is_boot_device(sas_address, device_name,
523 		    enclosure_logical_id, slot,
524 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
525 		    MPI2_BIOSPAGE2_FORM_MASK),
526 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
527 			dinitprintk(ioc,
528 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
529 					     __func__, (u64)sas_address));
530 			ioc->req_alt_boot_device.device = device;
531 			ioc->req_alt_boot_device.channel = channel;
532 		}
533 	}
534 
535 	if (!ioc->current_boot_device.device) {
536 		if (_scsih_is_boot_device(sas_address, device_name,
537 		    enclosure_logical_id, slot,
538 		    (ioc->bios_pg2.CurrentBootDeviceForm &
539 		    MPI2_BIOSPAGE2_FORM_MASK),
540 		    &ioc->bios_pg2.CurrentBootDevice)) {
541 			dinitprintk(ioc,
542 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
543 					     __func__, (u64)sas_address));
544 			ioc->current_boot_device.device = device;
545 			ioc->current_boot_device.channel = channel;
546 		}
547 	}
548 }
549 
550 static struct _sas_device *
551 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
552 		struct MPT3SAS_TARGET *tgt_priv)
553 {
554 	struct _sas_device *ret;
555 
556 	assert_spin_locked(&ioc->sas_device_lock);
557 
558 	ret = tgt_priv->sas_dev;
559 	if (ret)
560 		sas_device_get(ret);
561 
562 	return ret;
563 }
564 
565 static struct _sas_device *
566 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
567 		struct MPT3SAS_TARGET *tgt_priv)
568 {
569 	struct _sas_device *ret;
570 	unsigned long flags;
571 
572 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
573 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
574 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
575 
576 	return ret;
577 }
578 
579 static struct _pcie_device *
580 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
581 	struct MPT3SAS_TARGET *tgt_priv)
582 {
583 	struct _pcie_device *ret;
584 
585 	assert_spin_locked(&ioc->pcie_device_lock);
586 
587 	ret = tgt_priv->pcie_dev;
588 	if (ret)
589 		pcie_device_get(ret);
590 
591 	return ret;
592 }
593 
594 /**
595  * mpt3sas_get_pdev_from_target - pcie device search
596  * @ioc: per adapter object
597  * @tgt_priv: starget private object
598  *
599  * Context: This function will acquire ioc->pcie_device_lock and release it
600  * before returning the pcie_device object.
601  *
602  * This searches for the pcie_device attached to the target and returns it.
603  */
604 static struct _pcie_device *
605 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
606 	struct MPT3SAS_TARGET *tgt_priv)
607 {
608 	struct _pcie_device *ret;
609 	unsigned long flags;
610 
611 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
612 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
613 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
614 
615 	return ret;
616 }
617 
618 struct _sas_device *
619 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
620 					u64 sas_address)
621 {
622 	struct _sas_device *sas_device;
623 
624 	assert_spin_locked(&ioc->sas_device_lock);
625 
626 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
627 		if (sas_device->sas_address == sas_address)
628 			goto found_device;
629 
630 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
631 		if (sas_device->sas_address == sas_address)
632 			goto found_device;
633 
634 	return NULL;
635 
636 found_device:
637 	sas_device_get(sas_device);
638 	return sas_device;
639 }
640 
641 /**
642  * mpt3sas_get_sdev_by_addr - sas device search
643  * @ioc: per adapter object
644  * @sas_address: sas address
645  * Context: This function will acquire ioc->sas_device_lock and release it
646  * before returning the sas_device object.
647  *
648  * This searches for a sas_device based on sas_address and returns it.
649  */
650 struct _sas_device *
651 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
652 	u64 sas_address)
653 {
654 	struct _sas_device *sas_device;
655 	unsigned long flags;
656 
657 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
658 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
659 			sas_address);
660 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
661 
662 	return sas_device;
663 }
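
/*
 * Usage note (a sketch, not code from this file): the returned sas_device
 * carries a reference taken via sas_device_get(), so callers are expected to
 * drop it with sas_device_put() when they are done:
 *
 *	sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address);
 *	if (sas_device) {
 *		...use sas_device...
 *		sas_device_put(sas_device);
 *	}
 */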
664 
665 static struct _sas_device *
666 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
667 {
668 	struct _sas_device *sas_device;
669 
670 	assert_spin_locked(&ioc->sas_device_lock);
671 
672 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
673 		if (sas_device->handle == handle)
674 			goto found_device;
675 
676 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
677 		if (sas_device->handle == handle)
678 			goto found_device;
679 
680 	return NULL;
681 
682 found_device:
683 	sas_device_get(sas_device);
684 	return sas_device;
685 }
686 
687 /**
688  * mpt3sas_get_sdev_by_handle - sas device search
689  * @ioc: per adapter object
690  * @handle: sas device handle (assigned by firmware)
691  * Context: This function will acquire ioc->sas_device_lock and release it
692  * before returning the sas_device object.
693  *
694  * This searches for a sas_device based on the firmware handle and returns it.
695  */
696 struct _sas_device *
697 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
698 {
699 	struct _sas_device *sas_device;
700 	unsigned long flags;
701 
702 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
703 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
704 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
705 
706 	return sas_device;
707 }
708 
709 /**
710  * _scsih_display_enclosure_chassis_info - display device location info
711  * @ioc: per adapter object
712  * @sas_device: per sas device object
713  * @sdev: scsi device struct
714  * @starget: scsi target struct
715  */
716 static void
717 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
718 	struct _sas_device *sas_device, struct scsi_device *sdev,
719 	struct scsi_target *starget)
720 {
721 	if (sdev) {
722 		if (sas_device->enclosure_handle != 0)
723 			sdev_printk(KERN_INFO, sdev,
724 			    "enclosure logical id (0x%016llx), slot(%d) \n",
725 			    (unsigned long long)
726 			    sas_device->enclosure_logical_id,
727 			    sas_device->slot);
728 		if (sas_device->connector_name[0] != '\0')
729 			sdev_printk(KERN_INFO, sdev,
730 			    "enclosure level(0x%04x), connector name( %s)\n",
731 			    sas_device->enclosure_level,
732 			    sas_device->connector_name);
733 		if (sas_device->is_chassis_slot_valid)
734 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
735 			    sas_device->chassis_slot);
736 	} else if (starget) {
737 		if (sas_device->enclosure_handle != 0)
738 			starget_printk(KERN_INFO, starget,
739 			    "enclosure logical id(0x%016llx), slot(%d) \n",
740 			    (unsigned long long)
741 			    sas_device->enclosure_logical_id,
742 			    sas_device->slot);
743 		if (sas_device->connector_name[0] != '\0')
744 			starget_printk(KERN_INFO, starget,
745 			    "enclosure level(0x%04x), connector name( %s)\n",
746 			    sas_device->enclosure_level,
747 			    sas_device->connector_name);
748 		if (sas_device->is_chassis_slot_valid)
749 			starget_printk(KERN_INFO, starget,
750 			    "chassis slot(0x%04x)\n",
751 			    sas_device->chassis_slot);
752 	} else {
753 		if (sas_device->enclosure_handle != 0)
754 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
755 				 (u64)sas_device->enclosure_logical_id,
756 				 sas_device->slot);
757 		if (sas_device->connector_name[0] != '\0')
758 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
759 				 sas_device->enclosure_level,
760 				 sas_device->connector_name);
761 		if (sas_device->is_chassis_slot_valid)
762 			ioc_info(ioc, "chassis slot(0x%04x)\n",
763 				 sas_device->chassis_slot);
764 	}
765 }
766 
767 /**
768  * _scsih_sas_device_remove - remove sas_device from list.
769  * @ioc: per adapter object
770  * @sas_device: the sas_device object
771  * Context: This function will acquire ioc->sas_device_lock.
772  *
773  * If sas_device is on the list, remove it and decrement its reference count.
774  */
775 static void
776 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
777 	struct _sas_device *sas_device)
778 {
779 	unsigned long flags;
780 
781 	if (!sas_device)
782 		return;
783 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
784 		 sas_device->handle, (u64)sas_device->sas_address);
785 
786 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
787 
788 	/*
789 	 * The lock serializes access to the list, but we still need to verify
790 	 * that nobody removed the entry while we were waiting on the lock.
791 	 */
792 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
793 	if (!list_empty(&sas_device->list)) {
794 		list_del_init(&sas_device->list);
795 		sas_device_put(sas_device);
796 	}
797 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
798 }
799 
800 /**
801  * _scsih_device_remove_by_handle - removing device object by handle
802  * @ioc: per adapter object
803  * @handle: device handle
804  */
805 static void
806 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
807 {
808 	struct _sas_device *sas_device;
809 	unsigned long flags;
810 
811 	if (ioc->shost_recovery)
812 		return;
813 
814 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
815 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
816 	if (sas_device) {
817 		list_del_init(&sas_device->list);
818 		sas_device_put(sas_device);
819 	}
820 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
821 	if (sas_device) {
822 		_scsih_remove_device(ioc, sas_device);
823 		sas_device_put(sas_device);
824 	}
825 }
826 
827 /**
828  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
829  * @ioc: per adapter object
830  * @sas_address: device sas_address
831  */
832 void
833 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
834 	u64 sas_address)
835 {
836 	struct _sas_device *sas_device;
837 	unsigned long flags;
838 
839 	if (ioc->shost_recovery)
840 		return;
841 
842 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
843 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
844 	if (sas_device) {
845 		list_del_init(&sas_device->list);
846 		sas_device_put(sas_device);
847 	}
848 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
849 	if (sas_device) {
850 		_scsih_remove_device(ioc, sas_device);
851 		sas_device_put(sas_device);
852 	}
853 }
854 
855 /**
856  * _scsih_sas_device_add - insert sas_device to the list.
857  * @ioc: per adapter object
858  * @sas_device: the sas_device object
859  * Context: This function will acquire ioc->sas_device_lock.
860  *
861  * Adding new object to the ioc->sas_device_list.
862  */
863 static void
864 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
865 	struct _sas_device *sas_device)
866 {
867 	unsigned long flags;
868 
869 	dewtprintk(ioc,
870 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
871 			    __func__, sas_device->handle,
872 			    (u64)sas_device->sas_address));
873 
874 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
875 	    NULL, NULL));
876 
877 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
878 	sas_device_get(sas_device);
879 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
880 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
881 
882 	if (ioc->hide_drives) {
883 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
884 		return;
885 	}
886 
887 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
888 	     sas_device->sas_address_parent)) {
889 		_scsih_sas_device_remove(ioc, sas_device);
890 	} else if (!sas_device->starget) {
891 		/*
892 		 * When async scanning is enabled, it's not possible to remove
893 		 * devices while scanning is turned on, due to an oops in
894 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
895 		 */
896 		if (!ioc->is_driver_loading) {
897 			mpt3sas_transport_port_remove(ioc,
898 			    sas_device->sas_address,
899 			    sas_device->sas_address_parent);
900 			_scsih_sas_device_remove(ioc, sas_device);
901 		}
902 	} else
903 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
904 }
905 
906 /**
907  * _scsih_sas_device_init_add - insert sas_device to the list.
908  * @ioc: per adapter object
909  * @sas_device: the sas_device object
910  * Context: This function will acquire ioc->sas_device_lock.
911  *
912  * Adding new object at driver load time to the ioc->sas_device_init_list.
913  */
914 static void
915 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
916 	struct _sas_device *sas_device)
917 {
918 	unsigned long flags;
919 
920 	dewtprintk(ioc,
921 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
922 			    __func__, sas_device->handle,
923 			    (u64)sas_device->sas_address));
924 
925 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
926 	    NULL, NULL));
927 
928 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
929 	sas_device_get(sas_device);
930 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
931 	_scsih_determine_boot_device(ioc, sas_device, 0);
932 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
933 }
934 
935 
936 static struct _pcie_device *
937 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
938 {
939 	struct _pcie_device *pcie_device;
940 
941 	assert_spin_locked(&ioc->pcie_device_lock);
942 
943 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
944 		if (pcie_device->wwid == wwid)
945 			goto found_device;
946 
947 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
948 		if (pcie_device->wwid == wwid)
949 			goto found_device;
950 
951 	return NULL;
952 
953 found_device:
954 	pcie_device_get(pcie_device);
955 	return pcie_device;
956 }
957 
958 
959 /**
960  * mpt3sas_get_pdev_by_wwid - pcie device search
961  * @ioc: per adapter object
962  * @wwid: wwid
963  *
964  * Context: This function will acquire ioc->pcie_device_lock and release it
965  * before returning the pcie_device object.
966  *
967  * This searches for a pcie_device based on wwid and returns it.
968  */
969 static struct _pcie_device *
970 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
971 {
972 	struct _pcie_device *pcie_device;
973 	unsigned long flags;
974 
975 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
976 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
977 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
978 
979 	return pcie_device;
980 }
981 
982 
983 static struct _pcie_device *
984 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
985 	int channel)
986 {
987 	struct _pcie_device *pcie_device;
988 
989 	assert_spin_locked(&ioc->pcie_device_lock);
990 
991 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
992 		if (pcie_device->id == id && pcie_device->channel == channel)
993 			goto found_device;
994 
995 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
996 		if (pcie_device->id == id && pcie_device->channel == channel)
997 			goto found_device;
998 
999 	return NULL;
1000 
1001 found_device:
1002 	pcie_device_get(pcie_device);
1003 	return pcie_device;
1004 }
1005 
1006 static struct _pcie_device *
1007 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1008 {
1009 	struct _pcie_device *pcie_device;
1010 
1011 	assert_spin_locked(&ioc->pcie_device_lock);
1012 
1013 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1014 		if (pcie_device->handle == handle)
1015 			goto found_device;
1016 
1017 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1018 		if (pcie_device->handle == handle)
1019 			goto found_device;
1020 
1021 	return NULL;
1022 
1023 found_device:
1024 	pcie_device_get(pcie_device);
1025 	return pcie_device;
1026 }
1027 
1028 
1029 /**
1030  * mpt3sas_get_pdev_by_handle - pcie device search
1031  * @ioc: per adapter object
1032  * @handle: Firmware device handle
1033  *
1034  * Context: This function will acquire ioc->pcie_device_lock and release it
1035  * before returning the pcie_device object.
1036  *
1037  * This searches for a pcie_device based on the firmware device handle and
1038  * returns it.
1039  */
1040 struct _pcie_device *
1041 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1042 {
1043 	struct _pcie_device *pcie_device;
1044 	unsigned long flags;
1045 
1046 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1047 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1048 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1049 
1050 	return pcie_device;
1051 }
1052 
1053 /**
1054  * _scsih_pcie_device_remove - remove pcie_device from list.
1055  * @ioc: per adapter object
1056  * @pcie_device: the pcie_device object
1057  * Context: This function will acquire ioc->pcie_device_lock.
1058  *
1059  * If pcie_device is on the list, remove it and decrement its reference count.
1060  */
1061 static void
1062 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1063 	struct _pcie_device *pcie_device)
1064 {
1065 	unsigned long flags;
1066 	int was_on_pcie_device_list = 0;
1067 
1068 	if (!pcie_device)
1069 		return;
1070 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1071 		 pcie_device->handle, (u64)pcie_device->wwid);
1072 	if (pcie_device->enclosure_handle != 0)
1073 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1074 			 (u64)pcie_device->enclosure_logical_id,
1075 			 pcie_device->slot);
1076 	if (pcie_device->connector_name[0] != '\0')
1077 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1078 			 pcie_device->enclosure_level,
1079 			 pcie_device->connector_name);
1080 
1081 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1082 	if (!list_empty(&pcie_device->list)) {
1083 		list_del_init(&pcie_device->list);
1084 		was_on_pcie_device_list = 1;
1085 	}
1086 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1087 	if (was_on_pcie_device_list) {
1088 		kfree(pcie_device->serial_number);
1089 		pcie_device_put(pcie_device);
1090 	}
1091 }
1092 
1093 
1094 /**
1095  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1096  * @ioc: per adapter object
1097  * @handle: device handle
1098  */
1099 static void
1100 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1101 {
1102 	struct _pcie_device *pcie_device;
1103 	unsigned long flags;
1104 	int was_on_pcie_device_list = 0;
1105 
1106 	if (ioc->shost_recovery)
1107 		return;
1108 
1109 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1110 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1111 	if (pcie_device) {
1112 		if (!list_empty(&pcie_device->list)) {
1113 			list_del_init(&pcie_device->list);
1114 			was_on_pcie_device_list = 1;
1115 			pcie_device_put(pcie_device);
1116 		}
1117 	}
1118 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1119 	if (was_on_pcie_device_list) {
1120 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1121 		pcie_device_put(pcie_device);
1122 	}
1123 }
1124 
1125 /**
1126  * _scsih_pcie_device_add - add pcie_device object
1127  * @ioc: per adapter object
1128  * @pcie_device: pcie_device object
1129  *
1130  * This adds the pcie_device to the ioc->pcie_device_list linked list.
1131  */
1132 static void
1133 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1134 	struct _pcie_device *pcie_device)
1135 {
1136 	unsigned long flags;
1137 
1138 	dewtprintk(ioc,
1139 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1140 			    __func__,
1141 			    pcie_device->handle, (u64)pcie_device->wwid));
1142 	if (pcie_device->enclosure_handle != 0)
1143 		dewtprintk(ioc,
1144 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1145 				    __func__,
1146 				    (u64)pcie_device->enclosure_logical_id,
1147 				    pcie_device->slot));
1148 	if (pcie_device->connector_name[0] != '\0')
1149 		dewtprintk(ioc,
1150 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1151 				    __func__, pcie_device->enclosure_level,
1152 				    pcie_device->connector_name));
1153 
1154 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1155 	pcie_device_get(pcie_device);
1156 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1157 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1158 
1159 	if (pcie_device->access_status ==
1160 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1161 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1162 		return;
1163 	}
1164 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1165 		_scsih_pcie_device_remove(ioc, pcie_device);
1166 	} else if (!pcie_device->starget) {
1167 		if (!ioc->is_driver_loading) {
1168 /* TODO -- need to find out whether this condition will occur or not */
1169 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1170 		}
1171 	} else
1172 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1173 }
1174 
1175 /**
1176  * _scsih_pcie_device_init_add - insert pcie_device into the init list.
1177  * @ioc: per adapter object
1178  * @pcie_device: the pcie_device object
1179  * Context: This function will acquire ioc->pcie_device_lock.
1180  *
1181  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1182  */
1183 static void
1184 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1185 				struct _pcie_device *pcie_device)
1186 {
1187 	unsigned long flags;
1188 
1189 	dewtprintk(ioc,
1190 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1191 			    __func__,
1192 			    pcie_device->handle, (u64)pcie_device->wwid));
1193 	if (pcie_device->enclosure_handle != 0)
1194 		dewtprintk(ioc,
1195 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1196 				    __func__,
1197 				    (u64)pcie_device->enclosure_logical_id,
1198 				    pcie_device->slot));
1199 	if (pcie_device->connector_name[0] != '\0')
1200 		dewtprintk(ioc,
1201 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1202 				    __func__, pcie_device->enclosure_level,
1203 				    pcie_device->connector_name));
1204 
1205 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1206 	pcie_device_get(pcie_device);
1207 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1208 	if (pcie_device->access_status !=
1209 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1210 		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1211 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1212 }
1213 /**
1214  * _scsih_raid_device_find_by_id - raid device search
1215  * @ioc: per adapter object
1216  * @id: sas device target id
1217  * @channel: sas device channel
1218  * Context: Calling function should acquire ioc->raid_device_lock
1219  *
1220  * This searches for a raid_device based on target id and channel, and
1221  * returns it.
1222  */
1223 static struct _raid_device *
1224 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1225 {
1226 	struct _raid_device *raid_device, *r;
1227 
1228 	r = NULL;
1229 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1230 		if (raid_device->id == id && raid_device->channel == channel) {
1231 			r = raid_device;
1232 			goto out;
1233 		}
1234 	}
1235 
1236  out:
1237 	return r;
1238 }
1239 
1240 /**
1241  * mpt3sas_raid_device_find_by_handle - raid device search
1242  * @ioc: per adapter object
1243  * @handle: sas device handle (assigned by firmware)
1244  * Context: Calling function should acquire ioc->raid_device_lock
1245  *
1246  * This searches for a raid_device based on the firmware handle and
1247  * returns it.
1248  */
1249 struct _raid_device *
1250 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1251 {
1252 	struct _raid_device *raid_device, *r;
1253 
1254 	r = NULL;
1255 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1256 		if (raid_device->handle != handle)
1257 			continue;
1258 		r = raid_device;
1259 		goto out;
1260 	}
1261 
1262  out:
1263 	return r;
1264 }
1265 
1266 /**
1267  * _scsih_raid_device_find_by_wwid - raid device search
1268  * @ioc: per adapter object
1269  * @wwid: world wide identifier of the volume
1270  * Context: Calling function should acquire ioc->raid_device_lock
1271  *
1272  * This searches for a raid_device based on the volume wwid and
1273  * returns it.
1274  */
1275 static struct _raid_device *
1276 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1277 {
1278 	struct _raid_device *raid_device, *r;
1279 
1280 	r = NULL;
1281 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1282 		if (raid_device->wwid != wwid)
1283 			continue;
1284 		r = raid_device;
1285 		goto out;
1286 	}
1287 
1288  out:
1289 	return r;
1290 }
1291 
1292 /**
1293  * _scsih_raid_device_add - add raid_device object
1294  * @ioc: per adapter object
1295  * @raid_device: raid_device object
1296  *
1297  * This adds the raid_device to the ioc->raid_device_list linked list.
1298  */
1299 static void
1300 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1301 	struct _raid_device *raid_device)
1302 {
1303 	unsigned long flags;
1304 
1305 	dewtprintk(ioc,
1306 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1307 			    __func__,
1308 			    raid_device->handle, (u64)raid_device->wwid));
1309 
1310 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1311 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1312 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1313 }
1314 
1315 /**
1316  * _scsih_raid_device_remove - delete raid_device object
1317  * @ioc: per adapter object
1318  * @raid_device: raid_device object
1319  *
1320  */
1321 static void
1322 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1323 	struct _raid_device *raid_device)
1324 {
1325 	unsigned long flags;
1326 
1327 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1328 	list_del(&raid_device->list);
1329 	kfree(raid_device);
1330 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1331 }
1332 
1333 /**
1334  * mpt3sas_scsih_expander_find_by_handle - expander device search
1335  * @ioc: per adapter object
1336  * @handle: expander handle (assigned by firmware)
1337  * Context: Calling function should acquire ioc->sas_node_lock
1338  *
1339  * This searches for expander device based on handle, then returns the
1340  * sas_node object.
1341  */
1342 struct _sas_node *
1343 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1344 {
1345 	struct _sas_node *sas_expander, *r;
1346 
1347 	r = NULL;
1348 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1349 		if (sas_expander->handle != handle)
1350 			continue;
1351 		r = sas_expander;
1352 		goto out;
1353 	}
1354  out:
1355 	return r;
1356 }
1357 
1358 /**
1359  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1360  * @ioc: per adapter object
1361  * @handle: enclosure handle (assigned by firmware)
1362  * Context: Calling function should acquire ioc->sas_device_lock
1363  *
1364  * This searches for enclosure device based on handle, then returns the
1365  * enclosure object.
1366  */
1367 static struct _enclosure_node *
1368 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1369 {
1370 	struct _enclosure_node *enclosure_dev, *r;
1371 
1372 	r = NULL;
1373 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1374 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1375 			continue;
1376 		r = enclosure_dev;
1377 		goto out;
1378 	}
1379 out:
1380 	return r;
1381 }
1382 /**
1383  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1384  * @ioc: per adapter object
1385  * @sas_address: sas address
1386  * Context: Calling function should acquire ioc->sas_node_lock.
1387  *
1388  * This searches for expander device based on sas_address, then returns the
1389  * sas_node object.
1390  */
1391 struct _sas_node *
1392 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1393 	u64 sas_address)
1394 {
1395 	struct _sas_node *sas_expander, *r;
1396 
1397 	r = NULL;
1398 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1399 		if (sas_expander->sas_address != sas_address)
1400 			continue;
1401 		r = sas_expander;
1402 		goto out;
1403 	}
1404  out:
1405 	return r;
1406 }
1407 
1408 /**
1409  * _scsih_expander_node_add - insert expander device to the list.
1410  * @ioc: per adapter object
1411  * @sas_expander: the sas_device object
1412  * Context: This function will acquire ioc->sas_node_lock.
1413  *
1414  * Adding new object to the ioc->sas_expander_list.
1415  */
1416 static void
1417 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1418 	struct _sas_node *sas_expander)
1419 {
1420 	unsigned long flags;
1421 
1422 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1423 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1424 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1425 }
1426 
1427 /**
1428  * _scsih_is_end_device - determines if device is an end device
1429  * @device_info: bitfield providing information about the device.
1430  * Context: none
1431  *
1432  * Return: 1 if end device.
1433  */
1434 static int
1435 _scsih_is_end_device(u32 device_info)
1436 {
1437 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1438 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1439 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1440 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1441 		return 1;
1442 	else
1443 		return 0;
1444 }
1445 
1446 /**
1447  * _scsih_is_nvme_pciescsi_device - determines if
1448  *			device is a PCIe NVMe/SCSI device
1449  * @device_info: bitfield providing information about the device.
1450  * Context: none
1451  *
1452  * Return: 1 if the device is a PCIe NVMe or SCSI device.
1453  */
1454 static int
1455 _scsih_is_nvme_pciescsi_device(u32 device_info)
1456 {
1457 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1458 	    == MPI26_PCIE_DEVINFO_NVME) ||
1459 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1460 	    == MPI26_PCIE_DEVINFO_SCSI))
1461 		return 1;
1462 	else
1463 		return 0;
1464 }
1465 
1466 /**
1467  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1468  * @ioc: per adapter object
1469  * @smid: system request message index
1470  *
1471  * Return: the scmd pointer stored for @smid, or NULL when the smid is not
1472  * associated with an outstanding SCSI IO at the driver level.
1473  */
1474 struct scsi_cmnd *
1475 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1476 {
1477 	struct scsi_cmnd *scmd = NULL;
1478 	struct scsiio_tracker *st;
1479 	Mpi25SCSIIORequest_t *mpi_request;
1480 
1481 	if (smid > 0  &&
1482 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1483 		u32 unique_tag = smid - 1;
1484 
1485 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1486 
1487 		/*
1488 		 * If SCSI IO request is outstanding at driver level then
1489 		 * DevHandle field must be non-zero. If DevHandle is zero
1490 		 * then it means that this smid is free at driver level,
1491 		 * so return NULL.
1492 		 */
1493 		if (!mpi_request->DevHandle)
1494 			return scmd;
1495 
1496 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1497 		if (scmd) {
1498 			st = scsi_cmd_priv(scmd);
1499 			if (st->cb_idx == 0xFF || st->smid == 0)
1500 				scmd = NULL;
1501 		}
1502 	}
1503 	return scmd;
1504 }
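
/*
 * Illustrative caller pattern (a sketch, not code from this file): completion
 * paths look up the scmd for a finished smid and bail out when the smid no
 * longer maps to an outstanding request:
 *
 *	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 *	if (!scmd)
 *		return;		// nothing to complete for this smid
 */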
1505 
1506 /**
1507  * scsih_change_queue_depth - setting device queue depth
1508  * @sdev: scsi device struct
1509  * @qdepth: requested queue depth
1510  *
1511  * Return: queue depth.
1512  */
1513 static int
1514 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1515 {
1516 	struct Scsi_Host *shost = sdev->host;
1517 	int max_depth;
1518 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1519 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1520 	struct MPT3SAS_TARGET *sas_target_priv_data;
1521 	struct _sas_device *sas_device;
1522 	unsigned long flags;
1523 
1524 	max_depth = shost->can_queue;
1525 
1526 	/*
1527 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1528 	 * is disabled.
1529 	 */
1530 	if (ioc->enable_sdev_max_qd)
1531 		goto not_sata;
1532 
1533 	sas_device_priv_data = sdev->hostdata;
1534 	if (!sas_device_priv_data)
1535 		goto not_sata;
1536 	sas_target_priv_data = sas_device_priv_data->sas_target;
1537 	if (!sas_target_priv_data)
1538 		goto not_sata;
1539 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1540 		goto not_sata;
1541 
1542 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1543 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1544 	if (sas_device) {
1545 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1546 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1547 
1548 		sas_device_put(sas_device);
1549 	}
1550 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1551 
1552  not_sata:
1553 
1554 	if (!sdev->tagged_supported)
1555 		max_depth = 1;
1556 	if (qdepth > max_depth)
1557 		qdepth = max_depth;
1558 	return scsi_change_queue_depth(sdev, qdepth);
1559 }
1560 
1561 /**
1562  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1563  * @sdev: scsi device struct
1564  * @qdepth: requested queue depth
1565  *
1566  * Returns nothing.
1567  */
1568 void
1569 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1570 {
1571 	struct Scsi_Host *shost = sdev->host;
1572 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1573 
1574 	if (ioc->enable_sdev_max_qd)
1575 		qdepth = shost->can_queue;
1576 
1577 	scsih_change_queue_depth(sdev, qdepth);
1578 }
1579 
1580 /**
1581  * scsih_target_alloc - target add routine
1582  * @starget: scsi target struct
1583  *
1584  * Return: 0 if ok. Any other return is assumed to be an error and
1585  * the device is ignored.
1586  */
1587 static int
1588 scsih_target_alloc(struct scsi_target *starget)
1589 {
1590 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1591 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1592 	struct MPT3SAS_TARGET *sas_target_priv_data;
1593 	struct _sas_device *sas_device;
1594 	struct _raid_device *raid_device;
1595 	struct _pcie_device *pcie_device;
1596 	unsigned long flags;
1597 	struct sas_rphy *rphy;
1598 
1599 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1600 				       GFP_KERNEL);
1601 	if (!sas_target_priv_data)
1602 		return -ENOMEM;
1603 
1604 	starget->hostdata = sas_target_priv_data;
1605 	sas_target_priv_data->starget = starget;
1606 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1607 
1608 	/* RAID volumes */
1609 	if (starget->channel == RAID_CHANNEL) {
1610 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1611 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1612 		    starget->channel);
1613 		if (raid_device) {
1614 			sas_target_priv_data->handle = raid_device->handle;
1615 			sas_target_priv_data->sas_address = raid_device->wwid;
1616 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1617 			if (ioc->is_warpdrive)
1618 				sas_target_priv_data->raid_device = raid_device;
1619 			raid_device->starget = starget;
1620 		}
1621 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1622 		return 0;
1623 	}
1624 
1625 	/* PCIe devices */
1626 	if (starget->channel == PCIE_CHANNEL) {
1627 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1628 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1629 			starget->channel);
1630 		if (pcie_device) {
1631 			sas_target_priv_data->handle = pcie_device->handle;
1632 			sas_target_priv_data->sas_address = pcie_device->wwid;
1633 			sas_target_priv_data->pcie_dev = pcie_device;
1634 			pcie_device->starget = starget;
1635 			pcie_device->id = starget->id;
1636 			pcie_device->channel = starget->channel;
1637 			sas_target_priv_data->flags |=
1638 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1639 			if (pcie_device->fast_path)
1640 				sas_target_priv_data->flags |=
1641 					MPT_TARGET_FASTPATH_IO;
1642 		}
1643 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1644 		return 0;
1645 	}
1646 
1647 	/* sas/sata devices */
1648 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1649 	rphy = dev_to_rphy(starget->dev.parent);
1650 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1651 	   rphy->identify.sas_address);
1652 
1653 	if (sas_device) {
1654 		sas_target_priv_data->handle = sas_device->handle;
1655 		sas_target_priv_data->sas_address = sas_device->sas_address;
1656 		sas_target_priv_data->sas_dev = sas_device;
1657 		sas_device->starget = starget;
1658 		sas_device->id = starget->id;
1659 		sas_device->channel = starget->channel;
1660 		if (test_bit(sas_device->handle, ioc->pd_handles))
1661 			sas_target_priv_data->flags |=
1662 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1663 		if (sas_device->fast_path)
1664 			sas_target_priv_data->flags |=
1665 					MPT_TARGET_FASTPATH_IO;
1666 	}
1667 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1668 
1669 	return 0;
1670 }
1671 
1672 /**
1673  * scsih_target_destroy - target destroy routine
1674  * @starget: scsi target struct
1675  */
1676 static void
1677 scsih_target_destroy(struct scsi_target *starget)
1678 {
1679 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1680 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1681 	struct MPT3SAS_TARGET *sas_target_priv_data;
1682 	struct _sas_device *sas_device;
1683 	struct _raid_device *raid_device;
1684 	struct _pcie_device *pcie_device;
1685 	unsigned long flags;
1686 
1687 	sas_target_priv_data = starget->hostdata;
1688 	if (!sas_target_priv_data)
1689 		return;
1690 
1691 	if (starget->channel == RAID_CHANNEL) {
1692 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1693 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1694 		    starget->channel);
1695 		if (raid_device) {
1696 			raid_device->starget = NULL;
1697 			raid_device->sdev = NULL;
1698 		}
1699 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1700 		goto out;
1701 	}
1702 
1703 	if (starget->channel == PCIE_CHANNEL) {
1704 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1705 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1706 							sas_target_priv_data);
1707 		if (pcie_device && (pcie_device->starget == starget) &&
1708 			(pcie_device->id == starget->id) &&
1709 			(pcie_device->channel == starget->channel))
1710 			pcie_device->starget = NULL;
1711 
1712 		if (pcie_device) {
1713 			/*
1714 			 * Corresponding get() is in scsih_target_alloc()
1715 			 */
1716 			sas_target_priv_data->pcie_dev = NULL;
1717 			pcie_device_put(pcie_device);
1718 			pcie_device_put(pcie_device);
1719 		}
1720 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1721 		goto out;
1722 	}
1723 
1724 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1725 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1726 	if (sas_device && (sas_device->starget == starget) &&
1727 	    (sas_device->id == starget->id) &&
1728 	    (sas_device->channel == starget->channel))
1729 		sas_device->starget = NULL;
1730 
1731 	if (sas_device) {
1732 		/*
1733 		 * Corresponding get() is in scsih_target_alloc()
1734 		 */
1735 		sas_target_priv_data->sas_dev = NULL;
1736 		sas_device_put(sas_device);
1737 
1738 		sas_device_put(sas_device);
1739 	}
1740 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1741 
1742  out:
1743 	kfree(sas_target_priv_data);
1744 	starget->hostdata = NULL;
1745 }
1746 
1747 /**
1748  * scsih_slave_alloc - device add routine
1749  * @sdev: scsi device struct
1750  *
1751  * Return: 0 if ok. Any other return is assumed to be an error and
1752  * the device is ignored.
1753  */
1754 static int
1755 scsih_slave_alloc(struct scsi_device *sdev)
1756 {
1757 	struct Scsi_Host *shost;
1758 	struct MPT3SAS_ADAPTER *ioc;
1759 	struct MPT3SAS_TARGET *sas_target_priv_data;
1760 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1761 	struct scsi_target *starget;
1762 	struct _raid_device *raid_device;
1763 	struct _sas_device *sas_device;
1764 	struct _pcie_device *pcie_device;
1765 	unsigned long flags;
1766 
1767 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1768 				       GFP_KERNEL);
1769 	if (!sas_device_priv_data)
1770 		return -ENOMEM;
1771 
1772 	sas_device_priv_data->lun = sdev->lun;
1773 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1774 
1775 	starget = scsi_target(sdev);
1776 	sas_target_priv_data = starget->hostdata;
1777 	sas_target_priv_data->num_luns++;
1778 	sas_device_priv_data->sas_target = sas_target_priv_data;
1779 	sdev->hostdata = sas_device_priv_data;
1780 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1781 		sdev->no_uld_attach = 1;
1782 
1783 	shost = dev_to_shost(&starget->dev);
1784 	ioc = shost_priv(shost);
1785 	if (starget->channel == RAID_CHANNEL) {
1786 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1787 		raid_device = _scsih_raid_device_find_by_id(ioc,
1788 		    starget->id, starget->channel);
1789 		if (raid_device)
1790 			raid_device->sdev = sdev; /* raid is single lun */
1791 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1792 	}
1793 	if (starget->channel == PCIE_CHANNEL) {
1794 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1795 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1796 				sas_target_priv_data->sas_address);
1797 		if (pcie_device && (pcie_device->starget == NULL)) {
1798 			sdev_printk(KERN_INFO, sdev,
1799 			    "%s : pcie_device->starget set to starget @ %d\n",
1800 			    __func__, __LINE__);
1801 			pcie_device->starget = starget;
1802 		}
1803 
1804 		if (pcie_device)
1805 			pcie_device_put(pcie_device);
1806 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1807 
1808 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1809 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1810 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1811 					sas_target_priv_data->sas_address);
1812 		if (sas_device && (sas_device->starget == NULL)) {
1813 			sdev_printk(KERN_INFO, sdev,
1814 			"%s : sas_device->starget set to starget @ %d\n",
1815 			     __func__, __LINE__);
1816 			sas_device->starget = starget;
1817 		}
1818 
1819 		if (sas_device)
1820 			sas_device_put(sas_device);
1821 
1822 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1823 	}
1824 
1825 	return 0;
1826 }
1827 
1828 /**
1829  * scsih_slave_destroy - device destroy routine
1830  * @sdev: scsi device struct
1831  */
1832 static void
1833 scsih_slave_destroy(struct scsi_device *sdev)
1834 {
1835 	struct MPT3SAS_TARGET *sas_target_priv_data;
1836 	struct scsi_target *starget;
1837 	struct Scsi_Host *shost;
1838 	struct MPT3SAS_ADAPTER *ioc;
1839 	struct _sas_device *sas_device;
1840 	struct _pcie_device *pcie_device;
1841 	unsigned long flags;
1842 
1843 	if (!sdev->hostdata)
1844 		return;
1845 
1846 	starget = scsi_target(sdev);
1847 	sas_target_priv_data = starget->hostdata;
1848 	sas_target_priv_data->num_luns--;
1849 
1850 	shost = dev_to_shost(&starget->dev);
1851 	ioc = shost_priv(shost);
1852 
1853 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1854 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1855 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1856 				sas_target_priv_data);
1857 		if (pcie_device && !sas_target_priv_data->num_luns)
1858 			pcie_device->starget = NULL;
1859 
1860 		if (pcie_device)
1861 			pcie_device_put(pcie_device);
1862 
1863 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1864 
1865 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1866 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1867 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1868 				sas_target_priv_data);
1869 		if (sas_device && !sas_target_priv_data->num_luns)
1870 			sas_device->starget = NULL;
1871 
1872 		if (sas_device)
1873 			sas_device_put(sas_device);
1874 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1875 	}
1876 
1877 	kfree(sdev->hostdata);
1878 	sdev->hostdata = NULL;
1879 }
1880 
1881 /**
1882  * _scsih_display_sata_capabilities - display SATA capabilities
1883  * @ioc: per adapter object
1884  * @handle: device handle
1885  * @sdev: scsi device struct
1886  */
1887 static void
1888 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1889 	u16 handle, struct scsi_device *sdev)
1890 {
1891 	Mpi2ConfigReply_t mpi_reply;
1892 	Mpi2SasDevicePage0_t sas_device_pg0;
1893 	u32 ioc_status;
1894 	u16 flags;
1895 	u32 device_info;
1896 
1897 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1898 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1899 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1900 			__FILE__, __LINE__, __func__);
1901 		return;
1902 	}
1903 
1904 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1905 	    MPI2_IOCSTATUS_MASK;
1906 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1907 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1908 			__FILE__, __LINE__, __func__);
1909 		return;
1910 	}
1911 
1912 	flags = le16_to_cpu(sas_device_pg0.Flags);
1913 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1914 
1915 	sdev_printk(KERN_INFO, sdev,
1916 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1917 	    "sw_preserve(%s)\n",
1918 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1919 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1920 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1921 	    "n",
1922 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1923 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1924 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1925 }
1926 
1927 /*
1928  * raid transport support -
1929  * Enabled for SLES11 and newer. On older kernels the driver will panic
1930  * when it is unloaded and then loaded again; I believe that
1931  * raid_class_release() is not cleaning up properly.
1932  */
1933 
1934 /**
1935  * scsih_is_raid - return boolean indicating device is raid volume
1936  * @dev: the device struct object
1937  */
1938 static int
1939 scsih_is_raid(struct device *dev)
1940 {
1941 	struct scsi_device *sdev = to_scsi_device(dev);
1942 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1943 
1944 	if (ioc->is_warpdrive)
1945 		return 0;
1946 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1947 }
1948 
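/**
 * scsih_is_nvme - return boolean indicating device is NVMe (PCIe channel)
 * @dev: the device struct object
 */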
1949 static int
1950 scsih_is_nvme(struct device *dev)
1951 {
1952 	struct scsi_device *sdev = to_scsi_device(dev);
1953 
1954 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1955 }
1956 
1957 /**
1958  * scsih_get_resync - get raid volume resync percent complete
1959  * @dev: the device struct object
1960  */
1961 static void
1962 scsih_get_resync(struct device *dev)
1963 {
1964 	struct scsi_device *sdev = to_scsi_device(dev);
1965 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1966 	static struct _raid_device *raid_device;
1967 	unsigned long flags;
1968 	Mpi2RaidVolPage0_t vol_pg0;
1969 	Mpi2ConfigReply_t mpi_reply;
1970 	u32 volume_status_flags;
1971 	u8 percent_complete;
1972 	u16 handle;
1973 
1974 	percent_complete = 0;
1975 	handle = 0;
1976 	if (ioc->is_warpdrive)
1977 		goto out;
1978 
1979 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1980 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1981 	    sdev->channel);
1982 	if (raid_device) {
1983 		handle = raid_device->handle;
1984 		percent_complete = raid_device->percent_complete;
1985 	}
1986 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1987 
1988 	if (!handle)
1989 		goto out;
1990 
1991 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1992 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1993 	     sizeof(Mpi2RaidVolPage0_t))) {
1994 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1995 			__FILE__, __LINE__, __func__);
1996 		percent_complete = 0;
1997 		goto out;
1998 	}
1999 
2000 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2001 	if (!(volume_status_flags &
2002 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2003 		percent_complete = 0;
2004 
2005  out:
2006 
2007 	switch (ioc->hba_mpi_version_belonged) {
2008 	case MPI2_VERSION:
2009 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2010 		break;
2011 	case MPI25_VERSION:
2012 	case MPI26_VERSION:
2013 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2014 		break;
2015 	}
2016 }
2017 
2018 /**
2019  * scsih_get_state - get raid volume state
2020  * @dev: the device struct object
2021  */
2022 static void
2023 scsih_get_state(struct device *dev)
2024 {
2025 	struct scsi_device *sdev = to_scsi_device(dev);
2026 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2027 	static struct _raid_device *raid_device;
2028 	unsigned long flags;
2029 	Mpi2RaidVolPage0_t vol_pg0;
2030 	Mpi2ConfigReply_t mpi_reply;
2031 	u32 volstate;
2032 	enum raid_state state = RAID_STATE_UNKNOWN;
2033 	u16 handle = 0;
2034 
2035 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2036 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2037 	    sdev->channel);
2038 	if (raid_device)
2039 		handle = raid_device->handle;
2040 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2041 
2042 	if (!raid_device)
2043 		goto out;
2044 
2045 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2046 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2047 	     sizeof(Mpi2RaidVolPage0_t))) {
2048 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2049 			__FILE__, __LINE__, __func__);
2050 		goto out;
2051 	}
2052 
2053 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2054 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2055 		state = RAID_STATE_RESYNCING;
2056 		goto out;
2057 	}
2058 
2059 	switch (vol_pg0.VolumeState) {
2060 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2061 	case MPI2_RAID_VOL_STATE_ONLINE:
2062 		state = RAID_STATE_ACTIVE;
2063 		break;
2064 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2065 		state = RAID_STATE_DEGRADED;
2066 		break;
2067 	case MPI2_RAID_VOL_STATE_FAILED:
2068 	case MPI2_RAID_VOL_STATE_MISSING:
2069 		state = RAID_STATE_OFFLINE;
2070 		break;
2071 	}
2072  out:
2073 	switch (ioc->hba_mpi_version_belonged) {
2074 	case MPI2_VERSION:
2075 		raid_set_state(mpt2sas_raid_template, dev, state);
2076 		break;
2077 	case MPI25_VERSION:
2078 	case MPI26_VERSION:
2079 		raid_set_state(mpt3sas_raid_template, dev, state);
2080 		break;
2081 	}
2082 }
2083 
2084 /**
2085  * _scsih_set_level - set raid level
2086  * @ioc: per adapter object
2087  * @sdev: scsi device struct
2088  * @volume_type: volume type
2089  */
2090 static void
2091 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2092 	struct scsi_device *sdev, u8 volume_type)
2093 {
2094 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2095 
2096 	switch (volume_type) {
2097 	case MPI2_RAID_VOL_TYPE_RAID0:
2098 		level = RAID_LEVEL_0;
2099 		break;
2100 	case MPI2_RAID_VOL_TYPE_RAID10:
2101 		level = RAID_LEVEL_10;
2102 		break;
2103 	case MPI2_RAID_VOL_TYPE_RAID1E:
2104 		level = RAID_LEVEL_1E;
2105 		break;
2106 	case MPI2_RAID_VOL_TYPE_RAID1:
2107 		level = RAID_LEVEL_1;
2108 		break;
2109 	}
2110 
2111 	switch (ioc->hba_mpi_version_belonged) {
2112 	case MPI2_VERSION:
2113 		raid_set_level(mpt2sas_raid_template,
2114 			&sdev->sdev_gendev, level);
2115 		break;
2116 	case MPI25_VERSION:
2117 	case MPI26_VERSION:
2118 		raid_set_level(mpt3sas_raid_template,
2119 			&sdev->sdev_gendev, level);
2120 		break;
2121 	}
2122 }
2123 
2124 
2125 /**
2126  * _scsih_get_volume_capabilities - volume capabilities
2127  * @ioc: per adapter object
2128  * @raid_device: the raid_device object
2129  *
2130  * Return: 0 for success, else 1
2131  */
2132 static int
2133 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2134 	struct _raid_device *raid_device)
2135 {
2136 	Mpi2RaidVolPage0_t *vol_pg0;
2137 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2138 	Mpi2SasDevicePage0_t sas_device_pg0;
2139 	Mpi2ConfigReply_t mpi_reply;
2140 	u16 sz;
2141 	u8 num_pds;
2142 
2143 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2144 	    &num_pds)) || !num_pds) {
2145 		dfailprintk(ioc,
2146 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2147 				     __FILE__, __LINE__, __func__));
2148 		return 1;
2149 	}
2150 
2151 	raid_device->num_pds = num_pds;
2152 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2153 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2154 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2155 	if (!vol_pg0) {
2156 		dfailprintk(ioc,
2157 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2158 				     __FILE__, __LINE__, __func__));
2159 		return 1;
2160 	}
2161 
2162 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2163 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2164 		dfailprintk(ioc,
2165 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2166 				     __FILE__, __LINE__, __func__));
2167 		kfree(vol_pg0);
2168 		return 1;
2169 	}
2170 
2171 	raid_device->volume_type = vol_pg0->VolumeType;
2172 
2173 	/* figure out what the underlying devices are by
2174 	 * obtaining the device_info bits for the 1st device
2175 	 */
2176 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2177 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2178 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2179 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2180 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2181 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2182 			raid_device->device_info =
2183 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2184 		}
2185 	}
2186 
2187 	kfree(vol_pg0);
2188 	return 0;
2189 }
2190 
2191 /**
2192  * _scsih_enable_tlr - setting TLR flags
2193  * @ioc: per adapter object
2194  * @sdev: scsi device struct
2195  *
2196  * Enable Transaction Layer Retries for tape devices when
2197  * VPD page 0x90 is present.
2198  *
2199  */
2200 static void
2201 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2202 {
2203 
2204 	/* only for TAPE */
2205 	if (sdev->type != TYPE_TAPE)
2206 		return;
2207 
2208 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2209 		return;
2210 
2211 	sas_enable_tlr(sdev);
2212 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2213 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2216 }
2217 
2218 /**
2219  * scsih_slave_configure - device configure routine.
2220  * @sdev: scsi device struct
2221  *
2222  * Return: 0 if ok. Any other return is assumed to be an error and
2223  * the device is ignored.
2224  */
2225 static int
2226 scsih_slave_configure(struct scsi_device *sdev)
2227 {
2228 	struct Scsi_Host *shost = sdev->host;
2229 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2230 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2231 	struct MPT3SAS_TARGET *sas_target_priv_data;
2232 	struct _sas_device *sas_device;
2233 	struct _pcie_device *pcie_device;
2234 	struct _raid_device *raid_device;
2235 	unsigned long flags;
2236 	int qdepth;
2237 	u8 ssp_target = 0;
2238 	char *ds = "";
2239 	char *r_level = "";
2240 	u16 handle, volume_handle = 0;
2241 	u64 volume_wwid = 0;
2242 
2243 	qdepth = 1;
2244 	sas_device_priv_data = sdev->hostdata;
2245 	sas_device_priv_data->configured_lun = 1;
2246 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2247 	sas_target_priv_data = sas_device_priv_data->sas_target;
2248 	handle = sas_target_priv_data->handle;
2249 
2250 	/* raid volume handling */
2251 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2252 
2253 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2254 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2255 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2256 		if (!raid_device) {
2257 			dfailprintk(ioc,
2258 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2259 					     __FILE__, __LINE__, __func__));
2260 			return 1;
2261 		}
2262 
2263 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2264 			dfailprintk(ioc,
2265 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2266 					     __FILE__, __LINE__, __func__));
2267 			return 1;
2268 		}
2269 
2270 		/*
2271 		 * WARPDRIVE: Initialize the required data for Direct IO
2272 		 */
2273 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2274 
2275 		/* RAID Queue Depth Support
2276 		 * IS volume = underlying qdepth of drive type, either
2277 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2278 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2279 		 */
2280 		if (raid_device->device_info &
2281 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2282 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2283 			ds = "SSP";
2284 		} else {
2285 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2286 			if (raid_device->device_info &
2287 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2288 				ds = "SATA";
2289 			else
2290 				ds = "STP";
2291 		}
2292 
2293 		switch (raid_device->volume_type) {
2294 		case MPI2_RAID_VOL_TYPE_RAID0:
2295 			r_level = "RAID0";
2296 			break;
2297 		case MPI2_RAID_VOL_TYPE_RAID1E:
2298 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2299 			if (ioc->manu_pg10.OEMIdentifier &&
2300 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2301 			    MFG10_GF0_R10_DISPLAY) &&
2302 			    !(raid_device->num_pds % 2))
2303 				r_level = "RAID10";
2304 			else
2305 				r_level = "RAID1E";
2306 			break;
2307 		case MPI2_RAID_VOL_TYPE_RAID1:
2308 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2309 			r_level = "RAID1";
2310 			break;
2311 		case MPI2_RAID_VOL_TYPE_RAID10:
2312 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2313 			r_level = "RAID10";
2314 			break;
2315 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2316 		default:
2317 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2318 			r_level = "RAIDX";
2319 			break;
2320 		}
2321 
2322 		if (!ioc->hide_ir_msg)
2323 			sdev_printk(KERN_INFO, sdev,
2324 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2325 			    " pd_count(%d), type(%s)\n",
2326 			    r_level, raid_device->handle,
2327 			    (unsigned long long)raid_device->wwid,
2328 			    raid_device->num_pds, ds);
2329 
2330 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2331 			blk_queue_max_hw_sectors(sdev->request_queue,
2332 						MPT3SAS_RAID_MAX_SECTORS);
2333 			sdev_printk(KERN_INFO, sdev,
2334 					"Set queue's max_sector to: %u\n",
2335 						MPT3SAS_RAID_MAX_SECTORS);
2336 		}
2337 
2338 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2339 
2340 		/* raid transport support */
2341 		if (!ioc->is_warpdrive)
2342 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2343 		return 0;
2344 	}
2345 
2346 	/* non-raid handling */
2347 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2348 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2349 		    &volume_handle)) {
2350 			dfailprintk(ioc,
2351 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2352 					     __FILE__, __LINE__, __func__));
2353 			return 1;
2354 		}
2355 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2356 		    volume_handle, &volume_wwid)) {
2357 			dfailprintk(ioc,
2358 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2359 					     __FILE__, __LINE__, __func__));
2360 			return 1;
2361 		}
2362 	}
2363 
2364 	/* PCIe handling */
2365 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2366 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2367 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2368 				sas_device_priv_data->sas_target->sas_address);
2369 		if (!pcie_device) {
2370 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2371 			dfailprintk(ioc,
2372 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2373 					     __FILE__, __LINE__, __func__));
2374 			return 1;
2375 		}
2376 
2377 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2378 		ds = "NVMe";
2379 		sdev_printk(KERN_INFO, sdev,
2380 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2381 			ds, handle, (unsigned long long)pcie_device->wwid,
2382 			pcie_device->port_num);
2383 		if (pcie_device->enclosure_handle != 0)
2384 			sdev_printk(KERN_INFO, sdev,
2385 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2386 			ds,
2387 			(unsigned long long)pcie_device->enclosure_logical_id,
2388 			pcie_device->slot);
2389 		if (pcie_device->connector_name[0] != '\0')
2390 			sdev_printk(KERN_INFO, sdev,
2391 				"%s: enclosure level(0x%04x), "
2392 				"connector name( %s)\n", ds,
2393 				pcie_device->enclosure_level,
2394 				pcie_device->connector_name);
2395 
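		/* nvme_mdts is in bytes; blk_queue_max_hw_sectors() takes 512-byte sectors */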
2396 		if (pcie_device->nvme_mdts)
2397 			blk_queue_max_hw_sectors(sdev->request_queue,
2398 					pcie_device->nvme_mdts/512);
2399 
2400 		pcie_device_put(pcie_device);
2401 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2402 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2403 		/*
2404 		 * Enable the QUEUE_FLAG_NOMERGES flag so that IOs won't be
2405 		 * merged, avoiding the holes that merging would create.
2406 		 */
2407 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2408 				sdev->request_queue);
2409 		blk_queue_virt_boundary(sdev->request_queue,
2410 				ioc->page_size - 1);
2411 		return 0;
2412 	}
2413 
2414 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2415 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2416 	   sas_device_priv_data->sas_target->sas_address);
2417 	if (!sas_device) {
2418 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2419 		dfailprintk(ioc,
2420 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2421 				     __FILE__, __LINE__, __func__));
2422 		return 1;
2423 	}
2424 
2425 	sas_device->volume_handle = volume_handle;
2426 	sas_device->volume_wwid = volume_wwid;
2427 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2428 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2429 		ssp_target = 1;
2430 		if (sas_device->device_info &
2431 				MPI2_SAS_DEVICE_INFO_SEP) {
2432 			sdev_printk(KERN_WARNING, sdev,
2433 			"set ignore_delay_remove for handle(0x%04x)\n",
2434 			sas_device_priv_data->sas_target->handle);
2435 			sas_device_priv_data->ignore_delay_remove = 1;
2436 			ds = "SES";
2437 		} else
2438 			ds = "SSP";
2439 	} else {
2440 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2441 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2442 			ds = "STP";
2443 		else if (sas_device->device_info &
2444 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2445 			ds = "SATA";
2446 	}
2447 
2448 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
2449 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2450 	    ds, handle, (unsigned long long)sas_device->sas_address,
2451 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2452 
2453 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2454 
2455 	sas_device_put(sas_device);
2456 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2457 
2458 	if (!ssp_target)
2459 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2460 
2461 
2462 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2463 
2464 	if (ssp_target) {
2465 		sas_read_port_mode_page(sdev);
2466 		_scsih_enable_tlr(ioc, sdev);
2467 	}
2468 
2469 	return 0;
2470 }
2471 
2472 /**
2473  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2474  * @sdev: scsi device struct
2475  * @bdev: pointer to block device context
2476  * @capacity: device size (in 512 byte sectors)
2477  * @params: three element array to place output:
2478  *              params[0] number of heads (max 255)
2479  *              params[1] number of sectors (max 63)
2480  *              params[2] number of cylinders
2481  */
2482 static int
2483 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2484 	sector_t capacity, int params[])
2485 {
2486 	int		heads;
2487 	int		sectors;
2488 	sector_t	cylinders;
2489 	ulong		dummy;
2490 
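	/* default translation: 64 heads, 32 sectors per track */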
2491 	heads = 64;
2492 	sectors = 32;
2493 
2494 	dummy = heads * sectors;
2495 	cylinders = capacity;
2496 	sector_div(cylinders, dummy);
2497 
2498 	/*
2499 	 * Handle extended translation size for logical drives
2500 	 * > 1Gb
2501 	 */
2502 	if ((ulong)capacity >= 0x200000) {
2503 		heads = 255;
2504 		sectors = 63;
2505 		dummy = heads * sectors;
2506 		cylinders = capacity;
2507 		sector_div(cylinders, dummy);
2508 	}
2509 
2510 	/* return result */
2511 	params[0] = heads;
2512 	params[1] = sectors;
2513 	params[2] = cylinders;
2514 
2515 	return 0;
2516 }
2517 
2518 /**
2519  * _scsih_response_code - translation of device response code
2520  * @ioc: per adapter object
2521  * @response_code: response code returned by the device
2522  */
2523 static void
2524 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2525 {
2526 	char *desc;
2527 
2528 	switch (response_code) {
2529 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2530 		desc = "task management request completed";
2531 		break;
2532 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2533 		desc = "invalid frame";
2534 		break;
2535 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2536 		desc = "task management request not supported";
2537 		break;
2538 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2539 		desc = "task management request failed";
2540 		break;
2541 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2542 		desc = "task management request succeeded";
2543 		break;
2544 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2545 		desc = "invalid lun";
2546 		break;
2547 	case 0xA:
2548 		desc = "overlapped tag attempted";
2549 		break;
2550 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2551 		desc = "task queued, however not sent to target";
2552 		break;
2553 	default:
2554 		desc = "unknown";
2555 		break;
2556 	}
2557 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2558 }
2559 
2560 /**
2561  * _scsih_tm_done - tm completion routine
2562  * @ioc: per adapter object
2563  * @smid: system request message index
2564  * @msix_index: MSIX table index supplied by the OS
2565  * @reply: reply message frame(lower 32bit addr)
2566  * Context: none.
2567  *
2568  * The callback handler when using scsih_issue_tm.
2569  *
2570  * Return: 1 meaning mf should be freed from _base_interrupt
2571  *         0 means the mf is freed from this function.
2572  */
2573 static u8
2574 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2575 {
2576 	MPI2DefaultReply_t *mpi_reply;
2577 
2578 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2579 		return 1;
2580 	if (ioc->tm_cmds.smid != smid)
2581 		return 1;
2582 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2583 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2584 	if (mpi_reply) {
2585 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2586 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2587 	}
2588 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2589 	complete(&ioc->tm_cmds.done);
2590 	return 1;
2591 }
2592 
2593 /**
2594  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2595  * @ioc: per adapter object
2596  * @handle: device handle
2597  *
2598  * During a task management request, we need to freeze the device queue.
2599  */
2600 void
2601 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2602 {
2603 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2604 	struct scsi_device *sdev;
2605 	u8 skip = 0;
2606 
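	/* tm_busy lives on the shared target, so the first matching device is enough */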
2607 	shost_for_each_device(sdev, ioc->shost) {
2608 		if (skip)
2609 			continue;
2610 		sas_device_priv_data = sdev->hostdata;
2611 		if (!sas_device_priv_data)
2612 			continue;
2613 		if (sas_device_priv_data->sas_target->handle == handle) {
2614 			sas_device_priv_data->sas_target->tm_busy = 1;
2615 			skip = 1;
2616 			ioc->ignore_loginfos = 1;
2617 		}
2618 	}
2619 }
2620 
2621 /**
2622  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2623  * @ioc: per adapter object
2624  * @handle: device handle
2625  *
2626  * During a task management request, we need to freeze the device queue.
2627  */
2628 void
2629 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2630 {
2631 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2632 	struct scsi_device *sdev;
2633 	u8 skip = 0;
2634 
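	/* tm_busy lives on the shared target, so the first matching device is enough */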
2635 	shost_for_each_device(sdev, ioc->shost) {
2636 		if (skip)
2637 			continue;
2638 		sas_device_priv_data = sdev->hostdata;
2639 		if (!sas_device_priv_data)
2640 			continue;
2641 		if (sas_device_priv_data->sas_target->handle == handle) {
2642 			sas_device_priv_data->sas_target->tm_busy = 0;
2643 			skip = 1;
2644 			ioc->ignore_loginfos = 0;
2645 		}
2646 	}
2647 }
2648 
2649 /**
2650  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2651  * @ioc: per adapter struct
2652  * @handle: device handle
2653  * @lun: lun number
2654  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2655  * @smid_task: smid assigned to the task
2656  * @msix_task: MSIX table index supplied by the OS
2657  * @timeout: timeout in seconds
2658  * @tr_method: Target Reset Method
2659  * Context: user
2660  *
2661  * A generic API for sending task management requests to firmware.
2662  *
2663  * The callback index is set inside `ioc->tm_cb_idx`; the caller must
2664  * hold ioc->tm_cmds.mutex and check for outstanding commands.
2665  *
2666  * Return: SUCCESS or FAILED.
2667  */
2668 int
2669 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2670 	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2671 {
2672 	Mpi2SCSITaskManagementRequest_t *mpi_request;
2673 	Mpi2SCSITaskManagementReply_t *mpi_reply;
2674 	u16 smid = 0;
2675 	u32 ioc_state;
2676 	int rc;
2677 
2678 	lockdep_assert_held(&ioc->tm_cmds.mutex);
2679 
2680 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2681 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2682 		return FAILED;
2683 	}
2684 
2685 	if (ioc->shost_recovery || ioc->remove_host ||
2686 	    ioc->pci_error_recovery) {
2687 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2688 		return FAILED;
2689 	}
2690 
2691 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2692 	if (ioc_state & MPI2_DOORBELL_USED) {
2693 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2694 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2695 		return (!rc) ? SUCCESS : FAILED;
2696 	}
2697 
2698 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2699 		mpt3sas_base_fault_info(ioc, ioc_state &
2700 		    MPI2_DOORBELL_DATA_MASK);
2701 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2702 		return (!rc) ? SUCCESS : FAILED;
2703 	}
2704 
2705 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2706 	if (!smid) {
2707 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2708 		return FAILED;
2709 	}
2710 
2711 	dtmprintk(ioc,
2712 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2713 			   handle, type, smid_task, timeout, tr_method));
2714 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
2715 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2716 	ioc->tm_cmds.smid = smid;
2717 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2718 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2719 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2720 	mpi_request->DevHandle = cpu_to_le16(handle);
2721 	mpi_request->TaskType = type;
2722 	mpi_request->MsgFlags = tr_method;
2723 	mpi_request->TaskMID = cpu_to_le16(smid_task);
2724 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2725 	mpt3sas_scsih_set_tm_flag(ioc, handle);
2726 	init_completion(&ioc->tm_cmds.done);
2727 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
2728 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2729 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2730 		if (mpt3sas_base_check_cmd_timeout(ioc,
2731 			ioc->tm_cmds.status, mpi_request,
2732 			sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2733 			rc = mpt3sas_base_hard_reset_handler(ioc,
2734 					FORCE_BIG_HAMMER);
2735 			rc = (!rc) ? SUCCESS : FAILED;
2736 			goto out;
2737 		}
2738 	}
2739 
2740 	/* sync IRQs in case those were busy during flush. */
2741 	mpt3sas_base_sync_reply_irqs(ioc);
2742 
2743 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2744 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2745 		mpi_reply = ioc->tm_cmds.reply;
2746 		dtmprintk(ioc,
2747 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2748 				   le16_to_cpu(mpi_reply->IOCStatus),
2749 				   le32_to_cpu(mpi_reply->IOCLogInfo),
2750 				   le32_to_cpu(mpi_reply->TerminationCount)));
2751 		if (ioc->logging_level & MPT_DEBUG_TM) {
2752 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
2753 			if (mpi_reply->IOCStatus)
2754 				_debug_dump_mf(mpi_request,
2755 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2756 		}
2757 	}
2758 	rc = SUCCESS;
2759 
2760 out:
2761 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
2762 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2763 	return rc;
2764 }
2765 
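/**
 * mpt3sas_scsih_issue_locked_tm - wrapper around mpt3sas_scsih_issue_tm()
 * @ioc: per adapter struct
 * @handle: device handle
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Takes ioc->tm_cmds.mutex around mpt3sas_scsih_issue_tm().
 *
 * Return: SUCCESS or FAILED.
 */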
2766 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2767 		u64 lun, u8 type, u16 smid_task, u16 msix_task,
2768 		u8 timeout, u8 tr_method)
2769 {
2770 	int ret;
2771 
2772 	mutex_lock(&ioc->tm_cmds.mutex);
2773 	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2774 			msix_task, timeout, tr_method);
2775 	mutex_unlock(&ioc->tm_cmds.mutex);
2776 
2777 	return ret;
2778 }
2779 
2780 /**
2781  * _scsih_tm_display_info - displays info about the device
2782  * @ioc: per adapter struct
2783  * @scmd: pointer to scsi command object
2784  *
2785  * Called by task management callback handlers.
2786  */
2787 static void
2788 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2789 {
2790 	struct scsi_target *starget = scmd->device->sdev_target;
2791 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2792 	struct _sas_device *sas_device = NULL;
2793 	struct _pcie_device *pcie_device = NULL;
2794 	unsigned long flags;
2795 	char *device_str = NULL;
2796 
2797 	if (!priv_target)
2798 		return;
2799 	if (ioc->hide_ir_msg)
2800 		device_str = "WarpDrive";
2801 	else
2802 		device_str = "volume";
2803 
2804 	scsi_print_command(scmd);
2805 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2806 		starget_printk(KERN_INFO, starget,
2807 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
2808 			device_str, priv_target->handle,
2809 		    device_str, (unsigned long long)priv_target->sas_address);
2810 
2811 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2812 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2813 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2814 		if (pcie_device) {
2815 			starget_printk(KERN_INFO, starget,
2816 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2817 				pcie_device->handle,
2818 				(unsigned long long)pcie_device->wwid,
2819 				pcie_device->port_num);
2820 			if (pcie_device->enclosure_handle != 0)
2821 				starget_printk(KERN_INFO, starget,
2822 					"enclosure logical id(0x%016llx), slot(%d)\n",
2823 					(unsigned long long)
2824 					pcie_device->enclosure_logical_id,
2825 					pcie_device->slot);
2826 			if (pcie_device->connector_name[0] != '\0')
2827 				starget_printk(KERN_INFO, starget,
2828 					"enclosure level(0x%04x), connector name( %s)\n",
2829 					pcie_device->enclosure_level,
2830 					pcie_device->connector_name);
2831 			pcie_device_put(pcie_device);
2832 		}
2833 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2834 
2835 	} else {
2836 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2837 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2838 		if (sas_device) {
2839 			if (priv_target->flags &
2840 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2841 				starget_printk(KERN_INFO, starget,
2842 				    "volume handle(0x%04x), "
2843 				    "volume wwid(0x%016llx)\n",
2844 				    sas_device->volume_handle,
2845 				   (unsigned long long)sas_device->volume_wwid);
2846 			}
2847 			starget_printk(KERN_INFO, starget,
2848 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2849 			    sas_device->handle,
2850 			    (unsigned long long)sas_device->sas_address,
2851 			    sas_device->phy);
2852 
2853 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
2854 			    NULL, starget);
2855 
2856 			sas_device_put(sas_device);
2857 		}
2858 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2859 	}
2860 }
2861 
2862 /**
2863  * scsih_abort - eh threads main abort routine
2864  * @scmd: pointer to scsi command object
2865  *
2866  * Return: SUCCESS if command aborted else FAILED
2867  */
2868 static int
2869 scsih_abort(struct scsi_cmnd *scmd)
2870 {
2871 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2872 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2873 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2874 	u16 handle;
2875 	int r;
2876 	u8 timeout = 30;
2877 	struct _pcie_device *pcie_device = NULL;
2878 
2879 	sdev_printk(KERN_INFO, scmd->device,
2880 		"attempting task abort! scmd(%p)\n", scmd);
2881 	_scsih_tm_display_info(ioc, scmd);
2882 
2883 	sas_device_priv_data = scmd->device->hostdata;
2884 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2885 	    ioc->remove_host) {
2886 		sdev_printk(KERN_INFO, scmd->device,
2887 			"device has been deleted! scmd(%p)\n", scmd);
2888 		scmd->result = DID_NO_CONNECT << 16;
2889 		scmd->scsi_done(scmd);
2890 		r = SUCCESS;
2891 		goto out;
2892 	}
2893 
2894 	/* check for completed command */
2895 	if (st == NULL || st->cb_idx == 0xFF) {
2896 		scmd->result = DID_RESET << 16;
2897 		r = SUCCESS;
2898 		goto out;
2899 	}
2900 
2901 	/* for hidden raid components and volumes this is not supported */
2902 	if (sas_device_priv_data->sas_target->flags &
2903 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
2904 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2905 		scmd->result = DID_RESET << 16;
2906 		r = FAILED;
2907 		goto out;
2908 	}
2909 
2910 	mpt3sas_halt_firmware(ioc);
2911 
2912 	handle = sas_device_priv_data->sas_target->handle;
2913 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2914 	if (pcie_device && (!ioc->tm_custom_handling) &&
2915 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
2916 		timeout = ioc->nvme_abort_timeout;
2917 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2918 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2919 		st->smid, st->msix_io, timeout, 0);
2920 	/* Command must be cleared after abort */
2921 	if (r == SUCCESS && st->cb_idx != 0xFF)
2922 		r = FAILED;
2923  out:
2924 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2925 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2926 	if (pcie_device)
2927 		pcie_device_put(pcie_device);
2928 	return r;
2929 }
2930 
2931 /**
2932  * scsih_dev_reset - eh threads main device reset routine
2933  * @scmd: pointer to scsi command object
2934  * Return: SUCCESS if the device was reset else FAILED
2935  * Return: SUCCESS if command aborted else FAILED
2936  */
2937 static int
2938 scsih_dev_reset(struct scsi_cmnd *scmd)
2939 {
2940 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2941 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2942 	struct _sas_device *sas_device = NULL;
2943 	struct _pcie_device *pcie_device = NULL;
2944 	u16	handle;
2945 	u8	tr_method = 0;
2946 	u8	tr_timeout = 30;
2947 	int r;
2948 
2949 	struct scsi_target *starget = scmd->device->sdev_target;
2950 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2951 
2952 	sdev_printk(KERN_INFO, scmd->device,
2953 		"attempting device reset! scmd(%p)\n", scmd);
2954 	_scsih_tm_display_info(ioc, scmd);
2955 
2956 	sas_device_priv_data = scmd->device->hostdata;
2957 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2958 	    ioc->remove_host) {
2959 		sdev_printk(KERN_INFO, scmd->device,
2960 			"device has been deleted! scmd(%p)\n", scmd);
2961 		scmd->result = DID_NO_CONNECT << 16;
2962 		scmd->scsi_done(scmd);
2963 		r = SUCCESS;
2964 		goto out;
2965 	}
2966 
2967 	/* for hidden raid components obtain the volume_handle */
2968 	handle = 0;
2969 	if (sas_device_priv_data->sas_target->flags &
2970 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2971 		sas_device = mpt3sas_get_sdev_from_target(ioc,
2972 				target_priv_data);
2973 		if (sas_device)
2974 			handle = sas_device->volume_handle;
2975 	} else
2976 		handle = sas_device_priv_data->sas_target->handle;
2977 
2978 	if (!handle) {
2979 		scmd->result = DID_RESET << 16;
2980 		r = FAILED;
2981 		goto out;
2982 	}
2983 
2984 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2985 
2986 	if (pcie_device && (!ioc->tm_custom_handling) &&
2987 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
2988 		tr_timeout = pcie_device->reset_timeout;
2989 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2990 	} else
2991 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2992 
2993 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2994 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2995 		tr_timeout, tr_method);
2996 	/* Check for busy commands after reset */
2997 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2998 		r = FAILED;
2999  out:
3000 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
3001 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3002 
3003 	if (sas_device)
3004 		sas_device_put(sas_device);
3005 	if (pcie_device)
3006 		pcie_device_put(pcie_device);
3007 
3008 	return r;
3009 }
3010 
3011 /**
3012  * scsih_target_reset - eh threads main target reset routine
3013  * @scmd: pointer to scsi command object
3014  * Return: SUCCESS if the target was reset else FAILED
3015  * Return: SUCCESS if command aborted else FAILED
3016  */
3017 static int
3018 scsih_target_reset(struct scsi_cmnd *scmd)
3019 {
3020 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3021 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3022 	struct _sas_device *sas_device = NULL;
3023 	struct _pcie_device *pcie_device = NULL;
3024 	u16	handle;
3025 	u8	tr_method = 0;
3026 	u8	tr_timeout = 30;
3027 	int r;
3028 	struct scsi_target *starget = scmd->device->sdev_target;
3029 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3030 
3031 	starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
3032 		scmd);
3033 	_scsih_tm_display_info(ioc, scmd);
3034 
3035 	sas_device_priv_data = scmd->device->hostdata;
3036 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3037 	    ioc->remove_host) {
3038 		starget_printk(KERN_INFO, starget, "target has been deleted! scmd(%p)\n",
3039 			scmd);
3040 		scmd->result = DID_NO_CONNECT << 16;
3041 		scmd->scsi_done(scmd);
3042 		r = SUCCESS;
3043 		goto out;
3044 	}
3045 
3046 	/* for hidden raid components obtain the volume_handle */
3047 	handle = 0;
3048 	if (sas_device_priv_data->sas_target->flags &
3049 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3050 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3051 				target_priv_data);
3052 		if (sas_device)
3053 			handle = sas_device->volume_handle;
3054 	} else
3055 		handle = sas_device_priv_data->sas_target->handle;
3056 
3057 	if (!handle) {
3058 		scmd->result = DID_RESET << 16;
3059 		r = FAILED;
3060 		goto out;
3061 	}
3062 
3063 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3064 
3065 	if (pcie_device && (!ioc->tm_custom_handling) &&
3066 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3067 		tr_timeout = pcie_device->reset_timeout;
3068 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3069 	} else
3070 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3071 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3072 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3073 	    tr_timeout, tr_method);
3074 	/* Check for busy commands after reset */
3075 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3076 		r = FAILED;
3077  out:
3078 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3079 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3080 
3081 	if (sas_device)
3082 		sas_device_put(sas_device);
3083 	if (pcie_device)
3084 		pcie_device_put(pcie_device);
3085 	return r;
3086 }
3087 
3088 
3089 /**
3090  * scsih_host_reset - eh threads main host reset routine
3091  * @scmd: pointer to scsi command object
3092  *
3093  * Return: SUCCESS if the host was reset else FAILED
3094  */
3095 static int
3096 scsih_host_reset(struct scsi_cmnd *scmd)
3097 {
3098 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3099 	int r, retval;
3100 
3101 	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3102 	scsi_print_command(scmd);
3103 
3104 	if (ioc->is_driver_loading || ioc->remove_host) {
3105 		ioc_info(ioc, "Blocking the host reset\n");
3106 		r = FAILED;
3107 		goto out;
3108 	}
3109 
3110 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3111 	r = (retval < 0) ? FAILED : SUCCESS;
3112 out:
3113 	ioc_info(ioc, "host reset: %s scmd(%p)\n",
3114 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3115 
3116 	return r;
3117 }
3118 
3119 /**
3120  * _scsih_fw_event_add - insert and queue up fw_event
3121  * @ioc: per adapter object
3122  * @fw_event: object describing the event
3123  * Context: This function will acquire ioc->fw_event_lock.
3124  *
3125  * This adds the firmware event object to the linked list, then queues it up to
3126  * be processed from user context.
3127  */
3128 static void
3129 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3130 {
3131 	unsigned long flags;
3132 
3133 	if (ioc->firmware_event_thread == NULL)
3134 		return;
3135 
3136 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
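	/*
	 * Take two references: one for placement on fw_event_list (dropped
	 * by _scsih_fw_event_del_from_list()) and one for the queued work
	 * (dropped from _firmware_event_work()).
	 */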
3137 	fw_event_work_get(fw_event);
3138 	INIT_LIST_HEAD(&fw_event->list);
3139 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3140 	INIT_WORK(&fw_event->work, _firmware_event_work);
3141 	fw_event_work_get(fw_event);
3142 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3143 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3144 }
3145 
3146 /**
3147  * _scsih_fw_event_del_from_list - delete fw_event from the list
3148  * @ioc: per adapter object
3149  * @fw_event: object describing the event
3150  * Context: This function will acquire ioc->fw_event_lock.
3151  *
3152  * If the fw_event is on the fw_event_list, remove it and do a put.
3153  */
3154 static void
3155 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3156 	*fw_event)
3157 {
3158 	unsigned long flags;
3159 
3160 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3161 	if (!list_empty(&fw_event->list)) {
3162 		list_del_init(&fw_event->list);
3163 		fw_event_work_put(fw_event);
3164 	}
3165 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3166 }
3167 
3169 /**
3170  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3171  * @ioc: per adapter object
3172  * @event_data: trigger event data
3173  */
3174 void
3175 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3176 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3177 {
3178 	struct fw_event_work *fw_event;
3179 	u16 sz;
3180 
3181 	if (ioc->is_driver_loading)
3182 		return;
3183 	sz = sizeof(*event_data);
3184 	fw_event = alloc_fw_event_work(sz);
3185 	if (!fw_event)
3186 		return;
3187 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3188 	fw_event->ioc = ioc;
3189 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3190 	_scsih_fw_event_add(ioc, fw_event);
3191 	fw_event_work_put(fw_event);
3192 }
3193 
3194 /**
3195  * _scsih_error_recovery_delete_devices - remove devices not responding
3196  * @ioc: per adapter object
3197  */
3198 static void
3199 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3200 {
3201 	struct fw_event_work *fw_event;
3202 
3203 	if (ioc->is_driver_loading)
3204 		return;
3205 	fw_event = alloc_fw_event_work(0);
3206 	if (!fw_event)
3207 		return;
3208 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3209 	fw_event->ioc = ioc;
3210 	_scsih_fw_event_add(ioc, fw_event);
3211 	fw_event_work_put(fw_event);
3212 }
3213 
3214 /**
3215  * mpt3sas_port_enable_complete - port enable completed (fake event)
3216  * @ioc: per adapter object
3217  */
3218 void
3219 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3220 {
3221 	struct fw_event_work *fw_event;
3222 
3223 	fw_event = alloc_fw_event_work(0);
3224 	if (!fw_event)
3225 		return;
3226 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3227 	fw_event->ioc = ioc;
3228 	_scsih_fw_event_add(ioc, fw_event);
3229 	fw_event_work_put(fw_event);
3230 }
3231 
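/**
 * dequeue_next_fw_event - dequeue the next event from fw_event_list
 * @ioc: per adapter object
 *
 * Return: the first fw_event_work on ioc->fw_event_list, removed from the
 * list, or NULL if the list is empty.
 */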
3232 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3233 {
3234 	unsigned long flags;
3235 	struct fw_event_work *fw_event = NULL;
3236 
3237 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3238 	if (!list_empty(&ioc->fw_event_list)) {
3239 		fw_event = list_first_entry(&ioc->fw_event_list,
3240 				struct fw_event_work, list);
3241 		list_del_init(&fw_event->list);
3242 	}
3243 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3244 
3245 	return fw_event;
3246 }
3247 
3248 /**
3249  * _scsih_fw_event_cleanup_queue - cleanup event queue
3250  * @ioc: per adapter object
3251  *
3252  * Walk the firmware event queue, either killing timers, or waiting
3253  * for outstanding events to complete
3254  */
3255 static void
3256 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3257 {
3258 	struct fw_event_work *fw_event;
3259 
3260 	if (list_empty(&ioc->fw_event_list) ||
3261 	     !ioc->firmware_event_thread || in_interrupt())
3262 		return;
3263 
3264 	while ((fw_event = dequeue_next_fw_event(ioc))) {
3265 		/*
3266 		 * Wait on the fw_event to complete. If this returns 1, then
3267 		 * the event was never executed, and we need a put for the
3268 		 * reference the work had on the fw_event.
3269 		 *
3270 		 * If it did execute, we wait for it to finish, and the put will
3271 		 * happen from _firmware_event_work()
3272 		 */
3273 		if (cancel_work_sync(&fw_event->work))
3274 			fw_event_work_put(fw_event);
3275 
3276 		fw_event_work_put(fw_event);
3277 	}
3278 }
3279 
3280 /**
3281  * _scsih_internal_device_block - block the sdev device
3282  * @sdev: per device object
3283  * @sas_device_priv_data: per device driver private data
3284  *
3285  * Make sure the device is blocked without error; if not,
3286  * print an error.
3287  */
3288 static void
3289 _scsih_internal_device_block(struct scsi_device *sdev,
3290 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3291 {
3292 	int r = 0;
3293 
3294 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3295 	    sas_device_priv_data->sas_target->handle);
3296 	sas_device_priv_data->block = 1;
3297 
3298 	r = scsi_internal_device_block_nowait(sdev);
3299 	if (r == -EINVAL)
3300 		sdev_printk(KERN_WARNING, sdev,
3301 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3302 		    r, sas_device_priv_data->sas_target->handle);
3303 }
3304 
3305 /**
3306  * _scsih_internal_device_unblock - unblock the sdev device
3307  * @sdev: per device object
3308  * @sas_device_priv_data: per device driver private data
3309  * Make sure the device is unblocked without error; if not, retry
3310  * by blocking and then unblocking.
3311  */
3312 
3313 static void
3314 _scsih_internal_device_unblock(struct scsi_device *sdev,
3315 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3316 {
3317 	int r = 0;
3318 
3319 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3320 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3321 	sas_device_priv_data->block = 0;
3322 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3323 	if (r == -EINVAL) {
3324 		/* The device has been set to SDEV_RUNNING by SD layer during
3325 		 * device addition but the request queue is still stopped by
3326 		 * our earlier block call. We need to perform a block again
3327 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3328 
3329 		sdev_printk(KERN_WARNING, sdev,
3330 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3331 		    "performing a block followed by an unblock\n",
3332 		    r, sas_device_priv_data->sas_target->handle);
3333 		sas_device_priv_data->block = 1;
3334 		r = scsi_internal_device_block_nowait(sdev);
3335 		if (r)
3336 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3337 			    "failed with return(%d) for handle(0x%04x)\n",
3338 			    r, sas_device_priv_data->sas_target->handle);
3339 
3340 		sas_device_priv_data->block = 0;
3341 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3342 		if (r)
3343 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3344 			    " failed with return(%d) for handle(0x%04x)\n",
3345 			    r, sas_device_priv_data->sas_target->handle);
3346 	}
3347 }
3348 
3349 /**
3350  * _scsih_ublock_io_all_device - unblock every device
3351  * @ioc: per adapter object
3352  *
3353  * Change the device state from SDEV_BLOCK to SDEV_RUNNING.
3354  */
3355 static void
3356 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3357 {
3358 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3359 	struct scsi_device *sdev;
3360 
3361 	shost_for_each_device(sdev, ioc->shost) {
3362 		sas_device_priv_data = sdev->hostdata;
3363 		if (!sas_device_priv_data)
3364 			continue;
3365 		if (!sas_device_priv_data->block)
3366 			continue;
3367 
3368 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3369 			"device_running, handle(0x%04x)\n",
3370 		    sas_device_priv_data->sas_target->handle));
3371 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3372 	}
3373 }
3374 
3375 
3376 /**
3377  * _scsih_ublock_io_device - prepare device to be deleted
3378  * @ioc: per adapter object
3379  * @sas_address: sas address
3380  *
3381  * Unblock any blocked devices matching this sas address prior to removal.
3382  */
3383 static void
3384 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3385 {
3386 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3387 	struct scsi_device *sdev;
3388 
3389 	shost_for_each_device(sdev, ioc->shost) {
3390 		sas_device_priv_data = sdev->hostdata;
3391 		if (!sas_device_priv_data)
3392 			continue;
3393 		if (sas_device_priv_data->sas_target->sas_address
3394 		    != sas_address)
3395 			continue;
3396 		if (sas_device_priv_data->block)
3397 			_scsih_internal_device_unblock(sdev,
3398 				sas_device_priv_data);
3399 	}
3400 }
3401 
3402 /**
3403  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3404  * @ioc: per adapter object
3405  *
3406  * During device pull we need to appropriately set the sdev state.
3407  */
3408 static void
3409 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3410 {
3411 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3412 	struct scsi_device *sdev;
3413 
3414 	shost_for_each_device(sdev, ioc->shost) {
3415 		sas_device_priv_data = sdev->hostdata;
3416 		if (!sas_device_priv_data)
3417 			continue;
3418 		if (sas_device_priv_data->block)
3419 			continue;
3420 		if (sas_device_priv_data->ignore_delay_remove) {
3421 			sdev_printk(KERN_INFO, sdev,
3422 			"%s skip device_block for SES handle(0x%04x)\n",
3423 			__func__, sas_device_priv_data->sas_target->handle);
3424 			continue;
3425 		}
3426 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3427 	}
3428 }
3429 
3430 /**
3431  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3432  * @ioc: per adapter object
3433  * @handle: device handle
3434  *
3435  * During device pull we need to appropriately set the sdev state.
3436  */
3437 static void
3438 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3439 {
3440 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3441 	struct scsi_device *sdev;
3442 	struct _sas_device *sas_device;
3443 
3444 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3445 
3446 	shost_for_each_device(sdev, ioc->shost) {
3447 		sas_device_priv_data = sdev->hostdata;
3448 		if (!sas_device_priv_data)
3449 			continue;
3450 		if (sas_device_priv_data->sas_target->handle != handle)
3451 			continue;
3452 		if (sas_device_priv_data->block)
3453 			continue;
3454 		if (sas_device && sas_device->pend_sas_rphy_add)
3455 			continue;
3456 		if (sas_device_priv_data->ignore_delay_remove) {
3457 			sdev_printk(KERN_INFO, sdev,
3458 			"%s skip device_block for SES handle(0x%04x)\n",
3459 			__func__, sas_device_priv_data->sas_target->handle);
3460 			continue;
3461 		}
3462 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3463 	}
3464 
3465 	if (sas_device)
3466 		sas_device_put(sas_device);
3467 }
3468 
3469 /**
3470  * _scsih_block_io_to_children_attached_to_ex
3471  * @ioc: per adapter object
3472  * @sas_expander: the sas_node object
3473  *
3474  * This routine sets the sdev state to SDEV_BLOCK for all devices
3475  * attached to this expander. It is called when the expander is
3476  * pulled.
3477  */
3478 static void
3479 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3480 	struct _sas_node *sas_expander)
3481 {
3482 	struct _sas_port *mpt3sas_port;
3483 	struct _sas_device *sas_device;
3484 	struct _sas_node *expander_sibling;
3485 	unsigned long flags;
3486 
3487 	if (!sas_expander)
3488 		return;
3489 
3490 	list_for_each_entry(mpt3sas_port,
3491 	   &sas_expander->sas_port_list, port_list) {
3492 		if (mpt3sas_port->remote_identify.device_type ==
3493 		    SAS_END_DEVICE) {
3494 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3495 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3496 			    mpt3sas_port->remote_identify.sas_address);
3497 			if (sas_device) {
3498 				set_bit(sas_device->handle,
3499 						ioc->blocking_handles);
3500 				sas_device_put(sas_device);
3501 			}
3502 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3503 		}
3504 	}
3505 
3506 	list_for_each_entry(mpt3sas_port,
3507 	   &sas_expander->sas_port_list, port_list) {
3508 
3509 		if (mpt3sas_port->remote_identify.device_type ==
3510 		    SAS_EDGE_EXPANDER_DEVICE ||
3511 		    mpt3sas_port->remote_identify.device_type ==
3512 		    SAS_FANOUT_EXPANDER_DEVICE) {
3513 			expander_sibling =
3514 			    mpt3sas_scsih_expander_find_by_sas_address(
3515 			    ioc, mpt3sas_port->remote_identify.sas_address);
3516 			_scsih_block_io_to_children_attached_to_ex(ioc,
3517 			    expander_sibling);
3518 		}
3519 	}
3520 }
3521 
3522 /**
3523  * _scsih_block_io_to_children_attached_directly
3524  * @ioc: per adapter object
3525  * @event_data: topology change event data
3526  *
3527  * This routine sets the sdev state to SDEV_BLOCK for all directly
3528  * attached devices during device pull.
3529  */
3530 static void
3531 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3532 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3533 {
3534 	int i;
3535 	u16 handle;
3536 	u16 reason_code;
3537 
3538 	for (i = 0; i < event_data->NumEntries; i++) {
3539 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3540 		if (!handle)
3541 			continue;
3542 		reason_code = event_data->PHY[i].PhyStatus &
3543 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3544 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3545 			_scsih_block_io_device(ioc, handle);
3546 	}
3547 }
3548 
3549 /**
3550  * _scsih_block_io_to_pcie_children_attached_directly
3551  * @ioc: per adapter object
3552  * @event_data: topology change event data
3553  *
3554  * This routine sets the sdev state to SDEV_BLOCK for all directly
3555  * attached devices during device pull/reconnect.
3556  */
3557 static void
3558 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3559 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3560 {
3561 	int i;
3562 	u16 handle;
3563 	u16 reason_code;
3564 
3565 	for (i = 0; i < event_data->NumEntries; i++) {
3566 		handle =
3567 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3568 		if (!handle)
3569 			continue;
3570 		reason_code = event_data->PortEntry[i].PortStatus;
3571 		if (reason_code ==
3572 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3573 			_scsih_block_io_device(ioc, handle);
3574 	}
3575 }
3576 /**
3577  * _scsih_tm_tr_send - send task management request
3578  * @ioc: per adapter object
3579  * @handle: device handle
3580  * Context: interrupt time.
3581  *
3582  * This code initiates the device removal handshake protocol with the
3583  * controller firmware.  It issues a target reset using the high
3584  * priority request queue, then sends a sas iounit control request
3585  * (MPI2_SAS_OP_REMOVE_DEVICE) from the target reset completion.
3586  *
3587  * This is designed to send multiple task management requests at the
3588  * same time to the fifo. If the fifo is full, the request is appended
3589  * to a delayed list and processed from a future completion.
3590  */
3591 static void
3592 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3593 {
3594 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3595 	u16 smid;
3596 	struct _sas_device *sas_device = NULL;
3597 	struct _pcie_device *pcie_device = NULL;
3598 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3599 	u64 sas_address = 0;
3600 	unsigned long flags;
3601 	struct _tr_list *delayed_tr;
3602 	u32 ioc_state;
3603 	u8 tr_method = 0;
3604 
3605 	if (ioc->pci_error_recovery) {
3606 		dewtprintk(ioc,
3607 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3608 				    __func__, handle));
3609 		return;
3610 	}
3611 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3612 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3613 		dewtprintk(ioc,
3614 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3615 				    __func__, handle));
3616 		return;
3617 	}
3618 
3619 	/* if PD, then return */
3620 	if (test_bit(handle, ioc->pd_handles))
3621 		return;
3622 
3623 	clear_bit(handle, ioc->pend_os_device_add);
3624 
3625 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
3626 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3627 	if (sas_device && sas_device->starget &&
3628 	    sas_device->starget->hostdata) {
3629 		sas_target_priv_data = sas_device->starget->hostdata;
3630 		sas_target_priv_data->deleted = 1;
3631 		sas_address = sas_device->sas_address;
3632 	}
3633 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3634 	if (!sas_device) {
3635 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3636 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3637 		if (pcie_device && pcie_device->starget &&
3638 			pcie_device->starget->hostdata) {
3639 			sas_target_priv_data = pcie_device->starget->hostdata;
3640 			sas_target_priv_data->deleted = 1;
3641 			sas_address = pcie_device->wwid;
3642 		}
3643 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
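		/*
		 * Select the target reset method: NVMe (non-SCSI) PCIe
		 * devices without custom TM handling use a protocol level
		 * reset, everything else uses a link reset.
		 */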
3644 		if (pcie_device && (!ioc->tm_custom_handling) &&
3645 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
3646 		    pcie_device->device_info))))
3647 			tr_method =
3648 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3649 		else
3650 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3651 	}
3652 	if (sas_target_priv_data) {
3653 		dewtprintk(ioc,
3654 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3655 				    handle, (u64)sas_address));
3656 		if (sas_device) {
3657 			if (sas_device->enclosure_handle != 0)
3658 				dewtprintk(ioc,
3659 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3660 						    (u64)sas_device->enclosure_logical_id,
3661 						    sas_device->slot));
3662 			if (sas_device->connector_name[0] != '\0')
3663 				dewtprintk(ioc,
3664 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3665 						    sas_device->enclosure_level,
3666 						    sas_device->connector_name));
3667 		} else if (pcie_device) {
3668 			if (pcie_device->enclosure_handle != 0)
3669 				dewtprintk(ioc,
3670 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3671 						    (u64)pcie_device->enclosure_logical_id,
3672 						    pcie_device->slot));
3673 			if (pcie_device->connector_name[0] != '\0')
3674 				dewtprintk(ioc,
3675 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3676 						    pcie_device->enclosure_level,
3677 						    pcie_device->connector_name));
3678 		}
3679 		_scsih_ublock_io_device(ioc, sas_address);
3680 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3681 	}
3682 
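	/*
	 * No free high-priority smid means the fifo is full; queue the
	 * target reset on the delayed list and reissue it from a future
	 * TM completion.
	 */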
3683 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3684 	if (!smid) {
3685 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3686 		if (!delayed_tr)
3687 			goto out;
3688 		INIT_LIST_HEAD(&delayed_tr->list);
3689 		delayed_tr->handle = handle;
3690 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3691 		dewtprintk(ioc,
3692 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3693 				    handle));
3694 		goto out;
3695 	}
3696 
3697 	dewtprintk(ioc,
3698 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3699 			    handle, smid, ioc->tm_tr_cb_idx));
3700 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3701 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3702 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3703 	mpi_request->DevHandle = cpu_to_le16(handle);
3704 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3705 	mpi_request->MsgFlags = tr_method;
3706 	set_bit(handle, ioc->device_remove_in_progress);
3707 	ioc->put_smid_hi_priority(ioc, smid, 0);
3708 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3709 
3710 out:
3711 	if (sas_device)
3712 		sas_device_put(sas_device);
3713 	if (pcie_device)
3714 		pcie_device_put(pcie_device);
3715 }
3716 
3717 /**
3718  * _scsih_tm_tr_complete - target reset completion routine
3719  * @ioc: per adapter object
3720  * @smid: system request message index
3721  * @msix_index: MSIX table index supplied by the OS
3722  * @reply: reply message frame(lower 32bit addr)
3723  * Context: interrupt time.
3724  *
3725  * This is the target reset completion routine.
3726  * This code is part of the code to initiate the device removal
3727  * handshake protocol with controller firmware.
3728  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3729  *
3730  * Return: 1 meaning mf should be freed from _base_interrupt
3731  *         0 means the mf is freed from this function.
3732  */
3733 static u8
3734 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3735 	u32 reply)
3736 {
3737 	u16 handle;
3738 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3739 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3740 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3741 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3742 	u16 smid_sas_ctrl;
3743 	u32 ioc_state;
3744 	struct _sc_list *delayed_sc;
3745 
3746 	if (ioc->pci_error_recovery) {
3747 		dewtprintk(ioc,
3748 			   ioc_info(ioc, "%s: host in pci error recovery\n",
3749 				    __func__));
3750 		return 1;
3751 	}
3752 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3753 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3754 		dewtprintk(ioc,
3755 			   ioc_info(ioc, "%s: host is not operational\n",
3756 				    __func__));
3757 		return 1;
3758 	}
3759 	if (unlikely(!mpi_reply)) {
3760 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3761 			__FILE__, __LINE__, __func__);
3762 		return 1;
3763 	}
3764 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3765 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3766 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3767 		dewtprintk(ioc,
3768 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3769 				   handle,
3770 				   le16_to_cpu(mpi_reply->DevHandle), smid));
3771 		return 0;
3772 	}
3773 
3774 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3775 	dewtprintk(ioc,
3776 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3777 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3778 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3779 			    le32_to_cpu(mpi_reply->TerminationCount)));
3780 
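	/*
	 * Second half of the removal handshake: issue the sas iounit
	 * control (MPI2_SAS_OP_REMOVE_DEVICE) request, or queue it on the
	 * delayed list when no free smid is available.
	 */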
3781 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3782 	if (!smid_sas_ctrl) {
3783 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3784 		if (!delayed_sc)
3785 			return _scsih_check_for_pending_tm(ioc, smid);
3786 		INIT_LIST_HEAD(&delayed_sc->list);
3787 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3788 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3789 		dewtprintk(ioc,
3790 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3791 				    handle));
3792 		return _scsih_check_for_pending_tm(ioc, smid);
3793 	}
3794 
3795 	dewtprintk(ioc,
3796 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3797 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3798 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3799 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3800 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3801 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3802 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
3803 	ioc->put_smid_default(ioc, smid_sas_ctrl);
3804 
3805 	return _scsih_check_for_pending_tm(ioc, smid);
3806 }
3807 
3808 /**
3809  * _scsih_allow_scmd_to_device - check whether scmd can be issued to the IOC
3810  * @ioc: per adapter object
3811  * @scmd: pointer to scsi command object
3812  *
3813  * Return: true if scmd can be issued to the IOC, otherwise false.
3814  */
3815 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3816 	struct scsi_cmnd *scmd)
3817 {
3818 
3819 	if (ioc->pci_error_recovery)
3820 		return false;
3821 
3822 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3823 		if (ioc->remove_host)
3824 			return false;
3825 
3826 		return true;
3827 	}
3828 
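	/*
	 * While the host is being removed, only allow commands needed to
	 * quiesce the device (cache flush and start/stop).
	 */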
3829 	if (ioc->remove_host) {
3830 
3831 		switch (scmd->cmnd[0]) {
3832 		case SYNCHRONIZE_CACHE:
3833 		case START_STOP:
3834 			return true;
3835 		default:
3836 			return false;
3837 		}
3838 	}
3839 
3840 	return true;
3841 }
3842 
3843 /**
3844  * _scsih_sas_control_complete - completion routine
3845  * @ioc: per adapter object
3846  * @smid: system request message index
3847  * @msix_index: MSIX table index supplied by the OS
3848  * @reply: reply message frame(lower 32bit addr)
3849  * Context: interrupt time.
3850  *
3851  * This is the sas iounit control completion routine.
3852  * This code is part of the code to initiate the device removal
3853  * handshake protocol with controller firmware.
3854  *
3855  * Return: 1 meaning mf should be freed from _base_interrupt
3856  *         0 means the mf is freed from this function.
3857  */
3858 static u8
3859 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3860 	u8 msix_index, u32 reply)
3861 {
3862 	Mpi2SasIoUnitControlReply_t *mpi_reply =
3863 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3864 
3865 	if (likely(mpi_reply)) {
3866 		dewtprintk(ioc,
3867 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3868 				    le16_to_cpu(mpi_reply->DevHandle), smid,
3869 				    le16_to_cpu(mpi_reply->IOCStatus),
3870 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
3871 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
3872 		     MPI2_IOCSTATUS_SUCCESS) {
3873 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3874 			    ioc->device_remove_in_progress);
3875 		}
3876 	} else {
3877 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3878 			__FILE__, __LINE__, __func__);
3879 	}
3880 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3881 }
3882 
3883 /**
3884  * _scsih_tm_tr_volume_send - send target reset request for volumes
3885  * @ioc: per adapter object
3886  * @handle: device handle
3887  * Context: interrupt time.
3888  *
3889  * This is designed to send multiple task management requests at the
3890  * same time to the fifo. If the fifo is full, the request is appended
3891  * to a delayed list and processed from a future completion.
3892  */
3893 static void
3894 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3895 {
3896 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3897 	u16 smid;
3898 	struct _tr_list *delayed_tr;
3899 
3900 	if (ioc->pci_error_recovery) {
3901 		dewtprintk(ioc,
3902 			   ioc_info(ioc, "%s: host reset in progress!\n",
3903 				    __func__));
3904 		return;
3905 	}
3906 
3907 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3908 	if (!smid) {
3909 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3910 		if (!delayed_tr)
3911 			return;
3912 		INIT_LIST_HEAD(&delayed_tr->list);
3913 		delayed_tr->handle = handle;
3914 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3915 		dewtprintk(ioc,
3916 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3917 				    handle));
3918 		return;
3919 	}
3920 
3921 	dewtprintk(ioc,
3922 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3923 			    handle, smid, ioc->tm_tr_volume_cb_idx));
3924 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3925 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3926 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3927 	mpi_request->DevHandle = cpu_to_le16(handle);
3928 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3929 	ioc->put_smid_hi_priority(ioc, smid, 0);
3930 }
3931 
3932 /**
3933  * _scsih_tm_volume_tr_complete - target reset completion
3934  * @ioc: per adapter object
3935  * @smid: system request message index
3936  * @msix_index: MSIX table index supplied by the OS
3937  * @reply: reply message frame(lower 32bit addr)
3938  * Context: interrupt time.
3939  *
3940  * Return: 1 meaning mf should be freed from _base_interrupt
3941  *         0 means the mf is freed from this function.
3942  */
3943 static u8
3944 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3945 	u8 msix_index, u32 reply)
3946 {
3947 	u16 handle;
3948 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3949 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3950 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3951 
3952 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
3953 		dewtprintk(ioc,
3954 			   ioc_info(ioc, "%s: host reset in progress!\n",
3955 				    __func__));
3956 		return 1;
3957 	}
3958 	if (unlikely(!mpi_reply)) {
3959 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3960 			__FILE__, __LINE__, __func__);
3961 		return 1;
3962 	}
3963 
3964 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3965 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3966 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3967 		dewtprintk(ioc,
3968 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3969 				   handle, le16_to_cpu(mpi_reply->DevHandle),
3970 				   smid));
3971 		return 0;
3972 	}
3973 
3974 	dewtprintk(ioc,
3975 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3976 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3977 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3978 			    le32_to_cpu(mpi_reply->TerminationCount)));
3979 
3980 	return _scsih_check_for_pending_tm(ioc, smid);
3981 }
3982 
3983 /**
3984  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3985  * @ioc: per adapter object
3986  * @smid: system request message index
3987  * @event: Event ID
3988  * @event_context: used to track events uniquely
3989  *
3990  * Context - processed in interrupt context.
3991  */
3992 static void
3993 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3994 				U32 event_context)
3995 {
3996 	Mpi2EventAckRequest_t *ack_request;
3997 	int i = smid - ioc->internal_smid;
3998 	unsigned long flags;
3999 
4000 	/* Without releasing the smid just update the
4001 	 * callback index and reuse the same smid for
4002 	 * processing this delayed request
4003 	 */
4004 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4005 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4006 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4007 
4008 	dewtprintk(ioc,
4009 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4010 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4011 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4012 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4013 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4014 	ack_request->Event = event;
4015 	ack_request->EventContext = event_context;
4016 	ack_request->VF_ID = 0;  /* TODO */
4017 	ack_request->VP_ID = 0;
4018 	ioc->put_smid_default(ioc, smid);
4019 }
4020 
4021 /**
4022  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4023  *				sas_io_unit_ctrl messages
4024  * @ioc: per adapter object
4025  * @smid: system request message index
4026  * @handle: device handle
4027  *
4028  * Context - processed in interrupt context.
4029  */
4030 static void
4031 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4032 					u16 smid, u16 handle)
4033 {
4034 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4035 	u32 ioc_state;
4036 	int i = smid - ioc->internal_smid;
4037 	unsigned long flags;
4038 
4039 	if (ioc->remove_host) {
4040 		dewtprintk(ioc,
4041 			   ioc_info(ioc, "%s: host has been removed\n",
4042 				    __func__));
4043 		return;
4044 	} else if (ioc->pci_error_recovery) {
4045 		dewtprintk(ioc,
4046 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4047 				    __func__));
4048 		return;
4049 	}
4050 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4051 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4052 		dewtprintk(ioc,
4053 			   ioc_info(ioc, "%s: host is not operational\n",
4054 				    __func__));
4055 		return;
4056 	}
4057 
4058 	/* Without releasing the smid just update the
4059 	 * callback index and reuse the same smid for
4060 	 * processing this delayed request
4061 	 */
4062 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4063 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4064 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4065 
4066 	dewtprintk(ioc,
4067 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4068 			    handle, smid, ioc->tm_sas_control_cb_idx));
4069 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4070 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4071 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4072 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4073 	mpi_request->DevHandle = cpu_to_le16(handle);
4074 	ioc->put_smid_default(ioc, smid);
4075 }
4076 
4077 /**
4078  * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4079  * @ioc: per adapter object
4080  * @smid: system request message index
4081  *
4082  * Context: Executed in interrupt context
4083  *
4084  * This will check the delayed internal messages list, and process the
4085  * next request.
4086  *
4087  * Return: 1 meaning mf should be freed from _base_interrupt
4088  *         0 means the mf is freed from this function.
4089  */
4090 u8
4091 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4092 {
4093 	struct _sc_list *delayed_sc;
4094 	struct _event_ack_list *delayed_event_ack;
4095 
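	/*
	 * Delayed event ACKs are serviced before delayed sas iounit
	 * control requests; both reuse the just-completed internal smid
	 * without releasing it.
	 */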
4096 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4097 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4098 						struct _event_ack_list, list);
4099 		_scsih_issue_delayed_event_ack(ioc, smid,
4100 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4101 		list_del(&delayed_event_ack->list);
4102 		kfree(delayed_event_ack);
4103 		return 0;
4104 	}
4105 
4106 	if (!list_empty(&ioc->delayed_sc_list)) {
4107 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4108 						struct _sc_list, list);
4109 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4110 						 delayed_sc->handle);
4111 		list_del(&delayed_sc->list);
4112 		kfree(delayed_sc);
4113 		return 0;
4114 	}
4115 	return 1;
4116 }
4117 
4118 /**
4119  * _scsih_check_for_pending_tm - check for pending task management
4120  * @ioc: per adapter object
4121  * @smid: system request message index
4122  *
4123  * This will check the delayed target reset list, and feed the
4124  * next request.
4125  *
4126  * Return: 1 meaning mf should be freed from _base_interrupt
4127  *         0 means the mf is freed from this function.
4128  */
4129 static u8
4130 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4131 {
4132 	struct _tr_list *delayed_tr;
4133 
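	/*
	 * Volume target resets take priority over device target resets.
	 * The completed smid is freed here; the send routines obtain a
	 * fresh high-priority smid of their own.
	 */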
4134 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4135 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4136 		    struct _tr_list, list);
4137 		mpt3sas_base_free_smid(ioc, smid);
4138 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4139 		list_del(&delayed_tr->list);
4140 		kfree(delayed_tr);
4141 		return 0;
4142 	}
4143 
4144 	if (!list_empty(&ioc->delayed_tr_list)) {
4145 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4146 		    struct _tr_list, list);
4147 		mpt3sas_base_free_smid(ioc, smid);
4148 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4149 		list_del(&delayed_tr->list);
4150 		kfree(delayed_tr);
4151 		return 0;
4152 	}
4153 
4154 	return 1;
4155 }
4156 
4157 /**
4158  * _scsih_check_topo_delete_events - sanity check on topo events
4159  * @ioc: per adapter object
4160  * @event_data: the event data payload
4161  *
4162  * This routine was added to better handle cable breakage.
4163  *
4164  * This handles the case where driver receives multiple expander
4165  * add and delete events in a single shot.  When there is a delete event
4166  * the routine will void any pending add events waiting in the event queue.
4167  */
4168 static void
4169 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4170 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4171 {
4172 	struct fw_event_work *fw_event;
4173 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4174 	u16 expander_handle;
4175 	struct _sas_node *sas_expander;
4176 	unsigned long flags;
4177 	int i, reason_code;
4178 	u16 handle;
4179 
4180 	for (i = 0 ; i < event_data->NumEntries; i++) {
4181 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4182 		if (!handle)
4183 			continue;
4184 		reason_code = event_data->PHY[i].PhyStatus &
4185 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4186 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4187 			_scsih_tm_tr_send(ioc, handle);
4188 	}
4189 
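	/*
	 * An ExpanderDevHandle below the HBA phy count indicates the
	 * devices are attached directly to the HBA rather than to an
	 * expander.
	 */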
4190 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4191 	if (expander_handle < ioc->sas_hba.num_phys) {
4192 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4193 		return;
4194 	}
4195 	if (event_data->ExpStatus ==
4196 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4197 		/* put expander attached devices into blocking state */
4198 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4199 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4200 		    expander_handle);
4201 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4202 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4203 		do {
4204 			handle = find_first_bit(ioc->blocking_handles,
4205 			    ioc->facts.MaxDevHandle);
4206 			if (handle < ioc->facts.MaxDevHandle)
4207 				_scsih_block_io_device(ioc, handle);
4208 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4209 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4210 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4211 
4212 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4213 		return;
4214 
4215 	/* mark ignore flag for pending events */
4216 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4217 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4218 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4219 		    fw_event->ignore)
4220 			continue;
4221 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4222 				   fw_event->event_data;
4223 		if (local_event_data->ExpStatus ==
4224 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4225 		    local_event_data->ExpStatus ==
4226 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4227 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4228 			    expander_handle) {
4229 				dewtprintk(ioc,
4230 					   ioc_info(ioc, "setting ignoring flag\n"));
4231 				fw_event->ignore = 1;
4232 			}
4233 		}
4234 	}
4235 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4236 }
4237 
4238 /**
4239  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4240  * events
4241  * @ioc: per adapter object
4242  * @event_data: the event data payload
4243  *
4244  * This handles the case where driver receives multiple switch
4245  * or device add and delete events in a single shot.  When there
4246  * is a delete event the routine will void any pending add
4247  * events waiting in the event queue.
4248  */
4249 static void
4250 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4251 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4252 {
4253 	struct fw_event_work *fw_event;
4254 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4255 	unsigned long flags;
4256 	int i, reason_code;
4257 	u16 handle, switch_handle;
4258 
4259 	for (i = 0; i < event_data->NumEntries; i++) {
4260 		handle =
4261 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4262 		if (!handle)
4263 			continue;
4264 		reason_code = event_data->PortEntry[i].PortStatus;
4265 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4266 			_scsih_tm_tr_send(ioc, handle);
4267 	}
4268 
4269 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4270 	if (!switch_handle) {
4271 		_scsih_block_io_to_pcie_children_attached_directly(
4272 							ioc, event_data);
4273 		return;
4274 	}
4275 	/* TODO: cascaded PCIe Switch removal is not supported yet */
4276 	if ((event_data->SwitchStatus
4277 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4278 		(event_data->SwitchStatus ==
4279 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4280 		_scsih_block_io_to_pcie_children_attached_directly(
4281 							ioc, event_data);
4282 
4283 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4284 		return;
4285 
4286 	/* mark ignore flag for pending events */
4287 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4288 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4289 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4290 			fw_event->ignore)
4291 			continue;
4292 		local_event_data =
4293 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4294 			fw_event->event_data;
4295 		if (local_event_data->SwitchStatus ==
4296 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4297 		    local_event_data->SwitchStatus ==
4298 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4299 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4300 				switch_handle) {
4301 				dewtprintk(ioc,
4302 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4303 				fw_event->ignore = 1;
4304 			}
4305 		}
4306 	}
4307 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4308 }
4309 
4310 /**
4311  * _scsih_set_volume_delete_flag - setting volume delete flag
4312  * @ioc: per adapter object
4313  * @handle: device handle
4314  *
4315  * This returns nothing.
4316  */
4317 static void
4318 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4319 {
4320 	struct _raid_device *raid_device;
4321 	struct MPT3SAS_TARGET *sas_target_priv_data;
4322 	unsigned long flags;
4323 
4324 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4325 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4326 	if (raid_device && raid_device->starget &&
4327 	    raid_device->starget->hostdata) {
4328 		sas_target_priv_data =
4329 		    raid_device->starget->hostdata;
4330 		sas_target_priv_data->deleted = 1;
4331 		dewtprintk(ioc,
4332 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4333 				    handle, (u64)raid_device->wwid));
4334 	}
4335 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4336 }
4337 
4338 /**
4339  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4340  * @handle: input handle
4341  * @a: handle for volume a
4342  * @b: handle for volume b
4343  *
4344  * IR firmware only supports two raid volumes.  The purpose of this
4345  * routine is to store the volume handle in either @a or @b, provided
4346  * the handle is non-zero and has not already been recorded.
4347  */
4348 static void
4349 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4350 {
4351 	if (!handle || handle == *a || handle == *b)
4352 		return;
4353 	if (!*a)
4354 		*a = handle;
4355 	else if (!*b)
4356 		*b = handle;
4357 }
4358 
4359 /**
4360  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4361  * @ioc: per adapter object
4362  * @event_data: the event data payload
4363  * Context: interrupt time.
4364  *
4365  * This routine will send a target reset to the volume, followed by
4366  * target resets to the PDs. This is called when a PD has been removed,
4367  * or a volume has been deleted or removed. When the target reset is
4368  * sent to the volume, the PD target resets are queued so that they
4369  * start upon completion of the volume target reset.
4370  */
4371 static void
4372 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4373 	Mpi2EventDataIrConfigChangeList_t *event_data)
4374 {
4375 	Mpi2EventIrConfigElement_t *element;
4376 	int i;
4377 	u16 handle, volume_handle, a, b;
4378 	struct _tr_list *delayed_tr;
4379 
4380 	a = 0;
4381 	b = 0;
4382 
4383 	if (ioc->is_warpdrive)
4384 		return;
4385 
4386 	/* Volume Resets for Deleted or Removed */
4387 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4388 	for (i = 0; i < event_data->NumElements; i++, element++) {
4389 		if (le32_to_cpu(event_data->Flags) &
4390 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4391 			continue;
4392 		if (element->ReasonCode ==
4393 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4394 		    element->ReasonCode ==
4395 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4396 			volume_handle = le16_to_cpu(element->VolDevHandle);
4397 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4398 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4399 		}
4400 	}
4401 
4402 	/* Volume Resets for UNHIDE events */
4403 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4404 	for (i = 0; i < event_data->NumElements; i++, element++) {
4405 		if (le32_to_cpu(event_data->Flags) &
4406 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4407 			continue;
4408 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4409 			volume_handle = le16_to_cpu(element->VolDevHandle);
4410 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4411 		}
4412 	}
4413 
4414 	if (a)
4415 		_scsih_tm_tr_volume_send(ioc, a);
4416 	if (b)
4417 		_scsih_tm_tr_volume_send(ioc, b);
4418 
4419 	/* PD target resets */
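	/*
	 * PDs that belong to volume a or b must wait for the volume target
	 * reset to complete, so their resets are queued on the delayed
	 * list; all other PDs are reset immediately.
	 */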
4420 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4421 	for (i = 0; i < event_data->NumElements; i++, element++) {
4422 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4423 			continue;
4424 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4425 		volume_handle = le16_to_cpu(element->VolDevHandle);
4426 		clear_bit(handle, ioc->pd_handles);
4427 		if (!volume_handle)
4428 			_scsih_tm_tr_send(ioc, handle);
4429 		else if (volume_handle == a || volume_handle == b) {
4430 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4431 			BUG_ON(!delayed_tr);
4432 			INIT_LIST_HEAD(&delayed_tr->list);
4433 			delayed_tr->handle = handle;
4434 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4435 			dewtprintk(ioc,
4436 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4437 					    handle));
4438 		} else
4439 			_scsih_tm_tr_send(ioc, handle);
4440 	}
4441 }
4442 
4443 
4444 /**
4445  * _scsih_check_volume_delete_events - set delete flag for volumes
4446  * @ioc: per adapter object
4447  * @event_data: the event data payload
4448  * Context: interrupt time.
4449  *
4450  * This handles the case where the cable connected to the entire volume
4451  * is pulled. The deleted flag is set so that normal IO will not be
4452  * sent to the volume.
4453  */
4454 static void
4455 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4456 	Mpi2EventDataIrVolume_t *event_data)
4457 {
4458 	u32 state;
4459 
4460 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4461 		return;
4462 	state = le32_to_cpu(event_data->NewValue);
4463 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4464 	    MPI2_RAID_VOL_STATE_FAILED)
4465 		_scsih_set_volume_delete_flag(ioc,
4466 		    le16_to_cpu(event_data->VolDevHandle));
4467 }
4468 
4469 /**
4470  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4471  * @ioc: per adapter object
4472  * @event_data: the temp threshold event data
4473  * Context: interrupt time.
4474  */
4475 static void
4476 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4477 	Mpi2EventDataTemperature_t *event_data)
4478 {
4479 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4480 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4481 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4482 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4483 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4484 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4485 			event_data->SensorNum);
4486 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4487 			event_data->CurrentTemperature);
4488 	}
4489 }
4490 
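/**
 * _scsih_set_satl_pending - track outstanding ATA pass-through commands
 * @scmd: pointer to scsi command object
 * @pending: set (true) or clear (false) the per-device pending flag
 *
 * ATA_12/ATA_16 pass-through commands are serialized per device as a
 * work around for firmware SATL handling.
 *
 * Return: non-zero if an ATA command was already pending when setting,
 * otherwise 0.
 */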
4491 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4492 {
4493 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4494 
4495 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4496 		return 0;
4497 
4498 	if (pending)
4499 		return test_and_set_bit(0, &priv->ata_command_pending);
4500 
4501 	clear_bit(0, &priv->ata_command_pending);
4502 	return 0;
4503 }
4504 
4505 /**
4506  * _scsih_flush_running_cmds - completing outstanding commands.
4507  * @ioc: per adapter object
4508  *
4509  * Flush out all outstanding scmds following a host reset; each command
4510  * is completed back to the midlayer with DID_RESET or DID_NO_CONNECT.
4511  */
4512 static void
4513 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4514 {
4515 	struct scsi_cmnd *scmd;
4516 	struct scsiio_tracker *st;
4517 	u16 smid;
4518 	int count = 0;
4519 
4520 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4521 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4522 		if (!scmd)
4523 			continue;
4524 		count++;
4525 		_scsih_set_satl_pending(scmd, false);
4526 		st = scsi_cmd_priv(scmd);
4527 		mpt3sas_base_clear_st(ioc, st);
4528 		scsi_dma_unmap(scmd);
4529 		if (ioc->pci_error_recovery || ioc->remove_host)
4530 			scmd->result = DID_NO_CONNECT << 16;
4531 		else
4532 			scmd->result = DID_RESET << 16;
4533 		scmd->scsi_done(scmd);
4534 	}
4535 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4536 }
4537 
4538 /**
4539  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4540  * @ioc: per adapter object
4541  * @scmd: pointer to scsi command object
4542  * @mpi_request: pointer to the SCSI_IO request message frame
4543  *
4544  * Supports protection types 1 and 3.
4545  */
4546 static void
4547 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4548 	Mpi25SCSIIORequest_t *mpi_request)
4549 {
4550 	u16 eedp_flags;
4551 	unsigned char prot_op = scsi_get_prot_op(scmd);
4552 	unsigned char prot_type = scsi_get_prot_type(scmd);
4553 	Mpi25SCSIIORequest_t *mpi_request_3v =
4554 	   (Mpi25SCSIIORequest_t *)mpi_request;
4555 
4556 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4557 		return;
4558 
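	/*
	 * Only READ_STRIP (controller checks and removes protection
	 * information) and WRITE_INSERT (controller generates and inserts
	 * it) are offloaded; any other protection op is sent as-is.
	 */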
4559 	if (prot_op ==  SCSI_PROT_READ_STRIP)
4560 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4561 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
4562 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4563 	else
4564 		return;
4565 
4566 	switch (prot_type) {
4567 	case SCSI_PROT_DIF_TYPE1:
4568 	case SCSI_PROT_DIF_TYPE2:
4569 
4570 		/*
4571 		* enable ref/guard checking
4572 		* auto increment ref tag
4573 		*/
4574 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4575 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4576 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4577 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4578 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4579 		break;
4580 
4581 	case SCSI_PROT_DIF_TYPE3:
4582 
4583 		/*
4584 		* enable guard checking
4585 		*/
4586 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4587 
4588 		break;
4589 	}
4590 
4591 	mpi_request_3v->EEDPBlockSize =
4592 	    cpu_to_le16(scmd->device->sector_size);
4593 
4594 	if (ioc->is_gen35_ioc)
4595 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4596 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4597 }
4598 
4599 /**
4600  * _scsih_eedp_error_handling - return sense code for EEDP errors
4601  * @scmd: pointer to scsi command object
4602  * @ioc_status: ioc status
4603  */
4604 static void
4605 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4606 {
4607 	u8 ascq;
4608 
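	/*
	 * Map the EEDP ioc_status to the matching T10 PI additional sense
	 * code qualifier (ASC 0x10): 0x01 guard check, 0x02 application
	 * tag check, 0x03 reference tag check failed.
	 */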
4609 	switch (ioc_status) {
4610 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4611 		ascq = 0x01;
4612 		break;
4613 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4614 		ascq = 0x02;
4615 		break;
4616 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4617 		ascq = 0x03;
4618 		break;
4619 	default:
4620 		ascq = 0x00;
4621 		break;
4622 	}
4623 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4624 	    ascq);
4625 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4626 	    SAM_STAT_CHECK_CONDITION;
4627 }
4628 
4629 /**
4630  * scsih_qcmd - main scsi request entry point
4631  * @shost: SCSI host pointer
4632  * @scmd: pointer to scsi command object
4633  *
4634  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4635  *
4636  * Return: 0 on success.  If there's a failure, return either:
4637  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4638  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4639  */
4640 static int
4641 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4642 {
4643 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4644 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4645 	struct MPT3SAS_TARGET *sas_target_priv_data;
4646 	struct _raid_device *raid_device;
4647 	struct request *rq = scmd->request;
4648 	int class;
4649 	Mpi25SCSIIORequest_t *mpi_request;
4650 	struct _pcie_device *pcie_device = NULL;
4651 	u32 mpi_control;
4652 	u16 smid;
4653 	u16 handle;
4654 
4655 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4656 		scsi_print_command(scmd);
4657 
4658 	sas_device_priv_data = scmd->device->hostdata;
4659 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4660 		scmd->result = DID_NO_CONNECT << 16;
4661 		scmd->scsi_done(scmd);
4662 		return 0;
4663 	}
4664 
4665 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4666 		scmd->result = DID_NO_CONNECT << 16;
4667 		scmd->scsi_done(scmd);
4668 		return 0;
4669 	}
4670 
4671 	sas_target_priv_data = sas_device_priv_data->sas_target;
4672 
4673 	/* invalid device handle */
4674 	handle = sas_target_priv_data->handle;
4675 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4676 		scmd->result = DID_NO_CONNECT << 16;
4677 		scmd->scsi_done(scmd);
4678 		return 0;
4679 	}
4680 
4681 
4682 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4683 		/* host recovery or link resets sent via IOCTLs */
4684 		return SCSI_MLQUEUE_HOST_BUSY;
4685 	} else if (sas_target_priv_data->deleted) {
4686 		/* device has been deleted */
4687 		scmd->result = DID_NO_CONNECT << 16;
4688 		scmd->scsi_done(scmd);
4689 		return 0;
4690 	} else if (sas_target_priv_data->tm_busy ||
4691 		   sas_device_priv_data->block) {
4692 		/* device busy with task management */
4693 		return SCSI_MLQUEUE_DEVICE_BUSY;
4694 	}
4695 
4696 	/*
4697 	 * Bug work around for firmware SATL handling.  The loop
4698 	 * is based on atomic operations and ensures consistency
4699 	 * since we're lockless at this point
4700 	 */
4701 	do {
4702 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4703 			return SCSI_MLQUEUE_DEVICE_BUSY;
4704 	} while (_scsih_set_satl_pending(scmd, true));
4705 
4706 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4707 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
4708 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4709 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4710 	else
4711 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4712 
4713 	/* set tags */
4714 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4715 	/* NCQ Prio supported, make sure control indicated high priority */
4716 	if (sas_device_priv_data->ncq_prio_enable) {
4717 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4718 		if (class == IOPRIO_CLASS_RT)
4719 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4720 	}
4721 	/* Make sure Device is not raid volume.
4722 	 * We do not expose raid functionality to upper layer for warpdrive.
4723 	 */
4724 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4725 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
4726 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4727 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4728 
4729 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4730 	if (!smid) {
4731 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4732 		_scsih_set_satl_pending(scmd, false);
4733 		goto out;
4734 	}
4735 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4736 	memset(mpi_request, 0, ioc->request_sz);
4737 	_scsih_setup_eedp(ioc, scmd, mpi_request);
4738 
4739 	if (scmd->cmd_len == 32)
4740 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4741 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4742 	if (sas_device_priv_data->sas_target->flags &
4743 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
4744 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4745 	else
4746 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4747 	mpi_request->DevHandle = cpu_to_le16(handle);
4748 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4749 	mpi_request->Control = cpu_to_le32(mpi_control);
4750 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4751 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4752 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4753 	mpi_request->SenseBufferLowAddress =
4754 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4755 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4756 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4757 	    mpi_request->LUN);
4758 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4759 
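	/*
	 * Build the scatter gather list for the data transfer (pcie_device
	 * is passed so the base driver can pick the SGL format the device
	 * requires); commands without data get a zero length SGE.
	 */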
4760 	if (mpi_request->DataLength) {
4761 		pcie_device = sas_target_priv_data->pcie_dev;
4762 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4763 			mpt3sas_base_free_smid(ioc, smid);
4764 			_scsih_set_satl_pending(scmd, false);
4765 			goto out;
4766 		}
4767 	} else
4768 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4769 
4770 	raid_device = sas_target_priv_data->raid_device;
4771 	if (raid_device && raid_device->direct_io_enabled)
4772 		mpt3sas_setup_direct_io(ioc, scmd,
4773 			raid_device, mpi_request);
4774 
4775 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4776 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4777 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4778 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4779 			ioc->put_smid_fast_path(ioc, smid, handle);
4780 		} else
4781 			ioc->put_smid_scsi_io(ioc, smid,
4782 			    le16_to_cpu(mpi_request->DevHandle));
4783 	} else
4784 		ioc->put_smid_default(ioc, smid);
4785 	return 0;
4786 
4787  out:
4788 	return SCSI_MLQUEUE_HOST_BUSY;
4789 }
4790 
4791 /**
4792  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4793  * @sense_buffer: sense data returned by target
4794  * @data: normalized skey/asc/ascq
4795  */
4796 static void
4797 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4798 {
4799 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
4800 		/* descriptor format */
4801 		data->skey = sense_buffer[1] & 0x0F;
4802 		data->asc = sense_buffer[2];
4803 		data->ascq = sense_buffer[3];
4804 	} else {
4805 		/* fixed format */
4806 		data->skey = sense_buffer[2] & 0x0F;
4807 		data->asc = sense_buffer[12];
4808 		data->ascq = sense_buffer[13];
4809 	}
4810 }
4811 
4812 /**
4813  * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
4814  * @ioc: per adapter object
4815  * @scmd: pointer to scsi command object
4816  * @mpi_reply: reply mf payload returned from firmware
4817  * @smid: system request message index
4818  *
4819  * scsi_status - SCSI Status code returned from target device
4820  * scsi_state - state info associated with SCSI_IO determined by ioc
4821  * ioc_status - ioc supplied status info
4822  */
4823 static void
4824 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4825 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4826 {
4827 	u32 response_info;
4828 	u8 *response_bytes;
4829 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4830 	    MPI2_IOCSTATUS_MASK;
4831 	u8 scsi_state = mpi_reply->SCSIState;
4832 	u8 scsi_status = mpi_reply->SCSIStatus;
4833 	char *desc_ioc_state = NULL;
4834 	char *desc_scsi_status = NULL;
4835 	char *desc_scsi_state = ioc->tmp_string;
4836 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4837 	struct _sas_device *sas_device = NULL;
4838 	struct _pcie_device *pcie_device = NULL;
4839 	struct scsi_target *starget = scmd->device->sdev_target;
4840 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4841 	char *device_str = NULL;
4842 
4843 	if (!priv_target)
4844 		return;
4845 	if (ioc->hide_ir_msg)
4846 		device_str = "WarpDrive";
4847 	else
4848 		device_str = "volume";
4849 
4850 	if (log_info == 0x31170000)
4851 		return;
4852 
4853 	switch (ioc_status) {
4854 	case MPI2_IOCSTATUS_SUCCESS:
4855 		desc_ioc_state = "success";
4856 		break;
4857 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
4858 		desc_ioc_state = "invalid function";
4859 		break;
4860 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4861 		desc_ioc_state = "scsi recovered error";
4862 		break;
4863 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4864 		desc_ioc_state = "scsi invalid dev handle";
4865 		break;
4866 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4867 		desc_ioc_state = "scsi device not there";
4868 		break;
4869 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4870 		desc_ioc_state = "scsi data overrun";
4871 		break;
4872 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4873 		desc_ioc_state = "scsi data underrun";
4874 		break;
4875 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4876 		desc_ioc_state = "scsi io data error";
4877 		break;
4878 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4879 		desc_ioc_state = "scsi protocol error";
4880 		break;
4881 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4882 		desc_ioc_state = "scsi task terminated";
4883 		break;
4884 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4885 		desc_ioc_state = "scsi residual mismatch";
4886 		break;
4887 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4888 		desc_ioc_state = "scsi task mgmt failed";
4889 		break;
4890 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4891 		desc_ioc_state = "scsi ioc terminated";
4892 		break;
4893 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4894 		desc_ioc_state = "scsi ext terminated";
4895 		break;
4896 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4897 		desc_ioc_state = "eedp guard error";
4898 		break;
4899 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4900 		desc_ioc_state = "eedp ref tag error";
4901 		break;
4902 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4903 		desc_ioc_state = "eedp app tag error";
4904 		break;
4905 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4906 		desc_ioc_state = "insufficient power";
4907 		break;
4908 	default:
4909 		desc_ioc_state = "unknown";
4910 		break;
4911 	}
4912 
4913 	switch (scsi_status) {
4914 	case MPI2_SCSI_STATUS_GOOD:
4915 		desc_scsi_status = "good";
4916 		break;
4917 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
4918 		desc_scsi_status = "check condition";
4919 		break;
4920 	case MPI2_SCSI_STATUS_CONDITION_MET:
4921 		desc_scsi_status = "condition met";
4922 		break;
4923 	case MPI2_SCSI_STATUS_BUSY:
4924 		desc_scsi_status = "busy";
4925 		break;
4926 	case MPI2_SCSI_STATUS_INTERMEDIATE:
4927 		desc_scsi_status = "intermediate";
4928 		break;
4929 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4930 		desc_scsi_status = "intermediate condmet";
4931 		break;
4932 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4933 		desc_scsi_status = "reservation conflict";
4934 		break;
4935 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4936 		desc_scsi_status = "command terminated";
4937 		break;
4938 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
4939 		desc_scsi_status = "task set full";
4940 		break;
4941 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
4942 		desc_scsi_status = "aca active";
4943 		break;
4944 	case MPI2_SCSI_STATUS_TASK_ABORTED:
4945 		desc_scsi_status = "task aborted";
4946 		break;
4947 	default:
4948 		desc_scsi_status = "unknown";
4949 		break;
4950 	}
4951 
4952 	desc_scsi_state[0] = '\0';
4953 	if (!scsi_state)
4954 		desc_scsi_state = " ";
4955 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4956 		strcat(desc_scsi_state, "response info ");
4957 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4958 		strcat(desc_scsi_state, "state terminated ");
4959 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4960 		strcat(desc_scsi_state, "no status ");
4961 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4962 		strcat(desc_scsi_state, "autosense failed ");
4963 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4964 		strcat(desc_scsi_state, "autosense valid ");
4965 
4966 	scsi_print_command(scmd);
4967 
4968 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4969 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4970 			 device_str, (u64)priv_target->sas_address);
4971 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4972 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4973 		if (pcie_device) {
4974 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4975 				 (u64)pcie_device->wwid, pcie_device->port_num);
4976 			if (pcie_device->enclosure_handle != 0)
4977 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4978 					 (u64)pcie_device->enclosure_logical_id,
4979 					 pcie_device->slot);
4980 			if (pcie_device->connector_name[0])
4981 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4982 					 pcie_device->enclosure_level,
4983 					 pcie_device->connector_name);
4984 			pcie_device_put(pcie_device);
4985 		}
4986 	} else {
4987 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4988 		if (sas_device) {
4989 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4990 				 (u64)sas_device->sas_address, sas_device->phy);
4991 
4992 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
4993 			    NULL, NULL);
4994 
4995 			sas_device_put(sas_device);
4996 		}
4997 	}
4998 
4999 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5000 		 le16_to_cpu(mpi_reply->DevHandle),
5001 		 desc_ioc_state, ioc_status, smid);
5002 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5003 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5004 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5005 		 le16_to_cpu(mpi_reply->TaskTag),
5006 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5007 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5008 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5009 
5010 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5011 		struct sense_info data;
5012 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5013 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5014 			 data.skey, data.asc, data.ascq,
5015 			 le32_to_cpu(mpi_reply->SenseCount));
5016 	}
5017 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5018 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5019 		response_bytes = (u8 *)&response_info;
5020 		_scsih_response_code(ioc, response_bytes[0]);
5021 	}
5022 }
5023 
5024 /**
5025  * _scsih_turn_on_pfa_led - illuminate PFA LED
5026  * @ioc: per adapter object
5027  * @handle: device handle
5028  * Context: process
5029  */
5030 static void
5031 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5032 {
5033 	Mpi2SepReply_t mpi_reply;
5034 	Mpi2SepRequest_t mpi_request;
5035 	struct _sas_device *sas_device;
5036 
5037 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5038 	if (!sas_device)
5039 		return;
5040 
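	/*
	 * Ask the SCSI Enclosure Processor (SEP) to set the predicted fault
	 * slot status for the slot holding this device handle, which turns
	 * on the PFA LED.
	 */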
5041 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5042 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5043 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5044 	mpi_request.SlotStatus =
5045 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5046 	mpi_request.DevHandle = cpu_to_le16(handle);
5047 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5048 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5049 	    &mpi_request)) != 0) {
5050 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5051 			__FILE__, __LINE__, __func__);
5052 		goto out;
5053 	}
5054 	sas_device->pfa_led_on = 1;
5055 
5056 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5057 		dewtprintk(ioc,
5058 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5059 				    le16_to_cpu(mpi_reply.IOCStatus),
5060 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5061 		goto out;
5062 	}
5063 out:
5064 	sas_device_put(sas_device);
5065 }
5066 
5067 /**
5068  * _scsih_turn_off_pfa_led - turn off Fault LED
5069  * @ioc: per adapter object
5070  * @sas_device: sas device whose PFA LED has to be turned off
5071  * Context: process
5072  */
5073 static void
5074 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5075 	struct _sas_device *sas_device)
5076 {
5077 	Mpi2SepReply_t mpi_reply;
5078 	Mpi2SepRequest_t mpi_request;
5079 
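	/*
	 * Clear the slot status to turn the PFA LED back off.  The device
	 * handle may no longer be valid while the device is being removed,
	 * so the SEP request addresses the slot by enclosure handle and
	 * slot number rather than by device handle.
	 */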
5080 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5081 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5082 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5083 	mpi_request.SlotStatus = 0;
5084 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5085 	mpi_request.DevHandle = 0;
5086 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5087 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5088 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5089 		&mpi_request)) != 0) {
5090 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5091 			__FILE__, __LINE__, __func__);
5092 		return;
5093 	}
5094 
5095 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5096 		dewtprintk(ioc,
5097 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5098 				    le16_to_cpu(mpi_reply.IOCStatus),
5099 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5100 		return;
5101 	}
5102 }
5103 
5104 /**
5105  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5106  * @ioc: per adapter object
5107  * @handle: device handle
5108  * Context: interrupt.
5109  */
5110 static void
5111 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5112 {
5113 	struct fw_event_work *fw_event;
5114 
5115 	fw_event = alloc_fw_event_work(0);
5116 	if (!fw_event)
5117 		return;
5118 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5119 	fw_event->device_handle = handle;
5120 	fw_event->ioc = ioc;
5121 	_scsih_fw_event_add(ioc, fw_event);
5122 	fw_event_work_put(fw_event);
5123 }
5124 
5125 /**
5126  * _scsih_smart_predicted_fault - process smart errors
5127  * @ioc: per adapter object
5128  * @handle: device handle
5129  * Context: interrupt.
5130  */
5131 static void
5132 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5133 {
5134 	struct scsi_target *starget;
5135 	struct MPT3SAS_TARGET *sas_target_priv_data;
5136 	Mpi2EventNotificationReply_t *event_reply;
5137 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5138 	struct _sas_device *sas_device;
5139 	ssize_t sz;
5140 	unsigned long flags;
5141 
5142 	/* only handle non-raid devices */
5143 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5144 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5145 	if (!sas_device)
5146 		goto out_unlock;
5147 
5148 	starget = sas_device->starget;
5149 	sas_target_priv_data = starget->hostdata;
5150 
5151 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5152 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5153 		goto out_unlock;
5154 
5155 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5156 
5157 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5158 
5159 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5160 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5161 
5162 	/* insert into event log */
5163 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5164 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5165 	event_reply = kzalloc(sz, GFP_KERNEL);
5166 	if (!event_reply) {
5167 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5168 			__FILE__, __LINE__, __func__);
5169 		goto out;
5170 	}
5171 
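	/*
	 * Build a SAS device status change event (reason code: SMART data)
	 * and add it to the ctl event log so it can be retrieved through
	 * the driver's ioctl interface.  MPI length fields are expressed
	 * in 32-bit dwords.
	 */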
5172 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5173 	event_reply->Event =
5174 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5175 	event_reply->MsgLength = sz/4;
5176 	event_reply->EventDataLength =
5177 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5178 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5179 	    event_reply->EventData;
5180 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5181 	event_data->ASC = 0x5D;
5182 	event_data->DevHandle = cpu_to_le16(handle);
5183 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5184 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5185 	kfree(event_reply);
5186 out:
5187 	if (sas_device)
5188 		sas_device_put(sas_device);
5189 	return;
5190 
5191 out_unlock:
5192 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5193 	goto out;
5194 }
5195 
5196 /**
5197  * _scsih_io_done - scsi request callback
5198  * @ioc: per adapter object
5199  * @smid: system request message index
5200  * @msix_index: MSIX table index supplied by the OS
5201  * @reply: reply message frame(lower 32bit addr)
5202  *
5203  * Callback handler when using _scsih_qcmd.
5204  *
5205  * Return: 1 meaning mf should be freed from _base_interrupt
5206  *         0 means the mf is freed from this function.
5207  */
5208 static u8
5209 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5210 {
5211 	Mpi25SCSIIORequest_t *mpi_request;
5212 	Mpi2SCSIIOReply_t *mpi_reply;
5213 	struct scsi_cmnd *scmd;
5214 	struct scsiio_tracker *st;
5215 	u16 ioc_status;
5216 	u32 xfer_cnt;
5217 	u8 scsi_state;
5218 	u8 scsi_status;
5219 	u32 log_info;
5220 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5221 	u32 response_code = 0;
5222 
5223 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5224 
5225 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5226 	if (scmd == NULL)
5227 		return 1;
5228 
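	/*
	 * Clear the SATL-pending flag so another ATA pass-through (SATL
	 * translated) command may be issued to this device.
	 */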
5229 	_scsih_set_satl_pending(scmd, false);
5230 
5231 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5232 
5233 	if (mpi_reply == NULL) {
5234 		scmd->result = DID_OK << 16;
5235 		goto out;
5236 	}
5237 
5238 	sas_device_priv_data = scmd->device->hostdata;
5239 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5240 	     sas_device_priv_data->sas_target->deleted) {
5241 		scmd->result = DID_NO_CONNECT << 16;
5242 		goto out;
5243 	}
5244 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5245 
5246 	/*
5247 	 * WARPDRIVE: if direct_io is set, this was a direct I/O; a failed
5248 	 * direct I/O is resubmitted to the RAID volume instead.
5249 	 */
5250 	st = scsi_cmd_priv(scmd);
5251 	if (st->direct_io &&
5252 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5253 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5254 		st->direct_io = 0;
5255 		st->scmd = scmd;
5256 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5257 		mpi_request->DevHandle =
5258 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5259 		ioc->put_smid_scsi_io(ioc, smid,
5260 		    sas_device_priv_data->sas_target->handle);
5261 		return 0;
5262 	}
5263 	/* turn off TLR if a TLR-enabled device responds with INVALID_FRAME */
5264 	scsi_state = mpi_reply->SCSIState;
5265 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5266 		response_code =
5267 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5268 	if (!sas_device_priv_data->tlr_snoop_check) {
5269 		sas_device_priv_data->tlr_snoop_check++;
5270 		if ((!ioc->is_warpdrive &&
5271 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5272 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5273 		    && sas_is_tlr_enabled(scmd->device) &&
5274 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5275 			sas_disable_tlr(scmd->device);
5276 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5277 		}
5278 	}
5279 
5280 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5281 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5282 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5283 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5284 	else
5285 		log_info = 0;
5286 	ioc_status &= MPI2_IOCSTATUS_MASK;
5287 	scsi_status = mpi_reply->SCSIStatus;
5288 
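	/*
	 * A data underrun that transferred no data but carries a BUSY,
	 * RESERVATION CONFLICT or TASK SET FULL SCSI status is treated as
	 * success so that only the SCSI status is returned to the midlayer.
	 */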
5289 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5290 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5291 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5292 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5293 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5294 	}
5295 
5296 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5297 		struct sense_info data;
5298 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5299 		    smid);
5300 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5301 		    le32_to_cpu(mpi_reply->SenseCount));
5302 		memcpy(scmd->sense_buffer, sense_data, sz);
5303 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5304 		/* failure prediction threshold exceeded */
5305 		if (data.asc == 0x5D)
5306 			_scsih_smart_predicted_fault(ioc,
5307 			    le16_to_cpu(mpi_reply->DevHandle));
5308 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5309 
5310 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5311 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5312 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5313 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5314 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5315 	}
5316 	switch (ioc_status) {
5317 	case MPI2_IOCSTATUS_BUSY:
5318 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5319 		scmd->result = SAM_STAT_BUSY;
5320 		break;
5321 
5322 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5323 		scmd->result = DID_NO_CONNECT << 16;
5324 		break;
5325 
5326 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5327 		if (sas_device_priv_data->block) {
5328 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5329 			goto out;
5330 		}
5331 		if (log_info == 0x31110630) {
5332 			if (scmd->retries > 2) {
5333 				scmd->result = DID_NO_CONNECT << 16;
5334 				scsi_device_set_state(scmd->device,
5335 				    SDEV_OFFLINE);
5336 			} else {
5337 				scmd->result = DID_SOFT_ERROR << 16;
5338 				scmd->device->expecting_cc_ua = 1;
5339 			}
5340 			break;
5341 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5342 			scmd->result = DID_RESET << 16;
5343 			break;
5344 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5345 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5346 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5347 			scmd->result = DID_RESET << 16;
5348 			break;
5349 		}
5350 		scmd->result = DID_SOFT_ERROR << 16;
5351 		break;
5352 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5353 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5354 		scmd->result = DID_RESET << 16;
5355 		break;
5356 
5357 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5358 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5359 			scmd->result = DID_SOFT_ERROR << 16;
5360 		else
5361 			scmd->result = (DID_OK << 16) | scsi_status;
5362 		break;
5363 
5364 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5365 		scmd->result = (DID_OK << 16) | scsi_status;
5366 
5367 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5368 			break;
5369 
5370 		if (xfer_cnt < scmd->underflow) {
5371 			if (scsi_status == SAM_STAT_BUSY)
5372 				scmd->result = SAM_STAT_BUSY;
5373 			else
5374 				scmd->result = DID_SOFT_ERROR << 16;
5375 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5376 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5377 			scmd->result = DID_SOFT_ERROR << 16;
5378 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5379 			scmd->result = DID_RESET << 16;
5380 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
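			/*
			 * REPORT LUNS transferred no data: synthesize fixed
			 * format (0x70) ILLEGAL REQUEST / INVALID COMMAND
			 * OPERATION CODE (asc 0x20) sense data and return a
			 * CHECK CONDITION to the midlayer.
			 */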
5381 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5382 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5383 			scmd->result = (DRIVER_SENSE << 24) |
5384 			    SAM_STAT_CHECK_CONDITION;
5385 			scmd->sense_buffer[0] = 0x70;
5386 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5387 			scmd->sense_buffer[12] = 0x20;
5388 			scmd->sense_buffer[13] = 0;
5389 		}
5390 		break;
5391 
5392 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5393 		scsi_set_resid(scmd, 0);
5394 		/* fall through */
5395 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5396 	case MPI2_IOCSTATUS_SUCCESS:
5397 		scmd->result = (DID_OK << 16) | scsi_status;
5398 		if (response_code ==
5399 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5400 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5401 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5402 			scmd->result = DID_SOFT_ERROR << 16;
5403 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5404 			scmd->result = DID_RESET << 16;
5405 		break;
5406 
5407 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5408 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5409 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5410 		_scsih_eedp_error_handling(scmd, ioc_status);
5411 		break;
5412 
5413 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5414 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5415 	case MPI2_IOCSTATUS_INVALID_SGL:
5416 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5417 	case MPI2_IOCSTATUS_INVALID_FIELD:
5418 	case MPI2_IOCSTATUS_INVALID_STATE:
5419 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5420 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5421 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5422 	default:
5423 		scmd->result = DID_SOFT_ERROR << 16;
5424 		break;
5425 
5426 	}
5427 
5428 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5429 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5430 
5431  out:
5432 
5433 	scsi_dma_unmap(scmd);
5434 	mpt3sas_base_free_smid(ioc, smid);
5435 	scmd->scsi_done(scmd);
5436 	return 0;
5437 }
5438 
5439 /**
5440  * _scsih_sas_host_refresh - refreshing sas host object contents
5441  * @ioc: per adapter object
5442  * Context: user
5443  *
5444  * During port enable, the firmware sends topology events for every device.
5445  * It's possible that the handles have changed from their previous values,
5446  * so this code keeps the handles up to date whenever they change.
5447  */
5448 static void
5449 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5450 {
5451 	u16 sz;
5452 	u16 ioc_status;
5453 	int i;
5454 	Mpi2ConfigReply_t mpi_reply;
5455 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5456 	u16 attached_handle;
5457 	u8 link_rate;
5458 
5459 	dtmprintk(ioc,
5460 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5461 			   (u64)ioc->sas_hba.sas_address));
5462 
5463 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5464 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5465 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5466 	if (!sas_iounit_pg0) {
5467 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5468 			__FILE__, __LINE__, __func__);
5469 		return;
5470 	}
5471 
5472 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5473 	    sas_iounit_pg0, sz)) != 0)
5474 		goto out;
5475 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5476 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5477 		goto out;
5478 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5479 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5480 		if (i == 0)
5481 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5482 			    PhyData[0].ControllerDevHandle);
5483 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5484 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5485 		    AttachedDevHandle);
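		/*
		 * Report at least a 1.5 Gbps link rate to the SAS transport
		 * layer for any phy that has a device attached.
		 */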
5486 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5487 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5488 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5489 		    attached_handle, i, link_rate);
5490 	}
5491  out:
5492 	kfree(sas_iounit_pg0);
5493 }
5494 
5495 /**
5496  * _scsih_sas_host_add - create sas host object
5497  * @ioc: per adapter object
5498  *
5499  * Creating host side data object, stored in ioc->sas_hba
5500  */
5501 static void
5502 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5503 {
5504 	int i;
5505 	Mpi2ConfigReply_t mpi_reply;
5506 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5507 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5508 	Mpi2SasPhyPage0_t phy_pg0;
5509 	Mpi2SasDevicePage0_t sas_device_pg0;
5510 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5511 	u16 ioc_status;
5512 	u16 sz;
5513 	u8 device_missing_delay;
5514 	u8 num_phys;
5515 
5516 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5517 	if (!num_phys) {
5518 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5519 			__FILE__, __LINE__, __func__);
5520 		return;
5521 	}
5522 	ioc->sas_hba.phy = kcalloc(num_phys,
5523 	    sizeof(struct _sas_phy), GFP_KERNEL);
5524 	if (!ioc->sas_hba.phy) {
5525 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5526 			__FILE__, __LINE__, __func__);
5527 		goto out;
5528 	}
5529 	ioc->sas_hba.num_phys = num_phys;
5530 
5531 	/* sas_iounit page 0 */
5532 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5533 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5534 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5535 	if (!sas_iounit_pg0) {
5536 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5537 			__FILE__, __LINE__, __func__);
5538 		return;
5539 	}
5540 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5541 	    sas_iounit_pg0, sz))) {
5542 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5543 			__FILE__, __LINE__, __func__);
5544 		goto out;
5545 	}
5546 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5547 	    MPI2_IOCSTATUS_MASK;
5548 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5549 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5550 			__FILE__, __LINE__, __func__);
5551 		goto out;
5552 	}
5553 
5554 	/* sas_iounit page 1 */
5555 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5556 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5557 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5558 	if (!sas_iounit_pg1) {
5559 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5560 			__FILE__, __LINE__, __func__);
5561 		goto out;
5562 	}
5563 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5564 	    sas_iounit_pg1, sz))) {
5565 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5566 			__FILE__, __LINE__, __func__);
5567 		goto out;
5568 	}
5569 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5570 	    MPI2_IOCSTATUS_MASK;
5571 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5572 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5573 			__FILE__, __LINE__, __func__);
5574 		goto out;
5575 	}
5576 
5577 	ioc->io_missing_delay =
5578 	    sas_iounit_pg1->IODeviceMissingDelay;
5579 	device_missing_delay =
5580 	    sas_iounit_pg1->ReportDeviceMissingDelay;
5581 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5582 		ioc->device_missing_delay = (device_missing_delay &
5583 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5584 	else
5585 		ioc->device_missing_delay = device_missing_delay &
5586 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5587 
5588 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5589 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5590 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5591 		    i))) {
5592 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5593 				__FILE__, __LINE__, __func__);
5594 			goto out;
5595 		}
5596 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5597 		    MPI2_IOCSTATUS_MASK;
5598 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5599 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5600 				__FILE__, __LINE__, __func__);
5601 			goto out;
5602 		}
5603 
5604 		if (i == 0)
5605 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5606 			    PhyData[0].ControllerDevHandle);
5607 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5608 		ioc->sas_hba.phy[i].phy_id = i;
5609 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5610 		    phy_pg0, ioc->sas_hba.parent_dev);
5611 	}
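	/*
	 * Read SAS device page 0 for the controller itself to obtain the
	 * host SAS address and enclosure handle.
	 */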
5612 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5613 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5614 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5615 			__FILE__, __LINE__, __func__);
5616 		goto out;
5617 	}
5618 	ioc->sas_hba.enclosure_handle =
5619 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5620 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5621 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5622 		 ioc->sas_hba.handle,
5623 		 (u64)ioc->sas_hba.sas_address,
5624 		 ioc->sas_hba.num_phys);
5625 
5626 	if (ioc->sas_hba.enclosure_handle) {
5627 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5628 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5629 		   ioc->sas_hba.enclosure_handle)))
5630 			ioc->sas_hba.enclosure_logical_id =
5631 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5632 	}
5633 
5634  out:
5635 	kfree(sas_iounit_pg1);
5636 	kfree(sas_iounit_pg0);
5637 }
5638 
5639 /**
5640  * _scsih_expander_add -  creating expander object
5641  * @ioc: per adapter object
5642  * @handle: expander handle
5643  *
5644  * Creating expander object, stored in ioc->sas_expander_list.
5645  *
5646  * Return: 0 for success, else error.
5647  */
5648 static int
5649 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5650 {
5651 	struct _sas_node *sas_expander;
5652 	struct _enclosure_node *enclosure_dev;
5653 	Mpi2ConfigReply_t mpi_reply;
5654 	Mpi2ExpanderPage0_t expander_pg0;
5655 	Mpi2ExpanderPage1_t expander_pg1;
5656 	u32 ioc_status;
5657 	u16 parent_handle;
5658 	u64 sas_address, sas_address_parent = 0;
5659 	int i;
5660 	unsigned long flags;
5661 	struct _sas_port *mpt3sas_port = NULL;
5662 
5663 	int rc = 0;
5664 
5665 	if (!handle)
5666 		return -1;
5667 
5668 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5669 		return -1;
5670 
5671 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5672 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5673 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5674 			__FILE__, __LINE__, __func__);
5675 		return -1;
5676 	}
5677 
5678 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5679 	    MPI2_IOCSTATUS_MASK;
5680 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5681 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5682 			__FILE__, __LINE__, __func__);
5683 		return -1;
5684 	}
5685 
5686 	/* handle out of order topology events */
5687 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5688 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5689 	    != 0) {
5690 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5691 			__FILE__, __LINE__, __func__);
5692 		return -1;
5693 	}
5694 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5695 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5696 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5697 		    sas_address_parent);
5698 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5699 		if (!sas_expander) {
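			/*
			 * The parent expander has not been added yet (out of
			 * order topology event), so add it first and build
			 * the topology top-down.
			 */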
5700 			rc = _scsih_expander_add(ioc, parent_handle);
5701 			if (rc != 0)
5702 				return rc;
5703 		}
5704 	}
5705 
5706 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5707 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
5708 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5709 	    sas_address);
5710 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5711 
5712 	if (sas_expander)
5713 		return 0;
5714 
5715 	sas_expander = kzalloc(sizeof(struct _sas_node),
5716 	    GFP_KERNEL);
5717 	if (!sas_expander) {
5718 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5719 			__FILE__, __LINE__, __func__);
5720 		return -1;
5721 	}
5722 
5723 	sas_expander->handle = handle;
5724 	sas_expander->num_phys = expander_pg0.NumPhys;
5725 	sas_expander->sas_address_parent = sas_address_parent;
5726 	sas_expander->sas_address = sas_address;
5727 
5728 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5729 		 handle, parent_handle,
5730 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
5731 
5732 	if (!sas_expander->num_phys)
5733 		goto out_fail;
5734 	sas_expander->phy = kcalloc(sas_expander->num_phys,
5735 	    sizeof(struct _sas_phy), GFP_KERNEL);
5736 	if (!sas_expander->phy) {
5737 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5738 			__FILE__, __LINE__, __func__);
5739 		rc = -1;
5740 		goto out_fail;
5741 	}
5742 
5743 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
5744 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5745 	    sas_address_parent);
5746 	if (!mpt3sas_port) {
5747 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5748 			__FILE__, __LINE__, __func__);
5749 		rc = -1;
5750 		goto out_fail;
5751 	}
5752 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5753 
5754 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
5755 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5756 		    &expander_pg1, i, handle))) {
5757 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5758 				__FILE__, __LINE__, __func__);
5759 			rc = -1;
5760 			goto out_fail;
5761 		}
5762 		sas_expander->phy[i].handle = handle;
5763 		sas_expander->phy[i].phy_id = i;
5764 
5765 		if ((mpt3sas_transport_add_expander_phy(ioc,
5766 		    &sas_expander->phy[i], expander_pg1,
5767 		    sas_expander->parent_dev))) {
5768 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5769 				__FILE__, __LINE__, __func__);
5770 			rc = -1;
5771 			goto out_fail;
5772 		}
5773 	}
5774 
5775 	if (sas_expander->enclosure_handle) {
5776 		enclosure_dev =
5777 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
5778 						sas_expander->enclosure_handle);
5779 		if (enclosure_dev)
5780 			sas_expander->enclosure_logical_id =
5781 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5782 	}
5783 
5784 	_scsih_expander_node_add(ioc, sas_expander);
5785 	return 0;
5786 
5787  out_fail:
5788 
5789 	if (mpt3sas_port)
5790 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5791 		    sas_address_parent);
5792 	kfree(sas_expander);
5793 	return rc;
5794 }
5795 
5796 /**
5797  * mpt3sas_expander_remove - removing expander object
5798  * @ioc: per adapter object
5799  * @sas_address: expander sas_address
5800  */
5801 void
5802 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5803 {
5804 	struct _sas_node *sas_expander;
5805 	unsigned long flags;
5806 
5807 	if (ioc->shost_recovery)
5808 		return;
5809 
5810 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5811 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5812 	    sas_address);
5813 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5814 	if (sas_expander)
5815 		_scsih_expander_node_remove(ioc, sas_expander);
5816 }
5817 
5818 /**
5819  * _scsih_done -  internal SCSI_IO callback handler.
5820  * @ioc: per adapter object
5821  * @smid: system request message index
5822  * @msix_index: MSIX table index supplied by the OS
5823  * @reply: reply message frame(lower 32bit addr)
5824  *
5825  * Callback handler when sending internally generated SCSI_IO.
5826  * The callback index used is ioc->scsih_cb_idx.
5827  *
5828  * Return: 1 meaning mf should be freed from _base_interrupt
5829  *         0 means the mf is freed from this function.
5830  */
5831 static u8
5832 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5833 {
5834 	MPI2DefaultReply_t *mpi_reply;
5835 
5836 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
5837 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5838 		return 1;
5839 	if (ioc->scsih_cmds.smid != smid)
5840 		return 1;
5841 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5842 	if (mpi_reply) {
5843 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
5844 		    mpi_reply->MsgLength*4);
5845 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5846 	}
5847 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5848 	complete(&ioc->scsih_cmds.done);
5849 	return 1;
5850 }
5851 
5852 
5853 
5854 
5855 #define MPT3_MAX_LUNS (255)
5856 
5857 
5858 /**
5859  * _scsih_check_access_status - check access flags
5860  * @ioc: per adapter object
5861  * @sas_address: sas address
5862  * @handle: sas device handle
5863  * @access_status: errors returned during discovery of the device
5864  *
5865  * Return: 0 for success, else failure
5866  */
5867 static u8
5868 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5869 	u16 handle, u8 access_status)
5870 {
5871 	u8 rc = 1;
5872 	char *desc = NULL;
5873 
5874 	switch (access_status) {
5875 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5876 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5877 		rc = 0;
5878 		break;
5879 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5880 		desc = "sata capability failed";
5881 		break;
5882 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5883 		desc = "sata affiliation conflict";
5884 		break;
5885 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5886 		desc = "route not addressable";
5887 		break;
5888 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5889 		desc = "smp error not addressable";
5890 		break;
5891 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5892 		desc = "device blocked";
5893 		break;
5894 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5895 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5896 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5897 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5898 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5899 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5900 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5901 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5902 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5903 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5904 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5905 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5906 		desc = "sata initialization failed";
5907 		break;
5908 	default:
5909 		desc = "unknown";
5910 		break;
5911 	}
5912 
5913 	if (!rc)
5914 		return 0;
5915 
5916 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5917 		desc, (u64)sas_address, handle);
5918 	return rc;
5919 }
5920 
5921 /**
5922  * _scsih_check_device - checking device responsiveness
5923  * @ioc: per adapter object
5924  * @parent_sas_address: sas address of parent expander or sas host
5925  * @handle: attached device handle
5926  * @phy_number: phy number
5927  * @link_rate: new link rate
5928  */
5929 static void
5930 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5931 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5932 {
5933 	Mpi2ConfigReply_t mpi_reply;
5934 	Mpi2SasDevicePage0_t sas_device_pg0;
5935 	struct _sas_device *sas_device;
5936 	struct _enclosure_node *enclosure_dev = NULL;
5937 	u32 ioc_status;
5938 	unsigned long flags;
5939 	u64 sas_address;
5940 	struct scsi_target *starget;
5941 	struct MPT3SAS_TARGET *sas_target_priv_data;
5942 	u32 device_info;
5943 
5944 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5945 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5946 		return;
5947 
5948 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5949 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5950 		return;
5951 
5952 	/* wide port handling: the device only needs to be handled once, for
5953 	 * the phy that matches the one reported in SAS device page 0
5954 	 */
5955 	if (phy_number != sas_device_pg0.PhyNum)
5956 		return;
5957 
5958 	/* check if this is end device */
5959 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5960 	if (!(_scsih_is_end_device(device_info)))
5961 		return;
5962 
5963 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5964 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5965 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5966 	    sas_address);
5967 
5968 	if (!sas_device)
5969 		goto out_unlock;
5970 
5971 	if (unlikely(sas_device->handle != handle)) {
5972 		starget = sas_device->starget;
5973 		sas_target_priv_data = starget->hostdata;
5974 		starget_printk(KERN_INFO, starget,
5975 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
5976 			sas_device->handle, handle);
5977 		sas_target_priv_data->handle = handle;
5978 		sas_device->handle = handle;
5979 		if (le16_to_cpu(sas_device_pg0.Flags) &
5980 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5981 			sas_device->enclosure_level =
5982 				sas_device_pg0.EnclosureLevel;
5983 			memcpy(sas_device->connector_name,
5984 				sas_device_pg0.ConnectorName, 4);
5985 			sas_device->connector_name[4] = '\0';
5986 		} else {
5987 			sas_device->enclosure_level = 0;
5988 			sas_device->connector_name[0] = '\0';
5989 		}
5990 
5991 		sas_device->enclosure_handle =
5992 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
5993 		sas_device->is_chassis_slot_valid = 0;
5994 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5995 						sas_device->enclosure_handle);
5996 		if (enclosure_dev) {
5997 			sas_device->enclosure_logical_id =
5998 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5999 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6000 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6001 				sas_device->is_chassis_slot_valid = 1;
6002 				sas_device->chassis_slot =
6003 					enclosure_dev->pg0.ChassisSlot;
6004 			}
6005 		}
6006 	}
6007 
6008 	/* check if device is present */
6009 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6010 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6011 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
6012 			handle);
6013 		goto out_unlock;
6014 	}
6015 
6016 	/* check if there were any issues with discovery */
6017 	if (_scsih_check_access_status(ioc, sas_address, handle,
6018 	    sas_device_pg0.AccessStatus))
6019 		goto out_unlock;
6020 
6021 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6022 	_scsih_ublock_io_device(ioc, sas_address);
6023 
6024 	if (sas_device)
6025 		sas_device_put(sas_device);
6026 	return;
6027 
6028 out_unlock:
6029 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6030 	if (sas_device)
6031 		sas_device_put(sas_device);
6032 }
6033 
6034 /**
6035  * _scsih_add_device -  creating sas device object
6036  * @ioc: per adapter object
6037  * @handle: sas device handle
6038  * @phy_num: phy number end device attached to
6039  * @is_pd: is this hidden raid component
6040  *
6041  * Creating end device object, stored in ioc->sas_device_list.
6042  *
6043  * Return: 0 for success, non-zero for failure.
6044  */
6045 static int
6046 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6047 	u8 is_pd)
6048 {
6049 	Mpi2ConfigReply_t mpi_reply;
6050 	Mpi2SasDevicePage0_t sas_device_pg0;
6051 	struct _sas_device *sas_device;
6052 	struct _enclosure_node *enclosure_dev = NULL;
6053 	u32 ioc_status;
6054 	u64 sas_address;
6055 	u32 device_info;
6056 
6057 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6058 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6059 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6060 			__FILE__, __LINE__, __func__);
6061 		return -1;
6062 	}
6063 
6064 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6065 	    MPI2_IOCSTATUS_MASK;
6066 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6067 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6068 			__FILE__, __LINE__, __func__);
6069 		return -1;
6070 	}
6071 
6072 	/* check if this is end device */
6073 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6074 	if (!(_scsih_is_end_device(device_info)))
6075 		return -1;
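	/*
	 * Track the handle in pend_os_device_add; the topology change
	 * handler uses this bit to fall through from a link rate change
	 * event to a target add when the device has not been exposed yet.
	 */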
6076 	set_bit(handle, ioc->pend_os_device_add);
6077 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6078 
6079 	/* check if device is present */
6080 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6081 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6082 		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6083 			handle);
6084 		return -1;
6085 	}
6086 
6087 	/* check if there were any issues with discovery */
6088 	if (_scsih_check_access_status(ioc, sas_address, handle,
6089 	    sas_device_pg0.AccessStatus))
6090 		return -1;
6091 
6092 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6093 					sas_address);
6094 	if (sas_device) {
6095 		clear_bit(handle, ioc->pend_os_device_add);
6096 		sas_device_put(sas_device);
6097 		return -1;
6098 	}
6099 
6100 	if (sas_device_pg0.EnclosureHandle) {
6101 		enclosure_dev =
6102 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6103 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6104 		if (enclosure_dev == NULL)
6105 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6106 				 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6107 	}
6108 
6109 	sas_device = kzalloc(sizeof(struct _sas_device),
6110 	    GFP_KERNEL);
6111 	if (!sas_device) {
6112 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6113 			__FILE__, __LINE__, __func__);
6114 		return 0;
6115 	}
6116 
6117 	kref_init(&sas_device->refcount);
6118 	sas_device->handle = handle;
6119 	if (_scsih_get_sas_address(ioc,
6120 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6121 	    &sas_device->sas_address_parent) != 0)
6122 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6123 			__FILE__, __LINE__, __func__);
6124 	sas_device->enclosure_handle =
6125 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6126 	if (sas_device->enclosure_handle != 0)
6127 		sas_device->slot =
6128 		    le16_to_cpu(sas_device_pg0.Slot);
6129 	sas_device->device_info = device_info;
6130 	sas_device->sas_address = sas_address;
6131 	sas_device->phy = sas_device_pg0.PhyNum;
6132 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6133 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6134 
6135 	if (le16_to_cpu(sas_device_pg0.Flags)
6136 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6137 		sas_device->enclosure_level =
6138 			sas_device_pg0.EnclosureLevel;
6139 		memcpy(sas_device->connector_name,
6140 			sas_device_pg0.ConnectorName, 4);
6141 		sas_device->connector_name[4] = '\0';
6142 	} else {
6143 		sas_device->enclosure_level = 0;
6144 		sas_device->connector_name[0] = '\0';
6145 	}
6146 	/* get enclosure_logical_id & chassis_slot*/
6147 	sas_device->is_chassis_slot_valid = 0;
6148 	if (enclosure_dev) {
6149 		sas_device->enclosure_logical_id =
6150 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6151 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6152 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6153 			sas_device->is_chassis_slot_valid = 1;
6154 			sas_device->chassis_slot =
6155 					enclosure_dev->pg0.ChassisSlot;
6156 		}
6157 	}
6158 
6159 	/* get device name */
6160 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6161 
6162 	if (ioc->wait_for_discovery_to_complete)
6163 		_scsih_sas_device_init_add(ioc, sas_device);
6164 	else
6165 		_scsih_sas_device_add(ioc, sas_device);
6166 
6167 	sas_device_put(sas_device);
6168 	return 0;
6169 }
6170 
6171 /**
6172  * _scsih_remove_device -  removing sas device object
6173  * @ioc: per adapter object
6174  * @sas_device: the sas_device object
6175  */
6176 static void
6177 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6178 	struct _sas_device *sas_device)
6179 {
6180 	struct MPT3SAS_TARGET *sas_target_priv_data;
6181 
6182 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6183 	     (sas_device->pfa_led_on)) {
6184 		_scsih_turn_off_pfa_led(ioc, sas_device);
6185 		sas_device->pfa_led_on = 0;
6186 	}
6187 
6188 	dewtprintk(ioc,
6189 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6190 			    __func__,
6191 			    sas_device->handle, (u64)sas_device->sas_address));
6192 
6193 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6194 	    NULL, NULL));
6195 
6196 	if (sas_device->starget && sas_device->starget->hostdata) {
6197 		sas_target_priv_data = sas_device->starget->hostdata;
6198 		sas_target_priv_data->deleted = 1;
6199 		_scsih_ublock_io_device(ioc, sas_device->sas_address);
6200 		sas_target_priv_data->handle =
6201 		     MPT3SAS_INVALID_DEVICE_HANDLE;
6202 	}
6203 
6204 	if (!ioc->hide_drives)
6205 		mpt3sas_transport_port_remove(ioc,
6206 		    sas_device->sas_address,
6207 		    sas_device->sas_address_parent);
6208 
6209 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6210 		 sas_device->handle, (u64)sas_device->sas_address);
6211 
6212 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6213 
6214 	dewtprintk(ioc,
6215 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6216 			    __func__,
6217 			    sas_device->handle, (u64)sas_device->sas_address));
6218 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6219 	    NULL, NULL));
6220 }
6221 
6222 /**
6223  * _scsih_sas_topology_change_event_debug - debug for topology event
6224  * @ioc: per adapter object
6225  * @event_data: event data payload
6226  * Context: user.
6227  */
6228 static void
6229 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6230 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6231 {
6232 	int i;
6233 	u16 handle;
6234 	u16 reason_code;
6235 	u8 phy_number;
6236 	char *status_str = NULL;
6237 	u8 link_rate, prev_link_rate;
6238 
6239 	switch (event_data->ExpStatus) {
6240 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6241 		status_str = "add";
6242 		break;
6243 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6244 		status_str = "remove";
6245 		break;
6246 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6247 	case 0:
6248 		status_str =  "responding";
6249 		break;
6250 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6251 		status_str = "remove delay";
6252 		break;
6253 	default:
6254 		status_str = "unknown status";
6255 		break;
6256 	}
6257 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6258 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6259 	    "start_phy(%02d), count(%d)\n",
6260 	    le16_to_cpu(event_data->ExpanderDevHandle),
6261 	    le16_to_cpu(event_data->EnclosureHandle),
6262 	    event_data->StartPhyNum, event_data->NumEntries);
6263 	for (i = 0; i < event_data->NumEntries; i++) {
6264 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6265 		if (!handle)
6266 			continue;
6267 		phy_number = event_data->StartPhyNum + i;
6268 		reason_code = event_data->PHY[i].PhyStatus &
6269 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6270 		switch (reason_code) {
6271 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6272 			status_str = "target add";
6273 			break;
6274 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6275 			status_str = "target remove";
6276 			break;
6277 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6278 			status_str = "delay target remove";
6279 			break;
6280 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6281 			status_str = "link rate change";
6282 			break;
6283 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6284 			status_str = "target responding";
6285 			break;
6286 		default:
6287 			status_str = "unknown";
6288 			break;
6289 		}
6290 		link_rate = event_data->PHY[i].LinkRate >> 4;
6291 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6292 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6293 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6294 		    handle, status_str, link_rate, prev_link_rate);
6295 
6296 	}
6297 }
6298 
6299 /**
6300  * _scsih_sas_topology_change_event - handle topology changes
6301  * @ioc: per adapter object
6302  * @fw_event: The fw_event_work object
6303  * Context: user.
6304  *
6305  */
6306 static int
6307 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6308 	struct fw_event_work *fw_event)
6309 {
6310 	int i;
6311 	u16 parent_handle, handle;
6312 	u16 reason_code;
6313 	u8 phy_number, max_phys;
6314 	struct _sas_node *sas_expander;
6315 	u64 sas_address;
6316 	unsigned long flags;
6317 	u8 link_rate, prev_link_rate;
6318 	Mpi2EventDataSasTopologyChangeList_t *event_data =
6319 		(Mpi2EventDataSasTopologyChangeList_t *)
6320 		fw_event->event_data;
6321 
6322 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6323 		_scsih_sas_topology_change_event_debug(ioc, event_data);
6324 
6325 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6326 		return 0;
6327 
6328 	if (!ioc->sas_hba.num_phys)
6329 		_scsih_sas_host_add(ioc);
6330 	else
6331 		_scsih_sas_host_refresh(ioc);
6332 
6333 	if (fw_event->ignore) {
6334 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6335 		return 0;
6336 	}
6337 
6338 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6339 
6340 	/* handle expander add */
6341 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6342 		if (_scsih_expander_add(ioc, parent_handle) != 0)
6343 			return 0;
6344 
6345 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6346 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6347 	    parent_handle);
6348 	if (sas_expander) {
6349 		sas_address = sas_expander->sas_address;
6350 		max_phys = sas_expander->num_phys;
6351 	} else if (parent_handle < ioc->sas_hba.num_phys) {
6352 		sas_address = ioc->sas_hba.sas_address;
6353 		max_phys = ioc->sas_hba.num_phys;
6354 	} else {
6355 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6356 		return 0;
6357 	}
6358 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6359 
6360 	/* handle siblings events */
6361 	for (i = 0; i < event_data->NumEntries; i++) {
6362 		if (fw_event->ignore) {
6363 			dewtprintk(ioc,
6364 				   ioc_info(ioc, "ignoring expander event\n"));
6365 			return 0;
6366 		}
6367 		if (ioc->remove_host || ioc->pci_error_recovery)
6368 			return 0;
6369 		phy_number = event_data->StartPhyNum + i;
6370 		if (phy_number >= max_phys)
6371 			continue;
6372 		reason_code = event_data->PHY[i].PhyStatus &
6373 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
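		/*
		 * Skip vacant phys unless the attached device is being
		 * reported as not responding and must be removed.
		 */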
6374 		if ((event_data->PHY[i].PhyStatus &
6375 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6376 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6377 				continue;
6378 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6379 		if (!handle)
6380 			continue;
6381 		link_rate = event_data->PHY[i].LinkRate >> 4;
6382 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6383 		switch (reason_code) {
6384 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6385 
6386 			if (ioc->shost_recovery)
6387 				break;
6388 
6389 			if (link_rate == prev_link_rate)
6390 				break;
6391 
6392 			mpt3sas_transport_update_links(ioc, sas_address,
6393 			    handle, phy_number, link_rate);
6394 
6395 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6396 				break;
6397 
6398 			_scsih_check_device(ioc, sas_address, handle,
6399 			    phy_number, link_rate);
6400 
6401 			if (!test_bit(handle, ioc->pend_os_device_add))
6402 				break;
6403 
6404 			/* fall through */
6405 
6406 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6407 
6408 			if (ioc->shost_recovery)
6409 				break;
6410 
6411 			mpt3sas_transport_update_links(ioc, sas_address,
6412 			    handle, phy_number, link_rate);
6413 
6414 			_scsih_add_device(ioc, handle, phy_number, 0);
6415 
6416 			break;
6417 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6418 
6419 			_scsih_device_remove_by_handle(ioc, handle);
6420 			break;
6421 		}
6422 	}
6423 
6424 	/* handle expander removal */
6425 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6426 	    sas_expander)
6427 		mpt3sas_expander_remove(ioc, sas_address);
6428 
6429 	return 0;
6430 }
6431 
6432 /**
6433  * _scsih_sas_device_status_change_event_debug - debug for device event
6434  * @ioc: per adapter object
6435  * @event_data: event data payload
6436  * Context: user.
6437  */
6438 static void
6439 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6440 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6441 {
6442 	char *reason_str = NULL;
6443 
6444 	switch (event_data->ReasonCode) {
6445 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6446 		reason_str = "smart data";
6447 		break;
6448 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6449 		reason_str = "unsupported device discovered";
6450 		break;
6451 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6452 		reason_str = "internal device reset";
6453 		break;
6454 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6455 		reason_str = "internal task abort";
6456 		break;
6457 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6458 		reason_str = "internal task abort set";
6459 		break;
6460 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6461 		reason_str = "internal clear task set";
6462 		break;
6463 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6464 		reason_str = "internal query task";
6465 		break;
6466 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6467 		reason_str = "sata init failure";
6468 		break;
6469 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6470 		reason_str = "internal device reset complete";
6471 		break;
6472 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6473 		reason_str = "internal task abort complete";
6474 		break;
6475 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6476 		reason_str = "internal async notification";
6477 		break;
6478 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6479 		reason_str = "expander reduced functionality";
6480 		break;
6481 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6482 		reason_str = "expander reduced functionality complete";
6483 		break;
6484 	default:
6485 		reason_str = "unknown reason";
6486 		break;
6487 	}
6488 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6489 		 reason_str, le16_to_cpu(event_data->DevHandle),
6490 		 (u64)le64_to_cpu(event_data->SASAddress),
6491 		 le16_to_cpu(event_data->TaskTag));
6492 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6493 		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
6494 			event_data->ASC, event_data->ASCQ);
6495 	pr_cont("\n");
6496 }
6497 
6498 /**
6499  * _scsih_sas_device_status_change_event - handle device status change
6500  * @ioc: per adapter object
6501  * @event_data: event data payload
6502  * Context: user.
6503  */
6504 static void
6505 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6506 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6507 {
6508 	struct MPT3SAS_TARGET *target_priv_data;
6509 	struct _sas_device *sas_device;
6510 	u64 sas_address;
6511 	unsigned long flags;
6512 
6513 	/* In MPI Revision K (0xC), the internal device reset complete was
6514 	 * implemented, so avoid setting tm_busy flag for older firmware.
6515 	 */
6516 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6517 		return;
6518 
6519 	if (event_data->ReasonCode !=
6520 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6521 	   event_data->ReasonCode !=
6522 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6523 		return;
6524 
6525 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6526 	sas_address = le64_to_cpu(event_data->SASAddress);
6527 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6528 	    sas_address);
6529 
6530 	if (!sas_device || !sas_device->starget)
6531 		goto out;
6532 
6533 	target_priv_data = sas_device->starget->hostdata;
6534 	if (!target_priv_data)
6535 		goto out;
6536 
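	/*
	 * Set tm_busy while the firmware performs an internal device reset
	 * and clear it again when the reset complete event arrives.
	 */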
6537 	if (event_data->ReasonCode ==
6538 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6539 		target_priv_data->tm_busy = 1;
6540 	else
6541 		target_priv_data->tm_busy = 0;
6542 
6543 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6544 		ioc_info(ioc,
6545 		    "%s tm_busy flag for handle(0x%04x)\n",
6546 		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6547 		    target_priv_data->handle);
6548 
6549 out:
6550 	if (sas_device)
6551 		sas_device_put(sas_device);
6552 
6553 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6554 }
6555 
6556 
6557 /**
6558  * _scsih_check_pcie_access_status - check access flags
6559  * @ioc: per adapter object
6560  * @wwid: wwid
6561  * @handle: sas device handle
6562  * @access_status: errors returned during discovery of the device
6563  *
6564  * Return: 0 for success, else failure
6565  */
6566 static u8
6567 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6568 	u16 handle, u8 access_status)
6569 {
6570 	u8 rc = 1;
6571 	char *desc = NULL;
6572 
6573 	switch (access_status) {
6574 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6575 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6576 		rc = 0;
6577 		break;
6578 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6579 		desc = "PCIe device capability failed";
6580 		break;
6581 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6582 		desc = "PCIe device blocked";
6583 		ioc_info(ioc,
6584 		    "Device with Access Status (%s): wwid(0x%016llx), "
6585 		    "handle(0x%04x)\n will only be added to the internal list",
6586 		    desc, (u64)wwid, handle);
6587 		rc = 0;
6588 		break;
6589 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6590 		desc = "PCIe device mem space access failed";
6591 		break;
6592 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6593 		desc = "PCIe device unsupported";
6594 		break;
6595 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6596 		desc = "PCIe device MSIx Required";
6597 		break;
6598 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6599 		desc = "PCIe device init fail max";
6600 		break;
6601 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6602 		desc = "PCIe device status unknown";
6603 		break;
6604 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6605 		desc = "nvme ready timeout";
6606 		break;
6607 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6608 		desc = "nvme device configuration unsupported";
6609 		break;
6610 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6611 		desc = "nvme identify failed";
6612 		break;
6613 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6614 		desc = "nvme qconfig failed";
6615 		break;
6616 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6617 		desc = "nvme qcreation failed";
6618 		break;
6619 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6620 		desc = "nvme eventcfg failed";
6621 		break;
6622 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6623 		desc = "nvme get feature stat failed";
6624 		break;
6625 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6626 		desc = "nvme idle timeout";
6627 		break;
6628 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6629 		desc = "nvme failure status";
6630 		break;
6631 	default:
6632 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6633 			access_status, (u64)wwid, handle);
6634 		return rc;
6635 	}
6636 
6637 	if (!rc)
6638 		return rc;
6639 
6640 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6641 		 desc, (u64)wwid, handle);
6642 	return rc;
6643 }
6644 
6645 /**
6646  * _scsih_pcie_device_remove_from_sml -  removing pcie device
6647  * from SML and freeing up associated memory
6648  * @ioc: per adapter object
6649  * @pcie_device: the pcie_device object
6650  */
6651 static void
6652 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6653 	struct _pcie_device *pcie_device)
6654 {
6655 	struct MPT3SAS_TARGET *sas_target_priv_data;
6656 
6657 	dewtprintk(ioc,
6658 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6659 			    __func__,
6660 			    pcie_device->handle, (u64)pcie_device->wwid));
6661 	if (pcie_device->enclosure_handle != 0)
6662 		dewtprintk(ioc,
6663 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6664 				    __func__,
6665 				    (u64)pcie_device->enclosure_logical_id,
6666 				    pcie_device->slot));
6667 	if (pcie_device->connector_name[0] != '\0')
6668 		dewtprintk(ioc,
6669 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6670 				    __func__,
6671 				    pcie_device->enclosure_level,
6672 				    pcie_device->connector_name));
6673 
6674 	if (pcie_device->starget && pcie_device->starget->hostdata) {
6675 		sas_target_priv_data = pcie_device->starget->hostdata;
6676 		sas_target_priv_data->deleted = 1;
6677 		_scsih_ublock_io_device(ioc, pcie_device->wwid);
6678 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6679 	}
6680 
6681 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6682 		 pcie_device->handle, (u64)pcie_device->wwid);
6683 	if (pcie_device->enclosure_handle != 0)
6684 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6685 			 (u64)pcie_device->enclosure_logical_id,
6686 			 pcie_device->slot);
6687 	if (pcie_device->connector_name[0] != '\0')
6688 		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name(%s)\n",
6689 			 pcie_device->enclosure_level,
6690 			 pcie_device->connector_name);
6691 
6692 	if (pcie_device->starget && (pcie_device->access_status !=
6693 				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6694 		scsi_remove_target(&pcie_device->starget->dev);
6695 	dewtprintk(ioc,
6696 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6697 			    __func__,
6698 			    pcie_device->handle, (u64)pcie_device->wwid));
6699 	if (pcie_device->enclosure_handle != 0)
6700 		dewtprintk(ioc,
6701 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6702 				    __func__,
6703 				    (u64)pcie_device->enclosure_logical_id,
6704 				    pcie_device->slot));
6705 	if (pcie_device->connector_name[0] != '\0')
6706 		dewtprintk(ioc,
6707 			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
6708 				    __func__,
6709 				    pcie_device->enclosure_level,
6710 				    pcie_device->connector_name));
6711 
6712 	kfree(pcie_device->serial_number);
6713 }
6714 
6715 
6716 /**
6717  * _scsih_pcie_check_device - checking device responsiveness
6718  * @ioc: per adapter object
6719  * @handle: attached device handle
6720  */
6721 static void
6722 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6723 {
6724 	Mpi2ConfigReply_t mpi_reply;
6725 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6726 	u32 ioc_status;
6727 	struct _pcie_device *pcie_device;
6728 	u64 wwid;
6729 	unsigned long flags;
6730 	struct scsi_target *starget;
6731 	struct MPT3SAS_TARGET *sas_target_priv_data;
6732 	u32 device_info;
6733 
6734 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6735 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6736 		return;
6737 
6738 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6739 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6740 		return;
6741 
6742 	/* check if this is end device */
6743 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6744 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
6745 		return;
6746 
6747 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6748 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6749 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6750 
6751 	if (!pcie_device) {
6752 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6753 		return;
6754 	}
6755 
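	/*
	 * Firmware may hand out a new device handle for the same WWID (for
	 * example across re-enumeration); if so, refresh the cached handle
	 * and enclosure information before any further checks.
	 */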
6756 	if (unlikely(pcie_device->handle != handle)) {
6757 		starget = pcie_device->starget;
6758 		sas_target_priv_data = starget->hostdata;
6759 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
6760 		starget_printk(KERN_INFO, starget,
6761 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
6762 		    pcie_device->handle, handle);
6763 		sas_target_priv_data->handle = handle;
6764 		pcie_device->handle = handle;
6765 
6766 		if (le32_to_cpu(pcie_device_pg0.Flags) &
6767 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6768 			pcie_device->enclosure_level =
6769 			    pcie_device_pg0.EnclosureLevel;
6770 			memcpy(&pcie_device->connector_name[0],
6771 			    &pcie_device_pg0.ConnectorName[0], 4);
6772 		} else {
6773 			pcie_device->enclosure_level = 0;
6774 			pcie_device->connector_name[0] = '\0';
6775 		}
6776 	}
6777 
6778 	/* check if device is present */
6779 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6780 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6781 		ioc_info(ioc, "device is not present handle(0x%04x), flags(0x%08x)!!!\n",
6782 			 handle, le32_to_cpu(pcie_device_pg0.Flags));
6783 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6784 		pcie_device_put(pcie_device);
6785 		return;
6786 	}
6787 
6788 	/* check if there were any issues with discovery */
6789 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6790 	    pcie_device_pg0.AccessStatus)) {
6791 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6792 		pcie_device_put(pcie_device);
6793 		return;
6794 	}
6795 
6796 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6797 	pcie_device_put(pcie_device);
6798 
6799 	_scsih_ublock_io_device(ioc, wwid);
6800 
6801 	return;
6802 }
6803 
6804 /**
6805  * _scsih_pcie_add_device -  creating pcie device object
6806  * @ioc: per adapter object
6807  * @handle: pcie device handle
6808  *
6809  * Creating end device object, stored in ioc->pcie_device_list.
6810  *
6811  * Return: 1 means queue the event later, 0 means complete the event
6812  */
6813 static int
6814 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6815 {
6816 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6817 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
6818 	Mpi2ConfigReply_t mpi_reply;
6819 	struct _pcie_device *pcie_device;
6820 	struct _enclosure_node *enclosure_dev;
6821 	u32 ioc_status;
6822 	u64 wwid;
6823 
6824 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6825 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6826 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6827 			__FILE__, __LINE__, __func__);
6828 		return 0;
6829 	}
6830 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6831 	    MPI2_IOCSTATUS_MASK;
6832 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6833 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6834 			__FILE__, __LINE__, __func__);
6835 		return 0;
6836 	}
6837 
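	/*
	 * Note this handle as pending OS device addition; the bit is cleared
	 * below if the device is already known, or once the device has been
	 * reported to the SCSI mid-layer.
	 */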
6838 	set_bit(handle, ioc->pend_os_device_add);
6839 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6840 
6841 	/* check if device is present */
6842 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6843 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6844 		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6845 			handle);
6846 		return 0;
6847 	}
6848 
6849 	/* check if there were any issues with discovery */
6850 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6851 	    pcie_device_pg0.AccessStatus))
6852 		return 0;
6853 
6854 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
6855 	    (pcie_device_pg0.DeviceInfo))))
6856 		return 0;
6857 
6858 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6859 	if (pcie_device) {
6860 		clear_bit(handle, ioc->pend_os_device_add);
6861 		pcie_device_put(pcie_device);
6862 		return 0;
6863 	}
6864 
6865 	/* PCIe Device Page 2 contains read-only information about a
6866 	 * specific NVMe device; therefore, this page is only
6867 	 * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
6868 	 */
6869 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
6870 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6871 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6872 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
6873 		    handle)) {
6874 			ioc_err(ioc,
6875 			    "failure at %s:%d/%s()!\n", __FILE__,
6876 			    __LINE__, __func__);
6877 			return 0;
6878 		}
6879 
6880 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6881 					MPI2_IOCSTATUS_MASK;
6882 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6883 			ioc_err(ioc,
6884 			    "failure at %s:%d/%s()!\n", __FILE__,
6885 			    __LINE__, __func__);
6886 			return 0;
6887 		}
6888 	}
6889 
6890 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6891 	if (!pcie_device) {
6892 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6893 			__FILE__, __LINE__, __func__);
6894 		return 0;
6895 	}
6896 
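	/*
	 * The new pcie_device starts with a single reference held here; the
	 * add routines below take their own references, and the local one is
	 * dropped via pcie_device_put() before returning.
	 */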
6897 	kref_init(&pcie_device->refcount);
6898 	pcie_device->id = ioc->pcie_target_id++;
6899 	pcie_device->channel = PCIE_CHANNEL;
6900 	pcie_device->handle = handle;
6901 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
6902 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6903 	pcie_device->wwid = wwid;
6904 	pcie_device->port_num = pcie_device_pg0.PortNum;
6905 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6906 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6907 
6908 	pcie_device->enclosure_handle =
6909 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6910 	if (pcie_device->enclosure_handle != 0)
6911 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6912 
6913 	if (le32_to_cpu(pcie_device_pg0.Flags) &
6914 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6915 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6916 		memcpy(&pcie_device->connector_name[0],
6917 		    &pcie_device_pg0.ConnectorName[0], 4);
6918 	} else {
6919 		pcie_device->enclosure_level = 0;
6920 		pcie_device->connector_name[0] = '\0';
6921 	}
6922 
6923 	/* get enclosure_logical_id */
6924 	if (pcie_device->enclosure_handle) {
6925 		enclosure_dev =
6926 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6927 						pcie_device->enclosure_handle);
6928 		if (enclosure_dev)
6929 			pcie_device->enclosure_logical_id =
6930 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6931 	}
6932 	/* TODO -- Add device name once FW supports it */
6933 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
6934 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6935 		pcie_device->nvme_mdts =
6936 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6937 		if (pcie_device_pg2.ControllerResetTO)
6938 			pcie_device->reset_timeout =
6939 			    pcie_device_pg2.ControllerResetTO;
6940 		else
6941 			pcie_device->reset_timeout = 30;
6942 	} else
6943 		pcie_device->reset_timeout = 30;
6944 
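	/*
	 * While initial discovery is still in progress the device is parked
	 * on the init list and reported to the SCSI mid-layer later;
	 * otherwise it is added and exposed immediately.
	 */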
6945 	if (ioc->wait_for_discovery_to_complete)
6946 		_scsih_pcie_device_init_add(ioc, pcie_device);
6947 	else
6948 		_scsih_pcie_device_add(ioc, pcie_device);
6949 
6950 	pcie_device_put(pcie_device);
6951 	return 0;
6952 }
6953 
6954 /**
6955  * _scsih_pcie_topology_change_event_debug - debug for topology
6956  * event
6957  * @ioc: per adapter object
6958  * @event_data: event data payload
6959  * Context: user.
6960  */
6961 static void
6962 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6963 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6964 {
6965 	int i;
6966 	u16 handle;
6967 	u16 reason_code;
6968 	u8 port_number;
6969 	char *status_str = NULL;
6970 	u8 link_rate, prev_link_rate;
6971 
6972 	switch (event_data->SwitchStatus) {
6973 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6974 		status_str = "add";
6975 		break;
6976 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6977 		status_str = "remove";
6978 		break;
6979 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6980 	case 0:
6981 		status_str =  "responding";
6982 		break;
6983 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6984 		status_str = "remove delay";
6985 		break;
6986 	default:
6987 		status_str = "unknown status";
6988 		break;
6989 	}
6990 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
6991 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
6992 		"start_port(%02d), count(%d)\n",
6993 		le16_to_cpu(event_data->SwitchDevHandle),
6994 		le16_to_cpu(event_data->EnclosureHandle),
6995 		event_data->StartPortNum, event_data->NumEntries);
6996 	for (i = 0; i < event_data->NumEntries; i++) {
6997 		handle =
6998 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
6999 		if (!handle)
7000 			continue;
7001 		port_number = event_data->StartPortNum + i;
7002 		reason_code = event_data->PortEntry[i].PortStatus;
7003 		switch (reason_code) {
7004 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7005 			status_str = "target add";
7006 			break;
7007 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7008 			status_str = "target remove";
7009 			break;
7010 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7011 			status_str = "delay target remove";
7012 			break;
7013 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7014 			status_str = "link rate change";
7015 			break;
7016 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7017 			status_str = "target responding";
7018 			break;
7019 		default:
7020 			status_str = "unknown";
7021 			break;
7022 		}
7023 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
7024 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7025 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7026 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7027 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7028 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
7029 			handle, status_str, link_rate, prev_link_rate);
7030 	}
7031 }
7032 
7033 /**
7034  * _scsih_pcie_topology_change_event - handle PCIe topology
7035  *  changes
7036  * @ioc: per adapter object
7037  * @fw_event: The fw_event_work object
7038  * Context: user.
7039  *
7040  */
7041 static void
7042 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7043 	struct fw_event_work *fw_event)
7044 {
7045 	int i;
7046 	u16 handle;
7047 	u16 reason_code;
7048 	u8 link_rate, prev_link_rate;
7049 	unsigned long flags;
7050 	int rc;
7051 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7052 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7053 	struct _pcie_device *pcie_device;
7054 
7055 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7056 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
7057 
7058 	if (ioc->shost_recovery || ioc->remove_host ||
7059 		ioc->pci_error_recovery)
7060 		return;
7061 
7062 	if (fw_event->ignore) {
7063 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7064 		return;
7065 	}
7066 
7067 	/* handle siblings events */
7068 	for (i = 0; i < event_data->NumEntries; i++) {
7069 		if (fw_event->ignore) {
7070 			dewtprintk(ioc,
7071 				   ioc_info(ioc, "ignoring switch event\n"));
7072 			return;
7073 		}
7074 		if (ioc->remove_host || ioc->pci_error_recovery)
7075 			return;
7076 		reason_code = event_data->PortEntry[i].PortStatus;
7077 		handle =
7078 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7079 		if (!handle)
7080 			continue;
7081 
7082 		link_rate = event_data->PortEntry[i].CurrentPortInfo
7083 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7084 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7085 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7086 
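		/*
		 * CurrentPortInfo/PreviousPortInfo carry the negotiated link
		 * rate in their low bits; a rate below 2.5 GT/s means the
		 * link is down, so device add / rate change handling is
		 * skipped for such entries.
		 */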
7087 		switch (reason_code) {
7088 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7089 			if (ioc->shost_recovery)
7090 				break;
7091 			if (link_rate == prev_link_rate)
7092 				break;
7093 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7094 				break;
7095 
7096 			_scsih_pcie_check_device(ioc, handle);
7097 
7098 			/* The code after this point handles the case where a
7099 			 * device has been added but keeps returning BUSY for
7100 			 * some time.  Then, before the Device Missing Delay
7101 			 * expires and the device becomes READY, the device is
7102 			 * removed and added back.
7103 			 */
7104 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7105 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7106 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7107 
7108 			if (pcie_device) {
7109 				pcie_device_put(pcie_device);
7110 				break;
7111 			}
7112 
7113 			if (!test_bit(handle, ioc->pend_os_device_add))
7114 				break;
7115 
7116 			dewtprintk(ioc,
7117 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7118 					    handle));
7119 			event_data->PortEntry[i].PortStatus &= 0xF0;
7120 			event_data->PortEntry[i].PortStatus |=
7121 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7122 			/* fall through */
7123 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7124 			if (ioc->shost_recovery)
7125 				break;
7126 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7127 				break;
7128 
7129 			rc = _scsih_pcie_add_device(ioc, handle);
7130 			if (!rc) {
7131 				/* mark entry vacant */
7132 			/* TODO: This needs to be reviewed and fixed;
7133 			 * there is no port status value that marks
7134 			 * an event entry as vacant.
7135 			 */
7136 				event_data->PortEntry[i].PortStatus |=
7137 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7138 			}
7139 			break;
7140 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7141 			_scsih_pcie_device_remove_by_handle(ioc, handle);
7142 			break;
7143 		}
7144 	}
7145 }
7146 
7147 /**
7148  * _scsih_pcie_device_status_change_event_debug - debug for device event
7149  * @ioc: per adapter object
7150  * @event_data: event data payload
7151  * Context: user.
7152  */
7153 static void
7154 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7155 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7156 {
7157 	char *reason_str = NULL;
7158 
7159 	switch (event_data->ReasonCode) {
7160 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7161 		reason_str = "smart data";
7162 		break;
7163 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7164 		reason_str = "unsupported device discovered";
7165 		break;
7166 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7167 		reason_str = "internal device reset";
7168 		break;
7169 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7170 		reason_str = "internal task abort";
7171 		break;
7172 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7173 		reason_str = "internal task abort set";
7174 		break;
7175 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7176 		reason_str = "internal clear task set";
7177 		break;
7178 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7179 		reason_str = "internal query task";
7180 		break;
7181 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7182 		reason_str = "device init failure";
7183 		break;
7184 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7185 		reason_str = "internal device reset complete";
7186 		break;
7187 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7188 		reason_str = "internal task abort complete";
7189 		break;
7190 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7191 		reason_str = "internal async notification";
7192 		break;
7193 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7194 		reason_str = "pcie hot reset failed";
7195 		break;
7196 	default:
7197 		reason_str = "unknown reason";
7198 		break;
7199 	}
7200 
7201 	ioc_info(ioc, "PCIE device status change: (%s)\n"
7202 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7203 		 reason_str, le16_to_cpu(event_data->DevHandle),
7204 		 (u64)le64_to_cpu(event_data->WWID),
7205 		 le16_to_cpu(event_data->TaskTag));
7206 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7207 		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
7208 			event_data->ASC, event_data->ASCQ);
7209 	pr_cont("\n");
7210 }
7211 
7212 /**
7213  * _scsih_pcie_device_status_change_event - handle device status
7214  * change
7215  * @ioc: per adapter object
7216  * @fw_event: The fw_event_work object
7217  * Context: user.
7218  */
7219 static void
7220 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7221 	struct fw_event_work *fw_event)
7222 {
7223 	struct MPT3SAS_TARGET *target_priv_data;
7224 	struct _pcie_device *pcie_device;
7225 	u64 wwid;
7226 	unsigned long flags;
7227 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7228 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7229 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7230 		_scsih_pcie_device_status_change_event_debug(ioc,
7231 			event_data);
7232 
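	/*
	 * Only internal device reset start/complete are handled here; they
	 * toggle tm_busy so that no new task management is issued while the
	 * firmware is resetting the device.
	 */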
7233 	if (event_data->ReasonCode !=
7234 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7235 		event_data->ReasonCode !=
7236 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7237 		return;
7238 
7239 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7240 	wwid = le64_to_cpu(event_data->WWID);
7241 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7242 
7243 	if (!pcie_device || !pcie_device->starget)
7244 		goto out;
7245 
7246 	target_priv_data = pcie_device->starget->hostdata;
7247 	if (!target_priv_data)
7248 		goto out;
7249 
7250 	if (event_data->ReasonCode ==
7251 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7252 		target_priv_data->tm_busy = 1;
7253 	else
7254 		target_priv_data->tm_busy = 0;
7255 out:
7256 	if (pcie_device)
7257 		pcie_device_put(pcie_device);
7258 
7259 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7260 }
7261 
7262 /**
7263  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7264  * event
7265  * @ioc: per adapter object
7266  * @event_data: event data payload
7267  * Context: user.
7268  */
7269 static void
7270 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7271 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7272 {
7273 	char *reason_str = NULL;
7274 
7275 	switch (event_data->ReasonCode) {
7276 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7277 		reason_str = "enclosure add";
7278 		break;
7279 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7280 		reason_str = "enclosure remove";
7281 		break;
7282 	default:
7283 		reason_str = "unknown reason";
7284 		break;
7285 	}
7286 
7287 	ioc_info(ioc, "enclosure status change: (%s)\n"
7288 		 "\thandle(0x%04x), enclosure logical id(0x%016llx), number of slots(%d)\n",
7289 		 reason_str,
7290 		 le16_to_cpu(event_data->EnclosureHandle),
7291 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7292 		 le16_to_cpu(event_data->StartSlot));
7293 }
7294 
7295 /**
7296  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7297  * @ioc: per adapter object
7298  * @fw_event: The fw_event_work object
7299  * Context: user.
7300  */
7301 static void
7302 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7303 	struct fw_event_work *fw_event)
7304 {
7305 	Mpi2ConfigReply_t mpi_reply;
7306 	struct _enclosure_node *enclosure_dev = NULL;
7307 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7308 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7309 	int rc;
7310 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7311 
7312 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7313 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7314 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
7315 		     fw_event->event_data);
7316 	if (ioc->shost_recovery)
7317 		return;
7318 
7319 	if (enclosure_handle)
7320 		enclosure_dev =
7321 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7322 						enclosure_handle);
7323 	switch (event_data->ReasonCode) {
7324 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7325 		if (!enclosure_dev) {
7326 			enclosure_dev =
7327 				kzalloc(sizeof(struct _enclosure_node),
7328 					GFP_KERNEL);
7329 			if (!enclosure_dev) {
7330 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
7331 					 __FILE__, __LINE__, __func__);
7332 				return;
7333 			}
7334 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7335 				&enclosure_dev->pg0,
7336 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7337 				enclosure_handle);
7338 
7339 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7340 						MPI2_IOCSTATUS_MASK)) {
7341 				kfree(enclosure_dev);
7342 				return;
7343 			}
7344 
7345 			list_add_tail(&enclosure_dev->list,
7346 							&ioc->enclosure_list);
7347 		}
7348 		break;
7349 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7350 		if (enclosure_dev) {
7351 			list_del(&enclosure_dev->list);
7352 			kfree(enclosure_dev);
7353 		}
7354 		break;
7355 	default:
7356 		break;
7357 	}
7358 }
7359 
7360 /**
7361  * _scsih_sas_broadcast_primitive_event - handle broadcast events
7362  * @ioc: per adapter object
7363  * @fw_event: The fw_event_work object
7364  * Context: user.
7365  */
7366 static void
7367 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7368 	struct fw_event_work *fw_event)
7369 {
7370 	struct scsi_cmnd *scmd;
7371 	struct scsi_device *sdev;
7372 	struct scsiio_tracker *st;
7373 	u16 smid, handle;
7374 	u32 lun;
7375 	struct MPT3SAS_DEVICE *sas_device_priv_data;
7376 	u32 termination_count;
7377 	u32 query_count;
7378 	Mpi2SCSITaskManagementReply_t *mpi_reply;
7379 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7380 		(Mpi2EventDataSasBroadcastPrimitive_t *)
7381 		fw_event->event_data;
7382 	u16 ioc_status;
7383 	unsigned long flags;
7384 	int r;
7385 	u8 max_retries = 0;
7386 	u8 task_abort_retries;
7387 
7388 	mutex_lock(&ioc->tm_cmds.mutex);
7389 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7390 		 __func__, event_data->PhyNum, event_data->PortWidth);
7391 
7392 	_scsih_block_io_all_device(ioc);
7393 
7394 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7395 	mpi_reply = ioc->tm_cmds.reply;
7396  broadcast_aen_retry:
7397 
7398 	/* sanity checks for retrying this loop */
7399 	if (max_retries++ == 5) {
7400 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7401 		goto out;
7402 	} else if (max_retries > 1)
7403 		dewtprintk(ioc,
7404 			   ioc_info(ioc, "%s: %d retry\n",
7405 				    __func__, max_retries - 1));
7406 
7407 	termination_count = 0;
7408 	query_count = 0;
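	/*
	 * Walk every outstanding SCSI IO: for each SAS end device, issue a
	 * QUERY_TASK to see whether the IOC still owns the IO, and abort the
	 * IOs it no longer owns so host and target state stay consistent
	 * after the broadcast primitive.
	 */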
7409 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7410 		if (ioc->shost_recovery)
7411 			goto out;
7412 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7413 		if (!scmd)
7414 			continue;
7415 		st = scsi_cmd_priv(scmd);
7416 		sdev = scmd->device;
7417 		sas_device_priv_data = sdev->hostdata;
7418 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7419 			continue;
7420 		 /* skip hidden raid components */
7421 		if (sas_device_priv_data->sas_target->flags &
7422 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
7423 			continue;
7424 		 /* skip volumes */
7425 		if (sas_device_priv_data->sas_target->flags &
7426 		    MPT_TARGET_FLAGS_VOLUME)
7427 			continue;
7428 		 /* skip PCIe devices */
7429 		if (sas_device_priv_data->sas_target->flags &
7430 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
7431 			continue;
7432 
7433 		handle = sas_device_priv_data->sas_target->handle;
7434 		lun = sas_device_priv_data->lun;
7435 		query_count++;
7436 
7437 		if (ioc->shost_recovery)
7438 			goto out;
7439 
7440 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7441 		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7442 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7443 			st->msix_io, 30, 0);
7444 		if (r == FAILED) {
7445 			sdev_printk(KERN_WARNING, sdev,
7446 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
7447 			    "QUERY_TASK: scmd(%p)\n", scmd);
7448 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7449 			goto broadcast_aen_retry;
7450 		}
7451 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7452 		    & MPI2_IOCSTATUS_MASK;
7453 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7454 			sdev_printk(KERN_WARNING, sdev,
7455 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7456 				ioc_status, scmd);
7457 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7458 			goto broadcast_aen_retry;
7459 		}
7460 
7461 		/* see if IO is still owned by IOC and target */
7462 		if (mpi_reply->ResponseCode ==
7463 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7464 		     mpi_reply->ResponseCode ==
7465 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7466 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7467 			continue;
7468 		}
7469 		task_abort_retries = 0;
7470  tm_retry:
7471 		if (task_abort_retries++ == 60) {
7472 			dewtprintk(ioc,
7473 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7474 					    __func__));
7475 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7476 			goto broadcast_aen_retry;
7477 		}
7478 
7479 		if (ioc->shost_recovery)
7480 			goto out_no_lock;
7481 
7482 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7483 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7484 			st->msix_io, 30, 0);
7485 		if (r == FAILED || st->cb_idx != 0xFF) {
7486 			sdev_printk(KERN_WARNING, sdev,
7487 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7488 			    "scmd(%p)\n", scmd);
7489 			goto tm_retry;
7490 		}
7491 
7492 		if (task_abort_retries > 1)
7493 			sdev_printk(KERN_WARNING, sdev,
7494 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7495 			    " scmd(%p)\n",
7496 			    task_abort_retries - 1, scmd);
7497 
7498 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7499 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7500 	}
7501 
7502 	if (ioc->broadcast_aen_pending) {
7503 		dewtprintk(ioc,
7504 			   ioc_info(ioc,
7505 				    "%s: loop back due to pending AEN\n",
7506 				    __func__));
7507 		 ioc->broadcast_aen_pending = 0;
7508 		 goto broadcast_aen_retry;
7509 	}
7510 
7511  out:
7512 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7513  out_no_lock:
7514 
7515 	dewtprintk(ioc,
7516 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7517 			    __func__, query_count, termination_count));
7518 
7519 	ioc->broadcast_aen_busy = 0;
7520 	if (!ioc->shost_recovery)
7521 		_scsih_ublock_io_all_device(ioc);
7522 	mutex_unlock(&ioc->tm_cmds.mutex);
7523 }
7524 
7525 /**
7526  * _scsih_sas_discovery_event - handle discovery events
7527  * @ioc: per adapter object
7528  * @fw_event: The fw_event_work object
7529  * Context: user.
7530  */
7531 static void
7532 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7533 	struct fw_event_work *fw_event)
7534 {
7535 	Mpi2EventDataSasDiscovery_t *event_data =
7536 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7537 
7538 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7539 		ioc_info(ioc, "discovery event: (%s)",
7540 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7541 			 "start" : "stop");
7542 		if (event_data->DiscoveryStatus)
7543 			pr_cont(" discovery_status(0x%08x)",
7544 				le32_to_cpu(event_data->DiscoveryStatus));
7545 		pr_cont("\n");
7546 	}
7547 
7548 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7549 	    !ioc->sas_hba.num_phys) {
7550 		if (disable_discovery > 0 && ioc->shost_recovery) {
7551 			/* Wait for the reset to complete */
7552 			while (ioc->shost_recovery)
7553 				ssleep(1);
7554 		}
7555 		_scsih_sas_host_add(ioc);
7556 	}
7557 }
7558 
7559 /**
7560  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7561  *						events
7562  * @ioc: per adapter object
7563  * @fw_event: The fw_event_work object
7564  * Context: user.
7565  */
7566 static void
7567 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7568 	struct fw_event_work *fw_event)
7569 {
7570 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7571 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7572 
7573 	switch (event_data->ReasonCode) {
7574 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7575 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7576 			 le16_to_cpu(event_data->DevHandle),
7577 			 (u64)le64_to_cpu(event_data->SASAddress),
7578 			 event_data->PhysicalPort);
7579 		break;
7580 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7581 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7582 			 le16_to_cpu(event_data->DevHandle),
7583 			 (u64)le64_to_cpu(event_data->SASAddress),
7584 			 event_data->PhysicalPort);
7585 		break;
7586 	default:
7587 		break;
7588 	}
7589 }
7590 
7591 /**
7592  * _scsih_pcie_enumeration_event - handle enumeration events
7593  * @ioc: per adapter object
7594  * @fw_event: The fw_event_work object
7595  * Context: user.
7596  */
7597 static void
7598 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7599 	struct fw_event_work *fw_event)
7600 {
7601 	Mpi26EventDataPCIeEnumeration_t *event_data =
7602 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7603 
7604 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7605 		return;
7606 
7607 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7608 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7609 		 "started" : "completed",
7610 		 event_data->Flags);
7611 	if (event_data->EnumerationStatus)
7612 		pr_cont(" enumeration_status(0x%08x)",
7613 			le32_to_cpu(event_data->EnumerationStatus));
7614 	pr_cont("\n");
7615 }
7616 
7617 /**
7618  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7619  * @ioc: per adapter object
7620  * @handle: device handle for physical disk
7621  * @phys_disk_num: physical disk number
7622  *
7623  * Return: 0 for success, else failure.
7624  */
7625 static int
7626 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7627 {
7628 	Mpi2RaidActionRequest_t *mpi_request;
7629 	Mpi2RaidActionReply_t *mpi_reply;
7630 	u16 smid;
7631 	u8 issue_reset = 0;
7632 	int rc = 0;
7633 	u16 ioc_status;
7634 	u32 log_info;
7635 
7636 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7637 		return rc;
7638 
7639 	mutex_lock(&ioc->scsih_cmds.mutex);
7640 
7641 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7642 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7643 		rc = -EAGAIN;
7644 		goto out;
7645 	}
7646 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7647 
7648 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7649 	if (!smid) {
7650 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7651 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7652 		rc = -EAGAIN;
7653 		goto out;
7654 	}
7655 
7656 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7657 	ioc->scsih_cmds.smid = smid;
7658 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7659 
7660 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7661 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7662 	mpi_request->PhysDiskNum = phys_disk_num;
7663 
7664 	dewtprintk(ioc,
7665 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7666 			    handle, phys_disk_num));
7667 
7668 	init_completion(&ioc->scsih_cmds.done);
7669 	ioc->put_smid_default(ioc, smid);
7670 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7671 
7672 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7673 		issue_reset =
7674 			mpt3sas_base_check_cmd_timeout(ioc,
7675 				ioc->scsih_cmds.status, mpi_request,
7676 				sizeof(Mpi2RaidActionRequest_t)/4);
7677 		rc = -EFAULT;
7678 		goto out;
7679 	}
7680 
7681 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7682 
7683 		mpi_reply = ioc->scsih_cmds.reply;
7684 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7685 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7686 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
7687 		else
7688 			log_info = 0;
7689 		ioc_status &= MPI2_IOCSTATUS_MASK;
7690 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7691 			dewtprintk(ioc,
7692 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7693 					    ioc_status, log_info));
7694 			rc = -EFAULT;
7695 		} else
7696 			dewtprintk(ioc,
7697 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7698 	}
7699 
7700  out:
7701 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7702 	mutex_unlock(&ioc->scsih_cmds.mutex);
7703 
7704 	if (issue_reset)
7705 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7706 	return rc;
7707 }
7708 
7709 /**
7710  * _scsih_reprobe_lun - reprobing lun
7711  * @sdev: scsi device struct
7712  * @no_uld_attach: sdev->no_uld_attach flag setting
7713  *
7714  */
7715 static void
7716 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7717 {
7718 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7719 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7720 	    sdev->no_uld_attach ? "hiding" : "exposing");
7721 	WARN_ON(scsi_device_reprobe(sdev));
7722 }
7723 
7724 /**
7725  * _scsih_sas_volume_add - add new volume
7726  * @ioc: per adapter object
7727  * @element: IR config element data
7728  * Context: user.
7729  */
7730 static void
7731 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7732 	Mpi2EventIrConfigElement_t *element)
7733 {
7734 	struct _raid_device *raid_device;
7735 	unsigned long flags;
7736 	u64 wwid;
7737 	u16 handle = le16_to_cpu(element->VolDevHandle);
7738 	int rc;
7739 
7740 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7741 	if (!wwid) {
7742 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7743 			__FILE__, __LINE__, __func__);
7744 		return;
7745 	}
7746 
7747 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7748 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7749 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7750 
7751 	if (raid_device)
7752 		return;
7753 
7754 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7755 	if (!raid_device) {
7756 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7757 			__FILE__, __LINE__, __func__);
7758 		return;
7759 	}
7760 
7761 	raid_device->id = ioc->sas_id++;
7762 	raid_device->channel = RAID_CHANNEL;
7763 	raid_device->handle = handle;
7764 	raid_device->wwid = wwid;
7765 	_scsih_raid_device_add(ioc, raid_device);
7766 	if (!ioc->wait_for_discovery_to_complete) {
7767 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7768 		    raid_device->id, 0);
7769 		if (rc)
7770 			_scsih_raid_device_remove(ioc, raid_device);
7771 	} else {
7772 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
7773 		_scsih_determine_boot_device(ioc, raid_device, 1);
7774 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7775 	}
7776 }
7777 
7778 /**
7779  * _scsih_sas_volume_delete - delete volume
7780  * @ioc: per adapter object
7781  * @handle: volume device handle
7782  * Context: user.
7783  */
7784 static void
7785 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7786 {
7787 	struct _raid_device *raid_device;
7788 	unsigned long flags;
7789 	struct MPT3SAS_TARGET *sas_target_priv_data;
7790 	struct scsi_target *starget = NULL;
7791 
7792 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7793 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7794 	if (raid_device) {
7795 		if (raid_device->starget) {
7796 			starget = raid_device->starget;
7797 			sas_target_priv_data = starget->hostdata;
7798 			sas_target_priv_data->deleted = 1;
7799 		}
7800 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7801 			 raid_device->handle, (u64)raid_device->wwid);
7802 		list_del(&raid_device->list);
7803 		kfree(raid_device);
7804 	}
7805 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7806 	if (starget)
7807 		scsi_remove_target(&starget->dev);
7808 }
7809 
7810 /**
7811  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7812  * @ioc: per adapter object
7813  * @element: IR config element data
7814  * Context: user.
7815  */
7816 static void
7817 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7818 	Mpi2EventIrConfigElement_t *element)
7819 {
7820 	struct _sas_device *sas_device;
7821 	struct scsi_target *starget = NULL;
7822 	struct MPT3SAS_TARGET *sas_target_priv_data;
7823 	unsigned long flags;
7824 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7825 
7826 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7827 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7828 	if (sas_device) {
7829 		sas_device->volume_handle = 0;
7830 		sas_device->volume_wwid = 0;
7831 		clear_bit(handle, ioc->pd_handles);
7832 		if (sas_device->starget && sas_device->starget->hostdata) {
7833 			starget = sas_device->starget;
7834 			sas_target_priv_data = starget->hostdata;
7835 			sas_target_priv_data->flags &=
7836 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7837 		}
7838 	}
7839 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7840 	if (!sas_device)
7841 		return;
7842 
7843 	/* exposing raid component */
7844 	if (starget)
7845 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7846 
7847 	sas_device_put(sas_device);
7848 }
7849 
7850 /**
7851  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7852  * @ioc: per adapter object
7853  * @element: IR config element data
7854  * Context: user.
7855  */
7856 static void
7857 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7858 	Mpi2EventIrConfigElement_t *element)
7859 {
7860 	struct _sas_device *sas_device;
7861 	struct scsi_target *starget = NULL;
7862 	struct MPT3SAS_TARGET *sas_target_priv_data;
7863 	unsigned long flags;
7864 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7865 	u16 volume_handle = 0;
7866 	u64 volume_wwid = 0;
7867 
7868 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7869 	if (volume_handle)
7870 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7871 		    &volume_wwid);
7872 
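	/* Cache the owning volume's handle and WWID on the hidden component. */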
7873 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7874 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7875 	if (sas_device) {
7876 		set_bit(handle, ioc->pd_handles);
7877 		if (sas_device->starget && sas_device->starget->hostdata) {
7878 			starget = sas_device->starget;
7879 			sas_target_priv_data = starget->hostdata;
7880 			sas_target_priv_data->flags |=
7881 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
7882 			sas_device->volume_handle = volume_handle;
7883 			sas_device->volume_wwid = volume_wwid;
7884 		}
7885 	}
7886 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7887 	if (!sas_device)
7888 		return;
7889 
7890 	/* hiding raid component */
7891 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7892 
7893 	if (starget)
7894 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7895 
7896 	sas_device_put(sas_device);
7897 }
7898 
7899 /**
7900  * _scsih_sas_pd_delete - delete pd component
7901  * @ioc: per adapter object
7902  * @element: IR config element data
7903  * Context: user.
7904  */
7905 static void
7906 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7907 	Mpi2EventIrConfigElement_t *element)
7908 {
7909 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7910 
7911 	_scsih_device_remove_by_handle(ioc, handle);
7912 }
7913 
7914 /**
7915  * _scsih_sas_pd_add - add pd component
7916  * @ioc: per adapter object
7917  * @element: IR config element data
7918  * Context: user.
7919  */
7920 static void
7921 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7922 	Mpi2EventIrConfigElement_t *element)
7923 {
7924 	struct _sas_device *sas_device;
7925 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7926 	Mpi2ConfigReply_t mpi_reply;
7927 	Mpi2SasDevicePage0_t sas_device_pg0;
7928 	u32 ioc_status;
7929 	u64 sas_address;
7930 	u16 parent_handle;
7931 
7932 	set_bit(handle, ioc->pd_handles);
7933 
7934 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7935 	if (sas_device) {
7936 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7937 		sas_device_put(sas_device);
7938 		return;
7939 	}
7940 
7941 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7942 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7943 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7944 			__FILE__, __LINE__, __func__);
7945 		return;
7946 	}
7947 
7948 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7949 	    MPI2_IOCSTATUS_MASK;
7950 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7951 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7952 			__FILE__, __LINE__, __func__);
7953 		return;
7954 	}
7955 
7956 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7957 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7958 		mpt3sas_transport_update_links(ioc, sas_address, handle,
7959 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7960 
7961 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7962 	_scsih_add_device(ioc, handle, 0, 1);
7963 }
7964 
7965 /**
7966  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7967  * @ioc: per adapter object
7968  * @event_data: event data payload
7969  * Context: user.
7970  */
7971 static void
7972 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7973 	Mpi2EventDataIrConfigChangeList_t *event_data)
7974 {
7975 	Mpi2EventIrConfigElement_t *element;
7976 	u8 element_type;
7977 	int i;
7978 	char *reason_str = NULL, *element_str = NULL;
7979 
7980 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7981 
7982 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
7983 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
7984 		 "foreign" : "native",
7985 		 event_data->NumElements);
7986 	for (i = 0; i < event_data->NumElements; i++, element++) {
7987 		switch (element->ReasonCode) {
7988 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7989 			reason_str = "add";
7990 			break;
7991 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7992 			reason_str = "remove";
7993 			break;
7994 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7995 			reason_str = "no change";
7996 			break;
7997 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7998 			reason_str = "hide";
7999 			break;
8000 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8001 			reason_str = "unhide";
8002 			break;
8003 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8004 			reason_str = "volume_created";
8005 			break;
8006 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8007 			reason_str = "volume_deleted";
8008 			break;
8009 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8010 			reason_str = "pd_created";
8011 			break;
8012 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8013 			reason_str = "pd_deleted";
8014 			break;
8015 		default:
8016 			reason_str = "unknown reason";
8017 			break;
8018 		}
8019 		element_type = le16_to_cpu(element->ElementFlags) &
8020 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8021 		switch (element_type) {
8022 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8023 			element_str = "volume";
8024 			break;
8025 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8026 			element_str = "phys disk";
8027 			break;
8028 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8029 			element_str = "hot spare";
8030 			break;
8031 		default:
8032 			element_str = "unknown element";
8033 			break;
8034 		}
8035 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
8036 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8037 		    reason_str, le16_to_cpu(element->VolDevHandle),
8038 		    le16_to_cpu(element->PhysDiskDevHandle),
8039 		    element->PhysDiskNum);
8040 	}
8041 }
8042 
8043 /**
8044  * _scsih_sas_ir_config_change_event - handle ir configuration change events
8045  * @ioc: per adapter object
8046  * @fw_event: The fw_event_work object
8047  * Context: user.
8048  */
8049 static void
8050 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8051 	struct fw_event_work *fw_event)
8052 {
8053 	Mpi2EventIrConfigElement_t *element;
8054 	int i;
8055 	u8 foreign_config;
8056 	Mpi2EventDataIrConfigChangeList_t *event_data =
8057 		(Mpi2EventDataIrConfigChangeList_t *)
8058 		fw_event->event_data;
8059 
8060 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8061 	     (!ioc->hide_ir_msg))
8062 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
8063 
8064 	foreign_config = (le32_to_cpu(event_data->Flags) &
8065 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8066 
8067 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8068 	if (ioc->shost_recovery &&
8069 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8070 		for (i = 0; i < event_data->NumElements; i++, element++) {
8071 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8072 				_scsih_ir_fastpath(ioc,
8073 					le16_to_cpu(element->PhysDiskDevHandle),
8074 					element->PhysDiskNum);
8075 		}
8076 		return;
8077 	}
8078 
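	/*
	 * Volume create/delete elements are only acted on for native
	 * configurations; physical disk hide/unhide/add/delete elements are
	 * processed regardless of the foreign flag.
	 */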
8079 	for (i = 0; i < event_data->NumElements; i++, element++) {
8080 
8081 		switch (element->ReasonCode) {
8082 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8083 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8084 			if (!foreign_config)
8085 				_scsih_sas_volume_add(ioc, element);
8086 			break;
8087 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8088 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8089 			if (!foreign_config)
8090 				_scsih_sas_volume_delete(ioc,
8091 				    le16_to_cpu(element->VolDevHandle));
8092 			break;
8093 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8094 			if (!ioc->is_warpdrive)
8095 				_scsih_sas_pd_hide(ioc, element);
8096 			break;
8097 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8098 			if (!ioc->is_warpdrive)
8099 				_scsih_sas_pd_expose(ioc, element);
8100 			break;
8101 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8102 			if (!ioc->is_warpdrive)
8103 				_scsih_sas_pd_add(ioc, element);
8104 			break;
8105 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8106 			if (!ioc->is_warpdrive)
8107 				_scsih_sas_pd_delete(ioc, element);
8108 			break;
8109 		}
8110 	}
8111 }
8112 
8113 /**
8114  * _scsih_sas_ir_volume_event - IR volume event
8115  * @ioc: per adapter object
8116  * @fw_event: The fw_event_work object
8117  * Context: user.
8118  */
8119 static void
8120 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8121 	struct fw_event_work *fw_event)
8122 {
8123 	u64 wwid;
8124 	unsigned long flags;
8125 	struct _raid_device *raid_device;
8126 	u16 handle;
8127 	u32 state;
8128 	int rc;
8129 	Mpi2EventDataIrVolume_t *event_data =
8130 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8131 
8132 	if (ioc->shost_recovery)
8133 		return;
8134 
8135 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8136 		return;
8137 
8138 	handle = le16_to_cpu(event_data->VolDevHandle);
8139 	state = le32_to_cpu(event_data->NewValue);
8140 	if (!ioc->hide_ir_msg)
8141 		dewtprintk(ioc,
8142 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8143 				    __func__, handle,
8144 				    le32_to_cpu(event_data->PreviousValue),
8145 				    state));
8146 	switch (state) {
8147 	case MPI2_RAID_VOL_STATE_MISSING:
8148 	case MPI2_RAID_VOL_STATE_FAILED:
8149 		_scsih_sas_volume_delete(ioc, handle);
8150 		break;
8151 
8152 	case MPI2_RAID_VOL_STATE_ONLINE:
8153 	case MPI2_RAID_VOL_STATE_DEGRADED:
8154 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8155 
8156 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8157 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8158 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8159 
8160 		if (raid_device)
8161 			break;
8162 
8163 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8164 		if (!wwid) {
8165 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8166 				__FILE__, __LINE__, __func__);
8167 			break;
8168 		}
8169 
8170 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8171 		if (!raid_device) {
8172 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8173 				__FILE__, __LINE__, __func__);
8174 			break;
8175 		}
8176 
8177 		raid_device->id = ioc->sas_id++;
8178 		raid_device->channel = RAID_CHANNEL;
8179 		raid_device->handle = handle;
8180 		raid_device->wwid = wwid;
8181 		_scsih_raid_device_add(ioc, raid_device);
8182 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8183 		    raid_device->id, 0);
8184 		if (rc)
8185 			_scsih_raid_device_remove(ioc, raid_device);
8186 		break;
8187 
8188 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8189 	default:
8190 		break;
8191 	}
8192 }
8193 
8194 /**
8195  * _scsih_sas_ir_physical_disk_event - PD event
8196  * @ioc: per adapter object
8197  * @fw_event: The fw_event_work object
8198  * Context: user.
8199  */
8200 static void
8201 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8202 	struct fw_event_work *fw_event)
8203 {
8204 	u16 handle, parent_handle;
8205 	u32 state;
8206 	struct _sas_device *sas_device;
8207 	Mpi2ConfigReply_t mpi_reply;
8208 	Mpi2SasDevicePage0_t sas_device_pg0;
8209 	u32 ioc_status;
8210 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8211 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8212 	u64 sas_address;
8213 
8214 	if (ioc->shost_recovery)
8215 		return;
8216 
8217 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8218 		return;
8219 
8220 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8221 	state = le32_to_cpu(event_data->NewValue);
8222 
8223 	if (!ioc->hide_ir_msg)
8224 		dewtprintk(ioc,
8225 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8226 				    __func__, handle,
8227 				    le32_to_cpu(event_data->PreviousValue),
8228 				    state));
8229 
8230 	switch (state) {
8231 	case MPI2_RAID_PD_STATE_ONLINE:
8232 	case MPI2_RAID_PD_STATE_DEGRADED:
8233 	case MPI2_RAID_PD_STATE_REBUILDING:
8234 	case MPI2_RAID_PD_STATE_OPTIMAL:
8235 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8236 
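		/*
		 * Track the handle in the hidden RAID component bitmap
		 * (skipped for WarpDrive, which handles its members
		 * differently) before adding the device as a RAID component.
		 */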
8237 		if (!ioc->is_warpdrive)
8238 			set_bit(handle, ioc->pd_handles);
8239 
8240 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8241 		if (sas_device) {
8242 			sas_device_put(sas_device);
8243 			return;
8244 		}
8245 
8246 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8247 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8248 		    handle))) {
8249 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8250 				__FILE__, __LINE__, __func__);
8251 			return;
8252 		}
8253 
8254 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8255 		    MPI2_IOCSTATUS_MASK;
8256 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8257 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8258 				__FILE__, __LINE__, __func__);
8259 			return;
8260 		}
8261 
8262 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8263 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8264 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8265 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8266 
8267 		_scsih_add_device(ioc, handle, 0, 1);
8268 
8269 		break;
8270 
8271 	case MPI2_RAID_PD_STATE_OFFLINE:
8272 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8273 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8274 	default:
8275 		break;
8276 	}
8277 }
8278 
8279 /**
8280  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8281  * @ioc: per adapter object
8282  * @event_data: event data payload
8283  * Context: user.
8284  */
8285 static void
8286 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8287 	Mpi2EventDataIrOperationStatus_t *event_data)
8288 {
8289 	char *reason_str = NULL;
8290 
8291 	switch (event_data->RAIDOperation) {
8292 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8293 		reason_str = "resync";
8294 		break;
8295 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8296 		reason_str = "online capacity expansion";
8297 		break;
8298 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8299 		reason_str = "consistency check";
8300 		break;
8301 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8302 		reason_str = "background init";
8303 		break;
8304 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8305 		reason_str = "make data consistent";
8306 		break;
8307 	}
8308 
8309 	if (!reason_str)
8310 		return;
8311 
8312 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8313 		 reason_str,
8314 		 le16_to_cpu(event_data->VolDevHandle),
8315 		 event_data->PercentComplete);
8316 }
8317 
8318 /**
8319  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8320  * @ioc: per adapter object
8321  * @fw_event: The fw_event_work object
8322  * Context: user.
8323  */
8324 static void
8325 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8326 	struct fw_event_work *fw_event)
8327 {
8328 	Mpi2EventDataIrOperationStatus_t *event_data =
8329 		(Mpi2EventDataIrOperationStatus_t *)
8330 		fw_event->event_data;
8331 	struct _raid_device *raid_device;
8332 	unsigned long flags;
8333 	u16 handle;
8334 
8335 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8336 	    (!ioc->hide_ir_msg))
8337 		_scsih_sas_ir_operation_status_event_debug(ioc,
8338 		     event_data);
8339 
8340 	/* code added for raid transport support */
8341 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8342 
8343 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8344 		handle = le16_to_cpu(event_data->VolDevHandle);
8345 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8346 		if (raid_device)
8347 			raid_device->percent_complete =
8348 			    event_data->PercentComplete;
8349 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8350 	}
8351 }
8352 
8353 /**
8354  * _scsih_prep_device_scan - initialize parameters prior to device scan
8355  * @ioc: per adapter object
8356  *
8357  * Set the deleted flag prior to device scan.  If the device is found during
8358  * the scan, then we clear the deleted flag.
8359  */
8360 static void
8361 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8362 {
8363 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8364 	struct scsi_device *sdev;
8365 
8366 	shost_for_each_device(sdev, ioc->shost) {
8367 		sas_device_priv_data = sdev->hostdata;
8368 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8369 			sas_device_priv_data->sas_target->deleted = 1;
8370 	}
8371 }
8372 
8373 /**
8374  * _scsih_mark_responding_sas_device - mark a sas_device as responding
8375  * @ioc: per adapter object
8376  * @sas_device_pg0: SAS Device page 0
8377  *
8378  * After host reset, find out whether devices are still responding.
8379  * Used in _scsih_remove_unresponsive_sas_devices.
8380  */
8381 static void
8382 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8383 Mpi2SasDevicePage0_t *sas_device_pg0)
8384 {
8385 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8386 	struct scsi_target *starget;
8387 	struct _sas_device *sas_device = NULL;
8388 	struct _enclosure_node *enclosure_dev = NULL;
8389 	unsigned long flags;
8390 
8391 	if (sas_device_pg0->EnclosureHandle) {
8392 		enclosure_dev =
8393 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8394 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8395 		if (enclosure_dev == NULL)
8396 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8397 				 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8398 	}
8399 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8400 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8401 		if ((sas_device->sas_address == le64_to_cpu(
8402 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8403 		    le16_to_cpu(sas_device_pg0->Slot))) {
8404 			sas_device->responding = 1;
8405 			starget = sas_device->starget;
8406 			if (starget && starget->hostdata) {
8407 				sas_target_priv_data = starget->hostdata;
8408 				sas_target_priv_data->tm_busy = 0;
8409 				sas_target_priv_data->deleted = 0;
8410 			} else
8411 				sas_target_priv_data = NULL;
8412 			if (starget) {
8413 				starget_printk(KERN_INFO, starget,
8414 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8415 				    le16_to_cpu(sas_device_pg0->DevHandle),
8416 				    (unsigned long long)
8417 				    sas_device->sas_address);
8418 
8419 				if (sas_device->enclosure_handle != 0)
8420 					starget_printk(KERN_INFO, starget,
8421 					 "enclosure logical id(0x%016llx),"
8422 					 " slot(%d)\n",
8423 					 (unsigned long long)
8424 					 sas_device->enclosure_logical_id,
8425 					 sas_device->slot);
8426 			}
8427 			if (le16_to_cpu(sas_device_pg0->Flags) &
8428 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8429 				sas_device->enclosure_level =
8430 				   sas_device_pg0->EnclosureLevel;
8431 				memcpy(&sas_device->connector_name[0],
8432 					&sas_device_pg0->ConnectorName[0], 4);
8433 			} else {
8434 				sas_device->enclosure_level = 0;
8435 				sas_device->connector_name[0] = '\0';
8436 			}
8437 
8438 			sas_device->enclosure_handle =
8439 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8440 			sas_device->is_chassis_slot_valid = 0;
8441 			if (enclosure_dev) {
8442 				sas_device->enclosure_logical_id = le64_to_cpu(
8443 					enclosure_dev->pg0.EnclosureLogicalID);
8444 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8445 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8446 					sas_device->is_chassis_slot_valid = 1;
8447 					sas_device->chassis_slot =
8448 						enclosure_dev->pg0.ChassisSlot;
8449 				}
8450 			}
8451 
8452 			if (sas_device->handle == le16_to_cpu(
8453 			    sas_device_pg0->DevHandle))
8454 				goto out;
8455 			pr_info("\thandle changed from(0x%04x)!!!\n",
8456 			    sas_device->handle);
8457 			sas_device->handle = le16_to_cpu(
8458 			    sas_device_pg0->DevHandle);
8459 			if (sas_target_priv_data)
8460 				sas_target_priv_data->handle =
8461 				    le16_to_cpu(sas_device_pg0->DevHandle);
8462 			goto out;
8463 		}
8464 	}
8465  out:
8466 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8467 }
8468 
8469 /**
8470  * _scsih_create_enclosure_list_after_reset - free the existing enclosure
8471  *	list and rebuild it by scanning all Enclosure Page(0)s
8472  * @ioc: per adapter object
8473  */
8474 static void
8475 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8476 {
8477 	struct _enclosure_node *enclosure_dev;
8478 	Mpi2ConfigReply_t mpi_reply;
8479 	u16 enclosure_handle;
8480 	int rc;
8481 
8482 	/* Free existing enclosure list */
8483 	mpt3sas_free_enclosure_list(ioc);
8484 
8485 	/* Reconstruct the enclosure list after reset */
8486 	enclosure_handle = 0xFFFF;
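	/*
	 * A starting handle of 0xFFFF makes the GET_NEXT_HANDLE form return
	 * the first Enclosure Page 0; each reply then provides the handle
	 * used to fetch the next page until the firmware returns a
	 * non-success IOC status.
	 */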
8487 	do {
8488 		enclosure_dev =
8489 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8490 		if (!enclosure_dev) {
8491 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8492 				__FILE__, __LINE__, __func__);
8493 			return;
8494 		}
8495 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8496 				&enclosure_dev->pg0,
8497 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8498 				enclosure_handle);
8499 
8500 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8501 						MPI2_IOCSTATUS_MASK)) {
8502 			kfree(enclosure_dev);
8503 			return;
8504 		}
8505 		list_add_tail(&enclosure_dev->list,
8506 						&ioc->enclosure_list);
8507 		enclosure_handle =
8508 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8509 	} while (1);
8510 }
8511 
8512 /**
8513  * _scsih_search_responding_sas_devices - search for responding sas end devices
8514  * @ioc: per adapter object
8515  *
8516  * After host reset, find out whether devices are still responding.
8517  * If not, remove them.
8518  */
8519 static void
8520 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8521 {
8522 	Mpi2SasDevicePage0_t sas_device_pg0;
8523 	Mpi2ConfigReply_t mpi_reply;
8524 	u16 ioc_status;
8525 	u16 handle;
8526 	u32 device_info;
8527 
8528 	ioc_info(ioc, "search for end-devices: start\n");
8529 
8530 	if (list_empty(&ioc->sas_device_list))
8531 		goto out;
8532 
8533 	handle = 0xFFFF;
8534 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8535 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8536 	    handle))) {
8537 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8538 		    MPI2_IOCSTATUS_MASK;
8539 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8540 			break;
8541 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8542 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8543 		if (!(_scsih_is_end_device(device_info)))
8544 			continue;
8545 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8546 	}
8547 
8548  out:
8549 	ioc_info(ioc, "search for end-devices: complete\n");
8550 }
8551 
8552 /**
8553  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8554  * @ioc: per adapter object
8555  * @pcie_device_pg0: PCIe Device page 0
8556  *
8557  * After host reset, find out whether devices are still responding.
8558  * Used in _scsih_remove_unresponding_devices.
8559  */
8560 static void
8561 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8562 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8563 {
8564 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8565 	struct scsi_target *starget;
8566 	struct _pcie_device *pcie_device;
8567 	unsigned long flags;
8568 
8569 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8570 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8571 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8572 		    && (pcie_device->slot == le16_to_cpu(
8573 		    pcie_device_pg0->Slot))) {
8574 			pcie_device->access_status =
8575 					pcie_device_pg0->AccessStatus;
8576 			pcie_device->responding = 1;
8577 			starget = pcie_device->starget;
8578 			if (starget && starget->hostdata) {
8579 				sas_target_priv_data = starget->hostdata;
8580 				sas_target_priv_data->tm_busy = 0;
8581 				sas_target_priv_data->deleted = 0;
8582 			} else
8583 				sas_target_priv_data = NULL;
8584 			if (starget) {
8585 				starget_printk(KERN_INFO, starget,
8586 				    "handle(0x%04x), wwid(0x%016llx) ",
8587 				    pcie_device->handle,
8588 				    (unsigned long long)pcie_device->wwid);
8589 				if (pcie_device->enclosure_handle != 0)
8590 					starget_printk(KERN_INFO, starget,
8591 					    "enclosure logical id(0x%016llx), "
8592 					    "slot(%d)\n",
8593 					    (unsigned long long)
8594 					    pcie_device->enclosure_logical_id,
8595 					    pcie_device->slot);
8596 			}
8597 
8598 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8599 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8600 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8601 				pcie_device->enclosure_level =
8602 				    pcie_device_pg0->EnclosureLevel;
8603 				memcpy(&pcie_device->connector_name[0],
8604 				    &pcie_device_pg0->ConnectorName[0], 4);
8605 			} else {
8606 				pcie_device->enclosure_level = 0;
8607 				pcie_device->connector_name[0] = '\0';
8608 			}
8609 
8610 			if (pcie_device->handle == le16_to_cpu(
8611 			    pcie_device_pg0->DevHandle))
8612 				goto out;
8613 			pr_info("\thandle changed from(0x%04x)!!!\n",
8614 			    pcie_device->handle);
8615 			pcie_device->handle = le16_to_cpu(
8616 			    pcie_device_pg0->DevHandle);
8617 			if (sas_target_priv_data)
8618 				sas_target_priv_data->handle =
8619 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8620 			goto out;
8621 		}
8622 	}
8623 
8624  out:
8625 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8626 }
8627 
8628 /**
8629  * _scsih_search_responding_pcie_devices - search for responding pcie end devices
8630  * @ioc: per adapter object
8631  *
8632  * After host reset, find out whether devices are still responding.
8633  * If not, remove them.
8634  */
8635 static void
8636 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8637 {
8638 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8639 	Mpi2ConfigReply_t mpi_reply;
8640 	u16 ioc_status;
8641 	u16 handle;
8642 	u32 device_info;
8643 
8644 	ioc_info(ioc, "search for PCIe end-devices: start\n");
8645 
8646 	if (list_empty(&ioc->pcie_device_list))
8647 		goto out;
8648 
8649 	handle = 0xFFFF;
8650 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8651 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8652 		handle))) {
8653 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8654 		    MPI2_IOCSTATUS_MASK;
8655 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8656 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8657 				 __func__, ioc_status,
8658 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8659 			break;
8660 		}
8661 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8662 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8663 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8664 			continue;
8665 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8666 	}
8667 out:
8668 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8669 }
8670 
8671 /**
8672  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8673  * @ioc: per adapter object
8674  * @wwid: world wide identifier for raid volume
8675  * @handle: device handle
8676  *
8677  * After host reset, find out whether devices are still responding.
8678  * Used in _scsih_remove_unresponding_devices.
8679  */
8680 static void
8681 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8682 	u16 handle)
8683 {
8684 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8685 	struct scsi_target *starget;
8686 	struct _raid_device *raid_device;
8687 	unsigned long flags;
8688 
8689 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8690 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8691 		if (raid_device->wwid == wwid && raid_device->starget) {
8692 			starget = raid_device->starget;
8693 			if (starget && starget->hostdata) {
8694 				sas_target_priv_data = starget->hostdata;
8695 				sas_target_priv_data->deleted = 0;
8696 			} else
8697 				sas_target_priv_data = NULL;
8698 			raid_device->responding = 1;
8699 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8700 			starget_printk(KERN_INFO, raid_device->starget,
8701 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
8702 			    (unsigned long long)raid_device->wwid);
8703 
8704 			/*
8705 			 * WARPDRIVE: The handles of the PDs might have changed
8706 			 * across the host reset so re-initialize the
8707 			 * required data for Direct IO
8708 			 */
8709 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
8710 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
8711 			if (raid_device->handle == handle) {
8712 				spin_unlock_irqrestore(&ioc->raid_device_lock,
8713 				    flags);
8714 				return;
8715 			}
8716 			pr_info("\thandle changed from(0x%04x)!!!\n",
8717 			    raid_device->handle);
8718 			raid_device->handle = handle;
8719 			if (sas_target_priv_data)
8720 				sas_target_priv_data->handle = handle;
8721 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8722 			return;
8723 		}
8724 	}
8725 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8726 }
8727 
8728 /**
8729  * _scsih_search_responding_raid_devices - search for responding raid volumes
8730  * @ioc: per adapter object
8731  *
8732  * After host reset, find out whether devices are still responding.
8733  * If not, remove them.
8734  */
8735 static void
8736 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8737 {
8738 	Mpi2RaidVolPage1_t volume_pg1;
8739 	Mpi2RaidVolPage0_t volume_pg0;
8740 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8741 	Mpi2ConfigReply_t mpi_reply;
8742 	u16 ioc_status;
8743 	u16 handle;
8744 	u8 phys_disk_num;
8745 
8746 	if (!ioc->ir_firmware)
8747 		return;
8748 
8749 	ioc_info(ioc, "search for raid volumes: start\n");
8750 
8751 	if (list_empty(&ioc->raid_device_list))
8752 		goto out;
8753 
8754 	handle = 0xFFFF;
8755 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8756 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8757 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8758 		    MPI2_IOCSTATUS_MASK;
8759 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8760 			break;
8761 		handle = le16_to_cpu(volume_pg1.DevHandle);
8762 
8763 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8764 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8765 		     sizeof(Mpi2RaidVolPage0_t)))
8766 			continue;
8767 
8768 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8769 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8770 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8771 			_scsih_mark_responding_raid_device(ioc,
8772 			    le64_to_cpu(volume_pg1.WWID), handle);
8773 	}
8774 
8775 	/* refresh the pd_handles */
8776 	if (!ioc->is_warpdrive) {
8777 		phys_disk_num = 0xFF;
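		/*
		 * 0xFF primes the GET_NEXT_PHYSDISKNUM iteration; the bitmap
		 * is then rebuilt from the physical disks the firmware
		 * currently reports.
		 */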
8778 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8779 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8780 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8781 		    phys_disk_num))) {
8782 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8783 			    MPI2_IOCSTATUS_MASK;
8784 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8785 				break;
8786 			phys_disk_num = pd_pg0.PhysDiskNum;
8787 			handle = le16_to_cpu(pd_pg0.DevHandle);
8788 			set_bit(handle, ioc->pd_handles);
8789 		}
8790 	}
8791  out:
8792 	ioc_info(ioc, "search for responding raid volumes: complete\n");
8793 }
8794 
8795 /**
8796  * _scsih_mark_responding_expander - mark an expander as responding
8797  * @ioc: per adapter object
8798  * @expander_pg0: SAS Expander Config Page0
8799  *
8800  * After host reset, find out whether devices are still responding.
8801  * Used in _scsih_remove_unresponding_devices.
8802  */
8803 static void
8804 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8805 	Mpi2ExpanderPage0_t *expander_pg0)
8806 {
8807 	struct _sas_node *sas_expander = NULL;
8808 	unsigned long flags;
8809 	int i;
8810 	struct _enclosure_node *enclosure_dev = NULL;
8811 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8812 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8813 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8814 
8815 	if (enclosure_handle)
8816 		enclosure_dev =
8817 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8818 							enclosure_handle);
8819 
8820 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8821 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8822 		if (sas_expander->sas_address != sas_address)
8823 			continue;
8824 		sas_expander->responding = 1;
8825 
8826 		if (enclosure_dev) {
8827 			sas_expander->enclosure_logical_id =
8828 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8829 			sas_expander->enclosure_handle =
8830 			    le16_to_cpu(expander_pg0->EnclosureHandle);
8831 		}
8832 
8833 		if (sas_expander->handle == handle)
8834 			goto out;
8835 		pr_info("\texpander(0x%016llx): handle changed" \
8836 		    " from(0x%04x) to (0x%04x)!!!\n",
8837 		    (unsigned long long)sas_expander->sas_address,
8838 		    sas_expander->handle, handle);
8839 		sas_expander->handle = handle;
8840 		for (i = 0 ; i < sas_expander->num_phys ; i++)
8841 			sas_expander->phy[i].handle = handle;
8842 		goto out;
8843 	}
8844  out:
8845 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8846 }
8847 
8848 /**
8849  * _scsih_search_responding_expanders - search for responding expanders
8850  * @ioc: per adapter object
8851  *
8852  * After host reset, find out whether devices are still responding.
8853  * If not, remove them.
8854  */
8855 static void
8856 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8857 {
8858 	Mpi2ExpanderPage0_t expander_pg0;
8859 	Mpi2ConfigReply_t mpi_reply;
8860 	u16 ioc_status;
8861 	u64 sas_address;
8862 	u16 handle;
8863 
8864 	ioc_info(ioc, "search for expanders: start\n");
8865 
8866 	if (list_empty(&ioc->sas_expander_list))
8867 		goto out;
8868 
8869 	handle = 0xFFFF;
8870 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8871 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8872 
8873 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8874 		    MPI2_IOCSTATUS_MASK;
8875 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8876 			break;
8877 
8878 		handle = le16_to_cpu(expander_pg0.DevHandle);
8879 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
8880 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8881 			handle,
8882 		    (unsigned long long)sas_address);
8883 		_scsih_mark_responding_expander(ioc, &expander_pg0);
8884 	}
8885 
8886  out:
8887 	ioc_info(ioc, "search for expanders: complete\n");
8888 }
8889 
8890 /**
8891  * _scsih_remove_unresponding_devices - removing unresponding devices
8892  * @ioc: per adapter object
8893  */
8894 static void
8895 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8896 {
8897 	struct _sas_device *sas_device, *sas_device_next;
8898 	struct _sas_node *sas_expander, *sas_expander_next;
8899 	struct _raid_device *raid_device, *raid_device_next;
8900 	struct _pcie_device *pcie_device, *pcie_device_next;
8901 	struct list_head tmp_list;
8902 	unsigned long flags;
8903 	LIST_HEAD(head);
8904 
8905 	ioc_info(ioc, "removing unresponding devices: start\n");
8906 
8907 	/* removing unresponding end devices */
8908 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
8909 	/*
8910 	 * Iterate, pulling off devices marked as non-responding. We become the
8911 	 * owner for the reference the list had on any object we prune.
8912 	 */
8913 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8914 	list_for_each_entry_safe(sas_device, sas_device_next,
8915 	    &ioc->sas_device_list, list) {
8916 		if (!sas_device->responding)
8917 			list_move_tail(&sas_device->list, &head);
8918 		else
8919 			sas_device->responding = 0;
8920 	}
8921 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8922 
8923 	/*
8924 	 * Now, uninitialize and remove the unresponding devices we pruned.
8925 	 */
8926 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8927 		_scsih_remove_device(ioc, sas_device);
8928 		list_del_init(&sas_device->list);
8929 		sas_device_put(sas_device);
8930 	}
8931 
8932 	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
8933 	INIT_LIST_HEAD(&head);
8934 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8935 	list_for_each_entry_safe(pcie_device, pcie_device_next,
8936 	    &ioc->pcie_device_list, list) {
8937 		if (!pcie_device->responding)
8938 			list_move_tail(&pcie_device->list, &head);
8939 		else
8940 			pcie_device->responding = 0;
8941 	}
8942 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8943 
8944 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8945 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8946 		list_del_init(&pcie_device->list);
8947 		pcie_device_put(pcie_device);
8948 	}
8949 
8950 	/* removing unresponding volumes */
8951 	if (ioc->ir_firmware) {
8952 		ioc_info(ioc, "removing unresponding devices: volumes\n");
8953 		list_for_each_entry_safe(raid_device, raid_device_next,
8954 		    &ioc->raid_device_list, list) {
8955 			if (!raid_device->responding)
8956 				_scsih_sas_volume_delete(ioc,
8957 				    raid_device->handle);
8958 			else
8959 				raid_device->responding = 0;
8960 		}
8961 	}
8962 
8963 	/* removing unresponding expanders */
8964 	ioc_info(ioc, "removing unresponding devices: expanders\n");
8965 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8966 	INIT_LIST_HEAD(&tmp_list);
8967 	list_for_each_entry_safe(sas_expander, sas_expander_next,
8968 	    &ioc->sas_expander_list, list) {
8969 		if (!sas_expander->responding)
8970 			list_move_tail(&sas_expander->list, &tmp_list);
8971 		else
8972 			sas_expander->responding = 0;
8973 	}
8974 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8975 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8976 	    list) {
8977 		_scsih_expander_node_remove(ioc, sas_expander);
8978 	}
8979 
8980 	ioc_info(ioc, "removing unresponding devices: complete\n");
8981 
8982 	/* unblock devices */
8983 	_scsih_ublock_io_all_device(ioc);
8984 }
8985 
8986 static void
8987 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8988 	struct _sas_node *sas_expander, u16 handle)
8989 {
8990 	Mpi2ExpanderPage1_t expander_pg1;
8991 	Mpi2ConfigReply_t mpi_reply;
8992 	int i;
8993 
8994 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
8995 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8996 		    &expander_pg1, i, handle))) {
8997 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8998 				__FILE__, __LINE__, __func__);
8999 			return;
9000 		}
9001 
9002 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9003 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9004 		    expander_pg1.NegotiatedLinkRate >> 4);
9005 	}
9006 }
9007 
9008 /**
9009  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9010  * @ioc: per adapter object
9011  */
9012 static void
9013 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9014 {
9015 	Mpi2ExpanderPage0_t expander_pg0;
9016 	Mpi2SasDevicePage0_t sas_device_pg0;
9017 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9018 	Mpi2RaidVolPage1_t volume_pg1;
9019 	Mpi2RaidVolPage0_t volume_pg0;
9020 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9021 	Mpi2EventIrConfigElement_t element;
9022 	Mpi2ConfigReply_t mpi_reply;
9023 	u8 phys_disk_num;
9024 	u16 ioc_status;
9025 	u16 handle, parent_handle;
9026 	u64 sas_address;
9027 	struct _sas_device *sas_device;
9028 	struct _pcie_device *pcie_device;
9029 	struct _sas_node *expander_device;
9030 	struct _raid_device *raid_device;
9031 	u8 retry_count;
9032 	unsigned long flags;
9033 
9034 	ioc_info(ioc, "scan devices: start\n");
9035 
9036 	_scsih_sas_host_refresh(ioc);
9037 
9038 	ioc_info(ioc, "\tscan devices: expanders start\n");
9039 
9040 	/* expanders */
9041 	handle = 0xFFFF;
9042 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9043 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9044 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9045 		    MPI2_IOCSTATUS_MASK;
9046 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9047 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9048 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9049 			break;
9050 		}
9051 		handle = le16_to_cpu(expander_pg0.DevHandle);
9052 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
9053 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9054 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
9055 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9056 		if (expander_device)
9057 			_scsih_refresh_expander_links(ioc, expander_device,
9058 			    handle);
9059 		else {
9060 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9061 				 handle,
9062 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9063 			_scsih_expander_add(ioc, handle);
9064 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9065 				 handle,
9066 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9067 		}
9068 	}
9069 
9070 	ioc_info(ioc, "\tscan devices: expanders complete\n");
9071 
9072 	if (!ioc->ir_firmware)
9073 		goto skip_to_sas;
9074 
9075 	ioc_info(ioc, "\tscan devices: phys disk start\n");
9076 
9077 	/* phys disk */
9078 	phys_disk_num = 0xFF;
9079 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9080 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9081 	    phys_disk_num))) {
9082 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9083 		    MPI2_IOCSTATUS_MASK;
9084 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9085 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9086 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9087 			break;
9088 		}
9089 		phys_disk_num = pd_pg0.PhysDiskNum;
9090 		handle = le16_to_cpu(pd_pg0.DevHandle);
9091 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9092 		if (sas_device) {
9093 			sas_device_put(sas_device);
9094 			continue;
9095 		}
9096 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9097 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9098 		    handle) != 0)
9099 			continue;
9100 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9101 		    MPI2_IOCSTATUS_MASK;
9102 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9103 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9104 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9105 			break;
9106 		}
9107 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9108 		if (!_scsih_get_sas_address(ioc, parent_handle,
9109 		    &sas_address)) {
9110 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9111 				 handle,
9112 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9113 			mpt3sas_transport_update_links(ioc, sas_address,
9114 			    handle, sas_device_pg0.PhyNum,
9115 			    MPI2_SAS_NEG_LINK_RATE_1_5);
9116 			set_bit(handle, ioc->pd_handles);
9117 			retry_count = 0;
9118 			/* This will retry adding the end device.
9119 			 * _scsih_add_device() will decide on retries and
9120 			 * return "1" when it should be retried
9121 			 */
9122 			while (_scsih_add_device(ioc, handle, retry_count++,
9123 			    1)) {
9124 				ssleep(1);
9125 			}
9126 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9127 				 handle,
9128 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9129 		}
9130 	}
9131 
9132 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
9133 
9134 	ioc_info(ioc, "\tscan devices: volumes start\n");
9135 
9136 	/* volumes */
9137 	handle = 0xFFFF;
9138 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9139 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9140 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9141 		    MPI2_IOCSTATUS_MASK;
9142 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9143 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9144 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9145 			break;
9146 		}
9147 		handle = le16_to_cpu(volume_pg1.DevHandle);
9148 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9149 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
9150 		    le64_to_cpu(volume_pg1.WWID));
9151 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9152 		if (raid_device)
9153 			continue;
9154 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9155 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9156 		     sizeof(Mpi2RaidVolPage0_t)))
9157 			continue;
9158 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9159 		    MPI2_IOCSTATUS_MASK;
9160 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9161 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9162 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9163 			break;
9164 		}
9165 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9166 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9167 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9168 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9169 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9170 			element.VolDevHandle = volume_pg1.DevHandle;
9171 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9172 				 le16_to_cpu(volume_pg1.DevHandle));
9173 			_scsih_sas_volume_add(ioc, &element);
9174 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9175 				 le16_to_cpu(volume_pg1.DevHandle));
9176 		}
9177 	}
9178 
9179 	ioc_info(ioc, "\tscan devices: volumes complete\n");
9180 
9181  skip_to_sas:
9182 
9183 	ioc_info(ioc, "\tscan devices: end devices start\n");
9184 
9185 	/* sas devices */
9186 	handle = 0xFFFF;
9187 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9188 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9189 	    handle))) {
9190 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9191 		    MPI2_IOCSTATUS_MASK;
9192 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9193 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9194 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9195 			break;
9196 		}
9197 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9198 		if (!(_scsih_is_end_device(
9199 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
9200 			continue;
9201 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
9202 		    le64_to_cpu(sas_device_pg0.SASAddress));
9203 		if (sas_device) {
9204 			sas_device_put(sas_device);
9205 			continue;
9206 		}
9207 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9208 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9209 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9210 				 handle,
9211 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9212 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9213 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9214 			retry_count = 0;
9215 			/* This will retry adding the end device.
9216 			 * _scsih_add_device() will decide on retries and
9217 			 * return "1" when it should be retried
9218 			 */
9219 			while (_scsih_add_device(ioc, handle, retry_count++,
9220 			    0)) {
9221 				ssleep(1);
9222 			}
9223 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9224 				 handle,
9225 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9226 		}
9227 	}
9228 	ioc_info(ioc, "\tscan devices: end devices complete\n");
9229 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9230 
9231 	/* pcie devices */
9232 	handle = 0xFFFF;
9233 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9234 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9235 		handle))) {
9236 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9237 				& MPI2_IOCSTATUS_MASK;
9238 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9239 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9240 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9241 			break;
9242 		}
9243 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9244 		if (!(_scsih_is_nvme_pciescsi_device(
9245 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9246 			continue;
9247 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9248 				le64_to_cpu(pcie_device_pg0.WWID));
9249 		if (pcie_device) {
9250 			pcie_device_put(pcie_device);
9251 			continue;
9252 		}
9253 		retry_count = 0;
9254 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9255 		_scsih_pcie_add_device(ioc, handle);
9256 
9257 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9258 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9259 	}
9260 	ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
9261 	ioc_info(ioc, "scan devices: complete\n");
9262 }
9263 
9264 /**
9265  * mpt3sas_scsih_pre_reset_handler - pre reset callback handler (for scsih)
9266  * @ioc: per adapter object
9267  *
9268  * The handler for doing any required cleanup or initialization.
9269  */
9270 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9271 {
9272 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9273 }
9274 
9275 /**
9276  * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9277  * @ioc: per adapter object
9278  *
9279  * The handler for doing any required cleanup or initialization.
9280  */
9281 void
9282 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9283 {
9284 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9285 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9286 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9287 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9288 		complete(&ioc->scsih_cmds.done);
9289 	}
9290 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9291 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
9292 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9293 		complete(&ioc->tm_cmds.done);
9294 	}
9295 
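	/*
	 * The reset invalidates the firmware device handles, so the pending
	 * device-add and device-removal bitmaps are cleared here rather than
	 * carried across the reset.
	 */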
9296 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9297 	memset(ioc->device_remove_in_progress, 0,
9298 	       ioc->device_remove_in_progress_sz);
9299 	_scsih_fw_event_cleanup_queue(ioc);
9300 	_scsih_flush_running_cmds(ioc);
9301 }
9302 
9303 /**
9304  * mpt3sas_scsih_reset_done_handler - reset done callback handler (for scsih)
9305  * @ioc: per adapter object
9306  *
9307  * The handler for doing any required cleanup or initialization.
9308  */
9309 void
9310 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9311 {
9312 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
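	/*
	 * Kick off the rescan only once the initial driver load has finished,
	 * and skip it when discovery is disabled (module parameter) while no
	 * HBA phys have been discovered.
	 */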
9313 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9314 					   !ioc->sas_hba.num_phys)) {
9315 		_scsih_prep_device_scan(ioc);
9316 		_scsih_create_enclosure_list_after_reset(ioc);
9317 		_scsih_search_responding_sas_devices(ioc);
9318 		_scsih_search_responding_pcie_devices(ioc);
9319 		_scsih_search_responding_raid_devices(ioc);
9320 		_scsih_search_responding_expanders(ioc);
9321 		_scsih_error_recovery_delete_devices(ioc);
9322 	}
9323 }
9324 
9325 /**
9326  * _mpt3sas_fw_work - delayed task for processing firmware events
9327  * @ioc: per adapter object
9328  * @fw_event: The fw_event_work object
9329  * Context: user.
9330  */
9331 static void
9332 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9333 {
9334 	_scsih_fw_event_del_from_list(ioc, fw_event);
9335 
9336 	/* the queue is being flushed so ignore this event */
9337 	if (ioc->remove_host || ioc->pci_error_recovery) {
9338 		fw_event_work_put(fw_event);
9339 		return;
9340 	}
9341 
9342 	switch (fw_event->event) {
9343 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9344 		mpt3sas_process_trigger_data(ioc,
9345 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9346 			fw_event->event_data);
9347 		break;
9348 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9349 		while (scsi_host_in_recovery(ioc->shost) ||
9350 					 ioc->shost_recovery) {
9351 			/*
9352 			 * If we're unloading, bail. Otherwise, this can become
9353 			 * an infinite loop.
9354 			 */
9355 			if (ioc->remove_host)
9356 				goto out;
9357 			ssleep(1);
9358 		}
9359 		_scsih_remove_unresponding_devices(ioc);
9360 		_scsih_scan_for_devices_after_reset(ioc);
9361 		break;
9362 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9363 		ioc->start_scan = 0;
9364 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9365 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9366 			    missing_delay[1]);
9367 		dewtprintk(ioc,
9368 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9369 		break;
9370 	case MPT3SAS_TURN_ON_PFA_LED:
9371 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9372 		break;
9373 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9374 		_scsih_sas_topology_change_event(ioc, fw_event);
9375 		break;
9376 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9377 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9378 			_scsih_sas_device_status_change_event_debug(ioc,
9379 			    (Mpi2EventDataSasDeviceStatusChange_t *)
9380 			    fw_event->event_data);
9381 		break;
9382 	case MPI2_EVENT_SAS_DISCOVERY:
9383 		_scsih_sas_discovery_event(ioc, fw_event);
9384 		break;
9385 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9386 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9387 		break;
9388 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9389 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9390 		break;
9391 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9392 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9393 		    fw_event);
9394 		break;
9395 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9396 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9397 		break;
9398 	case MPI2_EVENT_IR_VOLUME:
9399 		_scsih_sas_ir_volume_event(ioc, fw_event);
9400 		break;
9401 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9402 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9403 		break;
9404 	case MPI2_EVENT_IR_OPERATION_STATUS:
9405 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9406 		break;
9407 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9408 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9409 		break;
9410 	case MPI2_EVENT_PCIE_ENUMERATION:
9411 		_scsih_pcie_enumeration_event(ioc, fw_event);
9412 		break;
9413 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9414 		_scsih_pcie_topology_change_event(ioc, fw_event);
9415 		return;
9416 		break;
9417 	}
9418 out:
9419 	fw_event_work_put(fw_event);
9420 }
9421 
9422 /**
9423  * _firmware_event_work - work routine for processing firmware events
9424  * @work: The fw_event_work object
9425  * Context: user.
9426  *
9427  * Wrapper for the work thread handling firmware events.
9428  */
9429 
9430 static void
9431 _firmware_event_work(struct work_struct *work)
9432 {
9433 	struct fw_event_work *fw_event = container_of(work,
9434 	    struct fw_event_work, work);
9435 
9436 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
9437 }
9438 
9439 /**
9440  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9441  * @ioc: per adapter object
9442  * @msix_index: MSIX table index supplied by the OS
9443  * @reply: reply message frame (lower 32bit addr)
9444  * Context: interrupt.
9445  *
9446  * This function merely adds a new work task into ioc->firmware_event_thread.
9447  * The tasks are worked from _firmware_event_work in user context.
9448  *
9449  * Return: 1 meaning mf should be freed from _base_interrupt
9450  *         0 means the mf is freed from this function.
9451  */
9452 u8
9453 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9454 	u32 reply)
9455 {
9456 	struct fw_event_work *fw_event;
9457 	Mpi2EventNotificationReply_t *mpi_reply;
9458 	u16 event;
9459 	u16 sz;
9460 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9461 
9462 	/* events are not processed during PCI error recovery */
9463 	if (ioc->pci_error_recovery)
9464 		return 1;
9465 
9466 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9467 
9468 	if (unlikely(!mpi_reply)) {
9469 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9470 			__FILE__, __LINE__, __func__);
9471 		return 1;
9472 	}
9473 
9474 	event = le16_to_cpu(mpi_reply->Event);
9475 
9476 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9477 		mpt3sas_trigger_event(ioc, event, 0);
9478 
9479 	switch (event) {
9480 	/* handle these */
9481 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9482 	{
9483 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9484 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9485 		    mpi_reply->EventData;
9486 
9487 		if (baen_data->Primitive !=
9488 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9489 			return 1;
9490 
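		/*
		 * Only one broadcast primitive is serviced at a time; while
		 * the handler is busy, further AENs are just counted in
		 * broadcast_aen_pending for it to pick up later.
		 */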
9491 		if (ioc->broadcast_aen_busy) {
9492 			ioc->broadcast_aen_pending++;
9493 			return 1;
9494 		} else
9495 			ioc->broadcast_aen_busy = 1;
9496 		break;
9497 	}
9498 
9499 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9500 		_scsih_check_topo_delete_events(ioc,
9501 		    (Mpi2EventDataSasTopologyChangeList_t *)
9502 		    mpi_reply->EventData);
9503 		break;
9504 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9505 		_scsih_check_pcie_topo_remove_events(ioc,
9506 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9507 		    mpi_reply->EventData);
9508 		break;
9509 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9510 		_scsih_check_ir_config_unhide_events(ioc,
9511 		    (Mpi2EventDataIrConfigChangeList_t *)
9512 		    mpi_reply->EventData);
9513 		break;
9514 	case MPI2_EVENT_IR_VOLUME:
9515 		_scsih_check_volume_delete_events(ioc,
9516 		    (Mpi2EventDataIrVolume_t *)
9517 		    mpi_reply->EventData);
9518 		break;
9519 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9520 	{
9521 		Mpi2EventDataLogEntryAdded_t *log_entry;
9522 		u32 *log_code;
9523 
9524 		if (!ioc->is_warpdrive)
9525 			break;
9526 
9527 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9528 		    mpi_reply->EventData;
9529 		log_code = (u32 *)log_entry->LogData;
9530 
9531 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9532 		    != MPT2_WARPDRIVE_LOGENTRY)
9533 			break;
9534 
9535 		switch (le32_to_cpu(*log_code)) {
9536 		case MPT2_WARPDRIVE_LC_SSDT:
9537 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9538 			break;
9539 		case MPT2_WARPDRIVE_LC_SSDLW:
9540 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9541 			break;
9542 		case MPT2_WARPDRIVE_LC_SSDLF:
9543 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9544 			break;
9545 		case MPT2_WARPDRIVE_LC_BRMF:
9546 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9547 			break;
9548 		}
9549 
9550 		break;
9551 	}
9552 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9553 		_scsih_sas_device_status_change_event(ioc,
9554 		    (Mpi2EventDataSasDeviceStatusChange_t *)
9555 		    mpi_reply->EventData);
9556 		break;
9557 	case MPI2_EVENT_IR_OPERATION_STATUS:
9558 	case MPI2_EVENT_SAS_DISCOVERY:
9559 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9560 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9561 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9562 	case MPI2_EVENT_PCIE_ENUMERATION:
9563 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9564 		break;
9565 
9566 	case MPI2_EVENT_TEMP_THRESHOLD:
9567 		_scsih_temp_threshold_events(ioc,
9568 			(Mpi2EventDataTemperature_t *)
9569 			mpi_reply->EventData);
9570 		break;
9571 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9572 		ActiveCableEventData =
9573 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9574 		switch (ActiveCableEventData->ReasonCode) {
9575 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9576 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9577 				   ActiveCableEventData->ReceptacleID);
9578 			pr_notice("cannot be powered and devices connected\n");
9579 			pr_notice("to this active cable will not be seen\n");
9580 			pr_notice("This active cable requires %d mW of power\n",
9581 			     ActiveCableEventData->ActiveCablePowerRequirement);
9582 			break;
9583 
9584 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9585 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9586 				   ActiveCableEventData->ReceptacleID);
9587 			pr_notice(
9588 			    "is not running at optimal speed(12 Gb/s rate)\n");
9589 			break;
9590 		}
9591 
9592 		break;
9593 
9594 	default: /* ignore the rest */
9595 		return 1;
9596 	}
9597 
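	/* EventDataLength is in units of 32-bit dwords, hence the * 4 */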
9598 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9599 	fw_event = alloc_fw_event_work(sz);
9600 	if (!fw_event) {
9601 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9602 			__FILE__, __LINE__, __func__);
9603 		return 1;
9604 	}
9605 
9606 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9607 	fw_event->ioc = ioc;
9608 	fw_event->VF_ID = mpi_reply->VF_ID;
9609 	fw_event->VP_ID = mpi_reply->VP_ID;
9610 	fw_event->event = event;
9611 	_scsih_fw_event_add(ioc, fw_event);
9612 	fw_event_work_put(fw_event);
9613 	return 1;
9614 }
9615 
9616 /**
9617  * _scsih_expander_node_remove - removing expander device from list.
9618  * @ioc: per adapter object
9619  * @sas_expander: the sas expander (struct _sas_node) object
9620  *
9621  * Removing object and freeing associated memory from the
9622  * ioc->sas_expander_list.
9623  */
9624 static void
9625 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9626 	struct _sas_node *sas_expander)
9627 {
9628 	struct _sas_port *mpt3sas_port, *next;
9629 	unsigned long flags;
9630 
9631 	/* remove sibling ports attached to this expander */
9632 	list_for_each_entry_safe(mpt3sas_port, next,
9633 	   &sas_expander->sas_port_list, port_list) {
9634 		if (ioc->shost_recovery)
9635 			return;
9636 		if (mpt3sas_port->remote_identify.device_type ==
9637 		    SAS_END_DEVICE)
9638 			mpt3sas_device_remove_by_sas_address(ioc,
9639 			    mpt3sas_port->remote_identify.sas_address);
9640 		else if (mpt3sas_port->remote_identify.device_type ==
9641 		    SAS_EDGE_EXPANDER_DEVICE ||
9642 		    mpt3sas_port->remote_identify.device_type ==
9643 		    SAS_FANOUT_EXPANDER_DEVICE)
9644 			mpt3sas_expander_remove(ioc,
9645 			    mpt3sas_port->remote_identify.sas_address);
9646 	}
9647 
9648 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9649 	    sas_expander->sas_address_parent);
9650 
9651 	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9652 		 sas_expander->handle, (unsigned long long)
9653 		 sas_expander->sas_address);
9654 
9655 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9656 	list_del(&sas_expander->list);
9657 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9658 
9659 	kfree(sas_expander->phy);
9660 	kfree(sas_expander);
9661 }
9662 
9663 /**
9664  * _scsih_ir_shutdown - IR shutdown notification
9665  * @ioc: per adapter object
9666  *
9667  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9668  * the host system is shutting down.
9669  */
9670 static void
9671 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9672 {
9673 	Mpi2RaidActionRequest_t *mpi_request;
9674 	Mpi2RaidActionReply_t *mpi_reply;
9675 	u16 smid;
9676 
9677 	/* is IR firmware build loaded ? */
9678 	if (!ioc->ir_firmware)
9679 		return;
9680 
9681 	/* are there any volumes ? */
9682 	if (list_empty(&ioc->raid_device_list))
9683 		return;
9684 
9685 	mutex_lock(&ioc->scsih_cmds.mutex);
9686 
9687 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9688 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9689 		goto out;
9690 	}
9691 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9692 
9693 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9694 	if (!smid) {
9695 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9696 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9697 		goto out;
9698 	}
9699 
9700 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9701 	ioc->scsih_cmds.smid = smid;
9702 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9703 
9704 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9705 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9706 
9707 	if (!ioc->hide_ir_msg)
9708 		ioc_info(ioc, "IR shutdown (sending)\n");
9709 	init_completion(&ioc->scsih_cmds.done);
9710 	ioc->put_smid_default(ioc, smid);
9711 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9712 
9713 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9714 		ioc_err(ioc, "%s: timeout\n", __func__);
9715 		goto out;
9716 	}
9717 
9718 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9719 		mpi_reply = ioc->scsih_cmds.reply;
9720 		if (!ioc->hide_ir_msg)
9721 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9722 				 le16_to_cpu(mpi_reply->IOCStatus),
9723 				 le32_to_cpu(mpi_reply->IOCLogInfo));
9724 	}
9725 
9726  out:
9727 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9728 	mutex_unlock(&ioc->scsih_cmds.mutex);
9729 }
9730 
9731 /**
9732  * scsih_remove - detach and remove the scsi host
9733  * @pdev: PCI device struct
9734  *
9735  * Routine called when unloading the driver.
9736  */
9737 static void scsih_remove(struct pci_dev *pdev)
9738 {
9739 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9740 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9741 	struct _sas_port *mpt3sas_port, *next_port;
9742 	struct _raid_device *raid_device, *next;
9743 	struct MPT3SAS_TARGET *sas_target_priv_data;
9744 	struct _pcie_device *pcie_device, *pcienext;
9745 	struct workqueue_struct	*wq;
9746 	unsigned long flags;
9747 	Mpi2ConfigReply_t mpi_reply;
9748 
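	/*
	 * Flag the removal first so asynchronous paths that check
	 * ioc->remove_host (e.g. the firmware event work) bail out while the
	 * host is being torn down.
	 */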
9749 	ioc->remove_host = 1;
9750 
9751 	mpt3sas_wait_for_commands_to_complete(ioc);
9752 	_scsih_flush_running_cmds(ioc);
9753 
9754 	_scsih_fw_event_cleanup_queue(ioc);
9755 
9756 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9757 	wq = ioc->firmware_event_thread;
9758 	ioc->firmware_event_thread = NULL;
9759 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9760 	if (wq)
9761 		destroy_workqueue(wq);
9762 	/*
9763 	 * Copy back the unmodified ioc page1. so that on next driver load,
9764 	 * current modified changes on ioc page1 won't take effect.
9765 	 */
9766 	if (ioc->is_aero_ioc)
9767 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9768 				&ioc->ioc_pg1_copy);
9769 	/* release all the volumes */
9770 	_scsih_ir_shutdown(ioc);
9771 	sas_remove_host(shost);
9772 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9773 	    list) {
9774 		if (raid_device->starget) {
9775 			sas_target_priv_data =
9776 			    raid_device->starget->hostdata;
9777 			sas_target_priv_data->deleted = 1;
9778 			scsi_remove_target(&raid_device->starget->dev);
9779 		}
9780 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9781 			 raid_device->handle, (u64)raid_device->wwid);
9782 		_scsih_raid_device_remove(ioc, raid_device);
9783 	}
9784 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9785 		list) {
9786 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9787 		list_del_init(&pcie_device->list);
9788 		pcie_device_put(pcie_device);
9789 	}
9790 
9791 	/* free ports attached to the sas_host */
9792 	list_for_each_entry_safe(mpt3sas_port, next_port,
9793 	   &ioc->sas_hba.sas_port_list, port_list) {
9794 		if (mpt3sas_port->remote_identify.device_type ==
9795 		    SAS_END_DEVICE)
9796 			mpt3sas_device_remove_by_sas_address(ioc,
9797 			    mpt3sas_port->remote_identify.sas_address);
9798 		else if (mpt3sas_port->remote_identify.device_type ==
9799 		    SAS_EDGE_EXPANDER_DEVICE ||
9800 		    mpt3sas_port->remote_identify.device_type ==
9801 		    SAS_FANOUT_EXPANDER_DEVICE)
9802 			mpt3sas_expander_remove(ioc,
9803 			    mpt3sas_port->remote_identify.sas_address);
9804 	}
9805 
9806 	/* free phys attached to the sas_host */
9807 	if (ioc->sas_hba.num_phys) {
9808 		kfree(ioc->sas_hba.phy);
9809 		ioc->sas_hba.phy = NULL;
9810 		ioc->sas_hba.num_phys = 0;
9811 	}
9812 
9813 	mpt3sas_base_detach(ioc);
9814 	spin_lock(&gioc_lock);
9815 	list_del(&ioc->list);
9816 	spin_unlock(&gioc_lock);
9817 	scsi_host_put(shost);
9818 }
9819 
9820 /**
9821  * scsih_shutdown - routine called during system shutdown
9822  * @pdev: PCI device struct
9823  */
9824 static void
9825 scsih_shutdown(struct pci_dev *pdev)
9826 {
9827 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9828 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9829 	struct workqueue_struct	*wq;
9830 	unsigned long flags;
9831 	Mpi2ConfigReply_t mpi_reply;
9832 
9833 	ioc->remove_host = 1;
9834 
9835 	mpt3sas_wait_for_commands_to_complete(ioc);
9836 	_scsih_flush_running_cmds(ioc);
9837 
9838 	_scsih_fw_event_cleanup_queue(ioc);
9839 
9840 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9841 	wq = ioc->firmware_event_thread;
9842 	ioc->firmware_event_thread = NULL;
9843 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9844 	if (wq)
9845 		destroy_workqueue(wq);
9846 	/*
9847 	 * Copy back the unmodified ioc page1 so that on next driver load,
9848 	 * current modified changes on ioc page1 won't take effect.
9849 	 */
9850 	if (ioc->is_aero_ioc)
9851 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9852 				&ioc->ioc_pg1_copy);
9853 
9854 	_scsih_ir_shutdown(ioc);
9855 	mpt3sas_base_detach(ioc);
9856 }
9857 
9858 
9859 /**
9860  * _scsih_probe_boot_devices - reports 1st device
9861  * @ioc: per adapter object
9862  *
9863  * If specified in bios page 2, this routine reports the 1st
9864  * device to scsi-ml or the sas transport for persistent boot device
9865  * purposes.  Please refer to function _scsih_determine_boot_device()
9866  */
9867 static void
9868 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9869 {
9870 	u32 channel;
9871 	void *device;
9872 	struct _sas_device *sas_device;
9873 	struct _raid_device *raid_device;
9874 	struct _pcie_device *pcie_device;
9875 	u16 handle;
9876 	u64 sas_address_parent;
9877 	u64 sas_address;
9878 	unsigned long flags;
9879 	int rc;
9880 	int tid;
9881 
9882 	/* no BIOS, return immediately */
9883 	if (!ioc->bios_pg3.BiosVersion)
9884 		return;
9885 
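	/* boot device priority: requested, then alternate, then current */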
9886 	device = NULL;
9887 	if (ioc->req_boot_device.device) {
9888 		device =  ioc->req_boot_device.device;
9889 		channel = ioc->req_boot_device.channel;
9890 	} else if (ioc->req_alt_boot_device.device) {
9891 		device =  ioc->req_alt_boot_device.device;
9892 		channel = ioc->req_alt_boot_device.channel;
9893 	} else if (ioc->current_boot_device.device) {
9894 		device =  ioc->current_boot_device.device;
9895 		channel = ioc->current_boot_device.channel;
9896 	}
9897 
9898 	if (!device)
9899 		return;
9900 
9901 	if (channel == RAID_CHANNEL) {
9902 		raid_device = device;
9903 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9904 		    raid_device->id, 0);
9905 		if (rc)
9906 			_scsih_raid_device_remove(ioc, raid_device);
9907 	} else if (channel == PCIE_CHANNEL) {
9908 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9909 		pcie_device = device;
9910 		tid = pcie_device->id;
9911 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9912 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9913 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9914 		if (rc)
9915 			_scsih_pcie_device_remove(ioc, pcie_device);
9916 	} else {
9917 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
9918 		sas_device = device;
9919 		handle = sas_device->handle;
9920 		sas_address_parent = sas_device->sas_address_parent;
9921 		sas_address = sas_device->sas_address;
9922 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
9923 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
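		/*
		 * The handle and addresses were copied above so the transport
		 * port add/remove calls below can be made after dropping
		 * sas_device_lock, since they may sleep.
		 */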
9924 
9925 		if (ioc->hide_drives)
9926 			return;
9927 		if (!mpt3sas_transport_port_add(ioc, handle,
9928 		    sas_address_parent)) {
9929 			_scsih_sas_device_remove(ioc, sas_device);
9930 		} else if (!sas_device->starget) {
9931 			if (!ioc->is_driver_loading) {
9932 				mpt3sas_transport_port_remove(ioc,
9933 				    sas_address,
9934 				    sas_address_parent);
9935 				_scsih_sas_device_remove(ioc, sas_device);
9936 			}
9937 		}
9938 	}
9939 }
9940 
9941 /**
9942  * _scsih_probe_raid - reporting raid volumes to scsi-ml
9943  * @ioc: per adapter object
9944  *
9945  * Called during initial loading of the driver.
9946  */
9947 static void
9948 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9949 {
9950 	struct _raid_device *raid_device, *raid_next;
9951 	int rc;
9952 
9953 	list_for_each_entry_safe(raid_device, raid_next,
9954 	    &ioc->raid_device_list, list) {
9955 		if (raid_device->starget)
9956 			continue;
9957 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9958 		    raid_device->id, 0);
9959 		if (rc)
9960 			_scsih_raid_device_remove(ioc, raid_device);
9961 	}
9962 }
9963 
9964 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9965 {
9966 	struct _sas_device *sas_device = NULL;
9967 	unsigned long flags;
9968 
9969 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9970 	if (!list_empty(&ioc->sas_device_init_list)) {
9971 		sas_device = list_first_entry(&ioc->sas_device_init_list,
9972 				struct _sas_device, list);
9973 		sas_device_get(sas_device);
9974 	}
9975 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9976 
9977 	return sas_device;
9978 }
9979 
9980 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9981 		struct _sas_device *sas_device)
9982 {
9983 	unsigned long flags;
9984 
9985 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9986 
9987 	/*
9988 	 * Since we dropped the lock during the call to port_add(), we need to
9989 	 * be careful here that somebody else didn't move or delete this item
9990 	 * while we were busy with other things.
9991 	 *
9992 	 * If it was on the list, we need a put() for the reference the list
9993 	 * had. Either way, we need a get() for the destination list.
9994 	 */
9995 	if (!list_empty(&sas_device->list)) {
9996 		list_del_init(&sas_device->list);
9997 		sas_device_put(sas_device);
9998 	}
9999 
10000 	sas_device_get(sas_device);
10001 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
10002 
10003 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10004 }
10005 
10006 /**
10007  * _scsih_probe_sas - reporting sas devices to sas transport
10008  * @ioc: per adapter object
10009  *
10010  * Called during initial loading of the driver.
10011  */
10012 static void
10013 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10014 {
10015 	struct _sas_device *sas_device;
10016 
10017 	if (ioc->hide_drives)
10018 		return;
10019 
10020 	while ((sas_device = get_next_sas_device(ioc))) {
10021 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10022 		    sas_device->sas_address_parent)) {
10023 			_scsih_sas_device_remove(ioc, sas_device);
10024 			sas_device_put(sas_device);
10025 			continue;
10026 		} else if (!sas_device->starget) {
10027 			/*
10028 			 * When async scanning is enabled, it's not possible to
10029 			 * remove devices while scanning is turned on due to an
10030 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10031 			 * sysfs_addrm_start()
10032 			 */
10033 			if (!ioc->is_driver_loading) {
10034 				mpt3sas_transport_port_remove(ioc,
10035 				    sas_device->sas_address,
10036 				    sas_device->sas_address_parent);
10037 				_scsih_sas_device_remove(ioc, sas_device);
10038 				sas_device_put(sas_device);
10039 				continue;
10040 			}
10041 		}
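		/* Move the device onto the active list and drop the lookup
		 * reference taken by get_next_sas_device().
		 */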
10042 		sas_device_make_active(ioc, sas_device);
10043 		sas_device_put(sas_device);
10044 	}
10045 }
10046 
10047 /**
10048  * get_next_pcie_device - Get the next pcie device
10049  * @ioc: per adapter object
10050  *
10051  * Get the next pcie device from pcie_device_init_list list.
10052  *
10053  * Return: pcie device structure if the pcie_device_init_list list is not
10054  * empty, otherwise NULL.
10055  */
10056 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10057 {
10058 	struct _pcie_device *pcie_device = NULL;
10059 	unsigned long flags;
10060 
10061 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10062 	if (!list_empty(&ioc->pcie_device_init_list)) {
10063 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10064 				struct _pcie_device, list);
10065 		pcie_device_get(pcie_device);
10066 	}
10067 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10068 
10069 	return pcie_device;
10070 }
10071 
10072 /**
10073  * pcie_device_make_active - Add pcie device to pcie_device_list list
10074  * @ioc: per adapter object
10075  * @pcie_device: pcie device object
10076  *
10077  * Add the pcie device which has registered with the SCSI transport layer to
10078  * pcie_device_list list
10079  */
10080 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10081 		struct _pcie_device *pcie_device)
10082 {
10083 	unsigned long flags;
10084 
10085 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10086 
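	/* If the device is still on a list, drop that list's reference;
	 * either way take a new reference for the destination list.
	 */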
10087 	if (!list_empty(&pcie_device->list)) {
10088 		list_del_init(&pcie_device->list);
10089 		pcie_device_put(pcie_device);
10090 	}
10091 	pcie_device_get(pcie_device);
10092 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10093 
10094 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10095 }
10096 
10097 /**
10098  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10099  * @ioc: per adapter object
10100  *
10101  * Called during initial loading of the driver.
10102  */
10103 static void
10104 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10105 {
10106 	struct _pcie_device *pcie_device;
10107 	int rc;
10108 
10109 	/* PCIe Device List */
10110 	while ((pcie_device = get_next_pcie_device(ioc))) {
10111 		if (pcie_device->starget) {
10112 			pcie_device_put(pcie_device);
10113 			continue;
10114 		}
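		/* Devices reporting a blocked access status are moved to the
		 * active list without being reported to scsi-ml.
		 */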
10115 		if (pcie_device->access_status ==
10116 		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10117 			pcie_device_make_active(ioc, pcie_device);
10118 			pcie_device_put(pcie_device);
10119 			continue;
10120 		}
10121 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10122 			pcie_device->id, 0);
10123 		if (rc) {
10124 			_scsih_pcie_device_remove(ioc, pcie_device);
10125 			pcie_device_put(pcie_device);
10126 			continue;
10127 		} else if (!pcie_device->starget) {
10128 			/*
10129 			 * When async scanning is enabled, it's not possible to
10130 			 * remove devices while scanning is turned on due to an
10131 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10132 			 * sysfs_addrm_start()
10133 			 */
10134 			if (!ioc->is_driver_loading) {
10135 				/* TODO -- Need to find out whether this
10136 				 * condition will occur or not
10137 				 */
10138 				_scsih_pcie_device_remove(ioc, pcie_device);
10139 				pcie_device_put(pcie_device);
10140 				continue;
10141 			}
10142 		}
10143 		pcie_device_make_active(ioc, pcie_device);
10144 		pcie_device_put(pcie_device);
10145 	}
10146 }
10147 
10148 /**
10149  * _scsih_probe_devices - probing for devices
10150  * @ioc: per adapter object
10151  *
10152  * Called during initial loading of the driver.
10153  */
10154 static void
10155 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10156 {
10157 	u16 volume_mapping_flags;
10158 
10159 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10160 		return;  /* return when IOC doesn't support initiator mode */
10161 
10162 	_scsih_probe_boot_devices(ioc);
10163 
10164 	if (ioc->ir_firmware) {
10165 		volume_mapping_flags =
10166 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10167 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
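		/* Report RAID volumes before SAS devices when low volume
		 * mapping mode is enabled; otherwise report the SAS devices
		 * first.
		 */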
10168 		if (volume_mapping_flags ==
10169 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10170 			_scsih_probe_raid(ioc);
10171 			_scsih_probe_sas(ioc);
10172 		} else {
10173 			_scsih_probe_sas(ioc);
10174 			_scsih_probe_raid(ioc);
10175 		}
10176 	} else {
10177 		_scsih_probe_sas(ioc);
10178 		_scsih_probe_pcie(ioc);
10179 	}
10180 }
10181 
10182 /**
10183  * scsih_scan_start - scsi lld callback for .scan_start
10184  * @shost: SCSI host pointer
10185  *
10186  * The shost has the ability to discover targets on its own instead
10187  * of scanning the entire bus.  In our implementation, we will kick off
10188  * firmware discovery.
10189  */
10190 static void
10191 scsih_scan_start(struct Scsi_Host *shost)
10192 {
10193 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10194 	int rc;
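
	/* If diag_buffer_enable is set to a non-default value, post the
	 * firmware diagnostic buffers before discovery starts.
	 */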
10195 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10196 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10197 
10198 	if (disable_discovery > 0)
10199 		return;
10200 
10201 	ioc->start_scan = 1;
10202 	rc = mpt3sas_port_enable(ioc);
10203 
10204 	if (rc != 0)
10205 		ioc_info(ioc, "port enable: FAILED\n");
10206 }
10207 
10208 /**
10209  * scsih_scan_finished - scsi lld callback for .scan_finished
10210  * @shost: SCSI host pointer
10211  * @time: elapsed time of the scan in jiffies
10212  *
10213  * This function will be called periodically until it returns 1 with the
10214  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10215  * we wait for firmware discovery to complete, then return 1.
10216  */
10217 static int
10218 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10219 {
10220 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10221 
10222 	if (disable_discovery > 0) {
10223 		ioc->is_driver_loading = 0;
10224 		ioc->wait_for_discovery_to_complete = 0;
10225 		return 1;
10226 	}
10227 
10228 	if (time >= (300 * HZ)) {
10229 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10230 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10231 		ioc->is_driver_loading = 0;
10232 		return 1;
10233 	}
10234 
10235 	if (ioc->start_scan)
10236 		return 0;
10237 
10238 	if (ioc->start_scan_failed) {
10239 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10240 			 ioc->start_scan_failed);
10241 		ioc->is_driver_loading = 0;
10242 		ioc->wait_for_discovery_to_complete = 0;
10243 		ioc->remove_host = 1;
10244 		return 1;
10245 	}
10246 
10247 	ioc_info(ioc, "port enable: SUCCESS\n");
10248 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10249 
10250 	if (ioc->wait_for_discovery_to_complete) {
10251 		ioc->wait_for_discovery_to_complete = 0;
10252 		_scsih_probe_devices(ioc);
10253 	}
10254 	mpt3sas_base_start_watchdog(ioc);
10255 	ioc->is_driver_loading = 0;
10256 	return 1;
10257 }
10258 
10259 /* shost template for SAS 2.0 HBA devices */
10260 static struct scsi_host_template mpt2sas_driver_template = {
10261 	.module				= THIS_MODULE,
10262 	.name				= "Fusion MPT SAS Host",
10263 	.proc_name			= MPT2SAS_DRIVER_NAME,
10264 	.queuecommand			= scsih_qcmd,
10265 	.target_alloc			= scsih_target_alloc,
10266 	.slave_alloc			= scsih_slave_alloc,
10267 	.slave_configure		= scsih_slave_configure,
10268 	.target_destroy			= scsih_target_destroy,
10269 	.slave_destroy			= scsih_slave_destroy,
10270 	.scan_finished			= scsih_scan_finished,
10271 	.scan_start			= scsih_scan_start,
10272 	.change_queue_depth		= scsih_change_queue_depth,
10273 	.eh_abort_handler		= scsih_abort,
10274 	.eh_device_reset_handler	= scsih_dev_reset,
10275 	.eh_target_reset_handler	= scsih_target_reset,
10276 	.eh_host_reset_handler		= scsih_host_reset,
10277 	.bios_param			= scsih_bios_param,
10278 	.can_queue			= 1,
10279 	.this_id			= -1,
10280 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
10281 	.max_sectors			= 32767,
10282 	.cmd_per_lun			= 7,
10283 	.shost_attrs			= mpt3sas_host_attrs,
10284 	.sdev_attrs			= mpt3sas_dev_attrs,
10285 	.track_queue_depth		= 1,
10286 	.cmd_size			= sizeof(struct scsiio_tracker),
10287 };
10288 
10289 /* raid transport support for SAS 2.0 HBA devices */
10290 static struct raid_function_template mpt2sas_raid_functions = {
10291 	.cookie		= &mpt2sas_driver_template,
10292 	.is_raid	= scsih_is_raid,
10293 	.get_resync	= scsih_get_resync,
10294 	.get_state	= scsih_get_state,
10295 };
10296 
10297 /* shost template for SAS 3.0 HBA devices */
10298 static struct scsi_host_template mpt3sas_driver_template = {
10299 	.module				= THIS_MODULE,
10300 	.name				= "Fusion MPT SAS Host",
10301 	.proc_name			= MPT3SAS_DRIVER_NAME,
10302 	.queuecommand			= scsih_qcmd,
10303 	.target_alloc			= scsih_target_alloc,
10304 	.slave_alloc			= scsih_slave_alloc,
10305 	.slave_configure		= scsih_slave_configure,
10306 	.target_destroy			= scsih_target_destroy,
10307 	.slave_destroy			= scsih_slave_destroy,
10308 	.scan_finished			= scsih_scan_finished,
10309 	.scan_start			= scsih_scan_start,
10310 	.change_queue_depth		= scsih_change_queue_depth,
10311 	.eh_abort_handler		= scsih_abort,
10312 	.eh_device_reset_handler	= scsih_dev_reset,
10313 	.eh_target_reset_handler	= scsih_target_reset,
10314 	.eh_host_reset_handler		= scsih_host_reset,
10315 	.bios_param			= scsih_bios_param,
10316 	.can_queue			= 1,
10317 	.this_id			= -1,
10318 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
10319 	.max_sectors			= 32767,
10320 	.max_segment_size		= 0xffffffff,
10321 	.cmd_per_lun			= 7,
10322 	.shost_attrs			= mpt3sas_host_attrs,
10323 	.sdev_attrs			= mpt3sas_dev_attrs,
10324 	.track_queue_depth		= 1,
10325 	.cmd_size			= sizeof(struct scsiio_tracker),
10326 };
10327 
10328 /* raid transport support for SAS 3.0 HBA devices */
10329 static struct raid_function_template mpt3sas_raid_functions = {
10330 	.cookie		= &mpt3sas_driver_template,
10331 	.is_raid	= scsih_is_raid,
10332 	.get_resync	= scsih_get_resync,
10333 	.get_state	= scsih_get_state,
10334 };
10335 
10336 /**
10337  * _scsih_determine_hba_mpi_version - determine in which MPI version class
10338  *					this device belongs to.
10339  * @pdev: PCI device struct
10340  *
10341  * Return: MPI2_VERSION for SAS 2.0 HBA devices,
10342  *	MPI25_VERSION for SAS 3.0 HBA devices, and
10343  *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices; 0 otherwise.
10344  */
10345 static u16
10346 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10347 {
10348 
10349 	switch (pdev->device) {
10350 	case MPI2_MFGPAGE_DEVID_SSS6200:
10351 	case MPI2_MFGPAGE_DEVID_SAS2004:
10352 	case MPI2_MFGPAGE_DEVID_SAS2008:
10353 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
10354 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
10355 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
10356 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
10357 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
10358 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
10359 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
10360 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
10361 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
10362 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
10363 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
10364 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
10365 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
10366 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
10367 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10368 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10369 		return MPI2_VERSION;
10370 	case MPI25_MFGPAGE_DEVID_SAS3004:
10371 	case MPI25_MFGPAGE_DEVID_SAS3008:
10372 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
10373 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
10374 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
10375 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
10376 		return MPI25_VERSION;
10377 	case MPI26_MFGPAGE_DEVID_SAS3216:
10378 	case MPI26_MFGPAGE_DEVID_SAS3224:
10379 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
10380 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
10381 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
10382 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
10383 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
10384 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
10385 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
10386 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
10387 	case MPI26_MFGPAGE_DEVID_SAS3508:
10388 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
10389 	case MPI26_MFGPAGE_DEVID_SAS3408:
10390 	case MPI26_MFGPAGE_DEVID_SAS3516:
10391 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
10392 	case MPI26_MFGPAGE_DEVID_SAS3416:
10393 	case MPI26_MFGPAGE_DEVID_SAS3616:
10394 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10395 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10396 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10397 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10398 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10399 		return MPI26_VERSION;
10400 	}
10401 	return 0;
10402 }
10403 
10404 /**
10405  * _scsih_probe - attach and add scsi host
10406  * @pdev: PCI device struct
10407  * @id: pci device id
10408  *
10409  * Return: 0 success, anything else error.
10410  */
10411 static int
10412 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10413 {
10414 	struct MPT3SAS_ADAPTER *ioc;
10415 	struct Scsi_Host *shost = NULL;
10416 	int rv;
10417 	u16 hba_mpi_version;
10418 
10419 	/* Determine in which MPI version class this pci device belongs */
10420 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10421 	if (hba_mpi_version == 0)
10422 		return -ENODEV;
10423 
10424 	/* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
10425 	 * for other generation HBAs return -ENODEV.
10426 	 */
10427 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10428 		return -ENODEV;
10429 
10430 	/* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
10431 	 * for other generation HBAs return -ENODEV.
10432 	 */
10433 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10434 		|| hba_mpi_version ==  MPI26_VERSION)))
10435 		return -ENODEV;
10436 
10437 	switch (hba_mpi_version) {
10438 	case MPI2_VERSION:
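		/* Disable ASPM link states (L0s/L1) and clock power management
		 * for SAS 2.0 HBAs.
		 */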
10439 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10440 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10441 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
10442 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10443 		  sizeof(struct MPT3SAS_ADAPTER));
10444 		if (!shost)
10445 			return -ENODEV;
10446 		ioc = shost_priv(shost);
10447 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10448 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10449 		ioc->id = mpt2_ids++;
10450 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10451 		switch (pdev->device) {
10452 		case MPI2_MFGPAGE_DEVID_SSS6200:
10453 			ioc->is_warpdrive = 1;
10454 			ioc->hide_ir_msg = 1;
10455 			break;
10456 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10457 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10458 			ioc->is_mcpu_endpoint = 1;
10459 			break;
10460 		default:
10461 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10462 			break;
10463 		}
10464 		break;
10465 	case MPI25_VERSION:
10466 	case MPI26_VERSION:
10467 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
10468 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10469 		  sizeof(struct MPT3SAS_ADAPTER));
10470 		if (!shost)
10471 			return -ENODEV;
10472 		ioc = shost_priv(shost);
10473 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10474 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10475 		ioc->id = mpt3_ids++;
10476 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10477 		switch (pdev->device) {
10478 		case MPI26_MFGPAGE_DEVID_SAS3508:
10479 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10480 		case MPI26_MFGPAGE_DEVID_SAS3408:
10481 		case MPI26_MFGPAGE_DEVID_SAS3516:
10482 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10483 		case MPI26_MFGPAGE_DEVID_SAS3416:
10484 		case MPI26_MFGPAGE_DEVID_SAS3616:
10485 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10486 			ioc->is_gen35_ioc = 1;
10487 			break;
10488 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10489 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10490 			dev_info(&pdev->dev,
10491 			    "HBA is in Configurable Secure mode\n");
10492 			/* fall through */
10493 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10494 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10495 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10496 			break;
10497 		default:
10498 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10499 		}
10500 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10501 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10502 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10503 			ioc->combined_reply_queue = 1;
10504 			if (ioc->is_gen35_ioc)
10505 				ioc->combined_reply_index_count =
10506 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10507 			else
10508 				ioc->combined_reply_index_count =
10509 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10510 		}
10511 		break;
10512 	default:
10513 		return -ENODEV;
10514 	}
10515 
10516 	INIT_LIST_HEAD(&ioc->list);
10517 	spin_lock(&gioc_lock);
10518 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10519 	spin_unlock(&gioc_lock);
10520 	ioc->shost = shost;
10521 	ioc->pdev = pdev;
10522 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10523 	ioc->tm_cb_idx = tm_cb_idx;
10524 	ioc->ctl_cb_idx = ctl_cb_idx;
10525 	ioc->base_cb_idx = base_cb_idx;
10526 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10527 	ioc->transport_cb_idx = transport_cb_idx;
10528 	ioc->scsih_cb_idx = scsih_cb_idx;
10529 	ioc->config_cb_idx = config_cb_idx;
10530 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10531 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10532 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10533 	ioc->logging_level = logging_level;
10534 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10535 	/*
10536 	 * Enable MEMORY MOVE support flag.
10537 	 */
10538 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10539 
10540 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10541 
10542 	/* misc semaphores and spin locks */
10543 	mutex_init(&ioc->reset_in_progress_mutex);
10544 	/* initializing pci_access_mutex lock */
10545 	mutex_init(&ioc->pci_access_mutex);
10546 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10547 	spin_lock_init(&ioc->scsi_lookup_lock);
10548 	spin_lock_init(&ioc->sas_device_lock);
10549 	spin_lock_init(&ioc->sas_node_lock);
10550 	spin_lock_init(&ioc->fw_event_lock);
10551 	spin_lock_init(&ioc->raid_device_lock);
10552 	spin_lock_init(&ioc->pcie_device_lock);
10553 	spin_lock_init(&ioc->diag_trigger_lock);
10554 
10555 	INIT_LIST_HEAD(&ioc->sas_device_list);
10556 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
10557 	INIT_LIST_HEAD(&ioc->sas_expander_list);
10558 	INIT_LIST_HEAD(&ioc->enclosure_list);
10559 	INIT_LIST_HEAD(&ioc->pcie_device_list);
10560 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10561 	INIT_LIST_HEAD(&ioc->fw_event_list);
10562 	INIT_LIST_HEAD(&ioc->raid_device_list);
10563 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10564 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
10565 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
10566 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10567 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10568 	INIT_LIST_HEAD(&ioc->reply_queue_list);
10569 
10570 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10571 
10572 	/* init shost parameters */
10573 	shost->max_cmd_len = 32;
10574 	shost->max_lun = max_lun;
10575 	shost->transportt = mpt3sas_transport_template;
10576 	shost->unique_id = ioc->id;
10577 
10578 	if (ioc->is_mcpu_endpoint) {
10579 		/* mCPU MPI supports 64K max IO */
10580 		shost->max_sectors = 128;
10581 		ioc_info(ioc, "The max_sectors value is set to %d\n",
10582 			 shost->max_sectors);
10583 	} else {
10584 		if (max_sectors != 0xFFFF) {
10585 			if (max_sectors < 64) {
10586 				shost->max_sectors = 64;
10587 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10588 					 max_sectors);
10589 			} else if (max_sectors > 32767) {
10590 				shost->max_sectors = 32767;
10591 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
10592 					 max_sectors);
10593 			} else {
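				/* Clear bit 0 so max_sectors is always an even value. */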
10594 				shost->max_sectors = max_sectors & 0xFFFE;
10595 				ioc_info(ioc, "The max_sectors value is set to %d\n",
10596 					 shost->max_sectors);
10597 			}
10598 		}
10599 	}
10600 	/* register EEDP capabilities with SCSI layer */
10601 	if (prot_mask > 0)
10602 		scsi_host_set_prot(shost, prot_mask);
10603 	else
10604 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10605 				   | SHOST_DIF_TYPE2_PROTECTION
10606 				   | SHOST_DIF_TYPE3_PROTECTION);
10607 
10608 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10609 
10610 	/* event thread */
10611 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10612 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
10613 	ioc->firmware_event_thread = alloc_ordered_workqueue(
10614 	    ioc->firmware_event_name, 0);
10615 	if (!ioc->firmware_event_thread) {
10616 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10617 			__FILE__, __LINE__, __func__);
10618 		rv = -ENODEV;
10619 		goto out_thread_fail;
10620 	}
10621 
10622 	ioc->is_driver_loading = 1;
10623 	if ((mpt3sas_base_attach(ioc))) {
10624 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10625 			__FILE__, __LINE__, __func__);
10626 		rv = -ENODEV;
10627 		goto out_attach_fail;
10628 	}
10629 
10630 	if (ioc->is_warpdrive) {
10631 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
10632 			ioc->hide_drives = 0;
10633 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
10634 			ioc->hide_drives = 1;
10635 		else {
10636 			if (mpt3sas_get_num_volumes(ioc))
10637 				ioc->hide_drives = 1;
10638 			else
10639 				ioc->hide_drives = 0;
10640 		}
10641 	} else
10642 		ioc->hide_drives = 0;
10643 
10644 	rv = scsi_add_host(shost, &pdev->dev);
10645 	if (rv) {
10646 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10647 			__FILE__, __LINE__, __func__);
10648 		goto out_add_shost_fail;
10649 	}
10650 
10651 	scsi_scan_host(shost);
10652 	return 0;
10653 out_add_shost_fail:
10654 	mpt3sas_base_detach(ioc);
10655  out_attach_fail:
10656 	destroy_workqueue(ioc->firmware_event_thread);
10657  out_thread_fail:
10658 	spin_lock(&gioc_lock);
10659 	list_del(&ioc->list);
10660 	spin_unlock(&gioc_lock);
10661 	scsi_host_put(shost);
10662 	return rv;
10663 }
10664 
10665 #ifdef CONFIG_PM
10666 /**
10667  * scsih_suspend - power management suspend main entry point
10668  * @pdev: PCI device struct
10669  * @state: PM state change to (usually PCI_D3)
10670  *
10671  * Return: 0 success, anything else error.
10672  */
10673 static int
10674 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10675 {
10676 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10677 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10678 	pci_power_t device_state;
10679 
10680 	mpt3sas_base_stop_watchdog(ioc);
10681 	flush_scheduled_work();
10682 	scsi_block_requests(shost);
10683 	device_state = pci_choose_state(pdev, state);
10684 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10685 		 pdev, pci_name(pdev), device_state);
10686 
10687 	pci_save_state(pdev);
10688 	mpt3sas_base_free_resources(ioc);
10689 	pci_set_power_state(pdev, device_state);
10690 	return 0;
10691 }
10692 
10693 /**
10694  * scsih_resume - power management resume main entry point
10695  * @pdev: PCI device struct
10696  *
10697  * Return: 0 success, anything else error.
10698  */
10699 static int
10700 scsih_resume(struct pci_dev *pdev)
10701 {
10702 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10703 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10704 	pci_power_t device_state = pdev->current_state;
10705 	int r;
10706 
10707 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10708 		 pdev, pci_name(pdev), device_state);
10709 
10710 	pci_set_power_state(pdev, PCI_D0);
10711 	pci_enable_wake(pdev, PCI_D0, 0);
10712 	pci_restore_state(pdev);
10713 	ioc->pdev = pdev;
10714 	r = mpt3sas_base_map_resources(ioc);
10715 	if (r)
10716 		return r;
10717 
10718 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10719 	scsi_unblock_requests(shost);
10720 	mpt3sas_base_start_watchdog(ioc);
10721 	return 0;
10722 }
10723 #endif /* CONFIG_PM */
10724 
10725 /**
10726  * scsih_pci_error_detected - Called when a PCI error is detected.
10727  * @pdev: PCI device struct
10728  * @state: PCI channel state
10729  *
10730  * Description: Called when a PCI error is detected.
10731  *
10732  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
10733  */
10734 static pci_ers_result_t
10735 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10736 {
10737 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10738 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10739 
10740 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10741 
10742 	switch (state) {
10743 	case pci_channel_io_normal:
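		/* Non-fatal error; I/O to the device still works. */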
10744 		return PCI_ERS_RESULT_CAN_RECOVER;
10745 	case pci_channel_io_frozen:
10746 		/* Fatal error, prepare for slot reset */
10747 		ioc->pci_error_recovery = 1;
10748 		scsi_block_requests(ioc->shost);
10749 		mpt3sas_base_stop_watchdog(ioc);
10750 		mpt3sas_base_free_resources(ioc);
10751 		return PCI_ERS_RESULT_NEED_RESET;
10752 	case pci_channel_io_perm_failure:
10753 		/* Permanent error, prepare for device removal */
10754 		ioc->pci_error_recovery = 1;
10755 		mpt3sas_base_stop_watchdog(ioc);
10756 		_scsih_flush_running_cmds(ioc);
10757 		return PCI_ERS_RESULT_DISCONNECT;
10758 	}
10759 	return PCI_ERS_RESULT_NEED_RESET;
10760 }
10761 
10762 /**
10763  * scsih_pci_slot_reset - Called when PCI slot has been reset.
10764  * @pdev: PCI device struct
10765  *
10766  * Description: This routine is called by the pci error recovery
10767  * code after the PCI slot has been reset, just before we
10768  * should resume normal operations.
10769  */
10770 static pci_ers_result_t
10771 scsih_pci_slot_reset(struct pci_dev *pdev)
10772 {
10773 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10774 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10775 	int rc;
10776 
10777 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
10778 
10779 	ioc->pci_error_recovery = 0;
10780 	ioc->pdev = pdev;
10781 	pci_restore_state(pdev);
10782 	rc = mpt3sas_base_map_resources(ioc);
10783 	if (rc)
10784 		return PCI_ERS_RESULT_DISCONNECT;
10785 
10786 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10787 
10788 	ioc_warn(ioc, "hard reset: %s\n",
10789 		 (rc == 0) ? "success" : "failed");
10790 
10791 	if (!rc)
10792 		return PCI_ERS_RESULT_RECOVERED;
10793 	else
10794 		return PCI_ERS_RESULT_DISCONNECT;
10795 }
10796 
10797 /**
10798  * scsih_pci_resume() - resume normal ops after PCI reset
10799  * @pdev: pointer to PCI device
10800  *
10801  * Called when the error recovery driver tells us that its
10802  * OK to resume normal operation. Use completion to allow
10803  * halted scsi ops to resume.
10804  */
10805 static void
10806 scsih_pci_resume(struct pci_dev *pdev)
10807 {
10808 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10809 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10810 
10811 	ioc_info(ioc, "PCI error: resume callback!!\n");
10812 
10813 	mpt3sas_base_start_watchdog(ioc);
10814 	scsi_unblock_requests(ioc->shost);
10815 }
10816 
10817 /**
10818  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10819  * @pdev: pointer to PCI device
10820  */
10821 static pci_ers_result_t
10822 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10823 {
10824 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10825 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10826 
10827 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10828 
10829 	/* TODO - dump whatever for debugging purposes */
10830 
10831 	/* This is called only if scsih_pci_error_detected() returns
10832 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10833 	 * works, no need to reset slot.
10834 	 */
10835 	return PCI_ERS_RESULT_RECOVERED;
10836 }
10837 
10838 /**
10839  * scsih_ncq_prio_supp - Check for NCQ command priority support
10840  * @sdev: scsi device struct
10841  *
10842  * This is called when a user indicates they would like to enable
10843  * ncq command priorities. This works only on SATA devices.
10844  */
10845 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10846 {
10847 	unsigned char *buf;
10848 	bool ncq_prio_supp = false;
10849 
10850 	if (!scsi_device_supports_vpd(sdev))
10851 		return ncq_prio_supp;
10852 
10853 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10854 	if (!buf)
10855 		return ncq_prio_supp;
10856 
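	/* ATA Information VPD page (0x89): bit 4 of byte 213 indicates
	 * NCQ priority support.
	 */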
10857 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10858 		ncq_prio_supp = (buf[213] >> 4) & 1;
10859 
10860 	kfree(buf);
10861 	return ncq_prio_supp;
10862 }
10863 /*
10864  * The pci device ids are defined in mpi/mpi2_cnfg.h.
10865  */
10866 static const struct pci_device_id mpt3sas_pci_table[] = {
10867 	/* Spitfire ~ 2004 */
10868 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10869 		PCI_ANY_ID, PCI_ANY_ID },
10870 	/* Falcon ~ 2008 */
10871 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10872 		PCI_ANY_ID, PCI_ANY_ID },
10873 	/* Liberator ~ 2108 */
10874 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10875 		PCI_ANY_ID, PCI_ANY_ID },
10876 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10877 		PCI_ANY_ID, PCI_ANY_ID },
10878 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10879 		PCI_ANY_ID, PCI_ANY_ID },
10880 	/* Meteor ~ 2116 */
10881 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10882 		PCI_ANY_ID, PCI_ANY_ID },
10883 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10884 		PCI_ANY_ID, PCI_ANY_ID },
10885 	/* Thunderbolt ~ 2208 */
10886 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10887 		PCI_ANY_ID, PCI_ANY_ID },
10888 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10889 		PCI_ANY_ID, PCI_ANY_ID },
10890 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10891 		PCI_ANY_ID, PCI_ANY_ID },
10892 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10893 		PCI_ANY_ID, PCI_ANY_ID },
10894 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10895 		PCI_ANY_ID, PCI_ANY_ID },
10896 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10897 		PCI_ANY_ID, PCI_ANY_ID },
10898 	/* Mustang ~ 2308 */
10899 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10900 		PCI_ANY_ID, PCI_ANY_ID },
10901 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10902 		PCI_ANY_ID, PCI_ANY_ID },
10903 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10904 		PCI_ANY_ID, PCI_ANY_ID },
10905 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
10906 		PCI_ANY_ID, PCI_ANY_ID },
10907 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
10908 		PCI_ANY_ID, PCI_ANY_ID },
10909 	/* SSS6200 */
10910 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10911 		PCI_ANY_ID, PCI_ANY_ID },
10912 	/* Fury ~ 3004 and 3008 */
10913 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10914 		PCI_ANY_ID, PCI_ANY_ID },
10915 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10916 		PCI_ANY_ID, PCI_ANY_ID },
10917 	/* Invader ~ 3108 */
10918 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10919 		PCI_ANY_ID, PCI_ANY_ID },
10920 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10921 		PCI_ANY_ID, PCI_ANY_ID },
10922 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10923 		PCI_ANY_ID, PCI_ANY_ID },
10924 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10925 		PCI_ANY_ID, PCI_ANY_ID },
10926 	/* Cutlass ~ 3216 and 3224 */
10927 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10928 		PCI_ANY_ID, PCI_ANY_ID },
10929 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10930 		PCI_ANY_ID, PCI_ANY_ID },
10931 	/* Intruder ~ 3316 and 3324 */
10932 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10933 		PCI_ANY_ID, PCI_ANY_ID },
10934 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10935 		PCI_ANY_ID, PCI_ANY_ID },
10936 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10937 		PCI_ANY_ID, PCI_ANY_ID },
10938 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10939 		PCI_ANY_ID, PCI_ANY_ID },
10940 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10941 		PCI_ANY_ID, PCI_ANY_ID },
10942 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10943 		PCI_ANY_ID, PCI_ANY_ID },
10944 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10945 		PCI_ANY_ID, PCI_ANY_ID },
10946 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10947 		PCI_ANY_ID, PCI_ANY_ID },
10948 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
10949 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10950 		PCI_ANY_ID, PCI_ANY_ID },
10951 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10952 		PCI_ANY_ID, PCI_ANY_ID },
10953 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10954 		PCI_ANY_ID, PCI_ANY_ID },
10955 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10956 		PCI_ANY_ID, PCI_ANY_ID },
10957 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10958 		PCI_ANY_ID, PCI_ANY_ID },
10959 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10960 		PCI_ANY_ID, PCI_ANY_ID },
10961 	/* Mercator ~ 3616 */
10962 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10963 		PCI_ANY_ID, PCI_ANY_ID },
10964 
10965 	/* Aero SI 0x00E1 Configurable Secure
10966 	 * 0x00E2 Hard Secure
10967 	 */
10968 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10969 		PCI_ANY_ID, PCI_ANY_ID },
10970 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10971 		PCI_ANY_ID, PCI_ANY_ID },
10972 
10973 	/* Atlas PCIe Switch Management Port */
10974 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
10975 		PCI_ANY_ID, PCI_ANY_ID },
10976 
10977 	/* Sea SI 0x00E5 Configurable Secure
10978 	 * 0x00E6 Hard Secure
10979 	 */
10980 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10981 		PCI_ANY_ID, PCI_ANY_ID },
10982 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10983 		PCI_ANY_ID, PCI_ANY_ID },
10984 
10985 	{0}     /* Terminating entry */
10986 };
10987 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10988 
10989 static struct pci_error_handlers _mpt3sas_err_handler = {
10990 	.error_detected	= scsih_pci_error_detected,
10991 	.mmio_enabled	= scsih_pci_mmio_enabled,
10992 	.slot_reset	= scsih_pci_slot_reset,
10993 	.resume		= scsih_pci_resume,
10994 };
10995 
10996 static struct pci_driver mpt3sas_driver = {
10997 	.name		= MPT3SAS_DRIVER_NAME,
10998 	.id_table	= mpt3sas_pci_table,
10999 	.probe		= _scsih_probe,
11000 	.remove		= scsih_remove,
11001 	.shutdown	= scsih_shutdown,
11002 	.err_handler	= &_mpt3sas_err_handler,
11003 #ifdef CONFIG_PM
11004 	.suspend	= scsih_suspend,
11005 	.resume		= scsih_resume,
11006 #endif
11007 };
11008 
11009 /**
11010  * scsih_init - main entry point for this driver.
11011  *
11012  * Return: 0 success, anything else error.
11013  */
11014 static int
11015 scsih_init(void)
11016 {
11017 	mpt2_ids = 0;
11018 	mpt3_ids = 0;
11019 
11020 	mpt3sas_base_initialize_callback_handler();
11021 
11022 	/* queuecommand callback handler */
11023 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11024 
11025 	/* task management callback handler */
11026 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11027 
11028 	/* base internal commands callback handler */
11029 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11030 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11031 	    mpt3sas_port_enable_done);
11032 
11033 	/* transport internal commands callback handler */
11034 	transport_cb_idx = mpt3sas_base_register_callback_handler(
11035 	    mpt3sas_transport_done);
11036 
11037 	/* scsih internal commands callback handler */
11038 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11039 
11040 	/* configuration page API internal commands callback handler */
11041 	config_cb_idx = mpt3sas_base_register_callback_handler(
11042 	    mpt3sas_config_done);
11043 
11044 	/* ctl module callback handler */
11045 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11046 
11047 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11048 	    _scsih_tm_tr_complete);
11049 
11050 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11051 	    _scsih_tm_volume_tr_complete);
11052 
11053 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11054 	    _scsih_sas_control_complete);
11055 
11056 	return 0;
11057 }
11058 
11059 /**
11060  * scsih_exit - exit point for this driver (when it is a module).
11061  *
11062  * Releases the registered callback handlers and the raid/sas transport templates.
11063  */
11064 static void
11065 scsih_exit(void)
11066 {
11067 
11068 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11069 	mpt3sas_base_release_callback_handler(tm_cb_idx);
11070 	mpt3sas_base_release_callback_handler(base_cb_idx);
11071 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11072 	mpt3sas_base_release_callback_handler(transport_cb_idx);
11073 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
11074 	mpt3sas_base_release_callback_handler(config_cb_idx);
11075 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
11076 
11077 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11078 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11079 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11080 
11081 	/* raid transport support */
11082 	if (hbas_to_enumerate != 1)
11083 		raid_class_release(mpt3sas_raid_template);
11084 	if (hbas_to_enumerate != 2)
11085 		raid_class_release(mpt2sas_raid_template);
11086 	sas_release_transport(mpt3sas_transport_template);
11087 }
11088 
11089 /**
11090  * _mpt3sas_init - main entry point for this driver.
11091  *
11092  * Return: 0 success, anything else error.
11093  */
11094 static int __init
11095 _mpt3sas_init(void)
11096 {
11097 	int error;
11098 
11099 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11100 					MPT3SAS_DRIVER_VERSION);
11101 
11102 	mpt3sas_transport_template =
11103 	    sas_attach_transport(&mpt3sas_transport_functions);
11104 	if (!mpt3sas_transport_template)
11105 		return -ENODEV;
11106 
11107 	/* No need to attach the mpt3sas raid functions template
11108 	 * if the hbas_to_enumerate value is one.
11109 	 */
11110 	if (hbas_to_enumerate != 1) {
11111 		mpt3sas_raid_template =
11112 				raid_class_attach(&mpt3sas_raid_functions);
11113 		if (!mpt3sas_raid_template) {
11114 			sas_release_transport(mpt3sas_transport_template);
11115 			return -ENODEV;
11116 		}
11117 	}
11118 
11119 	/* No need to attach the mpt2sas raid functions template
11120 	 * if the hbas_to_enumerate value is two.
11121 	 */
11122 	if (hbas_to_enumerate != 2) {
11123 		mpt2sas_raid_template =
11124 				raid_class_attach(&mpt2sas_raid_functions);
11125 		if (!mpt2sas_raid_template) {
11126 			sas_release_transport(mpt3sas_transport_template);
11127 			return -ENODEV;
11128 		}
11129 	}
11130 
11131 	error = scsih_init();
11132 	if (error) {
11133 		scsih_exit();
11134 		return error;
11135 	}
11136 
11137 	mpt3sas_ctl_init(hbas_to_enumerate);
11138 
11139 	error = pci_register_driver(&mpt3sas_driver);
11140 	if (error)
11141 		scsih_exit();
11142 
11143 	return error;
11144 }
11145 
11146 /**
11147  * _mpt3sas_exit - exit point for this driver (when it is a module).
11148  *
11149  */
11150 static void __exit
11151 _mpt3sas_exit(void)
11152 {
11153 	pr_info("mpt3sas version %s unloading\n",
11154 				MPT3SAS_DRIVER_VERSION);
11155 
11156 	mpt3sas_ctl_exit(hbas_to_enumerate);
11157 
11158 	pci_unregister_driver(&mpt3sas_driver);
11159 
11160 	scsih_exit();
11161 }
11162 
11163 module_init(_mpt3sas_init);
11164 module_exit(_mpt3sas_exit);
11165