1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <asm/unaligned.h>
58 
59 #include "mpt3sas_base.h"
60 
61 #define RAID_CHANNEL 1
62 
63 #define PCIE_CHANNEL 2
64 
65 /* forward proto's */
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 	struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
69 
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 	struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 	u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 	struct _pcie_device *pcie_device);
77 static void
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 
81 /* global parameters */
82 LIST_HEAD(mpt3sas_ioc_list);
83 /* global ioc lock for list operations */
84 DEFINE_SPINLOCK(gioc_lock);
85 
86 MODULE_AUTHOR(MPT3SAS_AUTHOR);
87 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90 MODULE_ALIAS("mpt2sas");
91 
92 /* local parameters */
93 static u8 scsi_io_cb_idx = -1;
94 static u8 tm_cb_idx = -1;
95 static u8 ctl_cb_idx = -1;
96 static u8 base_cb_idx = -1;
97 static u8 port_enable_cb_idx = -1;
98 static u8 transport_cb_idx = -1;
99 static u8 scsih_cb_idx = -1;
100 static u8 config_cb_idx = -1;
101 static int mpt2_ids;
102 static int mpt3_ids;
103 
104 static u8 tm_tr_cb_idx = -1;
105 static u8 tm_tr_volume_cb_idx = -1;
106 static u8 tm_sas_control_cb_idx = -1;
107 
108 /* command line options */
109 static u32 logging_level;
110 MODULE_PARM_DESC(logging_level,
111 	" bits for enabling additional logging info (default=0)");
112 
113 
114 static ushort max_sectors = 0xFFFF;
115 module_param(max_sectors, ushort, 0444);
116 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767, default=32767");
117 
118 
119 static int missing_delay[2] = {-1, -1};
120 module_param_array(missing_delay, int, NULL, 0444);
121 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
122 
123 /* scsi mid-layer global parameter is max_report_luns, which is 511 */
124 #define MPT3SAS_MAX_LUN (16895)
125 static u64 max_lun = MPT3SAS_MAX_LUN;
126 module_param(max_lun, ullong, 0444);
127 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
128 
129 static ushort hbas_to_enumerate;
130 module_param(hbas_to_enumerate, ushort, 0444);
131 MODULE_PARM_DESC(hbas_to_enumerate,
132 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 		  1 - enumerates only SAS 2.0 generation HBAs\n \
134 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
135 
136 /* diag_buffer_enable is bitwise
137  * bit 0 set = TRACE
138  * bit 1 set = SNAPSHOT
139  * bit 2 set = EXTENDED
140  *
141  * Any combination of these bits can be set.
142  */
143 static int diag_buffer_enable = -1;
144 module_param(diag_buffer_enable, int, 0444);
145 MODULE_PARM_DESC(diag_buffer_enable,
146 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
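/*
 * Example (illustrative only): diag_buffer_enable is a bitmask, so the
 * values above can be combined at load time, e.g.:
 *
 *   modprobe mpt3sas diag_buffer_enable=3   (post TRACE (1) + SNAPSHOT (2))
 *   modprobe mpt3sas diag_buffer_enable=5   (post TRACE (1) + EXTENDED (4))
 *
 * The parameter is read-only at runtime (perm 0444), so it must be set on
 * the modprobe command line or in modprobe.d configuration.
 */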
147 static int disable_discovery = -1;
148 module_param(disable_discovery, int, 0444);
149 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
150 
151 
152 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153 static int prot_mask = -1;
154 module_param(prot_mask, int, 0444);
155 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
156 
157 static bool enable_sdev_max_qd;
158 module_param(enable_sdev_max_qd, bool, 0444);
159 MODULE_PARM_DESC(enable_sdev_max_qd,
160 	"Enable sdev max qd as can_queue, def=disabled(0)");
161 
162 /* raid transport support */
163 static struct raid_template *mpt3sas_raid_template;
164 static struct raid_template *mpt2sas_raid_template;
165 
166 
167 /**
168  * struct sense_info - common structure for obtaining sense keys
169  * @skey: sense key
170  * @asc: additional sense code
171  * @ascq: additional sense code qualifier
172  */
173 struct sense_info {
174 	u8 skey;
175 	u8 asc;
176 	u8 ascq;
177 };
178 
179 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
180 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
181 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
182 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
183 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
184 /**
185  * struct fw_event_work - firmware event struct
186  * @list: link list framework
187  * @work: work object (queued on ioc->firmware_event_thread)
188  * @ioc: per adapter object
189  * @device_handle: device handle
190  * @VF_ID: virtual function id
191  * @VP_ID: virtual port id
192  * @ignore: flag meaning this event has been marked to ignore
193  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
194  * @refcount: kref for this event
195  * @event_data: reply event data payload follows
196  *
197  * This object is stored on ioc->fw_event_list.
198  */
199 struct fw_event_work {
200 	struct list_head	list;
201 	struct work_struct	work;
202 
203 	struct MPT3SAS_ADAPTER *ioc;
204 	u16			device_handle;
205 	u8			VF_ID;
206 	u8			VP_ID;
207 	u8			ignore;
208 	u16			event;
209 	struct kref		refcount;
210 	char			event_data[0] __aligned(4);
211 };
212 
213 static void fw_event_work_free(struct kref *r)
214 {
215 	kfree(container_of(r, struct fw_event_work, refcount));
216 }
217 
218 static void fw_event_work_get(struct fw_event_work *fw_work)
219 {
220 	kref_get(&fw_work->refcount);
221 }
222 
223 static void fw_event_work_put(struct fw_event_work *fw_work)
224 {
225 	kref_put(&fw_work->refcount, fw_event_work_free);
226 }
227 
228 static struct fw_event_work *alloc_fw_event_work(int len)
229 {
230 	struct fw_event_work *fw_event;
231 
232 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
233 	if (!fw_event)
234 		return NULL;
235 
236 	kref_init(&fw_event->refcount);
237 	return fw_event;
238 }
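/*
 * Illustrative sketch (not part of the driver flow): the fw_event_work
 * object is reference counted via @refcount.  alloc_fw_event_work() returns
 * it with an initial reference (kref_init() sets the count to 1); any code
 * path that keeps its own pointer takes an extra reference, and every path
 * drops its reference when done, e.g.:
 *
 *	fw_event = alloc_fw_event_work(event_data_length);
 *	if (!fw_event)
 *		return;				(allocation failed)
 *	fw_event_work_get(fw_event);		(reference for the queued work)
 *	... queue fw_event->work ...
 *	fw_event_work_put(fw_event);		(drop the allocation reference)
 *
 * The final fw_event_work_put() invokes fw_event_work_free(), which
 * kfree()s the object.
 */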
239 
240 /**
241  * struct _scsi_io_transfer - scsi io transfer
242  * @handle: sas device handle (assigned by firmware)
243  * @is_raid: flag set for hidden raid components
244  * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
245  * @data_length: data transfer length
246  * @data_dma: dma pointer to data
247  * @sense: sense data
248  * @lun: lun number
249  * @cdb_length: cdb length
250  * @cdb: cdb contents
251  * @timeout: timeout for this command
252  * @VF_ID: virtual function id
253  * @VP_ID: virtual port id
254  * @valid_reply: flag set for reply message
255  * @sense_length: sense length
256  * @ioc_status: ioc status
257  * @scsi_state: scsi state
258  * @scsi_status: scsi status
259  * @log_info: log information
260  * @transfer_length: data length transferred when there is a reply message
261  *
262  * Used for sending internal scsi commands to devices within this module.
263  * Refer to _scsi_send_scsi_io().
264  */
265 struct _scsi_io_transfer {
266 	u16	handle;
267 	u8	is_raid;
268 	enum dma_data_direction dir;
269 	u32	data_length;
270 	dma_addr_t data_dma;
271 	u8	sense[SCSI_SENSE_BUFFERSIZE];
272 	u32	lun;
273 	u8	cdb_length;
274 	u8	cdb[32];
275 	u8	timeout;
276 	u8	VF_ID;
277 	u8	VP_ID;
278 	u8	valid_reply;
279   /* the following fields are only valid when 'valid_reply = 1' */
280 	u32	sense_length;
281 	u16	ioc_status;
282 	u8	scsi_state;
283 	u8	scsi_status;
284 	u32	log_info;
285 	u32	transfer_length;
286 };
287 
288 /**
289  * _scsih_set_debug_level - global setting of ioc->logging_level.
290  * @val: string value written to the logging_level module parameter
291  * @kp: kernel_param structure for the logging_level parameter
292  *
293  * Note: The logging levels are defined in mpt3sas_debug.h.
294  */
295 static int
296 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
297 {
298 	int ret = param_set_int(val, kp);
299 	struct MPT3SAS_ADAPTER *ioc;
300 
301 	if (ret)
302 		return ret;
303 
304 	pr_info("setting logging_level(0x%08x)\n", logging_level);
305 	spin_lock(&gioc_lock);
306 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
307 		ioc->logging_level = logging_level;
308 	spin_unlock(&gioc_lock);
309 	return 0;
310 }
311 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
312 	&logging_level, 0644);
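/*
 * Note: because logging_level is registered with module_param_call() and
 * permission 0644, it can typically be changed at runtime through sysfs,
 * which invokes _scsih_set_debug_level() and applies the new value to every
 * adapter on mpt3sas_ioc_list, e.g. (illustrative):
 *
 *   echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 */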
313 
314 /**
315  * _scsih_srch_boot_sas_address - search based on sas_address
316  * @sas_address: sas address
317  * @boot_device: boot device object from bios page 2
318  *
319  * Return: 1 when there's a match, 0 means no match.
320  */
321 static inline int
322 _scsih_srch_boot_sas_address(u64 sas_address,
323 	Mpi2BootDeviceSasWwid_t *boot_device)
324 {
325 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
326 }
327 
328 /**
329  * _scsih_srch_boot_device_name - search based on device name
330  * @device_name: device name specified in the IDENTIFY frame
331  * @boot_device: boot device object from bios page 2
332  *
333  * Return: 1 when there's a match, 0 means no match.
334  */
335 static inline int
336 _scsih_srch_boot_device_name(u64 device_name,
337 	Mpi2BootDeviceDeviceName_t *boot_device)
338 {
339 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
340 }
341 
342 /**
343  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
344  * @enclosure_logical_id: enclosure logical id
345  * @slot_number: slot number
346  * @boot_device: boot device object from bios page 2
347  *
348  * Return: 1 when there's a match, 0 means no match.
349  */
350 static inline int
351 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
352 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
353 {
354 	return (enclosure_logical_id == le64_to_cpu(boot_device->
355 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
356 	    SlotNumber)) ? 1 : 0;
357 }
358 
359 /**
360  * _scsih_is_boot_device - search for matching boot device.
361  * @sas_address: sas address
362  * @device_name: device name specified in the IDENTIFY frame
363  * @enclosure_logical_id: enclosure logical id
364  * @slot: slot number
365  * @form: specifies boot device form
366  * @boot_device: boot device object from bios page 2
367  *
368  * Return: 1 when there's a match, 0 means no match.
369  */
370 static int
371 _scsih_is_boot_device(u64 sas_address, u64 device_name,
372 	u64 enclosure_logical_id, u16 slot, u8 form,
373 	Mpi2BiosPage2BootDevice_t *boot_device)
374 {
375 	int rc = 0;
376 
377 	switch (form) {
378 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
379 		if (!sas_address)
380 			break;
381 		rc = _scsih_srch_boot_sas_address(
382 		    sas_address, &boot_device->SasWwid);
383 		break;
384 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
385 		if (!enclosure_logical_id)
386 			break;
387 		rc = _scsih_srch_boot_encl_slot(
388 		    enclosure_logical_id,
389 		    slot, &boot_device->EnclosureSlot);
390 		break;
391 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
392 		if (!device_name)
393 			break;
394 		rc = _scsih_srch_boot_device_name(
395 		    device_name, &boot_device->DeviceName);
396 		break;
397 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
398 		break;
399 	}
400 
401 	return rc;
402 }
403 
404 /**
405  * _scsih_get_sas_address - set the sas_address for the given device handle
406  * @ioc: per adapter object
407  * @handle: device handle
408  * @sas_address: sas address
409  *
410  * Return: 0 success, non-zero when failure
411  */
412 static int
413 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
414 	u64 *sas_address)
415 {
416 	Mpi2SasDevicePage0_t sas_device_pg0;
417 	Mpi2ConfigReply_t mpi_reply;
418 	u32 ioc_status;
419 
420 	*sas_address = 0;
421 
422 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
423 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
424 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
425 			__FILE__, __LINE__, __func__);
426 		return -ENXIO;
427 	}
428 
429 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
430 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
431 		/* For HBA, vSES doesn't return HBA SAS address. Instead return
432 		 * vSES's sas address.
433 		 */
434 		if ((handle <= ioc->sas_hba.num_phys) &&
435 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
436 		   MPI2_SAS_DEVICE_INFO_SEP)))
437 			*sas_address = ioc->sas_hba.sas_address;
438 		else
439 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
440 		return 0;
441 	}
442 
443 	/* we hit this because the given parent handle doesn't exist */
444 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
445 		return -ENXIO;
446 
447 	/* else error case */
448 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
449 		handle, ioc_status, __FILE__, __LINE__, __func__);
450 	return -EIO;
451 }
452 
453 /**
454  * _scsih_determine_boot_device - determine boot device.
455  * @ioc: per adapter object
456  * @device: sas_device or pcie_device object
457  * @channel: SAS or PCIe channel
458  *
459  * Determines whether this device should be the first device reported to
460  * scsi-ml or the sas transport; this is used for persistent boot device
461  * support. There are primary, alternate, and current entries in bios page 2.
462  * The order of priority is primary, alternate, then current. This routine
463  * saves the corresponding device object.
464  * The saved data is used later in _scsih_probe_boot_devices().
465  */
466 static void
467 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
468 	u32 channel)
469 {
470 	struct _sas_device *sas_device;
471 	struct _pcie_device *pcie_device;
472 	struct _raid_device *raid_device;
473 	u64 sas_address;
474 	u64 device_name;
475 	u64 enclosure_logical_id;
476 	u16 slot;
477 
478 	 /* only process this function when driver loads */
479 	if (!ioc->is_driver_loading)
480 		return;
481 
482 	 /* no Bios, return immediately */
483 	if (!ioc->bios_pg3.BiosVersion)
484 		return;
485 
486 	if (channel == RAID_CHANNEL) {
487 		raid_device = device;
488 		sas_address = raid_device->wwid;
489 		device_name = 0;
490 		enclosure_logical_id = 0;
491 		slot = 0;
492 	} else if (channel == PCIE_CHANNEL) {
493 		pcie_device = device;
494 		sas_address = pcie_device->wwid;
495 		device_name = 0;
496 		enclosure_logical_id = 0;
497 		slot = 0;
498 	} else {
499 		sas_device = device;
500 		sas_address = sas_device->sas_address;
501 		device_name = sas_device->device_name;
502 		enclosure_logical_id = sas_device->enclosure_logical_id;
503 		slot = sas_device->slot;
504 	}
505 
506 	if (!ioc->req_boot_device.device) {
507 		if (_scsih_is_boot_device(sas_address, device_name,
508 		    enclosure_logical_id, slot,
509 		    (ioc->bios_pg2.ReqBootDeviceForm &
510 		    MPI2_BIOSPAGE2_FORM_MASK),
511 		    &ioc->bios_pg2.RequestedBootDevice)) {
512 			dinitprintk(ioc,
513 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
514 					     __func__, (u64)sas_address));
515 			ioc->req_boot_device.device = device;
516 			ioc->req_boot_device.channel = channel;
517 		}
518 	}
519 
520 	if (!ioc->req_alt_boot_device.device) {
521 		if (_scsih_is_boot_device(sas_address, device_name,
522 		    enclosure_logical_id, slot,
523 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
524 		    MPI2_BIOSPAGE2_FORM_MASK),
525 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
526 			dinitprintk(ioc,
527 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
528 					     __func__, (u64)sas_address));
529 			ioc->req_alt_boot_device.device = device;
530 			ioc->req_alt_boot_device.channel = channel;
531 		}
532 	}
533 
534 	if (!ioc->current_boot_device.device) {
535 		if (_scsih_is_boot_device(sas_address, device_name,
536 		    enclosure_logical_id, slot,
537 		    (ioc->bios_pg2.CurrentBootDeviceForm &
538 		    MPI2_BIOSPAGE2_FORM_MASK),
539 		    &ioc->bios_pg2.CurrentBootDevice)) {
540 			dinitprintk(ioc,
541 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
542 					     __func__, (u64)sas_address));
543 			ioc->current_boot_device.device = device;
544 			ioc->current_boot_device.channel = channel;
545 		}
546 	}
547 }
548 
549 static struct _sas_device *
550 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
551 		struct MPT3SAS_TARGET *tgt_priv)
552 {
553 	struct _sas_device *ret;
554 
555 	assert_spin_locked(&ioc->sas_device_lock);
556 
557 	ret = tgt_priv->sas_dev;
558 	if (ret)
559 		sas_device_get(ret);
560 
561 	return ret;
562 }
563 
564 static struct _sas_device *
565 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
566 		struct MPT3SAS_TARGET *tgt_priv)
567 {
568 	struct _sas_device *ret;
569 	unsigned long flags;
570 
571 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
572 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
573 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
574 
575 	return ret;
576 }
577 
578 static struct _pcie_device *
579 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
580 	struct MPT3SAS_TARGET *tgt_priv)
581 {
582 	struct _pcie_device *ret;
583 
584 	assert_spin_locked(&ioc->pcie_device_lock);
585 
586 	ret = tgt_priv->pcie_dev;
587 	if (ret)
588 		pcie_device_get(ret);
589 
590 	return ret;
591 }
592 
593 /**
594  * mpt3sas_get_pdev_from_target - pcie device search
595  * @ioc: per adapter object
596  * @tgt_priv: starget private object
597  *
598  * Context: This function acquires ioc->pcie_device_lock and releases it
599  * before returning the pcie_device object.
600  *
601  * This searches for the pcie_device of the given target, then returns it.
602  */
603 static struct _pcie_device *
604 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
605 	struct MPT3SAS_TARGET *tgt_priv)
606 {
607 	struct _pcie_device *ret;
608 	unsigned long flags;
609 
610 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
611 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
612 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
613 
614 	return ret;
615 }
616 
617 struct _sas_device *
618 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
619 					u64 sas_address)
620 {
621 	struct _sas_device *sas_device;
622 
623 	assert_spin_locked(&ioc->sas_device_lock);
624 
625 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
626 		if (sas_device->sas_address == sas_address)
627 			goto found_device;
628 
629 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
630 		if (sas_device->sas_address == sas_address)
631 			goto found_device;
632 
633 	return NULL;
634 
635 found_device:
636 	sas_device_get(sas_device);
637 	return sas_device;
638 }
639 
640 /**
641  * mpt3sas_get_sdev_by_addr - sas device search
642  * @ioc: per adapter object
643  * @sas_address: sas address
644  * Context: This function acquires ioc->sas_device_lock and releases it
645  * before returning the sas_device object.
646  * This searches for sas_device based on sas_address, then returns the
647  * sas_device object.
648  */
649 struct _sas_device *
650 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
651 	u64 sas_address)
652 {
653 	struct _sas_device *sas_device;
654 	unsigned long flags;
655 
656 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
657 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
658 			sas_address);
659 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
660 
661 	return sas_device;
662 }
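/*
 * Usage sketch (illustrative): the locked __mpt3sas_get_sdev_by_addr()
 * variant requires the caller to hold ioc->sas_device_lock, while
 * mpt3sas_get_sdev_by_addr() takes the lock itself.  Both return the
 * sas_device with an extra reference which the caller must drop:
 *
 *	sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address);
 *	if (sas_device) {
 *		... use sas_device fields ...
 *		sas_device_put(sas_device);
 *	}
 */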
663 
664 static struct _sas_device *
665 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
666 {
667 	struct _sas_device *sas_device;
668 
669 	assert_spin_locked(&ioc->sas_device_lock);
670 
671 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
672 		if (sas_device->handle == handle)
673 			goto found_device;
674 
675 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
676 		if (sas_device->handle == handle)
677 			goto found_device;
678 
679 	return NULL;
680 
681 found_device:
682 	sas_device_get(sas_device);
683 	return sas_device;
684 }
685 
686 /**
687  * mpt3sas_get_sdev_by_handle - sas device search
688  * @ioc: per adapter object
689  * @handle: sas device handle (assigned by firmware)
690  * Context: This function acquires ioc->sas_device_lock and releases it
691  * before returning the sas_device object.
692  * This searches for sas_device based on handle, then returns the
693  * sas_device object.
694  */
695 struct _sas_device *
696 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
697 {
698 	struct _sas_device *sas_device;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
702 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
703 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
704 
705 	return sas_device;
706 }
707 
708 /**
709  * _scsih_display_enclosure_chassis_info - display device location info
710  * @ioc: per adapter object
711  * @sas_device: per sas device object
712  * @sdev: scsi device struct
713  * @starget: scsi target struct
714  */
715 static void
716 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
717 	struct _sas_device *sas_device, struct scsi_device *sdev,
718 	struct scsi_target *starget)
719 {
720 	if (sdev) {
721 		if (sas_device->enclosure_handle != 0)
722 			sdev_printk(KERN_INFO, sdev,
723 			    "enclosure logical id (0x%016llx), slot(%d) \n",
724 			    (unsigned long long)
725 			    sas_device->enclosure_logical_id,
726 			    sas_device->slot);
727 		if (sas_device->connector_name[0] != '\0')
728 			sdev_printk(KERN_INFO, sdev,
729 			    "enclosure level(0x%04x), connector name( %s)\n",
730 			    sas_device->enclosure_level,
731 			    sas_device->connector_name);
732 		if (sas_device->is_chassis_slot_valid)
733 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
734 			    sas_device->chassis_slot);
735 	} else if (starget) {
736 		if (sas_device->enclosure_handle != 0)
737 			starget_printk(KERN_INFO, starget,
738 			    "enclosure logical id(0x%016llx), slot(%d) \n",
739 			    (unsigned long long)
740 			    sas_device->enclosure_logical_id,
741 			    sas_device->slot);
742 		if (sas_device->connector_name[0] != '\0')
743 			starget_printk(KERN_INFO, starget,
744 			    "enclosure level(0x%04x), connector name( %s)\n",
745 			    sas_device->enclosure_level,
746 			    sas_device->connector_name);
747 		if (sas_device->is_chassis_slot_valid)
748 			starget_printk(KERN_INFO, starget,
749 			    "chassis slot(0x%04x)\n",
750 			    sas_device->chassis_slot);
751 	} else {
752 		if (sas_device->enclosure_handle != 0)
753 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
754 				 (u64)sas_device->enclosure_logical_id,
755 				 sas_device->slot);
756 		if (sas_device->connector_name[0] != '\0')
757 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
758 				 sas_device->enclosure_level,
759 				 sas_device->connector_name);
760 		if (sas_device->is_chassis_slot_valid)
761 			ioc_info(ioc, "chassis slot(0x%04x)\n",
762 				 sas_device->chassis_slot);
763 	}
764 }
765 
766 /**
767  * _scsih_sas_device_remove - remove sas_device from list.
768  * @ioc: per adapter object
769  * @sas_device: the sas_device object
770  * Context: This function will acquire ioc->sas_device_lock.
771  *
772  * If sas_device is on the list, remove it and decrement its reference count.
773  */
774 static void
775 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
776 	struct _sas_device *sas_device)
777 {
778 	unsigned long flags;
779 
780 	if (!sas_device)
781 		return;
782 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
783 		 sas_device->handle, (u64)sas_device->sas_address);
784 
785 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
786 
787 	/*
788 	 * The lock serializes access to the list, but we still need to verify
789 	 * that nobody removed the entry while we were waiting on the lock.
790 	 */
791 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
792 	if (!list_empty(&sas_device->list)) {
793 		list_del_init(&sas_device->list);
794 		sas_device_put(sas_device);
795 	}
796 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
797 }
798 
799 /**
800  * _scsih_device_remove_by_handle - removing device object by handle
801  * @ioc: per adapter object
802  * @handle: device handle
803  */
804 static void
805 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
806 {
807 	struct _sas_device *sas_device;
808 	unsigned long flags;
809 
810 	if (ioc->shost_recovery)
811 		return;
812 
813 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
814 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
815 	if (sas_device) {
816 		list_del_init(&sas_device->list);
817 		sas_device_put(sas_device);
818 	}
819 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
820 	if (sas_device) {
821 		_scsih_remove_device(ioc, sas_device);
822 		sas_device_put(sas_device);
823 	}
824 }
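/*
 * Note on the two sas_device_put() calls above: the first drops the list's
 * reference once the entry has been unlinked under the lock, and the second
 * drops the lookup reference taken by __mpt3sas_get_sdev_by_handle() after
 * the device has been removed from the SCSI mid-layer outside the lock.
 */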
825 
826 /**
827  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
828  * @ioc: per adapter object
829  * @sas_address: device sas_address
830  */
831 void
832 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
833 	u64 sas_address)
834 {
835 	struct _sas_device *sas_device;
836 	unsigned long flags;
837 
838 	if (ioc->shost_recovery)
839 		return;
840 
841 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
842 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
843 	if (sas_device) {
844 		list_del_init(&sas_device->list);
845 		sas_device_put(sas_device);
846 	}
847 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
848 	if (sas_device) {
849 		_scsih_remove_device(ioc, sas_device);
850 		sas_device_put(sas_device);
851 	}
852 }
853 
854 /**
855  * _scsih_sas_device_add - insert sas_device to the list.
856  * @ioc: per adapter object
857  * @sas_device: the sas_device object
858  * Context: This function will acquire ioc->sas_device_lock.
859  *
860  * Adding new object to the ioc->sas_device_list.
861  */
862 static void
863 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
864 	struct _sas_device *sas_device)
865 {
866 	unsigned long flags;
867 
868 	dewtprintk(ioc,
869 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
870 			    __func__, sas_device->handle,
871 			    (u64)sas_device->sas_address));
872 
873 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
874 	    NULL, NULL));
875 
876 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
877 	sas_device_get(sas_device);
878 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
879 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
880 
881 	if (ioc->hide_drives) {
882 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
883 		return;
884 	}
885 
886 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
887 	     sas_device->sas_address_parent)) {
888 		_scsih_sas_device_remove(ioc, sas_device);
889 	} else if (!sas_device->starget) {
890 		/*
891 		 * When async scanning is enabled, it's not possible to remove
892 		 * devices while scanning is turned on due to an oops in
893 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
894 		 */
895 		if (!ioc->is_driver_loading) {
896 			mpt3sas_transport_port_remove(ioc,
897 			    sas_device->sas_address,
898 			    sas_device->sas_address_parent);
899 			_scsih_sas_device_remove(ioc, sas_device);
900 		}
901 	} else
902 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
903 }
904 
905 /**
906  * _scsih_sas_device_init_add - insert sas_device to the list.
907  * @ioc: per adapter object
908  * @sas_device: the sas_device object
909  * Context: This function will acquire ioc->sas_device_lock.
910  *
911  * Adding new object at driver load time to the ioc->sas_device_init_list.
912  */
913 static void
914 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
915 	struct _sas_device *sas_device)
916 {
917 	unsigned long flags;
918 
919 	dewtprintk(ioc,
920 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
921 			    __func__, sas_device->handle,
922 			    (u64)sas_device->sas_address));
923 
924 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
925 	    NULL, NULL));
926 
927 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
928 	sas_device_get(sas_device);
929 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
930 	_scsih_determine_boot_device(ioc, sas_device, 0);
931 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
932 }
933 
934 
935 static struct _pcie_device *
936 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
937 {
938 	struct _pcie_device *pcie_device;
939 
940 	assert_spin_locked(&ioc->pcie_device_lock);
941 
942 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
943 		if (pcie_device->wwid == wwid)
944 			goto found_device;
945 
946 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
947 		if (pcie_device->wwid == wwid)
948 			goto found_device;
949 
950 	return NULL;
951 
952 found_device:
953 	pcie_device_get(pcie_device);
954 	return pcie_device;
955 }
956 
957 
958 /**
959  * mpt3sas_get_pdev_by_wwid - pcie device search
960  * @ioc: per adapter object
961  * @wwid: wwid
962  *
963  * Context: This function acquires ioc->pcie_device_lock and releases it
964  * before returning the pcie_device object.
965  *
966  * This searches for pcie_device based on wwid, then returns it.
967  */
968 static struct _pcie_device *
969 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
970 {
971 	struct _pcie_device *pcie_device;
972 	unsigned long flags;
973 
974 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
975 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
976 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
977 
978 	return pcie_device;
979 }
980 
981 
982 static struct _pcie_device *
983 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
984 	int channel)
985 {
986 	struct _pcie_device *pcie_device;
987 
988 	assert_spin_locked(&ioc->pcie_device_lock);
989 
990 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
991 		if (pcie_device->id == id && pcie_device->channel == channel)
992 			goto found_device;
993 
994 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
995 		if (pcie_device->id == id && pcie_device->channel == channel)
996 			goto found_device;
997 
998 	return NULL;
999 
1000 found_device:
1001 	pcie_device_get(pcie_device);
1002 	return pcie_device;
1003 }
1004 
1005 static struct _pcie_device *
1006 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1007 {
1008 	struct _pcie_device *pcie_device;
1009 
1010 	assert_spin_locked(&ioc->pcie_device_lock);
1011 
1012 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1013 		if (pcie_device->handle == handle)
1014 			goto found_device;
1015 
1016 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1017 		if (pcie_device->handle == handle)
1018 			goto found_device;
1019 
1020 	return NULL;
1021 
1022 found_device:
1023 	pcie_device_get(pcie_device);
1024 	return pcie_device;
1025 }
1026 
1027 
1028 /**
1029  * mpt3sas_get_pdev_by_handle - pcie device search
1030  * @ioc: per adapter object
1031  * @handle: Firmware device handle
1032  *
1033  * Context: This function acquires ioc->pcie_device_lock and releases it
1034  * before returning the pcie_device object.
1035  *
1036  * This searches for pcie_device based on handle, then returns the
1037  * pcie_device object.
1038  */
1039 struct _pcie_device *
1040 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1041 {
1042 	struct _pcie_device *pcie_device;
1043 	unsigned long flags;
1044 
1045 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1046 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1047 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1048 
1049 	return pcie_device;
1050 }
1051 
1052 /**
1053  * _scsih_pcie_device_remove - remove pcie_device from list.
1054  * @ioc: per adapter object
1055  * @pcie_device: the pcie_device object
1056  * Context: This function will acquire ioc->pcie_device_lock.
1057  *
1058  * If pcie_device is on the list, remove it and decrement its reference count.
1059  */
1060 static void
1061 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1062 	struct _pcie_device *pcie_device)
1063 {
1064 	unsigned long flags;
1065 	int was_on_pcie_device_list = 0;
1066 
1067 	if (!pcie_device)
1068 		return;
1069 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1070 		 pcie_device->handle, (u64)pcie_device->wwid);
1071 	if (pcie_device->enclosure_handle != 0)
1072 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1073 			 (u64)pcie_device->enclosure_logical_id,
1074 			 pcie_device->slot);
1075 	if (pcie_device->connector_name[0] != '\0')
1076 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1077 			 pcie_device->enclosure_level,
1078 			 pcie_device->connector_name);
1079 
1080 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1081 	if (!list_empty(&pcie_device->list)) {
1082 		list_del_init(&pcie_device->list);
1083 		was_on_pcie_device_list = 1;
1084 	}
1085 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1086 	if (was_on_pcie_device_list) {
1087 		kfree(pcie_device->serial_number);
1088 		pcie_device_put(pcie_device);
1089 	}
1090 }
1091 
1092 
1093 /**
1094  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1095  * @ioc: per adapter object
1096  * @handle: device handle
1097  */
1098 static void
1099 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1100 {
1101 	struct _pcie_device *pcie_device;
1102 	unsigned long flags;
1103 	int was_on_pcie_device_list = 0;
1104 
1105 	if (ioc->shost_recovery)
1106 		return;
1107 
1108 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1109 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1110 	if (pcie_device) {
1111 		if (!list_empty(&pcie_device->list)) {
1112 			list_del_init(&pcie_device->list);
1113 			was_on_pcie_device_list = 1;
1114 			pcie_device_put(pcie_device);
1115 		}
1116 	}
1117 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1118 	if (was_on_pcie_device_list) {
1119 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1120 		pcie_device_put(pcie_device);
1121 	}
1122 }
1123 
1124 /**
1125  * _scsih_pcie_device_add - add pcie_device object
1126  * @ioc: per adapter object
1127  * @pcie_device: pcie_device object
1128  *
1129  * This is added to the pcie_device_list linked list.
1130  */
1131 static void
1132 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1133 	struct _pcie_device *pcie_device)
1134 {
1135 	unsigned long flags;
1136 
1137 	dewtprintk(ioc,
1138 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1139 			    __func__,
1140 			    pcie_device->handle, (u64)pcie_device->wwid));
1141 	if (pcie_device->enclosure_handle != 0)
1142 		dewtprintk(ioc,
1143 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1144 				    __func__,
1145 				    (u64)pcie_device->enclosure_logical_id,
1146 				    pcie_device->slot));
1147 	if (pcie_device->connector_name[0] != '\0')
1148 		dewtprintk(ioc,
1149 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1150 				    __func__, pcie_device->enclosure_level,
1151 				    pcie_device->connector_name));
1152 
1153 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1154 	pcie_device_get(pcie_device);
1155 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1156 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1157 
1158 	if (pcie_device->access_status ==
1159 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1160 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1161 		return;
1162 	}
1163 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1164 		_scsih_pcie_device_remove(ioc, pcie_device);
1165 	} else if (!pcie_device->starget) {
1166 		if (!ioc->is_driver_loading) {
1167 /* TODO: need to find out whether this condition will occur or not */
1168 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1169 		}
1170 	} else
1171 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1172 }
1173 
1174 /**
1175  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1176  * @ioc: per adapter object
1177  * @pcie_device: the pcie_device object
1178  * Context: This function will acquire ioc->pcie_device_lock.
1179  *
1180  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1181  */
1182 static void
1183 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1184 				struct _pcie_device *pcie_device)
1185 {
1186 	unsigned long flags;
1187 
1188 	dewtprintk(ioc,
1189 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1190 			    __func__,
1191 			    pcie_device->handle, (u64)pcie_device->wwid));
1192 	if (pcie_device->enclosure_handle != 0)
1193 		dewtprintk(ioc,
1194 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1195 				    __func__,
1196 				    (u64)pcie_device->enclosure_logical_id,
1197 				    pcie_device->slot));
1198 	if (pcie_device->connector_name[0] != '\0')
1199 		dewtprintk(ioc,
1200 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1201 				    __func__, pcie_device->enclosure_level,
1202 				    pcie_device->connector_name));
1203 
1204 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 	pcie_device_get(pcie_device);
1206 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1207 	if (pcie_device->access_status !=
1208 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1209 		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1210 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1211 }
1212 /**
1213  * _scsih_raid_device_find_by_id - raid device search
1214  * @ioc: per adapter object
1215  * @id: sas device target id
1216  * @channel: sas device channel
1217  * Context: Calling function should acquire ioc->raid_device_lock
1218  *
1219  * This searches for raid_device based on target id, then return raid_device
1220  * object.
1221  */
1222 static struct _raid_device *
1223 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1224 {
1225 	struct _raid_device *raid_device, *r;
1226 
1227 	r = NULL;
1228 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1229 		if (raid_device->id == id && raid_device->channel == channel) {
1230 			r = raid_device;
1231 			goto out;
1232 		}
1233 	}
1234 
1235  out:
1236 	return r;
1237 }
1238 
1239 /**
1240  * mpt3sas_raid_device_find_by_handle - raid device search
1241  * @ioc: per adapter object
1242  * @handle: sas device handle (assigned by firmware)
1243  * Context: Calling function should acquire ioc->raid_device_lock
1244  *
1245  * This searches for raid_device based on handle, then return raid_device
1246  * object.
1247  */
1248 struct _raid_device *
1249 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1250 {
1251 	struct _raid_device *raid_device, *r;
1252 
1253 	r = NULL;
1254 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1255 		if (raid_device->handle != handle)
1256 			continue;
1257 		r = raid_device;
1258 		goto out;
1259 	}
1260 
1261  out:
1262 	return r;
1263 }
1264 
1265 /**
1266  * _scsih_raid_device_find_by_wwid - raid device search
1267  * @ioc: per adapter object
1268  * @wwid: world wide identifier of the raid volume
1269  * Context: Calling function should acquire ioc->raid_device_lock
1270  *
1271  * This searches for raid_device based on wwid, then return raid_device
1272  * object.
1273  */
1274 static struct _raid_device *
1275 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1276 {
1277 	struct _raid_device *raid_device, *r;
1278 
1279 	r = NULL;
1280 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1281 		if (raid_device->wwid != wwid)
1282 			continue;
1283 		r = raid_device;
1284 		goto out;
1285 	}
1286 
1287  out:
1288 	return r;
1289 }
1290 
1291 /**
1292  * _scsih_raid_device_add - add raid_device object
1293  * @ioc: per adapter object
1294  * @raid_device: raid_device object
1295  *
1296  * This is added to the raid_device_list linked list.
1297  */
1298 static void
1299 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1300 	struct _raid_device *raid_device)
1301 {
1302 	unsigned long flags;
1303 
1304 	dewtprintk(ioc,
1305 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1306 			    __func__,
1307 			    raid_device->handle, (u64)raid_device->wwid));
1308 
1309 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1310 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1311 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1312 }
1313 
1314 /**
1315  * _scsih_raid_device_remove - delete raid_device object
1316  * @ioc: per adapter object
1317  * @raid_device: raid_device object
1318  *
1319  */
1320 static void
1321 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1322 	struct _raid_device *raid_device)
1323 {
1324 	unsigned long flags;
1325 
1326 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1327 	list_del(&raid_device->list);
1328 	kfree(raid_device);
1329 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1330 }
1331 
1332 /**
1333  * mpt3sas_scsih_expander_find_by_handle - expander device search
1334  * @ioc: per adapter object
1335  * @handle: expander handle (assigned by firmware)
1336  * Context: Calling function should acquire ioc->sas_node_lock
1337  *
1338  * This searches for expander device based on handle, then returns the
1339  * sas_node object.
1340  */
1341 struct _sas_node *
1342 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1343 {
1344 	struct _sas_node *sas_expander, *r;
1345 
1346 	r = NULL;
1347 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1348 		if (sas_expander->handle != handle)
1349 			continue;
1350 		r = sas_expander;
1351 		goto out;
1352 	}
1353  out:
1354 	return r;
1355 }
1356 
1357 /**
1358  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1359  * @ioc: per adapter object
1360  * @handle: enclosure handle (assigned by firmware)
1361  * Context: Calling function should acquire ioc->sas_device_lock
1362  *
1363  * This searches for enclosure device based on handle, then returns the
1364  * enclosure object.
1365  */
1366 static struct _enclosure_node *
1367 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1368 {
1369 	struct _enclosure_node *enclosure_dev, *r;
1370 
1371 	r = NULL;
1372 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1373 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1374 			continue;
1375 		r = enclosure_dev;
1376 		goto out;
1377 	}
1378 out:
1379 	return r;
1380 }
1381 /**
1382  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1383  * @ioc: per adapter object
1384  * @sas_address: sas address
1385  * Context: Calling function should acquire ioc->sas_node_lock.
1386  *
1387  * This searches for expander device based on sas_address, then returns the
1388  * sas_node object.
1389  */
1390 struct _sas_node *
1391 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1392 	u64 sas_address)
1393 {
1394 	struct _sas_node *sas_expander, *r;
1395 
1396 	r = NULL;
1397 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1398 		if (sas_expander->sas_address != sas_address)
1399 			continue;
1400 		r = sas_expander;
1401 		goto out;
1402 	}
1403  out:
1404 	return r;
1405 }
1406 
1407 /**
1408  * _scsih_expander_node_add - insert expander device to the list.
1409  * @ioc: per adapter object
1410  * @sas_expander: the sas_node object
1411  * Context: This function will acquire ioc->sas_node_lock.
1412  *
1413  * Adding new object to the ioc->sas_expander_list.
1414  */
1415 static void
1416 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1417 	struct _sas_node *sas_expander)
1418 {
1419 	unsigned long flags;
1420 
1421 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1422 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1423 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1424 }
1425 
1426 /**
1427  * _scsih_is_end_device - determines if device is an end device
1428  * @device_info: bitfield providing information about the device.
1429  * Context: none
1430  *
1431  * Return: 1 if end device.
1432  */
1433 static int
1434 _scsih_is_end_device(u32 device_info)
1435 {
1436 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1437 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1438 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1439 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1440 		return 1;
1441 	else
1442 		return 0;
1443 }
1444 
1445 /**
1446  * _scsih_is_nvme_pciescsi_device - determines if
1447  *			device is a pcie nvme/scsi device
1448  * @device_info: bitfield providing information about the device.
1449  * Context: none
1450  *
1451  * Return: 1 if the device is a pcie nvme/scsi type device.
1452  */
1453 static int
1454 _scsih_is_nvme_pciescsi_device(u32 device_info)
1455 {
1456 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1457 	    == MPI26_PCIE_DEVINFO_NVME) ||
1458 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1459 	    == MPI26_PCIE_DEVINFO_SCSI))
1460 		return 1;
1461 	else
1462 		return 0;
1463 }
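/*
 * Illustrative sketch: the DeviceInfo word reported by the firmware in the
 * corresponding config page is what feeds these classification helpers,
 * e.g. (assuming sas_device_pg0/pcie_device_pg0 were read earlier):
 *
 *	if (_scsih_is_end_device(le32_to_cpu(sas_device_pg0.DeviceInfo)))
 *		... expose the SSP/STP/SATA end device to scsi-ml ...
 *
 *	if (_scsih_is_nvme_pciescsi_device(
 *	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))
 *		... treat it as an NVMe or SCSI type PCIe device ...
 */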
1464 
1465 /**
1466  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1467  * @ioc: per adapter object
1468  * @smid: system request message index
1469  *
1470  * Return: the scmd pointer stored for this smid, or NULL if no SCSI IO
1471  * request is outstanding at the driver level for it.
1472  */
1473 struct scsi_cmnd *
1474 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1475 {
1476 	struct scsi_cmnd *scmd = NULL;
1477 	struct scsiio_tracker *st;
1478 	Mpi25SCSIIORequest_t *mpi_request;
1479 
1480 	if (smid > 0  &&
1481 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1482 		u32 unique_tag = smid - 1;
1483 
1484 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1485 
1486 		/*
1487 		 * If SCSI IO request is outstanding at driver level then
1488 		 * DevHandle field must be non-zero. If DevHandle is zero
1489 		 * then it means that this smid is free at driver level,
1490 		 * so return NULL.
1491 		 */
1492 		if (!mpi_request->DevHandle)
1493 			return scmd;
1494 
1495 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1496 		if (scmd) {
1497 			st = scsi_cmd_priv(scmd);
1498 			if (st->cb_idx == 0xFF || st->smid == 0)
1499 				scmd = NULL;
1500 		}
1501 	}
1502 	return scmd;
1503 }
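/*
 * Usage sketch (illustrative): a reply handler typically maps the smid from
 * the reply descriptor back to the originating scmd with this helper; a
 * NULL return means the smid is free or belongs to an internal command:
 *
 *	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 *	if (!scmd)
 *		return 1;	(not a scsi-ml originated IO)
 *	... fill in scmd->result and complete the command ...
 */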
1504 
1505 /**
1506  * scsih_change_queue_depth - setting device queue depth
1507  * @sdev: scsi device struct
1508  * @qdepth: requested queue depth
1509  *
1510  * Return: queue depth.
1511  */
1512 static int
1513 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1514 {
1515 	struct Scsi_Host *shost = sdev->host;
1516 	int max_depth;
1517 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1518 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1519 	struct MPT3SAS_TARGET *sas_target_priv_data;
1520 	struct _sas_device *sas_device;
1521 	unsigned long flags;
1522 
1523 	max_depth = shost->can_queue;
1524 
1525 	/*
1526 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1527 	 * is disabled.
1528 	 */
1529 	if (ioc->enable_sdev_max_qd)
1530 		goto not_sata;
1531 
1532 	sas_device_priv_data = sdev->hostdata;
1533 	if (!sas_device_priv_data)
1534 		goto not_sata;
1535 	sas_target_priv_data = sas_device_priv_data->sas_target;
1536 	if (!sas_target_priv_data)
1537 		goto not_sata;
1538 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1539 		goto not_sata;
1540 
1541 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1542 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1543 	if (sas_device) {
1544 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1545 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1546 
1547 		sas_device_put(sas_device);
1548 	}
1549 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1550 
1551  not_sata:
1552 
1553 	if (!sdev->tagged_supported)
1554 		max_depth = 1;
1555 	if (qdepth > max_depth)
1556 		qdepth = max_depth;
1557 	return scsi_change_queue_depth(sdev, qdepth);
1558 }
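/*
 * Worked example (illustrative): with enable_sdev_max_qd disabled, a SATA
 * disk asking for a queue depth of 254 is clamped to
 * MPT3SAS_SATA_QUEUE_DEPTH, and a device without tagged queuing support is
 * clamped to 1; all other requests are limited only by shost->can_queue.
 */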
1559 
1560 /**
1561  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1562  * @sdev: scsi device struct
1563  * @qdepth: requested queue depth
1564  *
1565  * Returns nothing.
1566  */
1567 void
1568 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1569 {
1570 	struct Scsi_Host *shost = sdev->host;
1571 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1572 
1573 	if (ioc->enable_sdev_max_qd)
1574 		qdepth = shost->can_queue;
1575 
1576 	scsih_change_queue_depth(sdev, qdepth);
1577 }
1578 
1579 /**
1580  * scsih_target_alloc - target add routine
1581  * @starget: scsi target struct
1582  *
1583  * Return: 0 if ok. Any other return is assumed to be an error and
1584  * the device is ignored.
1585  */
1586 static int
1587 scsih_target_alloc(struct scsi_target *starget)
1588 {
1589 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1590 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1591 	struct MPT3SAS_TARGET *sas_target_priv_data;
1592 	struct _sas_device *sas_device;
1593 	struct _raid_device *raid_device;
1594 	struct _pcie_device *pcie_device;
1595 	unsigned long flags;
1596 	struct sas_rphy *rphy;
1597 
1598 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1599 				       GFP_KERNEL);
1600 	if (!sas_target_priv_data)
1601 		return -ENOMEM;
1602 
1603 	starget->hostdata = sas_target_priv_data;
1604 	sas_target_priv_data->starget = starget;
1605 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1606 
1607 	/* RAID volumes */
1608 	if (starget->channel == RAID_CHANNEL) {
1609 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1610 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1611 		    starget->channel);
1612 		if (raid_device) {
1613 			sas_target_priv_data->handle = raid_device->handle;
1614 			sas_target_priv_data->sas_address = raid_device->wwid;
1615 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1616 			if (ioc->is_warpdrive)
1617 				sas_target_priv_data->raid_device = raid_device;
1618 			raid_device->starget = starget;
1619 		}
1620 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1621 		return 0;
1622 	}
1623 
1624 	/* PCIe devices */
1625 	if (starget->channel == PCIE_CHANNEL) {
1626 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1627 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1628 			starget->channel);
1629 		if (pcie_device) {
1630 			sas_target_priv_data->handle = pcie_device->handle;
1631 			sas_target_priv_data->sas_address = pcie_device->wwid;
1632 			sas_target_priv_data->pcie_dev = pcie_device;
1633 			pcie_device->starget = starget;
1634 			pcie_device->id = starget->id;
1635 			pcie_device->channel = starget->channel;
1636 			sas_target_priv_data->flags |=
1637 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1638 			if (pcie_device->fast_path)
1639 				sas_target_priv_data->flags |=
1640 					MPT_TARGET_FASTPATH_IO;
1641 		}
1642 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1643 		return 0;
1644 	}
1645 
1646 	/* sas/sata devices */
1647 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1648 	rphy = dev_to_rphy(starget->dev.parent);
1649 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1650 	   rphy->identify.sas_address);
1651 
1652 	if (sas_device) {
1653 		sas_target_priv_data->handle = sas_device->handle;
1654 		sas_target_priv_data->sas_address = sas_device->sas_address;
1655 		sas_target_priv_data->sas_dev = sas_device;
1656 		sas_device->starget = starget;
1657 		sas_device->id = starget->id;
1658 		sas_device->channel = starget->channel;
1659 		if (test_bit(sas_device->handle, ioc->pd_handles))
1660 			sas_target_priv_data->flags |=
1661 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1662 		if (sas_device->fast_path)
1663 			sas_target_priv_data->flags |=
1664 					MPT_TARGET_FASTPATH_IO;
1665 	}
1666 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1667 
1668 	return 0;
1669 }
1670 
1671 /**
1672  * scsih_target_destroy - target destroy routine
1673  * @starget: scsi target struct
1674  */
1675 static void
1676 scsih_target_destroy(struct scsi_target *starget)
1677 {
1678 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1679 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1680 	struct MPT3SAS_TARGET *sas_target_priv_data;
1681 	struct _sas_device *sas_device;
1682 	struct _raid_device *raid_device;
1683 	struct _pcie_device *pcie_device;
1684 	unsigned long flags;
1685 
1686 	sas_target_priv_data = starget->hostdata;
1687 	if (!sas_target_priv_data)
1688 		return;
1689 
1690 	if (starget->channel == RAID_CHANNEL) {
1691 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1692 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1693 		    starget->channel);
1694 		if (raid_device) {
1695 			raid_device->starget = NULL;
1696 			raid_device->sdev = NULL;
1697 		}
1698 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1699 		goto out;
1700 	}
1701 
1702 	if (starget->channel == PCIE_CHANNEL) {
1703 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1704 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1705 							sas_target_priv_data);
1706 		if (pcie_device && (pcie_device->starget == starget) &&
1707 			(pcie_device->id == starget->id) &&
1708 			(pcie_device->channel == starget->channel))
1709 			pcie_device->starget = NULL;
1710 
1711 		if (pcie_device) {
1712 			/*
1713 			 * Corresponding get() is in scsih_target_alloc()
1714 			 */
1715 			sas_target_priv_data->pcie_dev = NULL;
1716 			pcie_device_put(pcie_device);
1717 			pcie_device_put(pcie_device);
1718 		}
1719 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1720 		goto out;
1721 	}
1722 
1723 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1724 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1725 	if (sas_device && (sas_device->starget == starget) &&
1726 	    (sas_device->id == starget->id) &&
1727 	    (sas_device->channel == starget->channel))
1728 		sas_device->starget = NULL;
1729 
1730 	if (sas_device) {
1731 		/*
1732 		 * Corresponding get() is in scsih_target_alloc()
1733 		 */
1734 		sas_target_priv_data->sas_dev = NULL;
1735 		sas_device_put(sas_device);
1736 
1737 		sas_device_put(sas_device);
1738 	}
1739 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1740 
1741  out:
1742 	kfree(sas_target_priv_data);
1743 	starget->hostdata = NULL;
1744 }
1745 
1746 /**
1747  * scsih_slave_alloc - device add routine
1748  * @sdev: scsi device struct
1749  *
1750  * Return: 0 if ok. Any other return is assumed to be an error and
1751  * the device is ignored.
1752  */
1753 static int
1754 scsih_slave_alloc(struct scsi_device *sdev)
1755 {
1756 	struct Scsi_Host *shost;
1757 	struct MPT3SAS_ADAPTER *ioc;
1758 	struct MPT3SAS_TARGET *sas_target_priv_data;
1759 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1760 	struct scsi_target *starget;
1761 	struct _raid_device *raid_device;
1762 	struct _sas_device *sas_device;
1763 	struct _pcie_device *pcie_device;
1764 	unsigned long flags;
1765 
1766 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1767 				       GFP_KERNEL);
1768 	if (!sas_device_priv_data)
1769 		return -ENOMEM;
1770 
1771 	sas_device_priv_data->lun = sdev->lun;
1772 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1773 
1774 	starget = scsi_target(sdev);
1775 	sas_target_priv_data = starget->hostdata;
1776 	sas_target_priv_data->num_luns++;
1777 	sas_device_priv_data->sas_target = sas_target_priv_data;
1778 	sdev->hostdata = sas_device_priv_data;
1779 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1780 		sdev->no_uld_attach = 1;
1781 
1782 	shost = dev_to_shost(&starget->dev);
1783 	ioc = shost_priv(shost);
1784 	if (starget->channel == RAID_CHANNEL) {
1785 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1786 		raid_device = _scsih_raid_device_find_by_id(ioc,
1787 		    starget->id, starget->channel);
1788 		if (raid_device)
1789 			raid_device->sdev = sdev; /* raid is single lun */
1790 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1791 	}
1792 	if (starget->channel == PCIE_CHANNEL) {
1793 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1794 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1795 				sas_target_priv_data->sas_address);
1796 		if (pcie_device && (pcie_device->starget == NULL)) {
1797 			sdev_printk(KERN_INFO, sdev,
1798 			    "%s : pcie_device->starget set to starget @ %d\n",
1799 			    __func__, __LINE__);
1800 			pcie_device->starget = starget;
1801 		}
1802 
1803 		if (pcie_device)
1804 			pcie_device_put(pcie_device);
1805 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1806 
1807 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1808 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1809 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1810 					sas_target_priv_data->sas_address);
1811 		if (sas_device && (sas_device->starget == NULL)) {
1812 			sdev_printk(KERN_INFO, sdev,
1813 			"%s : sas_device->starget set to starget @ %d\n",
1814 			     __func__, __LINE__);
1815 			sas_device->starget = starget;
1816 		}
1817 
1818 		if (sas_device)
1819 			sas_device_put(sas_device);
1820 
1821 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1822 	}
1823 
1824 	return 0;
1825 }
1826 
1827 /**
1828  * scsih_slave_destroy - device destroy routine
1829  * @sdev: scsi device struct
1830  */
1831 static void
1832 scsih_slave_destroy(struct scsi_device *sdev)
1833 {
1834 	struct MPT3SAS_TARGET *sas_target_priv_data;
1835 	struct scsi_target *starget;
1836 	struct Scsi_Host *shost;
1837 	struct MPT3SAS_ADAPTER *ioc;
1838 	struct _sas_device *sas_device;
1839 	struct _pcie_device *pcie_device;
1840 	unsigned long flags;
1841 
1842 	if (!sdev->hostdata)
1843 		return;
1844 
1845 	starget = scsi_target(sdev);
1846 	sas_target_priv_data = starget->hostdata;
1847 	sas_target_priv_data->num_luns--;
1848 
1849 	shost = dev_to_shost(&starget->dev);
1850 	ioc = shost_priv(shost);
1851 
1852 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1853 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1854 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1855 				sas_target_priv_data);
1856 		if (pcie_device && !sas_target_priv_data->num_luns)
1857 			pcie_device->starget = NULL;
1858 
1859 		if (pcie_device)
1860 			pcie_device_put(pcie_device);
1861 
1862 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1863 
1864 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1865 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1866 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1867 				sas_target_priv_data);
1868 		if (sas_device && !sas_target_priv_data->num_luns)
1869 			sas_device->starget = NULL;
1870 
1871 		if (sas_device)
1872 			sas_device_put(sas_device);
1873 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1874 	}
1875 
1876 	kfree(sdev->hostdata);
1877 	sdev->hostdata = NULL;
1878 }
1879 
1880 /**
1881  * _scsih_display_sata_capabilities - display SATA capabilities
1882  * @ioc: per adapter object
1883  * @handle: device handle
1884  * @sdev: scsi device struct
1885  */
1886 static void
1887 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1888 	u16 handle, struct scsi_device *sdev)
1889 {
1890 	Mpi2ConfigReply_t mpi_reply;
1891 	Mpi2SasDevicePage0_t sas_device_pg0;
1892 	u32 ioc_status;
1893 	u16 flags;
1894 	u32 device_info;
1895 
1896 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1897 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1898 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1899 			__FILE__, __LINE__, __func__);
1900 		return;
1901 	}
1902 
1903 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1904 	    MPI2_IOCSTATUS_MASK;
1905 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1906 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1907 			__FILE__, __LINE__, __func__);
1908 		return;
1909 	}
1910 
1911 	flags = le16_to_cpu(sas_device_pg0.Flags);
1912 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1913 
1914 	sdev_printk(KERN_INFO, sdev,
1915 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1916 	    "sw_preserve(%s)\n",
1917 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1918 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1919 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1920 	    "n",
1921 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1922 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1923 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1924 }
1925 
1926 /*
1927  * raid transport support -
1928  * Enabled for SLES11 and newer; on older kernels the driver panics when it
1929  * is unloaded and then reloaded, apparently because raid_class_release()
1930  * does not clean up properly.
1931  */
1932 
1933 /**
1934  * scsih_is_raid - return boolean indicating device is raid volume
1935  * @dev: the device struct object
1936  */
1937 static int
1938 scsih_is_raid(struct device *dev)
1939 {
1940 	struct scsi_device *sdev = to_scsi_device(dev);
1941 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1942 
1943 	if (ioc->is_warpdrive)
1944 		return 0;
1945 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1946 }
1947 
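/**
 * scsih_is_nvme - return boolean indicating device is an NVMe drive
 * @dev: the device struct object
 */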
1948 static int
1949 scsih_is_nvme(struct device *dev)
1950 {
1951 	struct scsi_device *sdev = to_scsi_device(dev);
1952 
1953 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1954 }
1955 
1956 /**
1957  * scsih_get_resync - get raid volume resync percent complete
1958  * @dev: the device struct object
1959  */
1960 static void
1961 scsih_get_resync(struct device *dev)
1962 {
1963 	struct scsi_device *sdev = to_scsi_device(dev);
1964 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1965 	struct _raid_device *raid_device;
1966 	unsigned long flags;
1967 	Mpi2RaidVolPage0_t vol_pg0;
1968 	Mpi2ConfigReply_t mpi_reply;
1969 	u32 volume_status_flags;
1970 	u8 percent_complete;
1971 	u16 handle;
1972 
1973 	percent_complete = 0;
1974 	handle = 0;
1975 	if (ioc->is_warpdrive)
1976 		goto out;
1977 
1978 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1979 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1980 	    sdev->channel);
1981 	if (raid_device) {
1982 		handle = raid_device->handle;
1983 		percent_complete = raid_device->percent_complete;
1984 	}
1985 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1986 
1987 	if (!handle)
1988 		goto out;
1989 
1990 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1991 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1992 	     sizeof(Mpi2RaidVolPage0_t))) {
1993 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1994 			__FILE__, __LINE__, __func__);
1995 		percent_complete = 0;
1996 		goto out;
1997 	}
1998 
1999 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2000 	if (!(volume_status_flags &
2001 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2002 		percent_complete = 0;
2003 
2004  out:
2005 
2006 	switch (ioc->hba_mpi_version_belonged) {
2007 	case MPI2_VERSION:
2008 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2009 		break;
2010 	case MPI25_VERSION:
2011 	case MPI26_VERSION:
2012 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2013 		break;
2014 	}
2015 }
2016 
2017 /**
2018  * scsih_get_state - get raid volume state
2019  * @dev: the device struct object
2020  */
2021 static void
2022 scsih_get_state(struct device *dev)
2023 {
2024 	struct scsi_device *sdev = to_scsi_device(dev);
2025 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2026 	struct _raid_device *raid_device;
2027 	unsigned long flags;
2028 	Mpi2RaidVolPage0_t vol_pg0;
2029 	Mpi2ConfigReply_t mpi_reply;
2030 	u32 volstate;
2031 	enum raid_state state = RAID_STATE_UNKNOWN;
2032 	u16 handle = 0;
2033 
2034 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2035 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2036 	    sdev->channel);
2037 	if (raid_device)
2038 		handle = raid_device->handle;
2039 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2040 
2041 	if (!raid_device)
2042 		goto out;
2043 
2044 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2045 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2046 	     sizeof(Mpi2RaidVolPage0_t))) {
2047 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2048 			__FILE__, __LINE__, __func__);
2049 		goto out;
2050 	}
2051 
2052 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2053 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2054 		state = RAID_STATE_RESYNCING;
2055 		goto out;
2056 	}
2057 
2058 	switch (vol_pg0.VolumeState) {
2059 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2060 	case MPI2_RAID_VOL_STATE_ONLINE:
2061 		state = RAID_STATE_ACTIVE;
2062 		break;
2063 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2064 		state = RAID_STATE_DEGRADED;
2065 		break;
2066 	case MPI2_RAID_VOL_STATE_FAILED:
2067 	case MPI2_RAID_VOL_STATE_MISSING:
2068 		state = RAID_STATE_OFFLINE;
2069 		break;
2070 	}
2071  out:
2072 	switch (ioc->hba_mpi_version_belonged) {
2073 	case MPI2_VERSION:
2074 		raid_set_state(mpt2sas_raid_template, dev, state);
2075 		break;
2076 	case MPI25_VERSION:
2077 	case MPI26_VERSION:
2078 		raid_set_state(mpt3sas_raid_template, dev, state);
2079 		break;
2080 	}
2081 }
2082 
2083 /**
2084  * _scsih_set_level - set raid level
2085  * @ioc: per adapter object
2086  * @sdev: scsi device struct
2087  * @volume_type: volume type
2088  */
2089 static void
2090 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2091 	struct scsi_device *sdev, u8 volume_type)
2092 {
2093 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2094 
2095 	switch (volume_type) {
2096 	case MPI2_RAID_VOL_TYPE_RAID0:
2097 		level = RAID_LEVEL_0;
2098 		break;
2099 	case MPI2_RAID_VOL_TYPE_RAID10:
2100 		level = RAID_LEVEL_10;
2101 		break;
2102 	case MPI2_RAID_VOL_TYPE_RAID1E:
2103 		level = RAID_LEVEL_1E;
2104 		break;
2105 	case MPI2_RAID_VOL_TYPE_RAID1:
2106 		level = RAID_LEVEL_1;
2107 		break;
2108 	}
2109 
2110 	switch (ioc->hba_mpi_version_belonged) {
2111 	case MPI2_VERSION:
2112 		raid_set_level(mpt2sas_raid_template,
2113 			&sdev->sdev_gendev, level);
2114 		break;
2115 	case MPI25_VERSION:
2116 	case MPI26_VERSION:
2117 		raid_set_level(mpt3sas_raid_template,
2118 			&sdev->sdev_gendev, level);
2119 		break;
2120 	}
2121 }
2122
2124 /**
2125  * _scsih_get_volume_capabilities - fetch volume capabilities
2126  * @ioc: per adapter object
2127  * @raid_device: the raid_device object
2128  *
2129  * Return: 0 for success, else 1
2130  */
2131 static int
2132 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2133 	struct _raid_device *raid_device)
2134 {
2135 	Mpi2RaidVolPage0_t *vol_pg0;
2136 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2137 	Mpi2SasDevicePage0_t sas_device_pg0;
2138 	Mpi2ConfigReply_t mpi_reply;
2139 	u16 sz;
2140 	u8 num_pds;
2141 
2142 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2143 	    &num_pds)) || !num_pds) {
2144 		dfailprintk(ioc,
2145 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2146 				     __FILE__, __LINE__, __func__));
2147 		return 1;
2148 	}
2149 
2150 	raid_device->num_pds = num_pds;
2151 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2152 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2153 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2154 	if (!vol_pg0) {
2155 		dfailprintk(ioc,
2156 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2157 				     __FILE__, __LINE__, __func__));
2158 		return 1;
2159 	}
2160 
2161 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2162 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2163 		dfailprintk(ioc,
2164 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2165 				     __FILE__, __LINE__, __func__));
2166 		kfree(vol_pg0);
2167 		return 1;
2168 	}
2169 
2170 	raid_device->volume_type = vol_pg0->VolumeType;
2171 
2172 	/* figure out what the underlying devices are by
2173 	 * obtaining the device_info bits for the 1st device
2174 	 */
2175 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2176 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2177 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2178 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2179 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2180 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2181 			raid_device->device_info =
2182 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2183 		}
2184 	}
2185 
2186 	kfree(vol_pg0);
2187 	return 0;
2188 }
2189 
2190 /**
2191  * _scsih_enable_tlr - setting TLR flags
2192  * @ioc: per adapter object
2193  * @sdev: scsi device struct
2194  *
2195  * Enable Transaction Layer Retries for tape devices when
2196  * VPD page 0x90 is present.
2197  *
2198  */
2199 static void
2200 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2201 {
2202 
2203 	/* only for TAPE */
2204 	if (sdev->type != TYPE_TAPE)
2205 		return;
2206 
2207 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2208 		return;
2209 
2210 	sas_enable_tlr(sdev);
2211 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2212 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2215 }
2216 
2217 /**
2218  * scsih_slave_configure - device configure routine.
2219  * @sdev: scsi device struct
2220  *
2221  * Return: 0 if ok. Any other return is assumed to be an error and
2222  * the device is ignored.
2223  */
2224 static int
2225 scsih_slave_configure(struct scsi_device *sdev)
2226 {
2227 	struct Scsi_Host *shost = sdev->host;
2228 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2229 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2230 	struct MPT3SAS_TARGET *sas_target_priv_data;
2231 	struct _sas_device *sas_device;
2232 	struct _pcie_device *pcie_device;
2233 	struct _raid_device *raid_device;
2234 	unsigned long flags;
2235 	int qdepth;
2236 	u8 ssp_target = 0;
2237 	char *ds = "";
2238 	char *r_level = "";
2239 	u16 handle, volume_handle = 0;
2240 	u64 volume_wwid = 0;
2241 
2242 	qdepth = 1;
2243 	sas_device_priv_data = sdev->hostdata;
2244 	sas_device_priv_data->configured_lun = 1;
2245 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2246 	sas_target_priv_data = sas_device_priv_data->sas_target;
2247 	handle = sas_target_priv_data->handle;
2248 
2249 	/* raid volume handling */
2250 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2251 
2252 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2253 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2254 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2255 		if (!raid_device) {
2256 			dfailprintk(ioc,
2257 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2258 					     __FILE__, __LINE__, __func__));
2259 			return 1;
2260 		}
2261 
2262 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2263 			dfailprintk(ioc,
2264 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2265 					     __FILE__, __LINE__, __func__));
2266 			return 1;
2267 		}
2268 
2269 		/*
2270 		 * WARPDRIVE: Initialize the required data for Direct IO
2271 		 */
2272 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2273 
2274 		/* RAID Queue Depth Support
2275 		 * IS volume = underlying qdepth of drive type, either
2276 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2277 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2278 		 */
2279 		if (raid_device->device_info &
2280 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2281 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2282 			ds = "SSP";
2283 		} else {
2284 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2285 			if (raid_device->device_info &
2286 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2287 				ds = "SATA";
2288 			else
2289 				ds = "STP";
2290 		}
2291 
2292 		switch (raid_device->volume_type) {
2293 		case MPI2_RAID_VOL_TYPE_RAID0:
2294 			r_level = "RAID0";
2295 			break;
2296 		case MPI2_RAID_VOL_TYPE_RAID1E:
2297 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2298 			if (ioc->manu_pg10.OEMIdentifier &&
2299 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2300 			    MFG10_GF0_R10_DISPLAY) &&
2301 			    !(raid_device->num_pds % 2))
2302 				r_level = "RAID10";
2303 			else
2304 				r_level = "RAID1E";
2305 			break;
2306 		case MPI2_RAID_VOL_TYPE_RAID1:
2307 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2308 			r_level = "RAID1";
2309 			break;
2310 		case MPI2_RAID_VOL_TYPE_RAID10:
2311 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2312 			r_level = "RAID10";
2313 			break;
2314 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2315 		default:
2316 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2317 			r_level = "RAIDX";
2318 			break;
2319 		}
2320 
2321 		if (!ioc->hide_ir_msg)
2322 			sdev_printk(KERN_INFO, sdev,
2323 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2324 			    " pd_count(%d), type(%s)\n",
2325 			    r_level, raid_device->handle,
2326 			    (unsigned long long)raid_device->wwid,
2327 			    raid_device->num_pds, ds);
2328 
2329 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2330 			blk_queue_max_hw_sectors(sdev->request_queue,
2331 						MPT3SAS_RAID_MAX_SECTORS);
2332 			sdev_printk(KERN_INFO, sdev,
2333 					"Set queue's max_sector to: %u\n",
2334 						MPT3SAS_RAID_MAX_SECTORS);
2335 		}
2336 
2337 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2338 
2339 		/* raid transport support */
2340 		if (!ioc->is_warpdrive)
2341 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2342 		return 0;
2343 	}
2344 
2345 	/* non-raid handling */
2346 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2347 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2348 		    &volume_handle)) {
2349 			dfailprintk(ioc,
2350 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2351 					     __FILE__, __LINE__, __func__));
2352 			return 1;
2353 		}
2354 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2355 		    volume_handle, &volume_wwid)) {
2356 			dfailprintk(ioc,
2357 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2358 					     __FILE__, __LINE__, __func__));
2359 			return 1;
2360 		}
2361 	}
2362 
2363 	/* PCIe handling */
2364 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2365 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2366 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2367 				sas_device_priv_data->sas_target->sas_address);
2368 		if (!pcie_device) {
2369 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2370 			dfailprintk(ioc,
2371 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2372 					     __FILE__, __LINE__, __func__));
2373 			return 1;
2374 		}
2375 
2376 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2377 		ds = "NVMe";
2378 		sdev_printk(KERN_INFO, sdev,
2379 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2380 			ds, handle, (unsigned long long)pcie_device->wwid,
2381 			pcie_device->port_num);
2382 		if (pcie_device->enclosure_handle != 0)
2383 			sdev_printk(KERN_INFO, sdev,
2384 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2385 			ds,
2386 			(unsigned long long)pcie_device->enclosure_logical_id,
2387 			pcie_device->slot);
2388 		if (pcie_device->connector_name[0] != '\0')
2389 			sdev_printk(KERN_INFO, sdev,
2390 				"%s: enclosure level(0x%04x), "
2391 				"connector name( %s)\n", ds,
2392 				pcie_device->enclosure_level,
2393 				pcie_device->connector_name);
2394 
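		/*
		 * nvme_mdts appears to hold the device's maximum data
		 * transfer size in bytes; dividing by 512 converts it to
		 * the 512-byte sector units that
		 * blk_queue_max_hw_sectors() expects.
		 */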
2395 		if (pcie_device->nvme_mdts)
2396 			blk_queue_max_hw_sectors(sdev->request_queue,
2397 					pcie_device->nvme_mdts/512);
2398 
2399 		pcie_device_put(pcie_device);
2400 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2401 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2402 		/*
2403 		 * Enable the QUEUE_FLAG_NOMERGES flag so that I/Os are not
2404 		 * merged; this avoids the holes that merging would create.
2405 		 */
2406 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2407 				sdev->request_queue);
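		/*
		 * Constrain scatter-gather boundaries to the controller
		 * page size (ioc->page_size is assumed to be the NVMe
		 * controller page size) so requests can be mapped into
		 * NVMe PRP entries.
		 */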
2408 		blk_queue_virt_boundary(sdev->request_queue,
2409 				ioc->page_size - 1);
2410 		return 0;
2411 	}
2412 
2413 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2414 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2415 	   sas_device_priv_data->sas_target->sas_address);
2416 	if (!sas_device) {
2417 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2418 		dfailprintk(ioc,
2419 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2420 				     __FILE__, __LINE__, __func__));
2421 		return 1;
2422 	}
2423 
2424 	sas_device->volume_handle = volume_handle;
2425 	sas_device->volume_wwid = volume_wwid;
2426 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2427 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2428 		ssp_target = 1;
2429 		if (sas_device->device_info &
2430 				MPI2_SAS_DEVICE_INFO_SEP) {
2431 			sdev_printk(KERN_WARNING, sdev,
2432 			"set ignore_delay_remove for handle(0x%04x)\n",
2433 			sas_device_priv_data->sas_target->handle);
2434 			sas_device_priv_data->ignore_delay_remove = 1;
2435 			ds = "SES";
2436 		} else
2437 			ds = "SSP";
2438 	} else {
2439 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2440 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2441 			ds = "STP";
2442 		else if (sas_device->device_info &
2443 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2444 			ds = "SATA";
2445 	}
2446 
2447 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), "
2448 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2449 	    ds, handle, (unsigned long long)sas_device->sas_address,
2450 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2451 
2452 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2453 
2454 	sas_device_put(sas_device);
2455 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2456 
2457 	if (!ssp_target)
2458 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2459
2461 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2462 
2463 	if (ssp_target) {
2464 		sas_read_port_mode_page(sdev);
2465 		_scsih_enable_tlr(ioc, sdev);
2466 	}
2467 
2468 	return 0;
2469 }
2470 
2471 /**
2472  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2473  * @sdev: scsi device struct
2474  * @bdev: pointer to block device context
2475  * @capacity: device size (in 512 byte sectors)
2476  * @params: three element array to place output:
2477  *              params[0] number of heads (max 255)
2478  *              params[1] number of sectors (max 63)
2479  *              params[2] number of cylinders
2480  */
2481 static int
2482 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2483 	sector_t capacity, int params[])
2484 {
2485 	int		heads;
2486 	int		sectors;
2487 	sector_t	cylinders;
2488 	ulong		dummy;
2489 
2490 	heads = 64;
2491 	sectors = 32;
2492 
2493 	dummy = heads * sectors;
2494 	cylinders = capacity;
2495 	sector_div(cylinders, dummy);
2496 
2497 	/*
2498 	 * Handle extended translation size for logical drives
2499 	 * > 1Gb
2500 	 */
2501 	if ((ulong)capacity >= 0x200000) {
2502 		heads = 255;
2503 		sectors = 63;
2504 		dummy = heads * sectors;
2505 		cylinders = capacity;
2506 		sector_div(cylinders, dummy);
2507 	}
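	/*
	 * For example, a 1 TiB disk (2147483648 512-byte sectors) reports
	 * 255 heads, 63 sectors per track and 2147483648 / (255 * 63) =
	 * 133674 cylinders.
	 */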
2508 
2509 	/* return result */
2510 	params[0] = heads;
2511 	params[1] = sectors;
2512 	params[2] = cylinders;
2513 
2514 	return 0;
2515 }
2516 
2517 /**
2518  * _scsih_response_code - translation of device response code
2519  * @ioc: per adapter object
2520  * @response_code: response code returned by the device
2521  */
2522 static void
2523 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2524 {
2525 	char *desc;
2526 
2527 	switch (response_code) {
2528 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2529 		desc = "task management request completed";
2530 		break;
2531 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2532 		desc = "invalid frame";
2533 		break;
2534 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2535 		desc = "task management request not supported";
2536 		break;
2537 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2538 		desc = "task management request failed";
2539 		break;
2540 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2541 		desc = "task management request succeeded";
2542 		break;
2543 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2544 		desc = "invalid lun";
2545 		break;
2546 	case MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG:
2547 		desc = "overlapped tag attempted";
2548 		break;
2549 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2550 		desc = "task queued, however not sent to target";
2551 		break;
2552 	default:
2553 		desc = "unknown";
2554 		break;
2555 	}
2556 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2557 }
2558 
2559 /**
2560  * _scsih_tm_done - tm completion routine
2561  * @ioc: per adapter object
2562  * @smid: system request message index
2563  * @msix_index: MSIX table index supplied by the OS
2564  * @reply: reply message frame(lower 32bit addr)
2565  * Context: none.
2566  *
2567  * The callback handler when using scsih_issue_tm.
2568  *
2569  * Return: 1 meaning mf should be freed from _base_interrupt
2570  *         0 means the mf is freed from this function.
2571  */
2572 static u8
2573 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2574 {
2575 	MPI2DefaultReply_t *mpi_reply;
2576 
2577 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2578 		return 1;
2579 	if (ioc->tm_cmds.smid != smid)
2580 		return 1;
2581 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2582 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2583 	if (mpi_reply) {
2584 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2585 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2586 	}
2587 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2588 	complete(&ioc->tm_cmds.done);
2589 	return 1;
2590 }
2591 
2592 /**
2593  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2594  * @ioc: per adapter object
2595  * @handle: device handle
2596  *
2597  * During a task management request, we need to freeze the device queue.
2598  */
2599 void
2600 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2601 {
2602 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2603 	struct scsi_device *sdev;
2604 	u8 skip = 0;
2605 
2606 	shost_for_each_device(sdev, ioc->shost) {
2607 		if (skip)
2608 			continue;
2609 		sas_device_priv_data = sdev->hostdata;
2610 		if (!sas_device_priv_data)
2611 			continue;
2612 		if (sas_device_priv_data->sas_target->handle == handle) {
2613 			sas_device_priv_data->sas_target->tm_busy = 1;
2614 			skip = 1;
2615 			ioc->ignore_loginfos = 1;
2616 		}
2617 	}
2618 }
2619 
2620 /**
2621  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2622  * @ioc: per adapter object
2623  * @handle: device handle
2624  *
2625  * After the task management request completes, unfreeze the device queue.
2626  */
2627 void
2628 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2629 {
2630 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2631 	struct scsi_device *sdev;
2632 	u8 skip = 0;
2633 
2634 	shost_for_each_device(sdev, ioc->shost) {
2635 		if (skip)
2636 			continue;
2637 		sas_device_priv_data = sdev->hostdata;
2638 		if (!sas_device_priv_data)
2639 			continue;
2640 		if (sas_device_priv_data->sas_target->handle == handle) {
2641 			sas_device_priv_data->sas_target->tm_busy = 0;
2642 			skip = 1;
2643 			ioc->ignore_loginfos = 0;
2644 		}
2645 	}
2646 }
2647 
2648 /**
2649  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2650  * @ioc: per adapter struct
2651  * @handle: device handle
2652  * @lun: lun number
2653  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2654  * @smid_task: smid assigned to the task
2655  * @msix_task: MSIX table index supplied by the OS
2656  * @timeout: timeout in seconds
2657  * @tr_method: Target Reset Method
2658  * Context: user
2659  *
2660  * A generic API for sending task management requests to firmware.
2661  *
2662  * The callback index is set inside ioc->tm_cb_idx.
2663  * The caller is responsible for checking for outstanding commands.
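 * The caller must hold ioc->tm_cmds.mutex; mpt3sas_scsih_issue_locked_tm()
 * is the wrapper that acquires it.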
2664  *
2665  * Return: SUCCESS or FAILED.
2666  */
2667 int
2668 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2669 	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2670 {
2671 	Mpi2SCSITaskManagementRequest_t *mpi_request;
2672 	Mpi2SCSITaskManagementReply_t *mpi_reply;
2673 	u16 smid = 0;
2674 	u32 ioc_state;
2675 	int rc;
2676 
2677 	lockdep_assert_held(&ioc->tm_cmds.mutex);
2678 
2679 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2680 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2681 		return FAILED;
2682 	}
2683 
2684 	if (ioc->shost_recovery || ioc->remove_host ||
2685 	    ioc->pci_error_recovery) {
2686 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2687 		return FAILED;
2688 	}
2689 
2690 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2691 	if (ioc_state & MPI2_DOORBELL_USED) {
2692 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2693 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2694 		return (!rc) ? SUCCESS : FAILED;
2695 	}
2696 
2697 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2698 		mpt3sas_base_fault_info(ioc, ioc_state &
2699 		    MPI2_DOORBELL_DATA_MASK);
2700 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2701 		return (!rc) ? SUCCESS : FAILED;
2702 	}
2703 
2704 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2705 	if (!smid) {
2706 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2707 		return FAILED;
2708 	}
2709 
2710 	dtmprintk(ioc,
2711 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2712 			   handle, type, smid_task, timeout, tr_method));
2713 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
2714 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2715 	ioc->tm_cmds.smid = smid;
2716 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2717 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2718 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2719 	mpi_request->DevHandle = cpu_to_le16(handle);
2720 	mpi_request->TaskType = type;
2721 	mpi_request->MsgFlags = tr_method;
2722 	mpi_request->TaskMID = cpu_to_le16(smid_task);
2723 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2724 	mpt3sas_scsih_set_tm_flag(ioc, handle);
2725 	init_completion(&ioc->tm_cmds.done);
2726 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
2727 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2728 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2729 		if (mpt3sas_base_check_cmd_timeout(ioc,
2730 			ioc->tm_cmds.status, mpi_request,
2731 			sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2732 			rc = mpt3sas_base_hard_reset_handler(ioc,
2733 					FORCE_BIG_HAMMER);
2734 			rc = (!rc) ? SUCCESS : FAILED;
2735 			goto out;
2736 		}
2737 	}
2738 
2739 	/* sync IRQs in case those were busy during flush. */
2740 	mpt3sas_base_sync_reply_irqs(ioc);
2741 
2742 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2743 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2744 		mpi_reply = ioc->tm_cmds.reply;
2745 		dtmprintk(ioc,
2746 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2747 				   le16_to_cpu(mpi_reply->IOCStatus),
2748 				   le32_to_cpu(mpi_reply->IOCLogInfo),
2749 				   le32_to_cpu(mpi_reply->TerminationCount)));
2750 		if (ioc->logging_level & MPT_DEBUG_TM) {
2751 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
2752 			if (mpi_reply->IOCStatus)
2753 				_debug_dump_mf(mpi_request,
2754 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2755 		}
2756 	}
2757 	rc = SUCCESS;
2758 
2759 out:
2760 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
2761 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2762 	return rc;
2763 }
2764 
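/**
 * mpt3sas_scsih_issue_locked_tm - mutex-locked wrapper for issuing tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Acquires ioc->tm_cmds.mutex and calls mpt3sas_scsih_issue_tm(), which
 * asserts that the mutex is held.
 *
 * Return: SUCCESS or FAILED.
 */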
2765 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2766 		u64 lun, u8 type, u16 smid_task, u16 msix_task,
2767 		u8 timeout, u8 tr_method)
2768 {
2769 	int ret;
2770 
2771 	mutex_lock(&ioc->tm_cmds.mutex);
2772 	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2773 			msix_task, timeout, tr_method);
2774 	mutex_unlock(&ioc->tm_cmds.mutex);
2775 
2776 	return ret;
2777 }
2778 
2779 /**
2780  * _scsih_tm_display_info - displays info about the device
2781  * @ioc: per adapter struct
2782  * @scmd: pointer to scsi command object
2783  *
2784  * Called by task management callback handlers.
2785  */
2786 static void
2787 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2788 {
2789 	struct scsi_target *starget = scmd->device->sdev_target;
2790 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2791 	struct _sas_device *sas_device = NULL;
2792 	struct _pcie_device *pcie_device = NULL;
2793 	unsigned long flags;
2794 	char *device_str = NULL;
2795 
2796 	if (!priv_target)
2797 		return;
2798 	if (ioc->hide_ir_msg)
2799 		device_str = "WarpDrive";
2800 	else
2801 		device_str = "volume";
2802 
2803 	scsi_print_command(scmd);
2804 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2805 		starget_printk(KERN_INFO, starget,
2806 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
2807 			device_str, priv_target->handle,
2808 		    device_str, (unsigned long long)priv_target->sas_address);
2809 
2810 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2811 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2812 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2813 		if (pcie_device) {
2814 			starget_printk(KERN_INFO, starget,
2815 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2816 				pcie_device->handle,
2817 				(unsigned long long)pcie_device->wwid,
2818 				pcie_device->port_num);
2819 			if (pcie_device->enclosure_handle != 0)
2820 				starget_printk(KERN_INFO, starget,
2821 					"enclosure logical id(0x%016llx), slot(%d)\n",
2822 					(unsigned long long)
2823 					pcie_device->enclosure_logical_id,
2824 					pcie_device->slot);
2825 			if (pcie_device->connector_name[0] != '\0')
2826 				starget_printk(KERN_INFO, starget,
2827 					"enclosure level(0x%04x), connector name( %s)\n",
2828 					pcie_device->enclosure_level,
2829 					pcie_device->connector_name);
2830 			pcie_device_put(pcie_device);
2831 		}
2832 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2833 
2834 	} else {
2835 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2836 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2837 		if (sas_device) {
2838 			if (priv_target->flags &
2839 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2840 				starget_printk(KERN_INFO, starget,
2841 				    "volume handle(0x%04x), "
2842 				    "volume wwid(0x%016llx)\n",
2843 				    sas_device->volume_handle,
2844 				   (unsigned long long)sas_device->volume_wwid);
2845 			}
2846 			starget_printk(KERN_INFO, starget,
2847 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2848 			    sas_device->handle,
2849 			    (unsigned long long)sas_device->sas_address,
2850 			    sas_device->phy);
2851 
2852 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
2853 			    NULL, starget);
2854 
2855 			sas_device_put(sas_device);
2856 		}
2857 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2858 	}
2859 }
2860 
2861 /**
2862  * scsih_abort - eh threads main abort routine
2863  * @scmd: pointer to scsi command object
2864  *
2865  * Return: SUCCESS if command aborted else FAILED
2866  */
2867 static int
2868 scsih_abort(struct scsi_cmnd *scmd)
2869 {
2870 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2871 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2872 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2873 	u16 handle;
2874 	int r;
2875 
2876 	u8 timeout = 30;
2877 	struct _pcie_device *pcie_device = NULL;
2878 	sdev_printk(KERN_INFO, scmd->device,
2879 		"attempting task abort! scmd(%p)\n", scmd);
2880 	_scsih_tm_display_info(ioc, scmd);
2881 
2882 	sas_device_priv_data = scmd->device->hostdata;
2883 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2884 	    ioc->remove_host) {
2885 		sdev_printk(KERN_INFO, scmd->device,
2886 			"device has been deleted! scmd(%p)\n", scmd);
2887 		scmd->result = DID_NO_CONNECT << 16;
2888 		scmd->scsi_done(scmd);
2889 		r = SUCCESS;
2890 		goto out;
2891 	}
2892 
2893 	/* check for completed command */
2894 	if (st == NULL || st->cb_idx == 0xFF) {
2895 		scmd->result = DID_RESET << 16;
2896 		r = SUCCESS;
2897 		goto out;
2898 	}
2899 
2900 	/* for hidden raid components and volumes this is not supported */
2901 	if (sas_device_priv_data->sas_target->flags &
2902 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
2903 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2904 		scmd->result = DID_RESET << 16;
2905 		r = FAILED;
2906 		goto out;
2907 	}
2908 
2909 	mpt3sas_halt_firmware(ioc);
2910 
2911 	handle = sas_device_priv_data->sas_target->handle;
2912 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2913 	if (pcie_device && (!ioc->tm_custom_handling) &&
2914 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
2915 		timeout = ioc->nvme_abort_timeout;
2916 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2917 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2918 		st->smid, st->msix_io, timeout, 0);
2919 	/* Command must be cleared after abort */
2920 	if (r == SUCCESS && st->cb_idx != 0xFF)
2921 		r = FAILED;
2922  out:
2923 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2924 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2925 	if (pcie_device)
2926 		pcie_device_put(pcie_device);
2927 	return r;
2928 }
2929 
2930 /**
2931  * scsih_dev_reset - eh threads main device reset routine
2932  * @scmd: pointer to scsi command object
2933  *
2934  * Return: SUCCESS if the device was reset, else FAILED
2935  */
2936 static int
2937 scsih_dev_reset(struct scsi_cmnd *scmd)
2938 {
2939 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2940 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2941 	struct _sas_device *sas_device = NULL;
2942 	struct _pcie_device *pcie_device = NULL;
2943 	u16	handle;
2944 	u8	tr_method = 0;
2945 	u8	tr_timeout = 30;
2946 	int r;
2947 
2948 	struct scsi_target *starget = scmd->device->sdev_target;
2949 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2950 
2951 	sdev_printk(KERN_INFO, scmd->device,
2952 		"attempting device reset! scmd(%p)\n", scmd);
2953 	_scsih_tm_display_info(ioc, scmd);
2954 
2955 	sas_device_priv_data = scmd->device->hostdata;
2956 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2957 	    ioc->remove_host) {
2958 		sdev_printk(KERN_INFO, scmd->device,
2959 			"device has been deleted! scmd(%p)\n", scmd);
2960 		scmd->result = DID_NO_CONNECT << 16;
2961 		scmd->scsi_done(scmd);
2962 		r = SUCCESS;
2963 		goto out;
2964 	}
2965 
2966 	/* for hidden raid components obtain the volume_handle */
2967 	handle = 0;
2968 	if (sas_device_priv_data->sas_target->flags &
2969 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2970 		sas_device = mpt3sas_get_sdev_from_target(ioc,
2971 				target_priv_data);
2972 		if (sas_device)
2973 			handle = sas_device->volume_handle;
2974 	} else
2975 		handle = sas_device_priv_data->sas_target->handle;
2976 
2977 	if (!handle) {
2978 		scmd->result = DID_RESET << 16;
2979 		r = FAILED;
2980 		goto out;
2981 	}
2982 
2983 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2984 
2985 	if (pcie_device && (!ioc->tm_custom_handling) &&
2986 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
2987 		tr_timeout = pcie_device->reset_timeout;
2988 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2989 	} else
2990 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2991 
2992 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2993 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2994 		tr_timeout, tr_method);
2995 	/* Check for busy commands after reset */
2996 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2997 		r = FAILED;
2998  out:
2999 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
3000 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3001 
3002 	if (sas_device)
3003 		sas_device_put(sas_device);
3004 	if (pcie_device)
3005 		pcie_device_put(pcie_device);
3006 
3007 	return r;
3008 }
3009 
3010 /**
3011  * scsih_target_reset - eh threads main target reset routine
3012  * @scmd: pointer to scsi command object
3013  *
3014  * Return: SUCCESS if the target was reset, else FAILED
3015  */
3016 static int
3017 scsih_target_reset(struct scsi_cmnd *scmd)
3018 {
3019 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3020 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3021 	struct _sas_device *sas_device = NULL;
3022 	struct _pcie_device *pcie_device = NULL;
3023 	u16	handle;
3024 	u8	tr_method = 0;
3025 	u8	tr_timeout = 30;
3026 	int r;
3027 	struct scsi_target *starget = scmd->device->sdev_target;
3028 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3029 
3030 	starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
3031 		scmd);
3032 	_scsih_tm_display_info(ioc, scmd);
3033 
3034 	sas_device_priv_data = scmd->device->hostdata;
3035 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3036 	    ioc->remove_host) {
3037 		starget_printk(KERN_INFO, starget, "target has been deleted! scmd(%p)\n",
3038 			scmd);
3039 		scmd->result = DID_NO_CONNECT << 16;
3040 		scmd->scsi_done(scmd);
3041 		r = SUCCESS;
3042 		goto out;
3043 	}
3044 
3045 	/* for hidden raid components obtain the volume_handle */
3046 	handle = 0;
3047 	if (sas_device_priv_data->sas_target->flags &
3048 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3049 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3050 				target_priv_data);
3051 		if (sas_device)
3052 			handle = sas_device->volume_handle;
3053 	} else
3054 		handle = sas_device_priv_data->sas_target->handle;
3055 
3056 	if (!handle) {
3057 		scmd->result = DID_RESET << 16;
3058 		r = FAILED;
3059 		goto out;
3060 	}
3061 
3062 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3063 
3064 	if (pcie_device && (!ioc->tm_custom_handling) &&
3065 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3066 		tr_timeout = pcie_device->reset_timeout;
3067 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3068 	} else
3069 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3070 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3071 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3072 	    tr_timeout, tr_method);
3073 	/* Check for busy commands after reset */
3074 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3075 		r = FAILED;
3076  out:
3077 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3078 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3079 
3080 	if (sas_device)
3081 		sas_device_put(sas_device);
3082 	if (pcie_device)
3083 		pcie_device_put(pcie_device);
3084 	return r;
3085 }
3086
3088 /**
3089  * scsih_host_reset - eh threads main host reset routine
3090  * @scmd: pointer to scsi command object
3091  *
3092  * Return: SUCCESS if the host was reset, else FAILED
3093  */
3094 static int
3095 scsih_host_reset(struct scsi_cmnd *scmd)
3096 {
3097 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3098 	int r, retval;
3099 
3100 	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3101 	scsi_print_command(scmd);
3102 
3103 	if (ioc->is_driver_loading || ioc->remove_host) {
3104 		ioc_info(ioc, "Blocking the host reset\n");
3105 		r = FAILED;
3106 		goto out;
3107 	}
3108 
3109 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3110 	r = (retval < 0) ? FAILED : SUCCESS;
3111 out:
3112 	ioc_info(ioc, "host reset: %s scmd(%p)\n",
3113 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3114 
3115 	return r;
3116 }
3117 
3118 /**
3119  * _scsih_fw_event_add - insert and queue up fw_event
3120  * @ioc: per adapter object
3121  * @fw_event: object describing the event
3122  * Context: This function will acquire ioc->fw_event_lock.
3123  *
3124  * This adds the firmware event object to the fw_event_list, then queues it
3125  * up to be processed from user context.
3126  */
3127 static void
3128 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3129 {
3130 	unsigned long flags;
3131 
3132 	if (ioc->firmware_event_thread == NULL)
3133 		return;
3134 
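	/*
	 * Two references are taken below: one for the event's membership on
	 * fw_event_list (dropped by _scsih_fw_event_del_from_list()) and one
	 * for the queued work (dropped once the work runs or is cancelled).
	 */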
3135 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3136 	fw_event_work_get(fw_event);
3137 	INIT_LIST_HEAD(&fw_event->list);
3138 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3139 	INIT_WORK(&fw_event->work, _firmware_event_work);
3140 	fw_event_work_get(fw_event);
3141 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3142 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3143 }
3144 
3145 /**
3146  * _scsih_fw_event_del_from_list - delete fw_event from the list
3147  * @ioc: per adapter object
3148  * @fw_event: object describing the event
3149  * Context: This function will acquire ioc->fw_event_lock.
3150  *
3151  * If the fw_event is on the fw_event_list, remove it and do a put.
3152  */
3153 static void
3154 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3155 	*fw_event)
3156 {
3157 	unsigned long flags;
3158 
3159 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3160 	if (!list_empty(&fw_event->list)) {
3161 		list_del_init(&fw_event->list);
3162 		fw_event_work_put(fw_event);
3163 	}
3164 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3165 }
3166
3168 /**
3169  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3170  * @ioc: per adapter object
3171  * @event_data: trigger event data
3172  */
3173 void
3174 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3175 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3176 {
3177 	struct fw_event_work *fw_event;
3178 	u16 sz;
3179 
3180 	if (ioc->is_driver_loading)
3181 		return;
3182 	sz = sizeof(*event_data);
3183 	fw_event = alloc_fw_event_work(sz);
3184 	if (!fw_event)
3185 		return;
3186 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3187 	fw_event->ioc = ioc;
3188 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3189 	_scsih_fw_event_add(ioc, fw_event);
3190 	fw_event_work_put(fw_event);
3191 }
3192 
3193 /**
3194  * _scsih_error_recovery_delete_devices - remove devices not responding
3195  * @ioc: per adapter object
3196  */
3197 static void
3198 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3199 {
3200 	struct fw_event_work *fw_event;
3201 
3202 	if (ioc->is_driver_loading)
3203 		return;
3204 	fw_event = alloc_fw_event_work(0);
3205 	if (!fw_event)
3206 		return;
3207 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3208 	fw_event->ioc = ioc;
3209 	_scsih_fw_event_add(ioc, fw_event);
3210 	fw_event_work_put(fw_event);
3211 }
3212 
3213 /**
3214  * mpt3sas_port_enable_complete - port enable completed (fake event)
3215  * @ioc: per adapter object
3216  */
3217 void
3218 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3219 {
3220 	struct fw_event_work *fw_event;
3221 
3222 	fw_event = alloc_fw_event_work(0);
3223 	if (!fw_event)
3224 		return;
3225 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3226 	fw_event->ioc = ioc;
3227 	_scsih_fw_event_add(ioc, fw_event);
3228 	fw_event_work_put(fw_event);
3229 }
3230 
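/**
 * dequeue_next_fw_event - take the first event off ioc->fw_event_list
 * @ioc: per adapter object
 *
 * Return: the dequeued fw_event_work, or NULL if the list is empty. The
 * caller inherits the reference that was held for list membership.
 */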
3231 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3232 {
3233 	unsigned long flags;
3234 	struct fw_event_work *fw_event = NULL;
3235 
3236 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3237 	if (!list_empty(&ioc->fw_event_list)) {
3238 		fw_event = list_first_entry(&ioc->fw_event_list,
3239 				struct fw_event_work, list);
3240 		list_del_init(&fw_event->list);
3241 	}
3242 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3243 
3244 	return fw_event;
3245 }
3246 
3247 /**
3248  * _scsih_fw_event_cleanup_queue - cleanup event queue
3249  * @ioc: per adapter object
3250  *
3251  * Walk the firmware event queue, either killing timers, or waiting
3252  * for outstanding events to complete
3253  */
3254 static void
3255 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3256 {
3257 	struct fw_event_work *fw_event;
3258 
3259 	if (list_empty(&ioc->fw_event_list) ||
3260 	     !ioc->firmware_event_thread || in_interrupt())
3261 		return;
3262 
3263 	while ((fw_event = dequeue_next_fw_event(ioc))) {
3264 		/*
3265 		 * Wait on the fw_event to complete. If this returns 1, then
3266 		 * the event was never executed, and we need a put for the
3267 		 * reference the work had on the fw_event.
3268 		 *
3269 		 * If it did execute, we wait for it to finish, and the put will
3270 		 * happen from _firmware_event_work()
3271 		 */
3272 		if (cancel_work_sync(&fw_event->work))
3273 			fw_event_work_put(fw_event);
3274 
3275 		fw_event_work_put(fw_event);
3276 	}
3277 }
3278 
3279 /**
3280  * _scsih_internal_device_block - block the sdev device
3281  * @sdev: per device object
3282  * @sas_device_priv_data: per device driver private data
3283  *
3284  * Make sure the device is blocked; if blocking fails,
3285  * print a warning.
3286  */
3287 static void
3288 _scsih_internal_device_block(struct scsi_device *sdev,
3289 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3290 {
3291 	int r = 0;
3292 
3293 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3294 	    sas_device_priv_data->sas_target->handle);
3295 	sas_device_priv_data->block = 1;
3296 
3297 	r = scsi_internal_device_block_nowait(sdev);
3298 	if (r == -EINVAL)
3299 		sdev_printk(KERN_WARNING, sdev,
3300 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3301 		    r, sas_device_priv_data->sas_target->handle);
3302 }
3303 
3304 /**
3305  * _scsih_internal_device_unblock - unblock the sdev device
3306  * @sdev: per device object
3307  * @sas_device_priv_data: per device driver private data
3308  *
3309  * Make sure the device is unblocked; if unblocking fails, retry by
3310  * blocking and then unblocking again.
3311  */
3312 static void
3313 _scsih_internal_device_unblock(struct scsi_device *sdev,
3314 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3315 {
3316 	int r = 0;
3317 
3318 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3319 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3320 	sas_device_priv_data->block = 0;
3321 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3322 	if (r == -EINVAL) {
3323 		/* The device has been set to SDEV_RUNNING by SD layer during
3324 		 * device addition but the request queue is still stopped by
3325 		 * our earlier block call. We need to perform a block again
3326 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3327 
3328 		sdev_printk(KERN_WARNING, sdev,
3329 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3330 		    "performing a block followed by an unblock\n",
3331 		    r, sas_device_priv_data->sas_target->handle);
3332 		sas_device_priv_data->block = 1;
3333 		r = scsi_internal_device_block_nowait(sdev);
3334 		if (r)
3335 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3336 			    "failed with return(%d) for handle(0x%04x)\n",
3337 			    r, sas_device_priv_data->sas_target->handle);
3338 
3339 		sas_device_priv_data->block = 0;
3340 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3341 		if (r)
3342 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3343 			    " failed with return(%d) for handle(0x%04x)\n",
3344 			    r, sas_device_priv_data->sas_target->handle);
3345 	}
3346 }
3347 
3348 /**
3349  * _scsih_ublock_io_all_device - unblock every device
3350  * @ioc: per adapter object
3351  *
3352  * change the device state from block to running
3353  */
3354 static void
3355 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3356 {
3357 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3358 	struct scsi_device *sdev;
3359 
3360 	shost_for_each_device(sdev, ioc->shost) {
3361 		sas_device_priv_data = sdev->hostdata;
3362 		if (!sas_device_priv_data)
3363 			continue;
3364 		if (!sas_device_priv_data->block)
3365 			continue;
3366 
3367 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3368 			"device_running, handle(0x%04x)\n",
3369 		    sas_device_priv_data->sas_target->handle));
3370 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3371 	}
3372 }
3373
3375 /**
3376  * _scsih_ublock_io_device - prepare device to be deleted
3377  * @ioc: per adapter object
3378  * @sas_address: sas address
3379  *
3380  * Unblock any blocked sdev that matches @sas_address.
3381  */
3382 static void
3383 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3384 {
3385 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3386 	struct scsi_device *sdev;
3387 
3388 	shost_for_each_device(sdev, ioc->shost) {
3389 		sas_device_priv_data = sdev->hostdata;
3390 		if (!sas_device_priv_data)
3391 			continue;
3392 		if (sas_device_priv_data->sas_target->sas_address
3393 		    != sas_address)
3394 			continue;
3395 		if (sas_device_priv_data->block)
3396 			_scsih_internal_device_unblock(sdev,
3397 				sas_device_priv_data);
3398 	}
3399 }
3400 
3401 /**
3402  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3403  * @ioc: per adapter object
3404  *
3405  * During device pull we need to appropriately set the sdev state.
3406  */
3407 static void
3408 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3409 {
3410 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3411 	struct scsi_device *sdev;
3412 
3413 	shost_for_each_device(sdev, ioc->shost) {
3414 		sas_device_priv_data = sdev->hostdata;
3415 		if (!sas_device_priv_data)
3416 			continue;
3417 		if (sas_device_priv_data->block)
3418 			continue;
3419 		if (sas_device_priv_data->ignore_delay_remove) {
3420 			sdev_printk(KERN_INFO, sdev,
3421 			"%s skip device_block for SES handle(0x%04x)\n",
3422 			__func__, sas_device_priv_data->sas_target->handle);
3423 			continue;
3424 		}
3425 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3426 	}
3427 }
3428 
3429 /**
3430  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3431  * @ioc: per adapter object
3432  * @handle: device handle
3433  *
3434  * During device pull we need to appropriately set the sdev state.
3435  */
3436 static void
3437 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3438 {
3439 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3440 	struct scsi_device *sdev;
3441 	struct _sas_device *sas_device;
3442 
3443 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3444 
3445 	shost_for_each_device(sdev, ioc->shost) {
3446 		sas_device_priv_data = sdev->hostdata;
3447 		if (!sas_device_priv_data)
3448 			continue;
3449 		if (sas_device_priv_data->sas_target->handle != handle)
3450 			continue;
3451 		if (sas_device_priv_data->block)
3452 			continue;
3453 		if (sas_device && sas_device->pend_sas_rphy_add)
3454 			continue;
3455 		if (sas_device_priv_data->ignore_delay_remove) {
3456 			sdev_printk(KERN_INFO, sdev,
3457 			"%s skip device_block for SES handle(0x%04x)\n",
3458 			__func__, sas_device_priv_data->sas_target->handle);
3459 			continue;
3460 		}
3461 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3462 	}
3463 
3464 	if (sas_device)
3465 		sas_device_put(sas_device);
3466 }
3467 
3468 /**
3469  * _scsih_block_io_to_children_attached_to_ex - block I/O to devices attached to an expander
3470  * @ioc: per adapter object
3471  * @sas_expander: the sas_node object for the expander
3472  *
3473  * This routine sets the sdev state to SDEV_BLOCK for all devices
3474  * attached to this expander. It is called when the expander is
3475  * pulled.
3476  */
3477 static void
3478 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3479 	struct _sas_node *sas_expander)
3480 {
3481 	struct _sas_port *mpt3sas_port;
3482 	struct _sas_device *sas_device;
3483 	struct _sas_node *expander_sibling;
3484 	unsigned long flags;
3485 
3486 	if (!sas_expander)
3487 		return;
3488 
3489 	list_for_each_entry(mpt3sas_port,
3490 	   &sas_expander->sas_port_list, port_list) {
3491 		if (mpt3sas_port->remote_identify.device_type ==
3492 		    SAS_END_DEVICE) {
3493 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3494 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3495 			    mpt3sas_port->remote_identify.sas_address);
3496 			if (sas_device) {
3497 				set_bit(sas_device->handle,
3498 						ioc->blocking_handles);
3499 				sas_device_put(sas_device);
3500 			}
3501 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3502 		}
3503 	}
3504 
3505 	list_for_each_entry(mpt3sas_port,
3506 	   &sas_expander->sas_port_list, port_list) {
3507 
3508 		if (mpt3sas_port->remote_identify.device_type ==
3509 		    SAS_EDGE_EXPANDER_DEVICE ||
3510 		    mpt3sas_port->remote_identify.device_type ==
3511 		    SAS_FANOUT_EXPANDER_DEVICE) {
3512 			expander_sibling =
3513 			    mpt3sas_scsih_expander_find_by_sas_address(
3514 			    ioc, mpt3sas_port->remote_identify.sas_address);
3515 			_scsih_block_io_to_children_attached_to_ex(ioc,
3516 			    expander_sibling);
3517 		}
3518 	}
3519 }
3520 
3521 /**
3522  * _scsih_block_io_to_children_attached_directly - block I/O to directly attached devices
3523  * @ioc: per adapter object
3524  * @event_data: topology change event data
3525  *
3526  * This routine sets the sdev state to SDEV_BLOCK for all directly
3527  * attached devices during device pull.
3528  */
3529 static void
3530 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3531 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3532 {
3533 	int i;
3534 	u16 handle;
3535 	u16 reason_code;
3536 
3537 	for (i = 0; i < event_data->NumEntries; i++) {
3538 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3539 		if (!handle)
3540 			continue;
3541 		reason_code = event_data->PHY[i].PhyStatus &
3542 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3543 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3544 			_scsih_block_io_device(ioc, handle);
3545 	}
3546 }
3547 
3548 /**
3549  * _scsih_block_io_to_pcie_children_attached_directly - block I/O to directly attached PCIe devices
3550  * @ioc: per adapter object
3551  * @event_data: topology change event data
3552  *
3553  * This routine sets the sdev state to SDEV_BLOCK for all directly
3554  * attached devices during device pull/reconnect.
3555  */
3556 static void
3557 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3558 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3559 {
3560 	int i;
3561 	u16 handle;
3562 	u16 reason_code;
3563 
3564 	for (i = 0; i < event_data->NumEntries; i++) {
3565 		handle =
3566 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3567 		if (!handle)
3568 			continue;
3569 		reason_code = event_data->PortEntry[i].PortStatus;
3570 		if (reason_code ==
3571 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3572 			_scsih_block_io_device(ioc, handle);
3573 	}
3574 }
3575 /**
3576  * _scsih_tm_tr_send - send task management request
3577  * @ioc: per adapter object
3578  * @handle: device handle
3579  * Context: interrupt time.
3580  *
3581  * This code initiates the device removal handshake protocol with the
3582  * controller firmware.  This function issues a target reset using the
3583  * high priority request queue; a sas iounit control request
3584  * (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from its completion routine.
3585  *
3586  * This is designed to send multiple task management requests at the
3587  * same time to the fifo. If the fifo is full, the request is appended
3588  * to the delayed list and processed in a future completion.
3589  */
3590 static void
3591 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3592 {
3593 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3594 	u16 smid;
3595 	struct _sas_device *sas_device = NULL;
3596 	struct _pcie_device *pcie_device = NULL;
3597 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3598 	u64 sas_address = 0;
3599 	unsigned long flags;
3600 	struct _tr_list *delayed_tr;
3601 	u32 ioc_state;
3602 	u8 tr_method = 0;
3603 
3604 	if (ioc->pci_error_recovery) {
3605 		dewtprintk(ioc,
3606 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3607 				    __func__, handle));
3608 		return;
3609 	}
3610 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3611 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3612 		dewtprintk(ioc,
3613 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3614 				    __func__, handle));
3615 		return;
3616 	}
3617 
3618 	/* if PD, then return */
3619 	if (test_bit(handle, ioc->pd_handles))
3620 		return;
3621 
3622 	clear_bit(handle, ioc->pend_os_device_add);
3623 
3624 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
3625 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3626 	if (sas_device && sas_device->starget &&
3627 	    sas_device->starget->hostdata) {
3628 		sas_target_priv_data = sas_device->starget->hostdata;
3629 		sas_target_priv_data->deleted = 1;
3630 		sas_address = sas_device->sas_address;
3631 	}
3632 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3633 	if (!sas_device) {
3634 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3635 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3636 		if (pcie_device && pcie_device->starget &&
3637 			pcie_device->starget->hostdata) {
3638 			sas_target_priv_data = pcie_device->starget->hostdata;
3639 			sas_target_priv_data->deleted = 1;
3640 			sas_address = pcie_device->wwid;
3641 		}
3642 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3643 		if (pcie_device && (!ioc->tm_custom_handling) &&
3644 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
3645 		    pcie_device->device_info))))
3646 			tr_method =
3647 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3648 		else
3649 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3650 	}
3651 	if (sas_target_priv_data) {
3652 		dewtprintk(ioc,
3653 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3654 				    handle, (u64)sas_address));
3655 		if (sas_device) {
3656 			if (sas_device->enclosure_handle != 0)
3657 				dewtprintk(ioc,
3658 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3659 						    (u64)sas_device->enclosure_logical_id,
3660 						    sas_device->slot));
3661 			if (sas_device->connector_name[0] != '\0')
3662 				dewtprintk(ioc,
3663 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3664 						    sas_device->enclosure_level,
3665 						    sas_device->connector_name));
3666 		} else if (pcie_device) {
3667 			if (pcie_device->enclosure_handle != 0)
3668 				dewtprintk(ioc,
3669 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3670 						    (u64)pcie_device->enclosure_logical_id,
3671 						    pcie_device->slot));
3672 			if (pcie_device->connector_name[0] != '\0')
3673 				dewtprintk(ioc,
3674 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3675 						    pcie_device->enclosure_level,
3676 						    pcie_device->connector_name));
3677 		}
3678 		_scsih_ublock_io_device(ioc, sas_address);
3679 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3680 	}
3681 
3682 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3683 	if (!smid) {
3684 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3685 		if (!delayed_tr)
3686 			goto out;
3687 		INIT_LIST_HEAD(&delayed_tr->list);
3688 		delayed_tr->handle = handle;
3689 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3690 		dewtprintk(ioc,
3691 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3692 				    handle));
3693 		goto out;
3694 	}
3695 
3696 	dewtprintk(ioc,
3697 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3698 			    handle, smid, ioc->tm_tr_cb_idx));
3699 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3700 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3701 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3702 	mpi_request->DevHandle = cpu_to_le16(handle);
3703 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3704 	mpi_request->MsgFlags = tr_method;
3705 	set_bit(handle, ioc->device_remove_in_progress);
3706 	ioc->put_smid_hi_priority(ioc, smid, 0);
3707 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3708 
3709 out:
3710 	if (sas_device)
3711 		sas_device_put(sas_device);
3712 	if (pcie_device)
3713 		pcie_device_put(pcie_device);
3714 }
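
/*
 * For reference, a sketch of the removal handshake implemented by the
 * routines in this file (all names below already exist above/below):
 *
 *	_scsih_tm_tr_send(ioc, handle)
 *	    --> hi-priority SCSI_TASK_MGMT (TARGET_RESET) for the handle,
 *		or queue the handle on ioc->delayed_tr_list if no smid is free
 *	_scsih_tm_tr_complete()
 *	    --> SAS_IO_UNIT_CONTROL (MPI2_SAS_OP_REMOVE_DEVICE),
 *		or queue the handle on ioc->delayed_sc_list if no smid is free
 *	_scsih_sas_control_complete()
 *	    --> clear the device_remove_in_progress bit and kick the next
 *		delayed internal command, if any
 */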
3715 
3716 /**
3717  * _scsih_tm_tr_complete - target reset completion routine
3718  * @ioc: per adapter object
3719  * @smid: system request message index
3720  * @msix_index: MSIX table index supplied by the OS
3721  * @reply: reply message frame(lower 32bit addr)
3722  * Context: interrupt time.
3723  *
3724  * This is the target reset completion routine.
3725  * This code is part of the code to initiate the device removal
3726  * handshake protocol with controller firmware.
3727  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3728  *
3729  * Return: 1 meaning mf should be freed from _base_interrupt
3730  *         0 means the mf is freed from this function.
3731  */
3732 static u8
3733 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3734 	u32 reply)
3735 {
3736 	u16 handle;
3737 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3738 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3739 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3740 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3741 	u16 smid_sas_ctrl;
3742 	u32 ioc_state;
3743 	struct _sc_list *delayed_sc;
3744 
3745 	if (ioc->pci_error_recovery) {
3746 		dewtprintk(ioc,
3747 			   ioc_info(ioc, "%s: host in pci error recovery\n",
3748 				    __func__));
3749 		return 1;
3750 	}
3751 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3752 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3753 		dewtprintk(ioc,
3754 			   ioc_info(ioc, "%s: host is not operational\n",
3755 				    __func__));
3756 		return 1;
3757 	}
3758 	if (unlikely(!mpi_reply)) {
3759 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3760 			__FILE__, __LINE__, __func__);
3761 		return 1;
3762 	}
3763 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3764 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3765 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3766 		dewtprintk(ioc,
3767 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3768 				   handle,
3769 				   le16_to_cpu(mpi_reply->DevHandle), smid));
3770 		return 0;
3771 	}
3772 
3773 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3774 	dewtprintk(ioc,
3775 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3776 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3777 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3778 			    le32_to_cpu(mpi_reply->TerminationCount)));
3779 
3780 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3781 	if (!smid_sas_ctrl) {
3782 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3783 		if (!delayed_sc)
3784 			return _scsih_check_for_pending_tm(ioc, smid);
3785 		INIT_LIST_HEAD(&delayed_sc->list);
3786 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3787 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3788 		dewtprintk(ioc,
3789 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3790 				    handle));
3791 		return _scsih_check_for_pending_tm(ioc, smid);
3792 	}
3793 
3794 	dewtprintk(ioc,
3795 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3796 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3797 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3798 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3799 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3800 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3801 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
3802 	ioc->put_smid_default(ioc, smid_sas_ctrl);
3803 
3804 	return _scsih_check_for_pending_tm(ioc, smid);
3805 }
3806 
3807 /**
3808  * _scsih_allow_scmd_to_device - check whether scmd can be issued to the IOC
3809  * @ioc: per adapter object
3810  * @scmd: pointer to scsi command object
3811  *
3812  * Return: true if the scmd can be issued to the IOC, otherwise false.
3813  */
3814 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3815 	struct scsi_cmnd *scmd)
3816 {
3817 
3818 	if (ioc->pci_error_recovery)
3819 		return false;
3820 
3821 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3822 		if (ioc->remove_host)
3823 			return false;
3824 
3825 		return true;
3826 	}
3827 
3828 	if (ioc->remove_host) {
3829 
3830 		switch (scmd->cmnd[0]) {
3831 		case SYNCHRONIZE_CACHE:
3832 		case START_STOP:
3833 			return true;
3834 		default:
3835 			return false;
3836 		}
3837 	}
3838 
3839 	return true;
3840 }
3841 
3842 /**
3843  * _scsih_sas_control_complete - completion routine
3844  * @ioc: per adapter object
3845  * @smid: system request message index
3846  * @msix_index: MSIX table index supplied by the OS
3847  * @reply: reply message frame(lower 32bit addr)
3848  * Context: interrupt time.
3849  *
3850  * This is the sas iounit control completion routine.
3851  * This code is part of the code to initiate the device removal
3852  * handshake protocol with controller firmware.
3853  *
3854  * Return: 1 meaning mf should be freed from _base_interrupt
3855  *         0 means the mf is freed from this function.
3856  */
3857 static u8
3858 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3859 	u8 msix_index, u32 reply)
3860 {
3861 	Mpi2SasIoUnitControlReply_t *mpi_reply =
3862 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3863 
3864 	if (likely(mpi_reply)) {
3865 		dewtprintk(ioc,
3866 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3867 				    le16_to_cpu(mpi_reply->DevHandle), smid,
3868 				    le16_to_cpu(mpi_reply->IOCStatus),
3869 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
3870 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
3871 		     MPI2_IOCSTATUS_SUCCESS) {
3872 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3873 			    ioc->device_remove_in_progress);
3874 		}
3875 	} else {
3876 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3877 			__FILE__, __LINE__, __func__);
3878 	}
3879 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3880 }
3881 
3882 /**
3883  * _scsih_tm_tr_volume_send - send target reset request for volumes
3884  * @ioc: per adapter object
3885  * @handle: device handle
3886  * Context: interrupt time.
3887  *
3888  * This is designed to send multiple task management requests at the
3889  * same time to the fifo. If the fifo is full, the request is appended
3890  * to the delayed list and processed in a future completion.
3891  */
3892 static void
3893 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3894 {
3895 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3896 	u16 smid;
3897 	struct _tr_list *delayed_tr;
3898 
3899 	if (ioc->pci_error_recovery) {
3900 		dewtprintk(ioc,
3901 			   ioc_info(ioc, "%s: host reset in progress!\n",
3902 				    __func__));
3903 		return;
3904 	}
3905 
3906 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3907 	if (!smid) {
3908 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3909 		if (!delayed_tr)
3910 			return;
3911 		INIT_LIST_HEAD(&delayed_tr->list);
3912 		delayed_tr->handle = handle;
3913 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3914 		dewtprintk(ioc,
3915 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3916 				    handle));
3917 		return;
3918 	}
3919 
3920 	dewtprintk(ioc,
3921 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3922 			    handle, smid, ioc->tm_tr_volume_cb_idx));
3923 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3924 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3925 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3926 	mpi_request->DevHandle = cpu_to_le16(handle);
3927 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3928 	ioc->put_smid_hi_priority(ioc, smid, 0);
3929 }
3930 
3931 /**
3932  * _scsih_tm_volume_tr_complete - target reset completion
3933  * @ioc: per adapter object
3934  * @smid: system request message index
3935  * @msix_index: MSIX table index supplied by the OS
3936  * @reply: reply message frame(lower 32bit addr)
3937  * Context: interrupt time.
3938  *
3939  * Return: 1 meaning mf should be freed from _base_interrupt
3940  *         0 means the mf is freed from this function.
3941  */
3942 static u8
3943 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3944 	u8 msix_index, u32 reply)
3945 {
3946 	u16 handle;
3947 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3948 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3949 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3950 
3951 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
3952 		dewtprintk(ioc,
3953 			   ioc_info(ioc, "%s: host reset in progress!\n",
3954 				    __func__));
3955 		return 1;
3956 	}
3957 	if (unlikely(!mpi_reply)) {
3958 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3959 			__FILE__, __LINE__, __func__);
3960 		return 1;
3961 	}
3962 
3963 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3964 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3965 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3966 		dewtprintk(ioc,
3967 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3968 				   handle, le16_to_cpu(mpi_reply->DevHandle),
3969 				   smid));
3970 		return 0;
3971 	}
3972 
3973 	dewtprintk(ioc,
3974 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3975 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3976 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3977 			    le32_to_cpu(mpi_reply->TerminationCount)));
3978 
3979 	return _scsih_check_for_pending_tm(ioc, smid);
3980 }
3981 
3982 /**
3983  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3984  * @ioc: per adapter object
3985  * @smid: system request message index
3986  * @event: Event ID
3987  * @event_context: used to track events uniquely
3988  *
3989  * Context: processed in interrupt context.
3990  */
3991 static void
3992 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3993 				U32 event_context)
3994 {
3995 	Mpi2EventAckRequest_t *ack_request;
3996 	int i = smid - ioc->internal_smid;
3997 	unsigned long flags;
3998 
3999 	/* Without releasing the smid just update the
4000 	 * call back index and reuse the same smid for
4001 	 * processing this delayed request
4002 	 */
4003 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4004 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4005 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4006 
4007 	dewtprintk(ioc,
4008 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4009 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4010 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4011 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4012 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4013 	ack_request->Event = event;
4014 	ack_request->EventContext = event_context;
4015 	ack_request->VF_ID = 0;  /* TODO */
4016 	ack_request->VP_ID = 0;
4017 	ioc->put_smid_default(ioc, smid);
4018 }
4019 
4020 /**
4021  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4022  *				sas_io_unit_ctrl messages
4023  * @ioc: per adapter object
4024  * @smid: system request message index
4025  * @handle: device handle
4026  *
4027  * Context: processed in interrupt context.
4028  */
4029 static void
4030 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4031 					u16 smid, u16 handle)
4032 {
4033 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4034 	u32 ioc_state;
4035 	int i = smid - ioc->internal_smid;
4036 	unsigned long flags;
4037 
4038 	if (ioc->remove_host) {
4039 		dewtprintk(ioc,
4040 			   ioc_info(ioc, "%s: host has been removed\n",
4041 				    __func__));
4042 		return;
4043 	} else if (ioc->pci_error_recovery) {
4044 		dewtprintk(ioc,
4045 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4046 				    __func__));
4047 		return;
4048 	}
4049 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4050 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4051 		dewtprintk(ioc,
4052 			   ioc_info(ioc, "%s: host is not operational\n",
4053 				    __func__));
4054 		return;
4055 	}
4056 
4057 	/* Without releasing the smid just update the
4058 	 * call back index and reuse the same smid for
4059 	 * processing this delayed request
4060 	 */
4061 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4062 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4063 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4064 
4065 	dewtprintk(ioc,
4066 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4067 			    handle, smid, ioc->tm_sas_control_cb_idx));
4068 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4069 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4070 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4071 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4072 	mpi_request->DevHandle = cpu_to_le16(handle);
4073 	ioc->put_smid_default(ioc, smid);
4074 }
4075 
4076 /**
4077  * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4078  * @ioc: per adapter object
4079  * @smid: system request message index
4080  *
4081  * Context: Executed in interrupt context
4082  *
4083  * This will check delayed internal messages list, and process the
4084  * next request.
4085  *
4086  * Return: 1 meaning mf should be freed from _base_interrupt
4087  *         0 means the mf is freed from this function.
4088  */
4089 u8
4090 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4091 {
4092 	struct _sc_list *delayed_sc;
4093 	struct _event_ack_list *delayed_event_ack;
4094 
4095 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4096 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4097 						struct _event_ack_list, list);
4098 		_scsih_issue_delayed_event_ack(ioc, smid,
4099 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4100 		list_del(&delayed_event_ack->list);
4101 		kfree(delayed_event_ack);
4102 		return 0;
4103 	}
4104 
4105 	if (!list_empty(&ioc->delayed_sc_list)) {
4106 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4107 						struct _sc_list, list);
4108 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4109 						 delayed_sc->handle);
4110 		list_del(&delayed_sc->list);
4111 		kfree(delayed_sc);
4112 		return 0;
4113 	}
4114 	return 1;
4115 }
4116 
4117 /**
4118  * _scsih_check_for_pending_tm - check for pending task management
4119  * @ioc: per adapter object
4120  * @smid: system request message index
4121  *
4122  * This will check the delayed target reset list, and feed the
4123  * next request.
4124  *
4125  * Return: 1 meaning mf should be freed from _base_interrupt
4126  *         0 means the mf is freed from this function.
4127  */
4128 static u8
4129 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4130 {
4131 	struct _tr_list *delayed_tr;
4132 
4133 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4134 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4135 		    struct _tr_list, list);
4136 		mpt3sas_base_free_smid(ioc, smid);
4137 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4138 		list_del(&delayed_tr->list);
4139 		kfree(delayed_tr);
4140 		return 0;
4141 	}
4142 
4143 	if (!list_empty(&ioc->delayed_tr_list)) {
4144 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4145 		    struct _tr_list, list);
4146 		mpt3sas_base_free_smid(ioc, smid);
4147 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4148 		list_del(&delayed_tr->list);
4149 		kfree(delayed_tr);
4150 		return 0;
4151 	}
4152 
4153 	return 1;
4154 }
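
/*
 * A minimal sketch (not additional driver code) of the delayed-request
 * pattern used by the send/complete pairs above: when no smid is free the
 * handle is queued, and each completion recycles its smid to re-issue the
 * oldest queued request:
 *
 *	if (!smid) {
 *		delayed_tr->handle = handle;
 *		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
 *		return;
 *	}
 *	...
 *	// in _scsih_check_for_pending_tm():
 *	mpt3sas_base_free_smid(ioc, smid);
 *	_scsih_tm_tr_send(ioc, delayed_tr->handle);
 */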
4155 
4156 /**
4157  * _scsih_check_topo_delete_events - sanity check on topo events
4158  * @ioc: per adapter object
4159  * @event_data: the event data payload
4160  *
4161  * This routine was added to better handle cable breaker events.
4162  *
4163  * This handles the case where the driver receives multiple expander
4164  * add and delete events in a single shot.  When there is a delete event,
4165  * the routine will void any pending add events waiting in the event queue.
4166  */
4167 static void
4168 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4169 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4170 {
4171 	struct fw_event_work *fw_event;
4172 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4173 	u16 expander_handle;
4174 	struct _sas_node *sas_expander;
4175 	unsigned long flags;
4176 	int i, reason_code;
4177 	u16 handle;
4178 
4179 	for (i = 0 ; i < event_data->NumEntries; i++) {
4180 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4181 		if (!handle)
4182 			continue;
4183 		reason_code = event_data->PHY[i].PhyStatus &
4184 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4185 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4186 			_scsih_tm_tr_send(ioc, handle);
4187 	}
4188 
4189 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4190 	if (expander_handle < ioc->sas_hba.num_phys) {
4191 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4192 		return;
4193 	}
4194 	if (event_data->ExpStatus ==
4195 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4196 		/* put expander attached devices into blocking state */
4197 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4198 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4199 		    expander_handle);
4200 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4201 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4202 		do {
4203 			handle = find_first_bit(ioc->blocking_handles,
4204 			    ioc->facts.MaxDevHandle);
4205 			if (handle < ioc->facts.MaxDevHandle)
4206 				_scsih_block_io_device(ioc, handle);
4207 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4208 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4209 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4210 
4211 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4212 		return;
4213 
4214 	/* mark ignore flag for pending events */
4215 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4216 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4217 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4218 		    fw_event->ignore)
4219 			continue;
4220 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4221 				   fw_event->event_data;
4222 		if (local_event_data->ExpStatus ==
4223 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4224 		    local_event_data->ExpStatus ==
4225 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4226 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4227 			    expander_handle) {
4228 				dewtprintk(ioc,
4229 					   ioc_info(ioc, "setting ignoring flag\n"));
4230 				fw_event->ignore = 1;
4231 			}
4232 		}
4233 	}
4234 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4235 }
4236 
4237 /**
4238  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4239  * events
4240  * @ioc: per adapter object
4241  * @event_data: the event data payload
4242  *
4243  * This handles the case where the driver receives multiple switch
4244  * or device add and delete events in a single shot.  When there
4245  * is a delete event, the routine will void any pending add
4246  * events waiting in the event queue.
4247  */
4248 static void
4249 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4250 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4251 {
4252 	struct fw_event_work *fw_event;
4253 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4254 	unsigned long flags;
4255 	int i, reason_code;
4256 	u16 handle, switch_handle;
4257 
4258 	for (i = 0; i < event_data->NumEntries; i++) {
4259 		handle =
4260 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4261 		if (!handle)
4262 			continue;
4263 		reason_code = event_data->PortEntry[i].PortStatus;
4264 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4265 			_scsih_tm_tr_send(ioc, handle);
4266 	}
4267 
4268 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4269 	if (!switch_handle) {
4270 		_scsih_block_io_to_pcie_children_attached_directly(
4271 							ioc, event_data);
4272 		return;
4273 	}
4274 	/* TODO: cascaded PCIe switch removal is not supported yet */
4275 	if ((event_data->SwitchStatus
4276 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4277 		(event_data->SwitchStatus ==
4278 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4279 		_scsih_block_io_to_pcie_children_attached_directly(
4280 							ioc, event_data);
4281 
4282 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4283 		return;
4284 
4285 	/* mark ignore flag for pending events */
4286 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4287 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4288 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4289 			fw_event->ignore)
4290 			continue;
4291 		local_event_data =
4292 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4293 			fw_event->event_data;
4294 		if (local_event_data->SwitchStatus ==
4295 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4296 		    local_event_data->SwitchStatus ==
4297 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4298 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4299 				switch_handle) {
4300 				dewtprintk(ioc,
4301 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4302 				fw_event->ignore = 1;
4303 			}
4304 		}
4305 	}
4306 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4307 }
4308 
4309 /**
4310  * _scsih_set_volume_delete_flag - set the volume delete flag
4311  * @ioc: per adapter object
4312  * @handle: device handle
4313  *
4314  * Mark the volume's target as deleted so that no further IO is sent to it.
4315  */
4316 static void
4317 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4318 {
4319 	struct _raid_device *raid_device;
4320 	struct MPT3SAS_TARGET *sas_target_priv_data;
4321 	unsigned long flags;
4322 
4323 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4324 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4325 	if (raid_device && raid_device->starget &&
4326 	    raid_device->starget->hostdata) {
4327 		sas_target_priv_data =
4328 		    raid_device->starget->hostdata;
4329 		sas_target_priv_data->deleted = 1;
4330 		dewtprintk(ioc,
4331 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4332 				    handle, (u64)raid_device->wwid));
4333 	}
4334 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4335 }
4336 
4337 /**
4338  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4339  * @handle: input handle
4340  * @a: handle for volume a
4341  * @b: handle for volume b
4342  *
4343  * IR firmware only supports two raid volumes.  The purpose of this
4344  * routine is to record the volume handle in either a or b, provided the
4345  * given handle is non-zero and has not already been recorded.
4346  */
4347 static void
4348 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4349 {
4350 	if (!handle || handle == *a || handle == *b)
4351 		return;
4352 	if (!*a)
4353 		*a = handle;
4354 	else if (!*b)
4355 		*b = handle;
4356 }
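
/*
 * Worked example of the helper above (handles are hypothetical, for
 * illustration only):
 *
 *	u16 a = 0, b = 0;
 *	_scsih_set_volume_handle_for_tr(0x0009, &a, &b);	// a = 0x0009
 *	_scsih_set_volume_handle_for_tr(0x000a, &a, &b);	// b = 0x000a
 *	_scsih_set_volume_handle_for_tr(0x0009, &a, &b);	// duplicate, ignored
 *	_scsih_set_volume_handle_for_tr(0x000b, &a, &b);	// both slots taken, dropped
 */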
4357 
4358 /**
4359  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4360  * @ioc: per adapter object
4361  * @event_data: the event data payload
4362  * Context: interrupt time.
4363  *
4364  * This routine will send a target reset to the volume, followed by target
4365  * resets to the PDs. This is called when a PD has been removed, or a
4366  * volume has been deleted or removed. When the target reset is sent
4367  * to volume, the PD target resets need to be queued to start upon
4368  * completion of the volume target reset.
4369  */
4370 static void
4371 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4372 	Mpi2EventDataIrConfigChangeList_t *event_data)
4373 {
4374 	Mpi2EventIrConfigElement_t *element;
4375 	int i;
4376 	u16 handle, volume_handle, a, b;
4377 	struct _tr_list *delayed_tr;
4378 
4379 	a = 0;
4380 	b = 0;
4381 
4382 	if (ioc->is_warpdrive)
4383 		return;
4384 
4385 	/* Volume Resets for Deleted or Removed */
4386 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4387 	for (i = 0; i < event_data->NumElements; i++, element++) {
4388 		if (le32_to_cpu(event_data->Flags) &
4389 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4390 			continue;
4391 		if (element->ReasonCode ==
4392 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4393 		    element->ReasonCode ==
4394 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4395 			volume_handle = le16_to_cpu(element->VolDevHandle);
4396 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4397 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4398 		}
4399 	}
4400 
4401 	/* Volume Resets for UNHIDE events */
4402 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4403 	for (i = 0; i < event_data->NumElements; i++, element++) {
4404 		if (le32_to_cpu(event_data->Flags) &
4405 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4406 			continue;
4407 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4408 			volume_handle = le16_to_cpu(element->VolDevHandle);
4409 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4410 		}
4411 	}
4412 
4413 	if (a)
4414 		_scsih_tm_tr_volume_send(ioc, a);
4415 	if (b)
4416 		_scsih_tm_tr_volume_send(ioc, b);
4417 
4418 	/* PD target resets */
4419 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4420 	for (i = 0; i < event_data->NumElements; i++, element++) {
4421 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4422 			continue;
4423 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4424 		volume_handle = le16_to_cpu(element->VolDevHandle);
4425 		clear_bit(handle, ioc->pd_handles);
4426 		if (!volume_handle)
4427 			_scsih_tm_tr_send(ioc, handle);
4428 		else if (volume_handle == a || volume_handle == b) {
4429 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4430 			BUG_ON(!delayed_tr);
4431 			INIT_LIST_HEAD(&delayed_tr->list);
4432 			delayed_tr->handle = handle;
4433 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4434 			dewtprintk(ioc,
4435 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4436 					    handle));
4437 		} else
4438 			_scsih_tm_tr_send(ioc, handle);
4439 	}
4440 }
4441 
4442 
4443 /**
4444  * _scsih_check_volume_delete_events - set delete flag for volumes
4445  * @ioc: per adapter object
4446  * @event_data: the event data payload
4447  * Context: interrupt time.
4448  *
4449  * This will handle the case when the cable connected to the entire volume
4450  * is pulled. We take care of setting the deleted flag so that normal IO
4451  * will not be sent.
4452  */
4453 static void
4454 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4455 	Mpi2EventDataIrVolume_t *event_data)
4456 {
4457 	u32 state;
4458 
4459 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4460 		return;
4461 	state = le32_to_cpu(event_data->NewValue);
4462 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4463 	    MPI2_RAID_VOL_STATE_FAILED)
4464 		_scsih_set_volume_delete_flag(ioc,
4465 		    le16_to_cpu(event_data->VolDevHandle));
4466 }
4467 
4468 /**
4469  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4470  * @ioc: per adapter object
4471  * @event_data: the temp threshold event data
4472  * Context: interrupt time.
4473  */
4474 static void
4475 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4476 	Mpi2EventDataTemperature_t *event_data)
4477 {
4478 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4479 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4480 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4481 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4482 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4483 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4484 			event_data->SensorNum);
4485 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4486 			event_data->CurrentTemperature);
4487 	}
4488 }
4489 
4490 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4491 {
4492 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4493 
4494 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4495 		return 0;
4496 
4497 	if (pending)
4498 		return test_and_set_bit(0, &priv->ata_command_pending);
4499 
4500 	clear_bit(0, &priv->ata_command_pending);
4501 	return 0;
4502 }
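
/*
 * The helper above is a lockless single-slot gate for ATA passthrough
 * (SATL) commands.  scsih_qcmd() below uses it like this (copied from the
 * code for reference):
 *
 *	do {
 *		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
 *			return SCSI_MLQUEUE_DEVICE_BUSY;
 *	} while (_scsih_set_satl_pending(scmd, true));
 *
 * test_and_set_bit() makes the check-and-claim atomic, so at most one
 * ATA_12/ATA_16 command per device is outstanding; the bit is cleared
 * again via _scsih_set_satl_pending(scmd, false) on the error and flush
 * paths shown in this file.
 */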
4503 
4504 /**
4505  * _scsih_flush_running_cmds - complete outstanding commands
4506  * @ioc: per adapter object
4507  *
4508  * Flush out all pending scmds following a host reset; the outstanding
4509  * IO is dropped to the floor and completed back to the midlayer.
4510  */
4511 static void
4512 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4513 {
4514 	struct scsi_cmnd *scmd;
4515 	struct scsiio_tracker *st;
4516 	u16 smid;
4517 	int count = 0;
4518 
4519 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4520 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4521 		if (!scmd)
4522 			continue;
4523 		count++;
4524 		_scsih_set_satl_pending(scmd, false);
4525 		st = scsi_cmd_priv(scmd);
4526 		mpt3sas_base_clear_st(ioc, st);
4527 		scsi_dma_unmap(scmd);
4528 		if (ioc->pci_error_recovery || ioc->remove_host)
4529 			scmd->result = DID_NO_CONNECT << 16;
4530 		else
4531 			scmd->result = DID_RESET << 16;
4532 		scmd->scsi_done(scmd);
4533 	}
4534 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4535 }
4536 
4537 /**
4538  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4539  * @ioc: per adapter object
4540  * @scmd: pointer to scsi command object
4541  * @mpi_request: pointer to the SCSI_IO request message frame
4542  *
4543  * Supports protection types 1, 2 and 3.
4544  */
4545 static void
4546 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4547 	Mpi25SCSIIORequest_t *mpi_request)
4548 {
4549 	u16 eedp_flags;
4550 	unsigned char prot_op = scsi_get_prot_op(scmd);
4551 	unsigned char prot_type = scsi_get_prot_type(scmd);
4552 	Mpi25SCSIIORequest_t *mpi_request_3v =
4553 	   (Mpi25SCSIIORequest_t *)mpi_request;
4554 
4555 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4556 		return;
4557 
4558 	if (prot_op ==  SCSI_PROT_READ_STRIP)
4559 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4560 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
4561 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4562 	else
4563 		return;
4564 
4565 	switch (prot_type) {
4566 	case SCSI_PROT_DIF_TYPE1:
4567 	case SCSI_PROT_DIF_TYPE2:
4568 
4569 		/*
4570 		* enable ref/guard checking
4571 		* auto increment ref tag
4572 		*/
4573 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4574 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4575 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4576 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4577 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4578 		break;
4579 
4580 	case SCSI_PROT_DIF_TYPE3:
4581 
4582 		/*
4583 		* enable guard checking
4584 		*/
4585 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4586 
4587 		break;
4588 	}
4589 
4590 	mpi_request_3v->EEDPBlockSize =
4591 	    cpu_to_le16(scmd->device->sector_size);
4592 
4593 	if (ioc->is_gen35_ioc)
4594 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4595 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4596 }
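
/*
 * Summary of the EEDP flag selection above (derived from the code, for
 * quick reference):
 *
 *	prot_op READ_STRIP	-> EEDPFLAGS_CHECK_REMOVE_OP
 *	prot_op WRITE_INSERT	-> EEDPFLAGS_INSERT_OP
 *	DIF type 1 or 2		-> also CHECK_REFTAG | INC_PRI_REFTAG |
 *				   CHECK_GUARD, with PrimaryReferenceTag set
 *				   from t10_pi_ref_tag(scmd->request)
 *	DIF type 3		-> also CHECK_GUARD only
 *	gen3.5 IOCs		-> also APPTAG_DISABLE_MODE
 */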
4597 
4598 /**
4599  * _scsih_eedp_error_handling - return sense code for EEDP errors
4600  * @scmd: pointer to scsi command object
4601  * @ioc_status: ioc status
4602  */
4603 static void
4604 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4605 {
4606 	u8 ascq;
4607 
4608 	switch (ioc_status) {
4609 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4610 		ascq = 0x01;
4611 		break;
4612 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4613 		ascq = 0x02;
4614 		break;
4615 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4616 		ascq = 0x03;
4617 		break;
4618 	default:
4619 		ascq = 0x00;
4620 		break;
4621 	}
4622 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4623 	    ascq);
4624 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4625 	    SAM_STAT_CHECK_CONDITION;
4626 }
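
/*
 * The sense data built above reports ILLEGAL REQUEST with ASC 0x10 and an
 * ASCQ identifying the failing protection field (0x01 guard, 0x02
 * application tag, 0x03 reference tag), and the command is completed with
 * DID_ABORT + CHECK CONDITION so the midlayer sees a protection error.
 */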
4627 
4628 /**
4629  * scsih_qcmd - main scsi request entry point
4630  * @shost: SCSI host pointer
4631  * @scmd: pointer to scsi command object
4632  *
4633  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4634  *
4635  * Return: 0 on success.  If there's a failure, return either:
4636  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4637  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4638  */
4639 static int
4640 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4641 {
4642 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4643 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4644 	struct MPT3SAS_TARGET *sas_target_priv_data;
4645 	struct _raid_device *raid_device;
4646 	struct request *rq = scmd->request;
4647 	int class;
4648 	Mpi25SCSIIORequest_t *mpi_request;
4649 	struct _pcie_device *pcie_device = NULL;
4650 	u32 mpi_control;
4651 	u16 smid;
4652 	u16 handle;
4653 
4654 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4655 		scsi_print_command(scmd);
4656 
4657 	sas_device_priv_data = scmd->device->hostdata;
4658 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4659 		scmd->result = DID_NO_CONNECT << 16;
4660 		scmd->scsi_done(scmd);
4661 		return 0;
4662 	}
4663 
4664 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4665 		scmd->result = DID_NO_CONNECT << 16;
4666 		scmd->scsi_done(scmd);
4667 		return 0;
4668 	}
4669 
4670 	sas_target_priv_data = sas_device_priv_data->sas_target;
4671 
4672 	/* invalid device handle */
4673 	handle = sas_target_priv_data->handle;
4674 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4675 		scmd->result = DID_NO_CONNECT << 16;
4676 		scmd->scsi_done(scmd);
4677 		return 0;
4678 	}
4679 
4680 
4681 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4682 		/* host recovery or link resets sent via IOCTLs */
4683 		return SCSI_MLQUEUE_HOST_BUSY;
4684 	} else if (sas_target_priv_data->deleted) {
4685 		/* device has been deleted */
4686 		scmd->result = DID_NO_CONNECT << 16;
4687 		scmd->scsi_done(scmd);
4688 		return 0;
4689 	} else if (sas_target_priv_data->tm_busy ||
4690 		   sas_device_priv_data->block) {
4691 		/* device busy with task management */
4692 		return SCSI_MLQUEUE_DEVICE_BUSY;
4693 	}
4694 
4695 	/*
4696 	 * Bug workaround for firmware SATL handling.  The loop
4697 	 * is based on atomic operations and ensures consistency
4698 	 * since we're lockless at this point.
4699 	 */
4700 	do {
4701 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4702 			return SCSI_MLQUEUE_DEVICE_BUSY;
4703 	} while (_scsih_set_satl_pending(scmd, true));
4704 
4705 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4706 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
4707 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4708 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4709 	else
4710 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4711 
4712 	/* set tags */
4713 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4714 	/* NCQ Prio supported, make sure control indicates high priority */
4715 	if (sas_device_priv_data->ncq_prio_enable) {
4716 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4717 		if (class == IOPRIO_CLASS_RT)
4718 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4719 	}
4720 	/* Make sure Device is not raid volume.
4721 	 * We do not expose raid functionality to upper layer for warpdrive.
4722 	 */
4723 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4724 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
4725 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4726 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4727 
4728 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4729 	if (!smid) {
4730 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4731 		_scsih_set_satl_pending(scmd, false);
4732 		goto out;
4733 	}
4734 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4735 	memset(mpi_request, 0, ioc->request_sz);
4736 	_scsih_setup_eedp(ioc, scmd, mpi_request);
4737 
4738 	if (scmd->cmd_len == 32)
4739 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4740 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4741 	if (sas_device_priv_data->sas_target->flags &
4742 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
4743 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4744 	else
4745 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4746 	mpi_request->DevHandle = cpu_to_le16(handle);
4747 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4748 	mpi_request->Control = cpu_to_le32(mpi_control);
4749 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4750 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4751 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4752 	mpi_request->SenseBufferLowAddress =
4753 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4754 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4755 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4756 	    mpi_request->LUN);
4757 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4758 
4759 	if (mpi_request->DataLength) {
4760 		pcie_device = sas_target_priv_data->pcie_dev;
4761 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4762 			mpt3sas_base_free_smid(ioc, smid);
4763 			_scsih_set_satl_pending(scmd, false);
4764 			goto out;
4765 		}
4766 	} else
4767 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4768 
4769 	raid_device = sas_target_priv_data->raid_device;
4770 	if (raid_device && raid_device->direct_io_enabled)
4771 		mpt3sas_setup_direct_io(ioc, scmd,
4772 			raid_device, mpi_request);
4773 
4774 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4775 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4776 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4777 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4778 			ioc->put_smid_fast_path(ioc, smid, handle);
4779 		} else
4780 			ioc->put_smid_scsi_io(ioc, smid,
4781 			    le16_to_cpu(mpi_request->DevHandle));
4782 	} else
4783 		ioc->put_smid_default(ioc, smid);
4784 	return 0;
4785 
4786  out:
4787 	return SCSI_MLQUEUE_HOST_BUSY;
4788 }
4789 
4790 /**
4791  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4792  * @sense_buffer: sense data returned by target
4793  * @data: normalized skey/asc/ascq
4794  */
4795 static void
4796 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4797 {
4798 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
4799 		/* descriptor format */
4800 		data->skey = sense_buffer[1] & 0x0F;
4801 		data->asc = sense_buffer[2];
4802 		data->ascq = sense_buffer[3];
4803 	} else {
4804 		/* fixed format */
4805 		data->skey = sense_buffer[2] & 0x0F;
4806 		data->asc = sense_buffer[12];
4807 		data->ascq = sense_buffer[13];
4808 	}
4809 }
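
/*
 * Byte offsets used above, for reference (a response code of 0x72 or
 * higher selects descriptor format; lower codes are fixed format):
 *
 *	descriptor:	skey = byte 1 [3:0], asc = byte 2,  ascq = byte 3
 *	fixed:		skey = byte 2 [3:0], asc = byte 12, ascq = byte 13
 */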
4810 
4811 /**
4812  * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
4813  * @ioc: per adapter object
4814  * @scmd: pointer to scsi command object
4815  * @mpi_reply: reply mf payload returned from firmware
4816  * @smid: system request message index
4817  *
4818  * scsi_status - SCSI Status code returned from target device
4819  * scsi_state - state info associated with SCSI_IO determined by ioc
4820  * ioc_status - ioc supplied status info
4821  */
4822 static void
4823 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4824 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4825 {
4826 	u32 response_info;
4827 	u8 *response_bytes;
4828 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4829 	    MPI2_IOCSTATUS_MASK;
4830 	u8 scsi_state = mpi_reply->SCSIState;
4831 	u8 scsi_status = mpi_reply->SCSIStatus;
4832 	char *desc_ioc_state = NULL;
4833 	char *desc_scsi_status = NULL;
4834 	char *desc_scsi_state = ioc->tmp_string;
4835 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4836 	struct _sas_device *sas_device = NULL;
4837 	struct _pcie_device *pcie_device = NULL;
4838 	struct scsi_target *starget = scmd->device->sdev_target;
4839 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4840 	char *device_str = NULL;
4841 
4842 	if (!priv_target)
4843 		return;
4844 	if (ioc->hide_ir_msg)
4845 		device_str = "WarpDrive";
4846 	else
4847 		device_str = "volume";
4848 
4849 	if (log_info == 0x31170000)
4850 		return;
4851 
4852 	switch (ioc_status) {
4853 	case MPI2_IOCSTATUS_SUCCESS:
4854 		desc_ioc_state = "success";
4855 		break;
4856 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
4857 		desc_ioc_state = "invalid function";
4858 		break;
4859 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4860 		desc_ioc_state = "scsi recovered error";
4861 		break;
4862 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4863 		desc_ioc_state = "scsi invalid dev handle";
4864 		break;
4865 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4866 		desc_ioc_state = "scsi device not there";
4867 		break;
4868 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4869 		desc_ioc_state = "scsi data overrun";
4870 		break;
4871 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4872 		desc_ioc_state = "scsi data underrun";
4873 		break;
4874 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4875 		desc_ioc_state = "scsi io data error";
4876 		break;
4877 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4878 		desc_ioc_state = "scsi protocol error";
4879 		break;
4880 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4881 		desc_ioc_state = "scsi task terminated";
4882 		break;
4883 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4884 		desc_ioc_state = "scsi residual mismatch";
4885 		break;
4886 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4887 		desc_ioc_state = "scsi task mgmt failed";
4888 		break;
4889 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4890 		desc_ioc_state = "scsi ioc terminated";
4891 		break;
4892 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4893 		desc_ioc_state = "scsi ext terminated";
4894 		break;
4895 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4896 		desc_ioc_state = "eedp guard error";
4897 		break;
4898 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4899 		desc_ioc_state = "eedp ref tag error";
4900 		break;
4901 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4902 		desc_ioc_state = "eedp app tag error";
4903 		break;
4904 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4905 		desc_ioc_state = "insufficient power";
4906 		break;
4907 	default:
4908 		desc_ioc_state = "unknown";
4909 		break;
4910 	}
4911 
4912 	switch (scsi_status) {
4913 	case MPI2_SCSI_STATUS_GOOD:
4914 		desc_scsi_status = "good";
4915 		break;
4916 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
4917 		desc_scsi_status = "check condition";
4918 		break;
4919 	case MPI2_SCSI_STATUS_CONDITION_MET:
4920 		desc_scsi_status = "condition met";
4921 		break;
4922 	case MPI2_SCSI_STATUS_BUSY:
4923 		desc_scsi_status = "busy";
4924 		break;
4925 	case MPI2_SCSI_STATUS_INTERMEDIATE:
4926 		desc_scsi_status = "intermediate";
4927 		break;
4928 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4929 		desc_scsi_status = "intermediate condmet";
4930 		break;
4931 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4932 		desc_scsi_status = "reservation conflict";
4933 		break;
4934 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4935 		desc_scsi_status = "command terminated";
4936 		break;
4937 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
4938 		desc_scsi_status = "task set full";
4939 		break;
4940 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
4941 		desc_scsi_status = "aca active";
4942 		break;
4943 	case MPI2_SCSI_STATUS_TASK_ABORTED:
4944 		desc_scsi_status = "task aborted";
4945 		break;
4946 	default:
4947 		desc_scsi_status = "unknown";
4948 		break;
4949 	}
4950 
4951 	desc_scsi_state[0] = '\0';
4952 	if (!scsi_state)
4953 		desc_scsi_state = " ";
4954 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4955 		strcat(desc_scsi_state, "response info ");
4956 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4957 		strcat(desc_scsi_state, "state terminated ");
4958 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4959 		strcat(desc_scsi_state, "no status ");
4960 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4961 		strcat(desc_scsi_state, "autosense failed ");
4962 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4963 		strcat(desc_scsi_state, "autosense valid ");
4964 
4965 	scsi_print_command(scmd);
4966 
4967 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4968 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4969 			 device_str, (u64)priv_target->sas_address);
4970 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4971 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4972 		if (pcie_device) {
4973 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4974 				 (u64)pcie_device->wwid, pcie_device->port_num);
4975 			if (pcie_device->enclosure_handle != 0)
4976 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4977 					 (u64)pcie_device->enclosure_logical_id,
4978 					 pcie_device->slot);
4979 			if (pcie_device->connector_name[0])
4980 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4981 					 pcie_device->enclosure_level,
4982 					 pcie_device->connector_name);
4983 			pcie_device_put(pcie_device);
4984 		}
4985 	} else {
4986 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4987 		if (sas_device) {
4988 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4989 				 (u64)sas_device->sas_address, sas_device->phy);
4990 
4991 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
4992 			    NULL, NULL);
4993 
4994 			sas_device_put(sas_device);
4995 		}
4996 	}
4997 
4998 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4999 		 le16_to_cpu(mpi_reply->DevHandle),
5000 		 desc_ioc_state, ioc_status, smid);
5001 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5002 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5003 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5004 		 le16_to_cpu(mpi_reply->TaskTag),
5005 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5006 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5007 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5008 
5009 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5010 		struct sense_info data;
5011 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5012 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5013 			 data.skey, data.asc, data.ascq,
5014 			 le32_to_cpu(mpi_reply->SenseCount));
5015 	}
5016 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5017 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5018 		response_bytes = (u8 *)&response_info;
5019 		_scsih_response_code(ioc, response_bytes[0]);
5020 	}
5021 }
5022 
5023 /**
5024  * _scsih_turn_on_pfa_led - illuminate PFA LED
5025  * @ioc: per adapter object
5026  * @handle: device handle
5027  * Context: process
5028  */
5029 static void
5030 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5031 {
5032 	Mpi2SepReply_t mpi_reply;
5033 	Mpi2SepRequest_t mpi_request;
5034 	struct _sas_device *sas_device;
5035 
5036 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5037 	if (!sas_device)
5038 		return;
5039 
5040 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5041 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5042 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5043 	mpi_request.SlotStatus =
5044 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5045 	mpi_request.DevHandle = cpu_to_le16(handle);
5046 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5047 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5048 	    &mpi_request)) != 0) {
5049 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5050 			__FILE__, __LINE__, __func__);
5051 		goto out;
5052 	}
5053 	sas_device->pfa_led_on = 1;
5054 
5055 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5056 		dewtprintk(ioc,
5057 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5058 				    le16_to_cpu(mpi_reply.IOCStatus),
5059 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5060 		goto out;
5061 	}
5062 out:
5063 	sas_device_put(sas_device);
5064 }
5065 
5066 /**
5067  * _scsih_turn_off_pfa_led - turn off the PFA LED
5068  * @ioc: per adapter object
5069  * @sas_device: sas device whose PFA LED has to be turned off
5070  * Context: process
5071  */
5072 static void
5073 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5074 	struct _sas_device *sas_device)
5075 {
5076 	Mpi2SepReply_t mpi_reply;
5077 	Mpi2SepRequest_t mpi_request;
5078 
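	/*
	 * The device handle may no longer be valid by the time the LED is
	 * cleared (this is called on device removal), so the SEP request
	 * addresses the slot through its enclosure instead of the handle.
	 */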
5079 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5080 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5081 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5082 	mpi_request.SlotStatus = 0;
5083 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5084 	mpi_request.DevHandle = 0;
5085 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5086 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5087 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5088 		&mpi_request)) != 0) {
5089 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5090 			__FILE__, __LINE__, __func__);
5091 		return;
5092 	}
5093 
5094 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5095 		dewtprintk(ioc,
5096 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5097 				    le16_to_cpu(mpi_reply.IOCStatus),
5098 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5099 		return;
5100 	}
5101 }
5102 
5103 /**
5104  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5105  * @ioc: per adapter object
5106  * @handle: device handle
5107  * Context: interrupt.
5108  */
5109 static void
5110 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5111 {
5112 	struct fw_event_work *fw_event;
5113 
5114 	fw_event = alloc_fw_event_work(0);
5115 	if (!fw_event)
5116 		return;
5117 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5118 	fw_event->device_handle = handle;
5119 	fw_event->ioc = ioc;
5120 	_scsih_fw_event_add(ioc, fw_event);
5121 	fw_event_work_put(fw_event);
5122 }
5123 
5124 /**
5125  * _scsih_smart_predicted_fault - process smart errors
5126  * @ioc: per adapter object
5127  * @handle: device handle
5128  * Context: interrupt.
5129  */
5130 static void
5131 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5132 {
5133 	struct scsi_target *starget;
5134 	struct MPT3SAS_TARGET *sas_target_priv_data;
5135 	Mpi2EventNotificationReply_t *event_reply;
5136 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5137 	struct _sas_device *sas_device;
5138 	ssize_t sz;
5139 	unsigned long flags;
5140 
5141 	/* only handle non-raid devices */
5142 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5143 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5144 	if (!sas_device)
5145 		goto out_unlock;
5146 
5147 	starget = sas_device->starget;
5148 	sas_target_priv_data = starget->hostdata;
5149 
5150 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5151 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5152 		goto out_unlock;
5153 
5154 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5155 
5156 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5157 
5158 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5159 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5160 
5161 	/* insert into event log */
5162 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5163 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5164 	event_reply = kzalloc(sz, GFP_ATOMIC);
5165 	if (!event_reply) {
5166 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5167 			__FILE__, __LINE__, __func__);
5168 		goto out;
5169 	}
5170 
5171 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5172 	event_reply->Event =
5173 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5174 	event_reply->MsgLength = sz/4;
5175 	event_reply->EventDataLength =
5176 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5177 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5178 	    event_reply->EventData;
5179 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
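	/* ASC 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED */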
5180 	event_data->ASC = 0x5D;
5181 	event_data->DevHandle = cpu_to_le16(handle);
5182 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5183 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5184 	kfree(event_reply);
5185 out:
5186 	if (sas_device)
5187 		sas_device_put(sas_device);
5188 	return;
5189 
5190 out_unlock:
5191 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5192 	goto out;
5193 }
5194 
5195 /**
5196  * _scsih_io_done - scsi request callback
5197  * @ioc: per adapter object
5198  * @smid: system request message index
5199  * @msix_index: MSIX table index supplied by the OS
5200  * @reply: reply message frame(lower 32bit addr)
5201  *
5202  * Callback handler when using _scsih_qcmd.
5203  *
5204  * Return: 1 meaning mf should be freed from _base_interrupt
5205  *         0 means the mf is freed from this function.
5206  */
5207 static u8
5208 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5209 {
5210 	Mpi25SCSIIORequest_t *mpi_request;
5211 	Mpi2SCSIIOReply_t *mpi_reply;
5212 	struct scsi_cmnd *scmd;
5213 	struct scsiio_tracker *st;
5214 	u16 ioc_status;
5215 	u32 xfer_cnt;
5216 	u8 scsi_state;
5217 	u8 scsi_status;
5218 	u32 log_info;
5219 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5220 	u32 response_code = 0;
5221 
5222 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5223 
5224 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5225 	if (scmd == NULL)
5226 		return 1;
5227 
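	/*
	 * Clear the SATL-pending flag (tracked only for ATA_12/ATA_16
	 * pass-through) so the next SATL command to this device is not
	 * rejected as busy.
	 */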
5228 	_scsih_set_satl_pending(scmd, false);
5229 
5230 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5231 
5232 	if (mpi_reply == NULL) {
5233 		scmd->result = DID_OK << 16;
5234 		goto out;
5235 	}
5236 
5237 	sas_device_priv_data = scmd->device->hostdata;
5238 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5239 	     sas_device_priv_data->sas_target->deleted) {
5240 		scmd->result = DID_NO_CONNECT << 16;
5241 		goto out;
5242 	}
5243 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5244 
5245 	/*
5246 	 * WARPDRIVE: If direct_io is set then it is directIO,
5247 	 * the failed direct I/O should be redirected to volume
5248 	 */
5249 	st = scsi_cmd_priv(scmd);
5250 	if (st->direct_io &&
5251 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5252 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5253 		st->direct_io = 0;
5254 		st->scmd = scmd;
5255 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5256 		mpi_request->DevHandle =
5257 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5258 		ioc->put_smid_scsi_io(ioc, smid,
5259 		    sas_device_priv_data->sas_target->handle);
5260 		return 0;
5261 	}
5262 	/* turning off TLR */
5263 	scsi_state = mpi_reply->SCSIState;
5264 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5265 		response_code =
5266 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5267 	if (!sas_device_priv_data->tlr_snoop_check) {
5268 		sas_device_priv_data->tlr_snoop_check++;
5269 		if ((!ioc->is_warpdrive &&
5270 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5271 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5272 		    && sas_is_tlr_enabled(scmd->device) &&
5273 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5274 			sas_disable_tlr(scmd->device);
5275 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5276 		}
5277 	}
5278 
5279 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5280 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5281 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5282 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5283 	else
5284 		log_info = 0;
5285 	ioc_status &= MPI2_IOCSTATUS_MASK;
5286 	scsi_status = mpi_reply->SCSIStatus;
5287 
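	/*
	 * A zero-byte DATA_UNDERRUN completion that carries BUSY,
	 * RESERVATION CONFLICT or TASK SET FULL is treated as a successful
	 * IOC status so the raw SCSI status is passed up to the midlayer.
	 */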
5288 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5289 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5290 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5291 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5292 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5293 	}
5294 
5295 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5296 		struct sense_info data;
5297 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5298 		    smid);
5299 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5300 		    le32_to_cpu(mpi_reply->SenseCount));
5301 		memcpy(scmd->sense_buffer, sense_data, sz);
5302 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5303 		/* failure prediction threshold exceeded */
5304 		if (data.asc == 0x5D)
5305 			_scsih_smart_predicted_fault(ioc,
5306 			    le16_to_cpu(mpi_reply->DevHandle));
5307 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5308 
5309 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5310 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5311 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5312 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5313 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5314 	}
5315 	switch (ioc_status) {
5316 	case MPI2_IOCSTATUS_BUSY:
5317 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5318 		scmd->result = SAM_STAT_BUSY;
5319 		break;
5320 
5321 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5322 		scmd->result = DID_NO_CONNECT << 16;
5323 		break;
5324 
5325 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5326 		if (sas_device_priv_data->block) {
5327 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5328 			goto out;
5329 		}
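		/*
		 * Firmware-specific loginfo: give up and offline the device
		 * after a couple of retries, otherwise retry with a soft
		 * error and expect a unit attention.
		 */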
5330 		if (log_info == 0x31110630) {
5331 			if (scmd->retries > 2) {
5332 				scmd->result = DID_NO_CONNECT << 16;
5333 				scsi_device_set_state(scmd->device,
5334 				    SDEV_OFFLINE);
5335 			} else {
5336 				scmd->result = DID_SOFT_ERROR << 16;
5337 				scmd->device->expecting_cc_ua = 1;
5338 			}
5339 			break;
5340 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5341 			scmd->result = DID_RESET << 16;
5342 			break;
5343 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5344 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5345 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5346 			scmd->result = DID_RESET << 16;
5347 			break;
5348 		}
5349 		scmd->result = DID_SOFT_ERROR << 16;
5350 		break;
5351 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5352 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5353 		scmd->result = DID_RESET << 16;
5354 		break;
5355 
5356 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5357 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5358 			scmd->result = DID_SOFT_ERROR << 16;
5359 		else
5360 			scmd->result = (DID_OK << 16) | scsi_status;
5361 		break;
5362 
5363 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5364 		scmd->result = (DID_OK << 16) | scsi_status;
5365 
5366 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5367 			break;
5368 
5369 		if (xfer_cnt < scmd->underflow) {
5370 			if (scsi_status == SAM_STAT_BUSY)
5371 				scmd->result = SAM_STAT_BUSY;
5372 			else
5373 				scmd->result = DID_SOFT_ERROR << 16;
5374 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5375 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5376 			scmd->result = DID_SOFT_ERROR << 16;
5377 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5378 			scmd->result = DID_RESET << 16;
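		/*
		 * A zero-byte REPORT LUNS is converted into a CHECK CONDITION
		 * with ILLEGAL REQUEST sense (ASC 0x20, INVALID COMMAND
		 * OPERATION CODE).
		 */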
5379 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5380 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5381 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5382 			scmd->result = (DRIVER_SENSE << 24) |
5383 			    SAM_STAT_CHECK_CONDITION;
5384 			scmd->sense_buffer[0] = 0x70;
5385 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5386 			scmd->sense_buffer[12] = 0x20;
5387 			scmd->sense_buffer[13] = 0;
5388 		}
5389 		break;
5390 
5391 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5392 		scsi_set_resid(scmd, 0);
5393 		/* fall through */
5394 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5395 	case MPI2_IOCSTATUS_SUCCESS:
5396 		scmd->result = (DID_OK << 16) | scsi_status;
5397 		if (response_code ==
5398 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5399 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5400 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5401 			scmd->result = DID_SOFT_ERROR << 16;
5402 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5403 			scmd->result = DID_RESET << 16;
5404 		break;
5405 
5406 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5407 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5408 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5409 		_scsih_eedp_error_handling(scmd, ioc_status);
5410 		break;
5411 
5412 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5413 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5414 	case MPI2_IOCSTATUS_INVALID_SGL:
5415 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5416 	case MPI2_IOCSTATUS_INVALID_FIELD:
5417 	case MPI2_IOCSTATUS_INVALID_STATE:
5418 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5419 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5420 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5421 	default:
5422 		scmd->result = DID_SOFT_ERROR << 16;
5423 		break;
5424 
5425 	}
5426 
5427 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5428 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5429 
5430  out:
5431 
5432 	scsi_dma_unmap(scmd);
5433 	mpt3sas_base_free_smid(ioc, smid);
5434 	scmd->scsi_done(scmd);
5435 	return 0;
5436 }
5437 
5438 /**
5439  * _scsih_sas_host_refresh - refreshing sas host object contents
5440  * @ioc: per adapter object
5441  * Context: user
5442  *
5443  * During port enable, the firmware sends topology events for every device.
5444  * It is possible that the handles may change from the previous setting, so
5445  * this code keeps the cached handles updated when they change.
5446  */
5447 static void
5448 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5449 {
5450 	u16 sz;
5451 	u16 ioc_status;
5452 	int i;
5453 	Mpi2ConfigReply_t mpi_reply;
5454 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5455 	u16 attached_handle;
5456 	u8 link_rate;
5457 
5458 	dtmprintk(ioc,
5459 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5460 			   (u64)ioc->sas_hba.sas_address));
5461 
5462 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5463 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5464 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5465 	if (!sas_iounit_pg0) {
5466 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5467 			__FILE__, __LINE__, __func__);
5468 		return;
5469 	}
5470 
5471 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5472 	    sas_iounit_pg0, sz)) != 0)
5473 		goto out;
5474 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5475 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5476 		goto out;
5477 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5478 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5479 		if (i == 0)
5480 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5481 			    PhyData[0].ControllerDevHandle);
5482 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5483 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5484 		    AttachedDevHandle);
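		/*
		 * If a device is attached, never report a link rate below
		 * 1.5 Gbps to the transport layer.
		 */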
5485 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5486 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5487 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5488 		    attached_handle, i, link_rate);
5489 	}
5490  out:
5491 	kfree(sas_iounit_pg0);
5492 }
5493 
5494 /**
5495  * _scsih_sas_host_add - create sas host object
5496  * @ioc: per adapter object
5497  *
5498  * Creating the host-side data object, stored in ioc->sas_hba.
5499  */
5500 static void
5501 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5502 {
5503 	int i;
5504 	Mpi2ConfigReply_t mpi_reply;
5505 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5506 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5507 	Mpi2SasPhyPage0_t phy_pg0;
5508 	Mpi2SasDevicePage0_t sas_device_pg0;
5509 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5510 	u16 ioc_status;
5511 	u16 sz;
5512 	u8 device_missing_delay;
5513 	u8 num_phys;
5514 
5515 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5516 	if (!num_phys) {
5517 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5518 			__FILE__, __LINE__, __func__);
5519 		return;
5520 	}
5521 	ioc->sas_hba.phy = kcalloc(num_phys,
5522 	    sizeof(struct _sas_phy), GFP_KERNEL);
5523 	if (!ioc->sas_hba.phy) {
5524 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5525 			__FILE__, __LINE__, __func__);
5526 		goto out;
5527 	}
5528 	ioc->sas_hba.num_phys = num_phys;
5529 
5530 	/* sas_iounit page 0 */
5531 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5532 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5533 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5534 	if (!sas_iounit_pg0) {
5535 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5536 			__FILE__, __LINE__, __func__);
5537 		return;
5538 	}
5539 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5540 	    sas_iounit_pg0, sz))) {
5541 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5542 			__FILE__, __LINE__, __func__);
5543 		goto out;
5544 	}
5545 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5546 	    MPI2_IOCSTATUS_MASK;
5547 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5548 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5549 			__FILE__, __LINE__, __func__);
5550 		goto out;
5551 	}
5552 
5553 	/* sas_iounit page 1 */
5554 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5555 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5556 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5557 	if (!sas_iounit_pg1) {
5558 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5559 			__FILE__, __LINE__, __func__);
5560 		goto out;
5561 	}
5562 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5563 	    sas_iounit_pg1, sz))) {
5564 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5565 			__FILE__, __LINE__, __func__);
5566 		goto out;
5567 	}
5568 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5569 	    MPI2_IOCSTATUS_MASK;
5570 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5571 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5572 			__FILE__, __LINE__, __func__);
5573 		goto out;
5574 	}
5575 
5576 	ioc->io_missing_delay =
5577 	    sas_iounit_pg1->IODeviceMissingDelay;
5578 	device_missing_delay =
5579 	    sas_iounit_pg1->ReportDeviceMissingDelay;
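	/*
	 * ReportDeviceMissingDelay is in units of 16 seconds when the
	 * UNIT_16 flag is set, otherwise in units of 1 second.
	 */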
5580 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5581 		ioc->device_missing_delay = (device_missing_delay &
5582 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5583 	else
5584 		ioc->device_missing_delay = device_missing_delay &
5585 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5586 
5587 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5588 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5589 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5590 		    i))) {
5591 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5592 				__FILE__, __LINE__, __func__);
5593 			goto out;
5594 		}
5595 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5596 		    MPI2_IOCSTATUS_MASK;
5597 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5598 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5599 				__FILE__, __LINE__, __func__);
5600 			goto out;
5601 		}
5602 
5603 		if (i == 0)
5604 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5605 			    PhyData[0].ControllerDevHandle);
5606 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5607 		ioc->sas_hba.phy[i].phy_id = i;
5608 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5609 		    phy_pg0, ioc->sas_hba.parent_dev);
5610 	}
5611 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5612 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5613 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5614 			__FILE__, __LINE__, __func__);
5615 		goto out;
5616 	}
5617 	ioc->sas_hba.enclosure_handle =
5618 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5619 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5620 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5621 		 ioc->sas_hba.handle,
5622 		 (u64)ioc->sas_hba.sas_address,
5623 		 ioc->sas_hba.num_phys);
5624 
5625 	if (ioc->sas_hba.enclosure_handle) {
5626 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5627 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5628 		   ioc->sas_hba.enclosure_handle)))
5629 			ioc->sas_hba.enclosure_logical_id =
5630 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5631 	}
5632 
5633  out:
5634 	kfree(sas_iounit_pg1);
5635 	kfree(sas_iounit_pg0);
5636 }
5637 
5638 /**
5639  * _scsih_expander_add -  creating expander object
5640  * @ioc: per adapter object
5641  * @handle: expander handle
5642  *
5643  * Creating expander object, stored in ioc->sas_expander_list.
5644  *
5645  * Return: 0 for success, else error.
5646  */
5647 static int
5648 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5649 {
5650 	struct _sas_node *sas_expander;
5651 	struct _enclosure_node *enclosure_dev;
5652 	Mpi2ConfigReply_t mpi_reply;
5653 	Mpi2ExpanderPage0_t expander_pg0;
5654 	Mpi2ExpanderPage1_t expander_pg1;
5655 	u32 ioc_status;
5656 	u16 parent_handle;
5657 	u64 sas_address, sas_address_parent = 0;
5658 	int i;
5659 	unsigned long flags;
5660 	struct _sas_port *mpt3sas_port = NULL;
5661 
5662 	int rc = 0;
5663 
5664 	if (!handle)
5665 		return -1;
5666 
5667 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5668 		return -1;
5669 
5670 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5671 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5672 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5673 			__FILE__, __LINE__, __func__);
5674 		return -1;
5675 	}
5676 
5677 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5678 	    MPI2_IOCSTATUS_MASK;
5679 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5680 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5681 			__FILE__, __LINE__, __func__);
5682 		return -1;
5683 	}
5684 
5685 	/* handle out of order topology events */
5686 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5687 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5688 	    != 0) {
5689 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5690 			__FILE__, __LINE__, __func__);
5691 		return -1;
5692 	}
5693 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5694 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5695 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5696 		    sas_address_parent);
5697 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5698 		if (!sas_expander) {
5699 			rc = _scsih_expander_add(ioc, parent_handle);
5700 			if (rc != 0)
5701 				return rc;
5702 		}
5703 	}
5704 
5705 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5706 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
5707 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5708 	    sas_address);
5709 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5710 
5711 	if (sas_expander)
5712 		return 0;
5713 
5714 	sas_expander = kzalloc(sizeof(struct _sas_node),
5715 	    GFP_KERNEL);
5716 	if (!sas_expander) {
5717 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5718 			__FILE__, __LINE__, __func__);
5719 		return -1;
5720 	}
5721 
5722 	sas_expander->handle = handle;
5723 	sas_expander->num_phys = expander_pg0.NumPhys;
5724 	sas_expander->sas_address_parent = sas_address_parent;
5725 	sas_expander->sas_address = sas_address;
5726 
5727 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5728 		 handle, parent_handle,
5729 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
5730 
5731 	if (!sas_expander->num_phys)
5732 		goto out_fail;
5733 	sas_expander->phy = kcalloc(sas_expander->num_phys,
5734 	    sizeof(struct _sas_phy), GFP_KERNEL);
5735 	if (!sas_expander->phy) {
5736 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5737 			__FILE__, __LINE__, __func__);
5738 		rc = -1;
5739 		goto out_fail;
5740 	}
5741 
5742 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
5743 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5744 	    sas_address_parent);
5745 	if (!mpt3sas_port) {
5746 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5747 			__FILE__, __LINE__, __func__);
5748 		rc = -1;
5749 		goto out_fail;
5750 	}
5751 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5752 
5753 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
5754 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5755 		    &expander_pg1, i, handle))) {
5756 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5757 				__FILE__, __LINE__, __func__);
5758 			rc = -1;
5759 			goto out_fail;
5760 		}
5761 		sas_expander->phy[i].handle = handle;
5762 		sas_expander->phy[i].phy_id = i;
5763 
5764 		if ((mpt3sas_transport_add_expander_phy(ioc,
5765 		    &sas_expander->phy[i], expander_pg1,
5766 		    sas_expander->parent_dev))) {
5767 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5768 				__FILE__, __LINE__, __func__);
5769 			rc = -1;
5770 			goto out_fail;
5771 		}
5772 	}
5773 
5774 	if (sas_expander->enclosure_handle) {
5775 		enclosure_dev =
5776 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
5777 						sas_expander->enclosure_handle);
5778 		if (enclosure_dev)
5779 			sas_expander->enclosure_logical_id =
5780 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5781 	}
5782 
5783 	_scsih_expander_node_add(ioc, sas_expander);
5784 	return 0;
5785 
5786  out_fail:
5787 
5788 	if (mpt3sas_port)
5789 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5790 		    sas_address_parent);
5791 	kfree(sas_expander);
5792 	return rc;
5793 }
5794 
5795 /**
5796  * mpt3sas_expander_remove - removing expander object
5797  * @ioc: per adapter object
5798  * @sas_address: expander sas_address
5799  */
5800 void
5801 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5802 {
5803 	struct _sas_node *sas_expander;
5804 	unsigned long flags;
5805 
5806 	if (ioc->shost_recovery)
5807 		return;
5808 
5809 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5810 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5811 	    sas_address);
5812 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5813 	if (sas_expander)
5814 		_scsih_expander_node_remove(ioc, sas_expander);
5815 }
5816 
5817 /**
5818  * _scsih_done -  internal SCSI_IO callback handler.
5819  * @ioc: per adapter object
5820  * @smid: system request message index
5821  * @msix_index: MSIX table index supplied by the OS
5822  * @reply: reply message frame(lower 32bit addr)
5823  *
5824  * Callback handler when sending internally generated SCSI_IO.
5825  * The callback index passed is `ioc->scsih_cb_idx`
5826  *
5827  * Return: 1 meaning mf should be freed from _base_interrupt
5828  *         0 means the mf is freed from this function.
5829  */
5830 static u8
5831 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5832 {
5833 	MPI2DefaultReply_t *mpi_reply;
5834 
5835 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
5836 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5837 		return 1;
5838 	if (ioc->scsih_cmds.smid != smid)
5839 		return 1;
5840 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5841 	if (mpi_reply) {
5842 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
5843 		    mpi_reply->MsgLength*4);
5844 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5845 	}
5846 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5847 	complete(&ioc->scsih_cmds.done);
5848 	return 1;
5849 }
5850 
5851 
5852 
5853 
5854 #define MPT3_MAX_LUNS (255)
5855 
5856 
5857 /**
5858  * _scsih_check_access_status - check access flags
5859  * @ioc: per adapter object
5860  * @sas_address: sas address
5861  * @handle: sas device handle
5862  * @access_status: errors returned during discovery of the device
5863  *
5864  * Return: 0 for success, else failure
5865  */
5866 static u8
5867 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5868 	u16 handle, u8 access_status)
5869 {
5870 	u8 rc = 1;
5871 	char *desc = NULL;
5872 
5873 	switch (access_status) {
5874 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5875 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5876 		rc = 0;
5877 		break;
5878 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5879 		desc = "sata capability failed";
5880 		break;
5881 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5882 		desc = "sata affiliation conflict";
5883 		break;
5884 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5885 		desc = "route not addressable";
5886 		break;
5887 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5888 		desc = "smp error not addressable";
5889 		break;
5890 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5891 		desc = "device blocked";
5892 		break;
5893 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5894 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5895 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5896 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5897 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5898 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5899 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5900 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5901 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5902 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5903 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5904 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5905 		desc = "sata initialization failed";
5906 		break;
5907 	default:
5908 		desc = "unknown";
5909 		break;
5910 	}
5911 
5912 	if (!rc)
5913 		return 0;
5914 
5915 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5916 		desc, (u64)sas_address, handle);
5917 	return rc;
5918 }
5919 
5920 /**
5921  * _scsih_check_device - checking device responsiveness
5922  * @ioc: per adapter object
5923  * @parent_sas_address: sas address of parent expander or sas host
5924  * @handle: attached device handle
5925  * @phy_number: phy number
5926  * @link_rate: new link rate
5927  */
5928 static void
5929 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5930 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5931 {
5932 	Mpi2ConfigReply_t mpi_reply;
5933 	Mpi2SasDevicePage0_t sas_device_pg0;
5934 	struct _sas_device *sas_device;
5935 	struct _enclosure_node *enclosure_dev = NULL;
5936 	u32 ioc_status;
5937 	unsigned long flags;
5938 	u64 sas_address;
5939 	struct scsi_target *starget;
5940 	struct MPT3SAS_TARGET *sas_target_priv_data;
5941 	u32 device_info;
5942 
5943 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5944 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5945 		return;
5946 
5947 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5948 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5949 		return;
5950 
5951 	/* wide port handling ~ we need only handle device once for the phy that
5952 	 * is matched in sas device page zero
5953 	 */
5954 	if (phy_number != sas_device_pg0.PhyNum)
5955 		return;
5956 
5957 	/* check if this is end device */
5958 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5959 	if (!(_scsih_is_end_device(device_info)))
5960 		return;
5961 
5962 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5963 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5964 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5965 	    sas_address);
5966 
5967 	if (!sas_device)
5968 		goto out_unlock;
5969 
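	/*
	 * Handles can change across firmware resets and port enable; resync
	 * the cached handle and enclosure information if that happened.
	 */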
5970 	if (unlikely(sas_device->handle != handle)) {
5971 		starget = sas_device->starget;
5972 		sas_target_priv_data = starget->hostdata;
5973 		starget_printk(KERN_INFO, starget,
5974 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
5975 			sas_device->handle, handle);
5976 		sas_target_priv_data->handle = handle;
5977 		sas_device->handle = handle;
5978 		if (le16_to_cpu(sas_device_pg0.Flags) &
5979 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5980 			sas_device->enclosure_level =
5981 				sas_device_pg0.EnclosureLevel;
5982 			memcpy(sas_device->connector_name,
5983 				sas_device_pg0.ConnectorName, 4);
5984 			sas_device->connector_name[4] = '\0';
5985 		} else {
5986 			sas_device->enclosure_level = 0;
5987 			sas_device->connector_name[0] = '\0';
5988 		}
5989 
5990 		sas_device->enclosure_handle =
5991 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
5992 		sas_device->is_chassis_slot_valid = 0;
5993 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5994 						sas_device->enclosure_handle);
5995 		if (enclosure_dev) {
5996 			sas_device->enclosure_logical_id =
5997 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5998 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
5999 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6000 				sas_device->is_chassis_slot_valid = 1;
6001 				sas_device->chassis_slot =
6002 					enclosure_dev->pg0.ChassisSlot;
6003 			}
6004 		}
6005 	}
6006 
6007 	/* check if device is present */
6008 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6009 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6010 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
6011 			handle);
6012 		goto out_unlock;
6013 	}
6014 
6015 	/* check if there were any issues with discovery */
6016 	if (_scsih_check_access_status(ioc, sas_address, handle,
6017 	    sas_device_pg0.AccessStatus))
6018 		goto out_unlock;
6019 
6020 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6021 	_scsih_ublock_io_device(ioc, sas_address);
6022 
6023 	if (sas_device)
6024 		sas_device_put(sas_device);
6025 	return;
6026 
6027 out_unlock:
6028 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6029 	if (sas_device)
6030 		sas_device_put(sas_device);
6031 }
6032 
6033 /**
6034  * _scsih_add_device -  creating sas device object
6035  * @ioc: per adapter object
6036  * @handle: sas device handle
6037  * @phy_num: phy number end device attached to
6038  * @is_pd: is this hidden raid component
6039  *
6040  * Creating end device object, stored in ioc->sas_device_list.
6041  *
6042  * Return: 0 for success, non-zero for failure.
6043  */
6044 static int
6045 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6046 	u8 is_pd)
6047 {
6048 	Mpi2ConfigReply_t mpi_reply;
6049 	Mpi2SasDevicePage0_t sas_device_pg0;
6050 	struct _sas_device *sas_device;
6051 	struct _enclosure_node *enclosure_dev = NULL;
6052 	u32 ioc_status;
6053 	u64 sas_address;
6054 	u32 device_info;
6055 
6056 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6057 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6058 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6059 			__FILE__, __LINE__, __func__);
6060 		return -1;
6061 	}
6062 
6063 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6064 	    MPI2_IOCSTATUS_MASK;
6065 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6066 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6067 			__FILE__, __LINE__, __func__);
6068 		return -1;
6069 	}
6070 
6071 	/* check if this is end device */
6072 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6073 	if (!(_scsih_is_end_device(device_info)))
6074 		return -1;
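	/*
	 * Mark the handle as pending an OS device add; the bit is cleared
	 * below if the device is already on our list.
	 */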
6075 	set_bit(handle, ioc->pend_os_device_add);
6076 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6077 
6078 	/* check if device is present */
6079 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6080 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6081 		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6082 			handle);
6083 		return -1;
6084 	}
6085 
6086 	/* check if there were any issues with discovery */
6087 	if (_scsih_check_access_status(ioc, sas_address, handle,
6088 	    sas_device_pg0.AccessStatus))
6089 		return -1;
6090 
6091 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6092 					sas_address);
6093 	if (sas_device) {
6094 		clear_bit(handle, ioc->pend_os_device_add);
6095 		sas_device_put(sas_device);
6096 		return -1;
6097 	}
6098 
6099 	if (sas_device_pg0.EnclosureHandle) {
6100 		enclosure_dev =
6101 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6102 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6103 		if (enclosure_dev == NULL)
6104 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6105 				 sas_device_pg0.EnclosureHandle);
6106 	}
6107 
6108 	sas_device = kzalloc(sizeof(struct _sas_device),
6109 	    GFP_KERNEL);
6110 	if (!sas_device) {
6111 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6112 			__FILE__, __LINE__, __func__);
6113 		return 0;
6114 	}
6115 
6116 	kref_init(&sas_device->refcount);
6117 	sas_device->handle = handle;
6118 	if (_scsih_get_sas_address(ioc,
6119 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6120 	    &sas_device->sas_address_parent) != 0)
6121 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6122 			__FILE__, __LINE__, __func__);
6123 	sas_device->enclosure_handle =
6124 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6125 	if (sas_device->enclosure_handle != 0)
6126 		sas_device->slot =
6127 		    le16_to_cpu(sas_device_pg0.Slot);
6128 	sas_device->device_info = device_info;
6129 	sas_device->sas_address = sas_address;
6130 	sas_device->phy = sas_device_pg0.PhyNum;
6131 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6132 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6133 
6134 	if (le16_to_cpu(sas_device_pg0.Flags)
6135 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6136 		sas_device->enclosure_level =
6137 			sas_device_pg0.EnclosureLevel;
6138 		memcpy(sas_device->connector_name,
6139 			sas_device_pg0.ConnectorName, 4);
6140 		sas_device->connector_name[4] = '\0';
6141 	} else {
6142 		sas_device->enclosure_level = 0;
6143 		sas_device->connector_name[0] = '\0';
6144 	}
6145 	/* get enclosure_logical_id & chassis_slot*/
6146 	sas_device->is_chassis_slot_valid = 0;
6147 	if (enclosure_dev) {
6148 		sas_device->enclosure_logical_id =
6149 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6150 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6151 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6152 			sas_device->is_chassis_slot_valid = 1;
6153 			sas_device->chassis_slot =
6154 					enclosure_dev->pg0.ChassisSlot;
6155 		}
6156 	}
6157 
6158 	/* get device name */
6159 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6160 
6161 	if (ioc->wait_for_discovery_to_complete)
6162 		_scsih_sas_device_init_add(ioc, sas_device);
6163 	else
6164 		_scsih_sas_device_add(ioc, sas_device);
6165 
6166 	sas_device_put(sas_device);
6167 	return 0;
6168 }
6169 
6170 /**
6171  * _scsih_remove_device -  removing sas device object
6172  * @ioc: per adapter object
6173  * @sas_device: the sas_device object
6174  */
6175 static void
6176 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6177 	struct _sas_device *sas_device)
6178 {
6179 	struct MPT3SAS_TARGET *sas_target_priv_data;
6180 
6181 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6182 	     (sas_device->pfa_led_on)) {
6183 		_scsih_turn_off_pfa_led(ioc, sas_device);
6184 		sas_device->pfa_led_on = 0;
6185 	}
6186 
6187 	dewtprintk(ioc,
6188 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6189 			    __func__,
6190 			    sas_device->handle, (u64)sas_device->sas_address));
6191 
6192 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6193 	    NULL, NULL));
6194 
6195 	if (sas_device->starget && sas_device->starget->hostdata) {
6196 		sas_target_priv_data = sas_device->starget->hostdata;
6197 		sas_target_priv_data->deleted = 1;
6198 		_scsih_ublock_io_device(ioc, sas_device->sas_address);
6199 		sas_target_priv_data->handle =
6200 		     MPT3SAS_INVALID_DEVICE_HANDLE;
6201 	}
6202 
6203 	if (!ioc->hide_drives)
6204 		mpt3sas_transport_port_remove(ioc,
6205 		    sas_device->sas_address,
6206 		    sas_device->sas_address_parent);
6207 
6208 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6209 		 sas_device->handle, (u64)sas_device->sas_address);
6210 
6211 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6212 
6213 	dewtprintk(ioc,
6214 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6215 			    __func__,
6216 			    sas_device->handle, (u64)sas_device->sas_address));
6217 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6218 	    NULL, NULL));
6219 }
6220 
6221 /**
6222  * _scsih_sas_topology_change_event_debug - debug for topology event
6223  * @ioc: per adapter object
6224  * @event_data: event data payload
6225  * Context: user.
6226  */
6227 static void
6228 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6229 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6230 {
6231 	int i;
6232 	u16 handle;
6233 	u16 reason_code;
6234 	u8 phy_number;
6235 	char *status_str = NULL;
6236 	u8 link_rate, prev_link_rate;
6237 
6238 	switch (event_data->ExpStatus) {
6239 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6240 		status_str = "add";
6241 		break;
6242 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6243 		status_str = "remove";
6244 		break;
6245 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6246 	case 0:
6247 		status_str =  "responding";
6248 		break;
6249 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6250 		status_str = "remove delay";
6251 		break;
6252 	default:
6253 		status_str = "unknown status";
6254 		break;
6255 	}
6256 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6257 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6258 	    "start_phy(%02d), count(%d)\n",
6259 	    le16_to_cpu(event_data->ExpanderDevHandle),
6260 	    le16_to_cpu(event_data->EnclosureHandle),
6261 	    event_data->StartPhyNum, event_data->NumEntries);
6262 	for (i = 0; i < event_data->NumEntries; i++) {
6263 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6264 		if (!handle)
6265 			continue;
6266 		phy_number = event_data->StartPhyNum + i;
6267 		reason_code = event_data->PHY[i].PhyStatus &
6268 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6269 		switch (reason_code) {
6270 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6271 			status_str = "target add";
6272 			break;
6273 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6274 			status_str = "target remove";
6275 			break;
6276 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6277 			status_str = "delay target remove";
6278 			break;
6279 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6280 			status_str = "link rate change";
6281 			break;
6282 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6283 			status_str = "target responding";
6284 			break;
6285 		default:
6286 			status_str = "unknown";
6287 			break;
6288 		}
6289 		link_rate = event_data->PHY[i].LinkRate >> 4;
6290 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6291 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6292 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6293 		    handle, status_str, link_rate, prev_link_rate);
6294 
6295 	}
6296 }
6297 
6298 /**
6299  * _scsih_sas_topology_change_event - handle topology changes
6300  * @ioc: per adapter object
6301  * @fw_event: The fw_event_work object
6302  * Context: user.
6303  *
6304  */
6305 static int
6306 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6307 	struct fw_event_work *fw_event)
6308 {
6309 	int i;
6310 	u16 parent_handle, handle;
6311 	u16 reason_code;
6312 	u8 phy_number, max_phys;
6313 	struct _sas_node *sas_expander;
6314 	u64 sas_address;
6315 	unsigned long flags;
6316 	u8 link_rate, prev_link_rate;
6317 	Mpi2EventDataSasTopologyChangeList_t *event_data =
6318 		(Mpi2EventDataSasTopologyChangeList_t *)
6319 		fw_event->event_data;
6320 
6321 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6322 		_scsih_sas_topology_change_event_debug(ioc, event_data);
6323 
6324 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6325 		return 0;
6326 
6327 	if (!ioc->sas_hba.num_phys)
6328 		_scsih_sas_host_add(ioc);
6329 	else
6330 		_scsih_sas_host_refresh(ioc);
6331 
6332 	if (fw_event->ignore) {
6333 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6334 		return 0;
6335 	}
6336 
6337 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6338 
6339 	/* handle expander add */
6340 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6341 		if (_scsih_expander_add(ioc, parent_handle) != 0)
6342 			return 0;
6343 
6344 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6345 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6346 	    parent_handle);
6347 	if (sas_expander) {
6348 		sas_address = sas_expander->sas_address;
6349 		max_phys = sas_expander->num_phys;
6350 	} else if (parent_handle < ioc->sas_hba.num_phys) {
6351 		sas_address = ioc->sas_hba.sas_address;
6352 		max_phys = ioc->sas_hba.num_phys;
6353 	} else {
6354 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6355 		return 0;
6356 	}
6357 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6358 
6359 	/* handle siblings events */
6360 	for (i = 0; i < event_data->NumEntries; i++) {
6361 		if (fw_event->ignore) {
6362 			dewtprintk(ioc,
6363 				   ioc_info(ioc, "ignoring expander event\n"));
6364 			return 0;
6365 		}
6366 		if (ioc->remove_host || ioc->pci_error_recovery)
6367 			return 0;
6368 		phy_number = event_data->StartPhyNum + i;
6369 		if (phy_number >= max_phys)
6370 			continue;
6371 		reason_code = event_data->PHY[i].PhyStatus &
6372 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6373 		if ((event_data->PHY[i].PhyStatus &
6374 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6375 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6376 				continue;
6377 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6378 		if (!handle)
6379 			continue;
6380 		link_rate = event_data->PHY[i].LinkRate >> 4;
6381 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6382 		switch (reason_code) {
6383 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6384 
6385 			if (ioc->shost_recovery)
6386 				break;
6387 
6388 			if (link_rate == prev_link_rate)
6389 				break;
6390 
6391 			mpt3sas_transport_update_links(ioc, sas_address,
6392 			    handle, phy_number, link_rate);
6393 
6394 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6395 				break;
6396 
6397 			_scsih_check_device(ioc, sas_address, handle,
6398 			    phy_number, link_rate);
6399 
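			/*
			 * Only fall through to the add path if the device is
			 * still pending an add to the SCSI midlayer.
			 */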
6400 			if (!test_bit(handle, ioc->pend_os_device_add))
6401 				break;
6402 
6403 			/* fall through */
6404 
6405 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6406 
6407 			if (ioc->shost_recovery)
6408 				break;
6409 
6410 			mpt3sas_transport_update_links(ioc, sas_address,
6411 			    handle, phy_number, link_rate);
6412 
6413 			_scsih_add_device(ioc, handle, phy_number, 0);
6414 
6415 			break;
6416 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6417 
6418 			_scsih_device_remove_by_handle(ioc, handle);
6419 			break;
6420 		}
6421 	}
6422 
6423 	/* handle expander removal */
6424 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6425 	    sas_expander)
6426 		mpt3sas_expander_remove(ioc, sas_address);
6427 
6428 	return 0;
6429 }
6430 
6431 /**
6432  * _scsih_sas_device_status_change_event_debug - debug for device event
6433  * @ioc: per adapter object
6434  * @event_data: event data payload
6435  * Context: user.
6436  */
6437 static void
6438 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6439 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6440 {
6441 	char *reason_str = NULL;
6442 
6443 	switch (event_data->ReasonCode) {
6444 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6445 		reason_str = "smart data";
6446 		break;
6447 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6448 		reason_str = "unsupported device discovered";
6449 		break;
6450 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6451 		reason_str = "internal device reset";
6452 		break;
6453 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6454 		reason_str = "internal task abort";
6455 		break;
6456 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6457 		reason_str = "internal task abort set";
6458 		break;
6459 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6460 		reason_str = "internal clear task set";
6461 		break;
6462 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6463 		reason_str = "internal query task";
6464 		break;
6465 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6466 		reason_str = "sata init failure";
6467 		break;
6468 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6469 		reason_str = "internal device reset complete";
6470 		break;
6471 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6472 		reason_str = "internal task abort complete";
6473 		break;
6474 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6475 		reason_str = "internal async notification";
6476 		break;
6477 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6478 		reason_str = "expander reduced functionality";
6479 		break;
6480 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6481 		reason_str = "expander reduced functionality complete";
6482 		break;
6483 	default:
6484 		reason_str = "unknown reason";
6485 		break;
6486 	}
6487 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6488 		 reason_str, le16_to_cpu(event_data->DevHandle),
6489 		 (u64)le64_to_cpu(event_data->SASAddress),
6490 		 le16_to_cpu(event_data->TaskTag));
6491 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6492 		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
6493 			event_data->ASC, event_data->ASCQ);
6494 	pr_cont("\n");
6495 }
6496 
6497 /**
6498  * _scsih_sas_device_status_change_event - handle device status change
6499  * @ioc: per adapter object
6500  * @event_data: The fw event
6501  * Context: user.
6502  */
6503 static void
6504 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6505 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6506 {
6507 	struct MPT3SAS_TARGET *target_priv_data;
6508 	struct _sas_device *sas_device;
6509 	u64 sas_address;
6510 	unsigned long flags;
6511 
6512 	/* In MPI Revision K (0xC), the internal device reset complete was
6513 	 * implemented, so avoid setting tm_busy flag for older firmware.
6514 	 */
6515 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6516 		return;
6517 
6518 	if (event_data->ReasonCode !=
6519 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6520 	   event_data->ReasonCode !=
6521 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6522 		return;
6523 
6524 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6525 	sas_address = le64_to_cpu(event_data->SASAddress);
6526 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6527 	    sas_address);
6528 
6529 	if (!sas_device || !sas_device->starget)
6530 		goto out;
6531 
6532 	target_priv_data = sas_device->starget->hostdata;
6533 	if (!target_priv_data)
6534 		goto out;
6535 
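	/*
	 * tm_busy is set for the duration of a firmware internal device
	 * reset and cleared again on the reset-complete event, so the
	 * driver can hold off new I/O to the target in the meantime.
	 */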
6536 	if (event_data->ReasonCode ==
6537 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6538 		target_priv_data->tm_busy = 1;
6539 	else
6540 		target_priv_data->tm_busy = 0;
6541 
6542 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6543 		ioc_info(ioc,
6544 		    "%s tm_busy flag for handle(0x%04x)\n",
6545 		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6546 		    target_priv_data->handle);
6547 
6548 out:
6549 	if (sas_device)
6550 		sas_device_put(sas_device);
6551 
6552 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6553 }
6554 
6555 
6556 /**
6557  * _scsih_check_pcie_access_status - check access flags
6558  * @ioc: per adapter object
6559  * @wwid: wwid
6560  * @handle: sas device handle
6561  * @access_status: errors returned during discovery of the device
6562  *
6563  * Return: 0 for success, else failure
6564  */
6565 static u8
6566 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6567 	u16 handle, u8 access_status)
6568 {
6569 	u8 rc = 1;
6570 	char *desc = NULL;
6571 
6572 	switch (access_status) {
6573 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6574 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6575 		rc = 0;
6576 		break;
6577 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6578 		desc = "PCIe device capability failed";
6579 		break;
6580 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6581 		desc = "PCIe device blocked";
6582 		ioc_info(ioc,
6583 		    "Device with Access Status (%s): wwid(0x%016llx), "
6584 		    "handle(0x%04x)\n ll only be added to the internal list",
6585 		    desc, (u64)wwid, handle);
6586 		rc = 0;
6587 		break;
6588 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6589 		desc = "PCIe device mem space access failed";
6590 		break;
6591 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6592 		desc = "PCIe device unsupported";
6593 		break;
6594 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6595 		desc = "PCIe device MSIx Required";
6596 		break;
6597 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6598 		desc = "PCIe device init fail max";
6599 		break;
6600 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6601 		desc = "PCIe device status unknown";
6602 		break;
6603 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6604 		desc = "nvme ready timeout";
6605 		break;
6606 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6607 		desc = "nvme device configuration unsupported";
6608 		break;
6609 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6610 		desc = "nvme identify failed";
6611 		break;
6612 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6613 		desc = "nvme qconfig failed";
6614 		break;
6615 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6616 		desc = "nvme qcreation failed";
6617 		break;
6618 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6619 		desc = "nvme eventcfg failed";
6620 		break;
6621 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6622 		desc = "nvme get feature stat failed";
6623 		break;
6624 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6625 		desc = "nvme idle timeout";
6626 		break;
6627 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6628 		desc = "nvme failure status";
6629 		break;
6630 	default:
6631 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6632 			access_status, (u64)wwid, handle);
6633 		return rc;
6634 	}
6635 
6636 	if (!rc)
6637 		return rc;
6638 
6639 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6640 		 desc, (u64)wwid, handle);
6641 	return rc;
6642 }
6643 
6644 /**
6645  * _scsih_pcie_device_remove_from_sml - removing the pcie device
6646  * from SML and freeing up the associated memory
6647  * @ioc: per adapter object
6648  * @pcie_device: the pcie_device object
6649  */
6650 static void
6651 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6652 	struct _pcie_device *pcie_device)
6653 {
6654 	struct MPT3SAS_TARGET *sas_target_priv_data;
6655 
6656 	dewtprintk(ioc,
6657 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6658 			    __func__,
6659 			    pcie_device->handle, (u64)pcie_device->wwid));
6660 	if (pcie_device->enclosure_handle != 0)
6661 		dewtprintk(ioc,
6662 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6663 				    __func__,
6664 				    (u64)pcie_device->enclosure_logical_id,
6665 				    pcie_device->slot));
6666 	if (pcie_device->connector_name[0] != '\0')
6667 		dewtprintk(ioc,
6668 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6669 				    __func__,
6670 				    pcie_device->enclosure_level,
6671 				    pcie_device->connector_name));
6672 
6673 	if (pcie_device->starget && pcie_device->starget->hostdata) {
6674 		sas_target_priv_data = pcie_device->starget->hostdata;
6675 		sas_target_priv_data->deleted = 1;
6676 		_scsih_ublock_io_device(ioc, pcie_device->wwid);
6677 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6678 	}
6679 
6680 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6681 		 pcie_device->handle, (u64)pcie_device->wwid);
6682 	if (pcie_device->enclosure_handle != 0)
6683 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6684 			 (u64)pcie_device->enclosure_logical_id,
6685 			 pcie_device->slot);
6686 	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name(%s)\n",
6688 			 pcie_device->enclosure_level,
6689 			 pcie_device->connector_name);
6690 
6691 	if (pcie_device->starget && (pcie_device->access_status !=
6692 				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6693 		scsi_remove_target(&pcie_device->starget->dev);
6694 	dewtprintk(ioc,
6695 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6696 			    __func__,
6697 			    pcie_device->handle, (u64)pcie_device->wwid));
6698 	if (pcie_device->enclosure_handle != 0)
6699 		dewtprintk(ioc,
6700 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6701 				    __func__,
6702 				    (u64)pcie_device->enclosure_logical_id,
6703 				    pcie_device->slot));
6704 	if (pcie_device->connector_name[0] != '\0')
6705 		dewtprintk(ioc,
				    ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
6707 				    __func__,
6708 				    pcie_device->enclosure_level,
6709 				    pcie_device->connector_name));
6710 
6711 	kfree(pcie_device->serial_number);
6712 }
6713 
6714 
6715 /**
6716  * _scsih_pcie_check_device - checking device responsiveness
6717  * @ioc: per adapter object
6718  * @handle: attached device handle
6719  */
6720 static void
6721 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6722 {
6723 	Mpi2ConfigReply_t mpi_reply;
6724 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6725 	u32 ioc_status;
6726 	struct _pcie_device *pcie_device;
6727 	u64 wwid;
6728 	unsigned long flags;
6729 	struct scsi_target *starget;
6730 	struct MPT3SAS_TARGET *sas_target_priv_data;
6731 	u32 device_info;
6732 
6733 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6734 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6735 		return;
6736 
6737 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6738 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6739 		return;
6740 
6741 	/* check if this is end device */
6742 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6743 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
6744 		return;
6745 
6746 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6747 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6748 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6749 
6750 	if (!pcie_device) {
6751 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6752 		return;
6753 	}
6754 
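	/* The firmware may report a different handle for this WWID (for
	 * example across a host reset); refresh the cached handle and
	 * enclosure information.
	 */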
6755 	if (unlikely(pcie_device->handle != handle)) {
6756 		starget = pcie_device->starget;
6757 		sas_target_priv_data = starget->hostdata;
6758 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
6759 		starget_printk(KERN_INFO, starget,
6760 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
6761 		    pcie_device->handle, handle);
6762 		sas_target_priv_data->handle = handle;
6763 		pcie_device->handle = handle;
6764 
6765 		if (le32_to_cpu(pcie_device_pg0.Flags) &
6766 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6767 			pcie_device->enclosure_level =
6768 			    pcie_device_pg0.EnclosureLevel;
6769 			memcpy(&pcie_device->connector_name[0],
6770 			    &pcie_device_pg0.ConnectorName[0], 4);
6771 		} else {
6772 			pcie_device->enclosure_level = 0;
6773 			pcie_device->connector_name[0] = '\0';
6774 		}
6775 	}
6776 
6777 	/* check if device is present */
6778 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6779 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present: handle(0x%04x)!!!\n",
6781 			 handle);
6782 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6783 		pcie_device_put(pcie_device);
6784 		return;
6785 	}
6786 
6787 	/* check if there were any issues with discovery */
6788 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6789 	    pcie_device_pg0.AccessStatus)) {
6790 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6791 		pcie_device_put(pcie_device);
6792 		return;
6793 	}
6794 
6795 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6796 	pcie_device_put(pcie_device);
6797 
6798 	_scsih_ublock_io_device(ioc, wwid);
6799 
6800 	return;
6801 }
6802 
6803 /**
6804  * _scsih_pcie_add_device -  creating pcie device object
6805  * @ioc: per adapter object
6806  * @handle: pcie device handle
6807  *
6808  * Creating end device object, stored in ioc->pcie_device_list.
6809  *
6810  * Return: 1 means queue the event later, 0 means complete the event
6811  */
6812 static int
6813 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6814 {
6815 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6816 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
6817 	Mpi2ConfigReply_t mpi_reply;
6818 	struct _pcie_device *pcie_device;
6819 	struct _enclosure_node *enclosure_dev;
6820 	u32 ioc_status;
6821 	u64 wwid;
6822 
6823 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6824 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6825 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6826 			__FILE__, __LINE__, __func__);
6827 		return 0;
6828 	}
6829 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6830 	    MPI2_IOCSTATUS_MASK;
6831 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6832 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6833 			__FILE__, __LINE__, __func__);
6834 		return 0;
6835 	}
6836 
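	/* Note that an OS-level device add is pending for this handle; the
	 * PCIe topology change handler checks this bit so it can convert a
	 * later port status change back into a device add if needed.
	 */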
6837 	set_bit(handle, ioc->pend_os_device_add);
6838 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6839 
6840 	/* check if device is present */
6841 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6842 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6844 			handle);
6845 		return 0;
6846 	}
6847 
6848 	/* check if there were any issues with discovery */
6849 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6850 	    pcie_device_pg0.AccessStatus))
6851 		return 0;
6852 
6853 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
6854 	    (pcie_device_pg0.DeviceInfo))))
6855 		return 0;
6856 
6857 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6858 	if (pcie_device) {
6859 		clear_bit(handle, ioc->pend_os_device_add);
6860 		pcie_device_put(pcie_device);
6861 		return 0;
6862 	}
6863 
	/* PCIe Device Page 2 contains read-only information about a
	 * specific NVMe device; therefore, this page is only valid for
	 * NVMe devices and is skipped for PCIe devices of type SCSI.
	 */
6868 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
6869 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6870 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6871 		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
6872 		    handle)) {
6873 			ioc_err(ioc,
6874 			    "failure at %s:%d/%s()!\n", __FILE__,
6875 			    __LINE__, __func__);
6876 			return 0;
6877 		}
6878 
6879 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6880 					MPI2_IOCSTATUS_MASK;
6881 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6882 			ioc_err(ioc,
6883 			    "failure at %s:%d/%s()!\n", __FILE__,
6884 			    __LINE__, __func__);
6885 			return 0;
6886 		}
6887 	}
6888 
6889 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6890 	if (!pcie_device) {
6891 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6892 			__FILE__, __LINE__, __func__);
6893 		return 0;
6894 	}
6895 
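	/* kref_init() sets the reference count to one; that initial
	 * reference is dropped by the pcie_device_put() at the end of this
	 * function, once the device has been added to the driver's lists.
	 */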
6896 	kref_init(&pcie_device->refcount);
6897 	pcie_device->id = ioc->pcie_target_id++;
6898 	pcie_device->channel = PCIE_CHANNEL;
6899 	pcie_device->handle = handle;
6900 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
6901 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6902 	pcie_device->wwid = wwid;
6903 	pcie_device->port_num = pcie_device_pg0.PortNum;
6904 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6905 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6906 
6907 	pcie_device->enclosure_handle =
6908 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6909 	if (pcie_device->enclosure_handle != 0)
6910 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6911 
6912 	if (le32_to_cpu(pcie_device_pg0.Flags) &
6913 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6914 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6915 		memcpy(&pcie_device->connector_name[0],
6916 		    &pcie_device_pg0.ConnectorName[0], 4);
6917 	} else {
6918 		pcie_device->enclosure_level = 0;
6919 		pcie_device->connector_name[0] = '\0';
6920 	}
6921 
6922 	/* get enclosure_logical_id */
6923 	if (pcie_device->enclosure_handle) {
6924 		enclosure_dev =
6925 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6926 						pcie_device->enclosure_handle);
6927 		if (enclosure_dev)
6928 			pcie_device->enclosure_logical_id =
6929 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6930 	}
6931 	/* TODO -- Add device name once FW supports it */
6932 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
6933 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
6934 		pcie_device->nvme_mdts =
6935 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6936 		if (pcie_device_pg2.ControllerResetTO)
6937 			pcie_device->reset_timeout =
6938 			    pcie_device_pg2.ControllerResetTO;
6939 		else
6940 			pcie_device->reset_timeout = 30;
6941 	} else
6942 		pcie_device->reset_timeout = 30;
6943 
6944 	if (ioc->wait_for_discovery_to_complete)
6945 		_scsih_pcie_device_init_add(ioc, pcie_device);
6946 	else
6947 		_scsih_pcie_device_add(ioc, pcie_device);
6948 
6949 	pcie_device_put(pcie_device);
6950 	return 0;
6951 }
6952 
6953 /**
6954  * _scsih_pcie_topology_change_event_debug - debug for topology
6955  * event
6956  * @ioc: per adapter object
6957  * @event_data: event data payload
6958  * Context: user.
6959  */
6960 static void
6961 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6962 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6963 {
6964 	int i;
6965 	u16 handle;
6966 	u16 reason_code;
6967 	u8 port_number;
6968 	char *status_str = NULL;
6969 	u8 link_rate, prev_link_rate;
6970 
6971 	switch (event_data->SwitchStatus) {
6972 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6973 		status_str = "add";
6974 		break;
6975 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6976 		status_str = "remove";
6977 		break;
6978 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6979 	case 0:
6980 		status_str =  "responding";
6981 		break;
6982 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6983 		status_str = "remove delay";
6984 		break;
6985 	default:
6986 		status_str = "unknown status";
6987 		break;
6988 	}
6989 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
6991 		"start_port(%02d), count(%d)\n",
6992 		le16_to_cpu(event_data->SwitchDevHandle),
6993 		le16_to_cpu(event_data->EnclosureHandle),
6994 		event_data->StartPortNum, event_data->NumEntries);
6995 	for (i = 0; i < event_data->NumEntries; i++) {
6996 		handle =
6997 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
6998 		if (!handle)
6999 			continue;
7000 		port_number = event_data->StartPortNum + i;
7001 		reason_code = event_data->PortEntry[i].PortStatus;
7002 		switch (reason_code) {
7003 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7004 			status_str = "target add";
7005 			break;
7006 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7007 			status_str = "target remove";
7008 			break;
7009 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7010 			status_str = "delay target remove";
7011 			break;
7012 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7013 			status_str = "link rate change";
7014 			break;
7015 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7016 			status_str = "target responding";
7017 			break;
7018 		default:
7019 			status_str = "unknown";
7020 			break;
7021 		}
7022 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
7023 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7024 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7025 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7026 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7027 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
7028 			handle, status_str, link_rate, prev_link_rate);
7029 	}
7030 }
7031 
7032 /**
7033  * _scsih_pcie_topology_change_event - handle PCIe topology
7034  *  changes
7035  * @ioc: per adapter object
7036  * @fw_event: The fw_event_work object
7037  * Context: user.
7038  *
7039  */
7040 static void
7041 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7042 	struct fw_event_work *fw_event)
7043 {
7044 	int i;
7045 	u16 handle;
7046 	u16 reason_code;
7047 	u8 link_rate, prev_link_rate;
7048 	unsigned long flags;
7049 	int rc;
7050 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7051 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7052 	struct _pcie_device *pcie_device;
7053 
7054 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7055 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
7056 
7057 	if (ioc->shost_recovery || ioc->remove_host ||
7058 		ioc->pci_error_recovery)
7059 		return;
7060 
7061 	if (fw_event->ignore) {
7062 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7063 		return;
7064 	}
7065 
7066 	/* handle siblings events */
7067 	for (i = 0; i < event_data->NumEntries; i++) {
7068 		if (fw_event->ignore) {
7069 			dewtprintk(ioc,
7070 				   ioc_info(ioc, "ignoring switch event\n"));
7071 			return;
7072 		}
7073 		if (ioc->remove_host || ioc->pci_error_recovery)
7074 			return;
7075 		reason_code = event_data->PortEntry[i].PortStatus;
7076 		handle =
7077 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7078 		if (!handle)
7079 			continue;
7080 
7081 		link_rate = event_data->PortEntry[i].CurrentPortInfo
7082 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7083 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7084 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7085 
7086 		switch (reason_code) {
7087 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7088 			if (ioc->shost_recovery)
7089 				break;
7090 			if (link_rate == prev_link_rate)
7091 				break;
7092 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7093 				break;
7094 
7095 			_scsih_pcie_check_device(ioc, handle);
7096 
			/* The code after this point handles the test case
			 * where a device has been added, but it keeps
			 * returning BUSY for some time.  Then, before the
			 * Device Missing Delay expires and the device
			 * becomes READY, the device is removed and added
			 * back.
			 */
7103 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7104 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7105 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7106 
7107 			if (pcie_device) {
7108 				pcie_device_put(pcie_device);
7109 				break;
7110 			}
7111 
7112 			if (!test_bit(handle, ioc->pend_os_device_add))
7113 				break;
7114 
7115 			dewtprintk(ioc,
7116 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7117 					    handle));
7118 			event_data->PortEntry[i].PortStatus &= 0xF0;
7119 			event_data->PortEntry[i].PortStatus |=
7120 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7121 			/* fall through */
7122 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7123 			if (ioc->shost_recovery)
7124 				break;
7125 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7126 				break;
7127 
7128 			rc = _scsih_pcie_add_device(ioc, handle);
7129 			if (!rc) {
7130 				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed:
				 * we don't have a dedicated status value for
				 * marking an event entry as void/vacant.
				 */
7135 				event_data->PortEntry[i].PortStatus |=
7136 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7137 			}
7138 			break;
7139 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7140 			_scsih_pcie_device_remove_by_handle(ioc, handle);
7141 			break;
7142 		}
7143 	}
7144 }
7145 
7146 /**
7147  * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
7149  * @event_data: event data payload
7150  * Context: user.
7151  */
7152 static void
7153 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7154 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7155 {
7156 	char *reason_str = NULL;
7157 
7158 	switch (event_data->ReasonCode) {
7159 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7160 		reason_str = "smart data";
7161 		break;
7162 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7163 		reason_str = "unsupported device discovered";
7164 		break;
7165 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7166 		reason_str = "internal device reset";
7167 		break;
7168 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7169 		reason_str = "internal task abort";
7170 		break;
7171 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7172 		reason_str = "internal task abort set";
7173 		break;
7174 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7175 		reason_str = "internal clear task set";
7176 		break;
7177 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7178 		reason_str = "internal query task";
7179 		break;
7180 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7181 		reason_str = "device init failure";
7182 		break;
7183 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7184 		reason_str = "internal device reset complete";
7185 		break;
7186 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7187 		reason_str = "internal task abort complete";
7188 		break;
7189 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7190 		reason_str = "internal async notification";
7191 		break;
7192 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7193 		reason_str = "pcie hot reset failed";
7194 		break;
7195 	default:
7196 		reason_str = "unknown reason";
7197 		break;
7198 	}
7199 
7200 	ioc_info(ioc, "PCIE device status change: (%s)\n"
7201 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7202 		 reason_str, le16_to_cpu(event_data->DevHandle),
7203 		 (u64)le64_to_cpu(event_data->WWID),
7204 		 le16_to_cpu(event_data->TaskTag));
7205 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
7207 			event_data->ASC, event_data->ASCQ);
7208 	pr_cont("\n");
7209 }
7210 
7211 /**
7212  * _scsih_pcie_device_status_change_event - handle device status
7213  * change
7214  * @ioc: per adapter object
7215  * @fw_event: The fw_event_work object
7216  * Context: user.
7217  */
7218 static void
7219 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7220 	struct fw_event_work *fw_event)
7221 {
7222 	struct MPT3SAS_TARGET *target_priv_data;
7223 	struct _pcie_device *pcie_device;
7224 	u64 wwid;
7225 	unsigned long flags;
7226 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7227 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7228 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7229 		_scsih_pcie_device_status_change_event_debug(ioc,
7230 			event_data);
7231 
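	/* Only the internal-device-reset start/complete notifications are
	 * handled here; they toggle the target's tm_busy flag for the
	 * duration of the firmware's device reset.
	 */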
7232 	if (event_data->ReasonCode !=
7233 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7234 		event_data->ReasonCode !=
7235 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7236 		return;
7237 
7238 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7239 	wwid = le64_to_cpu(event_data->WWID);
7240 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7241 
7242 	if (!pcie_device || !pcie_device->starget)
7243 		goto out;
7244 
7245 	target_priv_data = pcie_device->starget->hostdata;
7246 	if (!target_priv_data)
7247 		goto out;
7248 
7249 	if (event_data->ReasonCode ==
7250 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7251 		target_priv_data->tm_busy = 1;
7252 	else
7253 		target_priv_data->tm_busy = 0;
7254 out:
7255 	if (pcie_device)
7256 		pcie_device_put(pcie_device);
7257 
7258 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7259 }
7260 
7261 /**
7262  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7263  * event
7264  * @ioc: per adapter object
7265  * @event_data: event data payload
7266  * Context: user.
7267  */
7268 static void
7269 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7270 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7271 {
7272 	char *reason_str = NULL;
7273 
7274 	switch (event_data->ReasonCode) {
7275 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7276 		reason_str = "enclosure add";
7277 		break;
7278 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7279 		reason_str = "enclosure remove";
7280 		break;
7281 	default:
7282 		reason_str = "unknown reason";
7283 		break;
7284 	}
7285 
7286 	ioc_info(ioc, "enclosure status change: (%s)\n"
7287 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7288 		 reason_str,
7289 		 le16_to_cpu(event_data->EnclosureHandle),
7290 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7291 		 le16_to_cpu(event_data->StartSlot));
7292 }
7293 
7294 /**
7295  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7296  * @ioc: per adapter object
7297  * @fw_event: The fw_event_work object
7298  * Context: user.
7299  */
7300 static void
7301 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7302 	struct fw_event_work *fw_event)
7303 {
7304 	Mpi2ConfigReply_t mpi_reply;
7305 	struct _enclosure_node *enclosure_dev = NULL;
7306 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7307 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7308 	int rc;
7309 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7310 
7311 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7312 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7313 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
7314 		     fw_event->event_data);
7315 	if (ioc->shost_recovery)
7316 		return;
7317 
7318 	if (enclosure_handle)
7319 		enclosure_dev =
7320 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7321 						enclosure_handle);
7322 	switch (event_data->ReasonCode) {
7323 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7324 		if (!enclosure_dev) {
7325 			enclosure_dev =
7326 				kzalloc(sizeof(struct _enclosure_node),
7327 					GFP_KERNEL);
7328 			if (!enclosure_dev) {
7329 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
7330 					 __FILE__, __LINE__, __func__);
7331 				return;
7332 			}
7333 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7334 				&enclosure_dev->pg0,
7335 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7336 				enclosure_handle);
7337 
7338 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7339 						MPI2_IOCSTATUS_MASK)) {
7340 				kfree(enclosure_dev);
7341 				return;
7342 			}
7343 
7344 			list_add_tail(&enclosure_dev->list,
7345 							&ioc->enclosure_list);
7346 		}
7347 		break;
7348 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7349 		if (enclosure_dev) {
7350 			list_del(&enclosure_dev->list);
7351 			kfree(enclosure_dev);
7352 		}
7353 		break;
7354 	default:
7355 		break;
7356 	}
7357 }
7358 
7359 /**
7360  * _scsih_sas_broadcast_primitive_event - handle broadcast events
7361  * @ioc: per adapter object
7362  * @fw_event: The fw_event_work object
7363  * Context: user.
7364  */
7365 static void
7366 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7367 	struct fw_event_work *fw_event)
7368 {
7369 	struct scsi_cmnd *scmd;
7370 	struct scsi_device *sdev;
7371 	struct scsiio_tracker *st;
7372 	u16 smid, handle;
7373 	u32 lun;
7374 	struct MPT3SAS_DEVICE *sas_device_priv_data;
7375 	u32 termination_count;
7376 	u32 query_count;
7377 	Mpi2SCSITaskManagementReply_t *mpi_reply;
7378 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7379 		(Mpi2EventDataSasBroadcastPrimitive_t *)
7380 		fw_event->event_data;
7381 	u16 ioc_status;
7382 	unsigned long flags;
7383 	int r;
7384 	u8 max_retries = 0;
7385 	u8 task_abort_retries;
7386 
7387 	mutex_lock(&ioc->tm_cmds.mutex);
7388 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7389 		 __func__, event_data->PhyNum, event_data->PortWidth);
7390 
7391 	_scsih_block_io_all_device(ioc);
7392 
7393 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7394 	mpi_reply = ioc->tm_cmds.reply;
7395  broadcast_aen_retry:
7396 
7397 	/* sanity checks for retrying this loop */
7398 	if (max_retries++ == 5) {
7399 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7400 		goto out;
7401 	} else if (max_retries > 1)
7402 		dewtprintk(ioc,
7403 			   ioc_info(ioc, "%s: %d retry\n",
7404 				    __func__, max_retries - 1));
7405 
7406 	termination_count = 0;
7407 	query_count = 0;
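	/* Walk all outstanding SCSI IOs: issue a QUERY_TASK for each one
	 * and, when the target no longer claims ownership of the IO, send
	 * an ABORT_TASK for it.
	 */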
7408 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7409 		if (ioc->shost_recovery)
7410 			goto out;
7411 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7412 		if (!scmd)
7413 			continue;
7414 		st = scsi_cmd_priv(scmd);
7415 		sdev = scmd->device;
7416 		sas_device_priv_data = sdev->hostdata;
7417 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7418 			continue;
7419 		 /* skip hidden raid components */
7420 		if (sas_device_priv_data->sas_target->flags &
7421 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
7422 			continue;
7423 		 /* skip volumes */
7424 		if (sas_device_priv_data->sas_target->flags &
7425 		    MPT_TARGET_FLAGS_VOLUME)
7426 			continue;
7427 		 /* skip PCIe devices */
7428 		if (sas_device_priv_data->sas_target->flags &
7429 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
7430 			continue;
7431 
7432 		handle = sas_device_priv_data->sas_target->handle;
7433 		lun = sas_device_priv_data->lun;
7434 		query_count++;
7435 
7436 		if (ioc->shost_recovery)
7437 			goto out;
7438 
7439 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7440 		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7441 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7442 			st->msix_io, 30, 0);
7443 		if (r == FAILED) {
7444 			sdev_printk(KERN_WARNING, sdev,
7445 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
7446 			    "QUERY_TASK: scmd(%p)\n", scmd);
7447 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7448 			goto broadcast_aen_retry;
7449 		}
7450 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7451 		    & MPI2_IOCSTATUS_MASK;
7452 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7453 			sdev_printk(KERN_WARNING, sdev,
7454 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7455 				ioc_status, scmd);
7456 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7457 			goto broadcast_aen_retry;
7458 		}
7459 
7460 		/* see if IO is still owned by IOC and target */
7461 		if (mpi_reply->ResponseCode ==
7462 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7463 		     mpi_reply->ResponseCode ==
7464 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7465 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7466 			continue;
7467 		}
7468 		task_abort_retries = 0;
7469  tm_retry:
7470 		if (task_abort_retries++ == 60) {
7471 			dewtprintk(ioc,
7472 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7473 					    __func__));
7474 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7475 			goto broadcast_aen_retry;
7476 		}
7477 
7478 		if (ioc->shost_recovery)
7479 			goto out_no_lock;
7480 
7481 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7482 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7483 			st->msix_io, 30, 0);
7484 		if (r == FAILED || st->cb_idx != 0xFF) {
7485 			sdev_printk(KERN_WARNING, sdev,
7486 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7487 			    "scmd(%p)\n", scmd);
7488 			goto tm_retry;
7489 		}
7490 
7491 		if (task_abort_retries > 1)
7492 			sdev_printk(KERN_WARNING, sdev,
7493 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7494 			    " scmd(%p)\n",
7495 			    task_abort_retries - 1, scmd);
7496 
7497 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7498 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7499 	}
7500 
7501 	if (ioc->broadcast_aen_pending) {
7502 		dewtprintk(ioc,
7503 			   ioc_info(ioc,
7504 				    "%s: loop back due to pending AEN\n",
7505 				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
7508 	}
7509 
7510  out:
7511 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7512  out_no_lock:
7513 
7514 	dewtprintk(ioc,
7515 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7516 			    __func__, query_count, termination_count));
7517 
7518 	ioc->broadcast_aen_busy = 0;
7519 	if (!ioc->shost_recovery)
7520 		_scsih_ublock_io_all_device(ioc);
7521 	mutex_unlock(&ioc->tm_cmds.mutex);
7522 }
7523 
7524 /**
7525  * _scsih_sas_discovery_event - handle discovery events
7526  * @ioc: per adapter object
7527  * @fw_event: The fw_event_work object
7528  * Context: user.
7529  */
7530 static void
7531 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7532 	struct fw_event_work *fw_event)
7533 {
7534 	Mpi2EventDataSasDiscovery_t *event_data =
7535 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7536 
7537 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7538 		ioc_info(ioc, "discovery event: (%s)",
7539 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7540 			 "start" : "stop");
7541 		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
7543 				le32_to_cpu(event_data->DiscoveryStatus));
7544 		pr_cont("\n");
7545 	}
7546 
7547 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7548 	    !ioc->sas_hba.num_phys) {
7549 		if (disable_discovery > 0 && ioc->shost_recovery) {
7550 			/* Wait for the reset to complete */
7551 			while (ioc->shost_recovery)
7552 				ssleep(1);
7553 		}
7554 		_scsih_sas_host_add(ioc);
7555 	}
7556 }
7557 
7558 /**
7559  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7560  *						events
7561  * @ioc: per adapter object
7562  * @fw_event: The fw_event_work object
7563  * Context: user.
7564  */
7565 static void
7566 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7567 	struct fw_event_work *fw_event)
7568 {
7569 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7570 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7571 
7572 	switch (event_data->ReasonCode) {
7573 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7574 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7575 			 le16_to_cpu(event_data->DevHandle),
7576 			 (u64)le64_to_cpu(event_data->SASAddress),
7577 			 event_data->PhysicalPort);
7578 		break;
7579 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7580 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7581 			 le16_to_cpu(event_data->DevHandle),
7582 			 (u64)le64_to_cpu(event_data->SASAddress),
7583 			 event_data->PhysicalPort);
7584 		break;
7585 	default:
7586 		break;
7587 	}
7588 }
7589 
7590 /**
7591  * _scsih_pcie_enumeration_event - handle enumeration events
7592  * @ioc: per adapter object
7593  * @fw_event: The fw_event_work object
7594  * Context: user.
7595  */
7596 static void
7597 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7598 	struct fw_event_work *fw_event)
7599 {
7600 	Mpi26EventDataPCIeEnumeration_t *event_data =
7601 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7602 
7603 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7604 		return;
7605 
7606 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7607 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7608 		 "started" : "completed",
7609 		 event_data->Flags);
7610 	if (event_data->EnumerationStatus)
		pr_cont(" enumeration_status(0x%08x)",
7612 			le32_to_cpu(event_data->EnumerationStatus));
7613 	pr_cont("\n");
7614 }
7615 
7616 /**
7617  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7618  * @ioc: per adapter object
7619  * @handle: device handle for physical disk
7620  * @phys_disk_num: physical disk number
7621  *
7622  * Return: 0 for success, else failure.
7623  */
7624 static int
7625 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7626 {
7627 	Mpi2RaidActionRequest_t *mpi_request;
7628 	Mpi2RaidActionReply_t *mpi_reply;
7629 	u16 smid;
7630 	u8 issue_reset = 0;
7631 	int rc = 0;
7632 	u16 ioc_status;
7633 	u32 log_info;
7634 
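	/* This RAID action is skipped on MPI 2.0 (SAS 2.0 generation)
	 * controllers.
	 */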
7635 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7636 		return rc;
7637 
7638 	mutex_lock(&ioc->scsih_cmds.mutex);
7639 
7640 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7641 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7642 		rc = -EAGAIN;
7643 		goto out;
7644 	}
7645 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7646 
7647 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7648 	if (!smid) {
7649 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7650 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7651 		rc = -EAGAIN;
7652 		goto out;
7653 	}
7654 
7655 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7656 	ioc->scsih_cmds.smid = smid;
7657 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7658 
7659 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7660 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7661 	mpi_request->PhysDiskNum = phys_disk_num;
7662 
7663 	dewtprintk(ioc,
7664 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7665 			    handle, phys_disk_num));
7666 
7667 	init_completion(&ioc->scsih_cmds.done);
7668 	ioc->put_smid_default(ioc, smid);
7669 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7670 
7671 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7672 		issue_reset =
7673 			mpt3sas_base_check_cmd_timeout(ioc,
7674 				ioc->scsih_cmds.status, mpi_request,
7675 				sizeof(Mpi2RaidActionRequest_t)/4);
7676 		rc = -EFAULT;
7677 		goto out;
7678 	}
7679 
7680 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7681 
7682 		mpi_reply = ioc->scsih_cmds.reply;
7683 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7684 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7685 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
7686 		else
7687 			log_info = 0;
7688 		ioc_status &= MPI2_IOCSTATUS_MASK;
7689 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7690 			dewtprintk(ioc,
7691 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7692 					    ioc_status, log_info));
7693 			rc = -EFAULT;
7694 		} else
7695 			dewtprintk(ioc,
7696 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7697 	}
7698 
7699  out:
7700 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7701 	mutex_unlock(&ioc->scsih_cmds.mutex);
7702 
7703 	if (issue_reset)
7704 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7705 	return rc;
7706 }
7707 
7708 /**
7709  * _scsih_reprobe_lun - reprobing lun
7710  * @sdev: scsi device struct
7711  * @no_uld_attach: sdev->no_uld_attach flag setting
 */
7714 static void
7715 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7716 {
7717 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7718 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7719 	    sdev->no_uld_attach ? "hiding" : "exposing");
7720 	WARN_ON(scsi_device_reprobe(sdev));
7721 }
7722 
7723 /**
7724  * _scsih_sas_volume_add - add new volume
7725  * @ioc: per adapter object
7726  * @element: IR config element data
7727  * Context: user.
7728  */
7729 static void
7730 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7731 	Mpi2EventIrConfigElement_t *element)
7732 {
7733 	struct _raid_device *raid_device;
7734 	unsigned long flags;
7735 	u64 wwid;
7736 	u16 handle = le16_to_cpu(element->VolDevHandle);
7737 	int rc;
7738 
7739 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7740 	if (!wwid) {
7741 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7742 			__FILE__, __LINE__, __func__);
7743 		return;
7744 	}
7745 
7746 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7747 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7748 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7749 
7750 	if (raid_device)
7751 		return;
7752 
7753 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7754 	if (!raid_device) {
7755 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7756 			__FILE__, __LINE__, __func__);
7757 		return;
7758 	}
7759 
7760 	raid_device->id = ioc->sas_id++;
7761 	raid_device->channel = RAID_CHANNEL;
7762 	raid_device->handle = handle;
7763 	raid_device->wwid = wwid;
7764 	_scsih_raid_device_add(ioc, raid_device);
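	/* If initial discovery is still in progress, only note the volume
	 * as a potential boot device; the SCSI mid-layer device gets added
	 * later, once discovery completes.  Otherwise register the volume
	 * with the mid-layer right away.
	 */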
7765 	if (!ioc->wait_for_discovery_to_complete) {
7766 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7767 		    raid_device->id, 0);
7768 		if (rc)
7769 			_scsih_raid_device_remove(ioc, raid_device);
7770 	} else {
7771 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
7772 		_scsih_determine_boot_device(ioc, raid_device, 1);
7773 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7774 	}
7775 }
7776 
7777 /**
7778  * _scsih_sas_volume_delete - delete volume
7779  * @ioc: per adapter object
7780  * @handle: volume device handle
7781  * Context: user.
7782  */
7783 static void
7784 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7785 {
7786 	struct _raid_device *raid_device;
7787 	unsigned long flags;
7788 	struct MPT3SAS_TARGET *sas_target_priv_data;
7789 	struct scsi_target *starget = NULL;
7790 
7791 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7792 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7793 	if (raid_device) {
7794 		if (raid_device->starget) {
7795 			starget = raid_device->starget;
7796 			sas_target_priv_data = starget->hostdata;
7797 			sas_target_priv_data->deleted = 1;
7798 		}
7799 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7800 			 raid_device->handle, (u64)raid_device->wwid);
7801 		list_del(&raid_device->list);
7802 		kfree(raid_device);
7803 	}
7804 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7805 	if (starget)
7806 		scsi_remove_target(&starget->dev);
7807 }
7808 
7809 /**
7810  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7811  * @ioc: per adapter object
7812  * @element: IR config element data
7813  * Context: user.
7814  */
7815 static void
7816 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7817 	Mpi2EventIrConfigElement_t *element)
7818 {
7819 	struct _sas_device *sas_device;
7820 	struct scsi_target *starget = NULL;
7821 	struct MPT3SAS_TARGET *sas_target_priv_data;
7822 	unsigned long flags;
7823 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7824 
7825 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7826 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7827 	if (sas_device) {
7828 		sas_device->volume_handle = 0;
7829 		sas_device->volume_wwid = 0;
7830 		clear_bit(handle, ioc->pd_handles);
7831 		if (sas_device->starget && sas_device->starget->hostdata) {
7832 			starget = sas_device->starget;
7833 			sas_target_priv_data = starget->hostdata;
7834 			sas_target_priv_data->flags &=
7835 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7836 		}
7837 	}
7838 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7839 	if (!sas_device)
7840 		return;
7841 
7842 	/* exposing raid component */
7843 	if (starget)
7844 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7845 
7846 	sas_device_put(sas_device);
7847 }
7848 
7849 /**
7850  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7851  * @ioc: per adapter object
7852  * @element: IR config element data
7853  * Context: user.
7854  */
7855 static void
7856 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7857 	Mpi2EventIrConfigElement_t *element)
7858 {
7859 	struct _sas_device *sas_device;
7860 	struct scsi_target *starget = NULL;
7861 	struct MPT3SAS_TARGET *sas_target_priv_data;
7862 	unsigned long flags;
7863 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7864 	u16 volume_handle = 0;
7865 	u64 volume_wwid = 0;
7866 
7867 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7868 	if (volume_handle)
7869 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7870 		    &volume_wwid);
7871 
7872 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7873 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7874 	if (sas_device) {
7875 		set_bit(handle, ioc->pd_handles);
7876 		if (sas_device->starget && sas_device->starget->hostdata) {
7877 			starget = sas_device->starget;
7878 			sas_target_priv_data = starget->hostdata;
7879 			sas_target_priv_data->flags |=
7880 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
7881 			sas_device->volume_handle = volume_handle;
7882 			sas_device->volume_wwid = volume_wwid;
7883 		}
7884 	}
7885 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7886 	if (!sas_device)
7887 		return;
7888 
7889 	/* hiding raid component */
7890 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7891 
7892 	if (starget)
7893 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7894 
7895 	sas_device_put(sas_device);
7896 }
7897 
7898 /**
7899  * _scsih_sas_pd_delete - delete pd component
7900  * @ioc: per adapter object
7901  * @element: IR config element data
7902  * Context: user.
7903  */
7904 static void
7905 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7906 	Mpi2EventIrConfigElement_t *element)
7907 {
7908 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7909 
7910 	_scsih_device_remove_by_handle(ioc, handle);
7911 }
7912 
7913 /**
 * _scsih_sas_pd_add - add pd component
7915  * @ioc: per adapter object
7916  * @element: IR config element data
7917  * Context: user.
7918  */
7919 static void
7920 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7921 	Mpi2EventIrConfigElement_t *element)
7922 {
7923 	struct _sas_device *sas_device;
7924 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7925 	Mpi2ConfigReply_t mpi_reply;
7926 	Mpi2SasDevicePage0_t sas_device_pg0;
7927 	u32 ioc_status;
7928 	u64 sas_address;
7929 	u16 parent_handle;
7930 
7931 	set_bit(handle, ioc->pd_handles);
7932 
7933 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7934 	if (sas_device) {
7935 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7936 		sas_device_put(sas_device);
7937 		return;
7938 	}
7939 
7940 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7941 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7942 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7943 			__FILE__, __LINE__, __func__);
7944 		return;
7945 	}
7946 
7947 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7948 	    MPI2_IOCSTATUS_MASK;
7949 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7950 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7951 			__FILE__, __LINE__, __func__);
7952 		return;
7953 	}
7954 
7955 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7956 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7957 		mpt3sas_transport_update_links(ioc, sas_address, handle,
7958 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7959 
7960 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7961 	_scsih_add_device(ioc, handle, 0, 1);
7962 }
7963 
7964 /**
7965  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7966  * @ioc: per adapter object
7967  * @event_data: event data payload
7968  * Context: user.
7969  */
7970 static void
7971 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7972 	Mpi2EventDataIrConfigChangeList_t *event_data)
7973 {
7974 	Mpi2EventIrConfigElement_t *element;
7975 	u8 element_type;
7976 	int i;
7977 	char *reason_str = NULL, *element_str = NULL;
7978 
7979 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7980 
7981 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
7982 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
7983 		 "foreign" : "native",
7984 		 event_data->NumElements);
7985 	for (i = 0; i < event_data->NumElements; i++, element++) {
7986 		switch (element->ReasonCode) {
7987 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7988 			reason_str = "add";
7989 			break;
7990 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7991 			reason_str = "remove";
7992 			break;
7993 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7994 			reason_str = "no change";
7995 			break;
7996 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7997 			reason_str = "hide";
7998 			break;
7999 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8000 			reason_str = "unhide";
8001 			break;
8002 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8003 			reason_str = "volume_created";
8004 			break;
8005 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8006 			reason_str = "volume_deleted";
8007 			break;
8008 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8009 			reason_str = "pd_created";
8010 			break;
8011 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8012 			reason_str = "pd_deleted";
8013 			break;
8014 		default:
8015 			reason_str = "unknown reason";
8016 			break;
8017 		}
8018 		element_type = le16_to_cpu(element->ElementFlags) &
8019 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8020 		switch (element_type) {
8021 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8022 			element_str = "volume";
8023 			break;
8024 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8025 			element_str = "phys disk";
8026 			break;
8027 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8028 			element_str = "hot spare";
8029 			break;
8030 		default:
8031 			element_str = "unknown element";
8032 			break;
8033 		}
8034 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
8035 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8036 		    reason_str, le16_to_cpu(element->VolDevHandle),
8037 		    le16_to_cpu(element->PhysDiskDevHandle),
8038 		    element->PhysDiskNum);
8039 	}
8040 }
8041 
8042 /**
8043  * _scsih_sas_ir_config_change_event - handle ir configuration change events
8044  * @ioc: per adapter object
8045  * @fw_event: The fw_event_work object
8046  * Context: user.
8047  */
8048 static void
8049 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8050 	struct fw_event_work *fw_event)
8051 {
8052 	Mpi2EventIrConfigElement_t *element;
8053 	int i;
8054 	u8 foreign_config;
8055 	Mpi2EventDataIrConfigChangeList_t *event_data =
8056 		(Mpi2EventDataIrConfigChangeList_t *)
8057 		fw_event->event_data;
8058 
8059 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8060 	     (!ioc->hide_ir_msg))
8061 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
8062 
8063 	foreign_config = (le32_to_cpu(event_data->Flags) &
8064 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8065 
8066 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
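	/* While host reset is in progress (and the controller is not an
	 * MPI 2.0 one), only re-enable the fast path for hidden physical
	 * disks and skip the rest of the event.
	 */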
8067 	if (ioc->shost_recovery &&
8068 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8069 		for (i = 0; i < event_data->NumElements; i++, element++) {
8070 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8071 				_scsih_ir_fastpath(ioc,
8072 					le16_to_cpu(element->PhysDiskDevHandle),
8073 					element->PhysDiskNum);
8074 		}
8075 		return;
8076 	}
8077 
8078 	for (i = 0; i < event_data->NumElements; i++, element++) {
8079 
8080 		switch (element->ReasonCode) {
8081 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8082 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8083 			if (!foreign_config)
8084 				_scsih_sas_volume_add(ioc, element);
8085 			break;
8086 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8087 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8088 			if (!foreign_config)
8089 				_scsih_sas_volume_delete(ioc,
8090 				    le16_to_cpu(element->VolDevHandle));
8091 			break;
8092 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8093 			if (!ioc->is_warpdrive)
8094 				_scsih_sas_pd_hide(ioc, element);
8095 			break;
8096 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8097 			if (!ioc->is_warpdrive)
8098 				_scsih_sas_pd_expose(ioc, element);
8099 			break;
8100 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8101 			if (!ioc->is_warpdrive)
8102 				_scsih_sas_pd_add(ioc, element);
8103 			break;
8104 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8105 			if (!ioc->is_warpdrive)
8106 				_scsih_sas_pd_delete(ioc, element);
8107 			break;
8108 		}
8109 	}
8110 }
8111 
8112 /**
8113  * _scsih_sas_ir_volume_event - IR volume event
8114  * @ioc: per adapter object
8115  * @fw_event: The fw_event_work object
8116  * Context: user.
8117  */
8118 static void
8119 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8120 	struct fw_event_work *fw_event)
8121 {
8122 	u64 wwid;
8123 	unsigned long flags;
8124 	struct _raid_device *raid_device;
8125 	u16 handle;
8126 	u32 state;
8127 	int rc;
8128 	Mpi2EventDataIrVolume_t *event_data =
8129 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8130 
8131 	if (ioc->shost_recovery)
8132 		return;
8133 
8134 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8135 		return;
8136 
8137 	handle = le16_to_cpu(event_data->VolDevHandle);
8138 	state = le32_to_cpu(event_data->NewValue);
8139 	if (!ioc->hide_ir_msg)
8140 		dewtprintk(ioc,
8141 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8142 				    __func__, handle,
8143 				    le32_to_cpu(event_data->PreviousValue),
8144 				    state));
8145 	switch (state) {
8146 	case MPI2_RAID_VOL_STATE_MISSING:
8147 	case MPI2_RAID_VOL_STATE_FAILED:
8148 		_scsih_sas_volume_delete(ioc, handle);
8149 		break;
8150 
8151 	case MPI2_RAID_VOL_STATE_ONLINE:
8152 	case MPI2_RAID_VOL_STATE_DEGRADED:
8153 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8154 
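		/* An operational volume that is not yet in the driver's raid
		 * device list is treated as newly discovered and registered
		 * with the SCSI mid-layer below.
		 */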
8155 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8156 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8157 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8158 
8159 		if (raid_device)
8160 			break;
8161 
8162 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8163 		if (!wwid) {
8164 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8165 				__FILE__, __LINE__, __func__);
8166 			break;
8167 		}
8168 
8169 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8170 		if (!raid_device) {
8171 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8172 				__FILE__, __LINE__, __func__);
8173 			break;
8174 		}
8175 
8176 		raid_device->id = ioc->sas_id++;
8177 		raid_device->channel = RAID_CHANNEL;
8178 		raid_device->handle = handle;
8179 		raid_device->wwid = wwid;
8180 		_scsih_raid_device_add(ioc, raid_device);
8181 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8182 		    raid_device->id, 0);
8183 		if (rc)
8184 			_scsih_raid_device_remove(ioc, raid_device);
8185 		break;
8186 
8187 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8188 	default:
8189 		break;
8190 	}
8191 }
8192 
8193 /**
8194  * _scsih_sas_ir_physical_disk_event - PD event
8195  * @ioc: per adapter object
8196  * @fw_event: The fw_event_work object
8197  * Context: user.
8198  */
8199 static void
8200 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8201 	struct fw_event_work *fw_event)
8202 {
8203 	u16 handle, parent_handle;
8204 	u32 state;
8205 	struct _sas_device *sas_device;
8206 	Mpi2ConfigReply_t mpi_reply;
8207 	Mpi2SasDevicePage0_t sas_device_pg0;
8208 	u32 ioc_status;
8209 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8210 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8211 	u64 sas_address;
8212 
8213 	if (ioc->shost_recovery)
8214 		return;
8215 
8216 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8217 		return;
8218 
8219 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8220 	state = le32_to_cpu(event_data->NewValue);
8221 
8222 	if (!ioc->hide_ir_msg)
8223 		dewtprintk(ioc,
8224 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8225 				    __func__, handle,
8226 				    le32_to_cpu(event_data->PreviousValue),
8227 				    state));
8228 
8229 	switch (state) {
8230 	case MPI2_RAID_PD_STATE_ONLINE:
8231 	case MPI2_RAID_PD_STATE_DEGRADED:
8232 	case MPI2_RAID_PD_STATE_REBUILDING:
8233 	case MPI2_RAID_PD_STATE_OPTIMAL:
8234 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8235 
8236 		if (!ioc->is_warpdrive)
8237 			set_bit(handle, ioc->pd_handles);
8238 
8239 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8240 		if (sas_device) {
8241 			sas_device_put(sas_device);
8242 			return;
8243 		}
8244 
8245 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8246 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8247 		    handle))) {
8248 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8249 				__FILE__, __LINE__, __func__);
8250 			return;
8251 		}
8252 
8253 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8254 		    MPI2_IOCSTATUS_MASK;
8255 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8256 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8257 				__FILE__, __LINE__, __func__);
8258 			return;
8259 		}
8260 
8261 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8262 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8263 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8264 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8265 
8266 		_scsih_add_device(ioc, handle, 0, 1);
8267 
8268 		break;
8269 
8270 	case MPI2_RAID_PD_STATE_OFFLINE:
8271 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8272 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8273 	default:
8274 		break;
8275 	}
8276 }
8277 
8278 /**
8279  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8280  * @ioc: per adapter object
8281  * @event_data: event data payload
8282  * Context: user.
8283  */
8284 static void
8285 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8286 	Mpi2EventDataIrOperationStatus_t *event_data)
8287 {
8288 	char *reason_str = NULL;
8289 
8290 	switch (event_data->RAIDOperation) {
8291 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8292 		reason_str = "resync";
8293 		break;
8294 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8295 		reason_str = "online capacity expansion";
8296 		break;
8297 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8298 		reason_str = "consistency check";
8299 		break;
8300 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8301 		reason_str = "background init";
8302 		break;
8303 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8304 		reason_str = "make data consistent";
8305 		break;
8306 	}
8307 
8308 	if (!reason_str)
8309 		return;
8310 
8311 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8312 		 reason_str,
8313 		 le16_to_cpu(event_data->VolDevHandle),
8314 		 event_data->PercentComplete);
8315 }
8316 
8317 /**
8318  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8319  * @ioc: per adapter object
8320  * @fw_event: The fw_event_work object
8321  * Context: user.
8322  */
8323 static void
8324 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8325 	struct fw_event_work *fw_event)
8326 {
8327 	Mpi2EventDataIrOperationStatus_t *event_data =
8328 		(Mpi2EventDataIrOperationStatus_t *)
8329 		fw_event->event_data;
	struct _raid_device *raid_device;
8331 	unsigned long flags;
8332 	u16 handle;
8333 
8334 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8335 	    (!ioc->hide_ir_msg))
8336 		_scsih_sas_ir_operation_status_event_debug(ioc,
8337 		     event_data);
8338 
8339 	/* code added for raid transport support */
8340 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8341 
8342 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8343 		handle = le16_to_cpu(event_data->VolDevHandle);
8344 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8345 		if (raid_device)
8346 			raid_device->percent_complete =
8347 			    event_data->PercentComplete;
8348 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8349 	}
8350 }
8351 
8352 /**
8353  * _scsih_prep_device_scan - initialize parameters prior to device scan
8354  * @ioc: per adapter object
8355  *
8356  * Set the deleted flag prior to device scan.  If the device is found during
8357  * the scan, then we clear the deleted flag.
8358  */
8359 static void
8360 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8361 {
8362 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8363 	struct scsi_device *sdev;
8364 
8365 	shost_for_each_device(sdev, ioc->shost) {
8366 		sas_device_priv_data = sdev->hostdata;
8367 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8368 			sas_device_priv_data->sas_target->deleted = 1;
8369 	}
8370 }
8371 
8372 /**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
8374  * @ioc: per adapter object
8375  * @sas_device_pg0: SAS Device page 0
8376  *
8377  * After host reset, find out whether devices are still responding.
8378  * Used in _scsih_remove_unresponsive_sas_devices.
8379  */
8380 static void
8381 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8382 Mpi2SasDevicePage0_t *sas_device_pg0)
8383 {
8384 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8385 	struct scsi_target *starget;
8386 	struct _sas_device *sas_device = NULL;
8387 	struct _enclosure_node *enclosure_dev = NULL;
8388 	unsigned long flags;
8389 
8390 	if (sas_device_pg0->EnclosureHandle) {
8391 		enclosure_dev =
8392 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8393 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8394 		if (enclosure_dev == NULL)
8395 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8397 	}
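	/*
	 * Match on SAS address and slot rather than on handle: device
	 * handles can change across a host reset, so the cached handle
	 * is refreshed below if it no longer matches.
	 */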
8398 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8399 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8400 		if ((sas_device->sas_address == le64_to_cpu(
8401 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8402 		    le16_to_cpu(sas_device_pg0->Slot))) {
8403 			sas_device->responding = 1;
8404 			starget = sas_device->starget;
8405 			if (starget && starget->hostdata) {
8406 				sas_target_priv_data = starget->hostdata;
8407 				sas_target_priv_data->tm_busy = 0;
8408 				sas_target_priv_data->deleted = 0;
8409 			} else
8410 				sas_target_priv_data = NULL;
8411 			if (starget) {
8412 				starget_printk(KERN_INFO, starget,
8413 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8414 				    le16_to_cpu(sas_device_pg0->DevHandle),
8415 				    (unsigned long long)
8416 				    sas_device->sas_address);
8417 
8418 				if (sas_device->enclosure_handle != 0)
8419 					starget_printk(KERN_INFO, starget,
8420 					 "enclosure logical id(0x%016llx),"
8421 					 " slot(%d)\n",
8422 					 (unsigned long long)
8423 					 sas_device->enclosure_logical_id,
8424 					 sas_device->slot);
8425 			}
8426 			if (le16_to_cpu(sas_device_pg0->Flags) &
8427 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8428 				sas_device->enclosure_level =
8429 				   sas_device_pg0->EnclosureLevel;
8430 				memcpy(&sas_device->connector_name[0],
8431 					&sas_device_pg0->ConnectorName[0], 4);
8432 			} else {
8433 				sas_device->enclosure_level = 0;
8434 				sas_device->connector_name[0] = '\0';
8435 			}
8436 
8437 			sas_device->enclosure_handle =
8438 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8439 			sas_device->is_chassis_slot_valid = 0;
8440 			if (enclosure_dev) {
8441 				sas_device->enclosure_logical_id = le64_to_cpu(
8442 					enclosure_dev->pg0.EnclosureLogicalID);
8443 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8444 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8445 					sas_device->is_chassis_slot_valid = 1;
8446 					sas_device->chassis_slot =
8447 						enclosure_dev->pg0.ChassisSlot;
8448 				}
8449 			}
8450 
8451 			if (sas_device->handle == le16_to_cpu(
8452 			    sas_device_pg0->DevHandle))
8453 				goto out;
8454 			pr_info("\thandle changed from(0x%04x)!!!\n",
8455 			    sas_device->handle);
8456 			sas_device->handle = le16_to_cpu(
8457 			    sas_device_pg0->DevHandle);
8458 			if (sas_target_priv_data)
8459 				sas_target_priv_data->handle =
8460 				    le16_to_cpu(sas_device_pg0->DevHandle);
8461 			goto out;
8462 		}
8463 	}
8464  out:
8465 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8466 }
8467 
8468 /**
 * _scsih_create_enclosure_list_after_reset - free the existing enclosure
 *	list and rebuild it by scanning all Enclosure Page(0)s
8471  * @ioc: per adapter object
8472  */
8473 static void
8474 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8475 {
8476 	struct _enclosure_node *enclosure_dev;
8477 	Mpi2ConfigReply_t mpi_reply;
8478 	u16 enclosure_handle;
8479 	int rc;
8480 
8481 	/* Free existing enclosure list */
8482 	mpt3sas_free_enclosure_list(ioc);
8483 
	/* Reconstruct the enclosure list after reset */
8485 	enclosure_handle = 0xFFFF;
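	/*
	 * Walk the firmware's enclosure table: starting from 0xFFFF, each
	 * successful read of Enclosure Page 0 returns the next enclosure,
	 * whose handle seeds the following lookup.  The loop ends when the
	 * IOC reports a non-SUCCESS status (no more handles).
	 */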
8486 	do {
8487 		enclosure_dev =
8488 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8489 		if (!enclosure_dev) {
8490 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8491 				__FILE__, __LINE__, __func__);
8492 			return;
8493 		}
8494 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8495 				&enclosure_dev->pg0,
8496 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8497 				enclosure_handle);
8498 
8499 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8500 						MPI2_IOCSTATUS_MASK)) {
8501 			kfree(enclosure_dev);
8502 			return;
8503 		}
8504 		list_add_tail(&enclosure_dev->list,
8505 						&ioc->enclosure_list);
8506 		enclosure_handle =
8507 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8508 	} while (1);
8509 }
8510 
8511 /**
 * _scsih_search_responding_sas_devices - search for responding SAS end devices
8513  * @ioc: per adapter object
8514  *
8515  * After host reset, find out whether devices are still responding.
 * If not, remove them.
8517  */
8518 static void
8519 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8520 {
8521 	Mpi2SasDevicePage0_t sas_device_pg0;
8522 	Mpi2ConfigReply_t mpi_reply;
8523 	u16 ioc_status;
8524 	u16 handle;
8525 	u32 device_info;
8526 
8527 	ioc_info(ioc, "search for end-devices: start\n");
8528 
8529 	if (list_empty(&ioc->sas_device_list))
8530 		goto out;
8531 
8532 	handle = 0xFFFF;
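	/*
	 * Mark every end device the firmware still reports as responding;
	 * _scsih_remove_unresponding_devices() later sweeps anything that
	 * was not marked.
	 */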
8533 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8534 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8535 	    handle))) {
8536 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8537 		    MPI2_IOCSTATUS_MASK;
8538 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8539 			break;
8540 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8541 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8542 		if (!(_scsih_is_end_device(device_info)))
8543 			continue;
8544 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8545 	}
8546 
8547  out:
8548 	ioc_info(ioc, "search for end-devices: complete\n");
8549 }
8550 
8551 /**
8552  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8553  * @ioc: per adapter object
8554  * @pcie_device_pg0: PCIe Device page 0
8555  *
8556  * After host reset, find out whether devices are still responding.
8557  * Used in _scsih_remove_unresponding_devices.
8558  */
8559 static void
8560 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8561 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8562 {
8563 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8564 	struct scsi_target *starget;
8565 	struct _pcie_device *pcie_device;
8566 	unsigned long flags;
8567 
8568 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8569 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8570 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8571 		    && (pcie_device->slot == le16_to_cpu(
8572 		    pcie_device_pg0->Slot))) {
8573 			pcie_device->access_status =
8574 					pcie_device_pg0->AccessStatus;
8575 			pcie_device->responding = 1;
8576 			starget = pcie_device->starget;
8577 			if (starget && starget->hostdata) {
8578 				sas_target_priv_data = starget->hostdata;
8579 				sas_target_priv_data->tm_busy = 0;
8580 				sas_target_priv_data->deleted = 0;
8581 			} else
8582 				sas_target_priv_data = NULL;
8583 			if (starget) {
8584 				starget_printk(KERN_INFO, starget,
8585 				    "handle(0x%04x), wwid(0x%016llx) ",
8586 				    pcie_device->handle,
8587 				    (unsigned long long)pcie_device->wwid);
8588 				if (pcie_device->enclosure_handle != 0)
8589 					starget_printk(KERN_INFO, starget,
8590 					    "enclosure logical id(0x%016llx), "
8591 					    "slot(%d)\n",
8592 					    (unsigned long long)
8593 					    pcie_device->enclosure_logical_id,
8594 					    pcie_device->slot);
8595 			}
8596 
8597 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8598 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8599 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8600 				pcie_device->enclosure_level =
8601 				    pcie_device_pg0->EnclosureLevel;
8602 				memcpy(&pcie_device->connector_name[0],
8603 				    &pcie_device_pg0->ConnectorName[0], 4);
8604 			} else {
8605 				pcie_device->enclosure_level = 0;
8606 				pcie_device->connector_name[0] = '\0';
8607 			}
8608 
8609 			if (pcie_device->handle == le16_to_cpu(
8610 			    pcie_device_pg0->DevHandle))
8611 				goto out;
8612 			pr_info("\thandle changed from(0x%04x)!!!\n",
8613 			    pcie_device->handle);
8614 			pcie_device->handle = le16_to_cpu(
8615 			    pcie_device_pg0->DevHandle);
8616 			if (sas_target_priv_data)
8617 				sas_target_priv_data->handle =
8618 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8619 			goto out;
8620 		}
8621 	}
8622 
8623  out:
8624 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8625 }
8626 
8627 /**
 * _scsih_search_responding_pcie_devices - search for responding PCIe end devices
8629  * @ioc: per adapter object
8630  *
8631  * After host reset, find out whether devices are still responding.
 * If not, remove them.
8633  */
8634 static void
8635 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8636 {
8637 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8638 	Mpi2ConfigReply_t mpi_reply;
8639 	u16 ioc_status;
8640 	u16 handle;
8641 	u32 device_info;
8642 
	ioc_info(ioc, "search for PCIe end-devices: start\n");
8644 
8645 	if (list_empty(&ioc->pcie_device_list))
8646 		goto out;
8647 
8648 	handle = 0xFFFF;
8649 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8650 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8651 		handle))) {
8652 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8653 		    MPI2_IOCSTATUS_MASK;
8654 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8655 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8656 				 __func__, ioc_status,
8657 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8658 			break;
8659 		}
8660 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8661 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8662 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8663 			continue;
8664 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8665 	}
8666 out:
8667 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8668 }
8669 
8670 /**
8671  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8672  * @ioc: per adapter object
8673  * @wwid: world wide identifier for raid volume
8674  * @handle: device handle
8675  *
8676  * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8678  */
8679 static void
8680 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8681 	u16 handle)
8682 {
8683 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8684 	struct scsi_target *starget;
8685 	struct _raid_device *raid_device;
8686 	unsigned long flags;
8687 
8688 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8689 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8690 		if (raid_device->wwid == wwid && raid_device->starget) {
8691 			starget = raid_device->starget;
8692 			if (starget && starget->hostdata) {
8693 				sas_target_priv_data = starget->hostdata;
8694 				sas_target_priv_data->deleted = 0;
8695 			} else
8696 				sas_target_priv_data = NULL;
8697 			raid_device->responding = 1;
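			/*
			 * Drop the lock around the console output and the
			 * WARPDRIVE property rebuild, then re-take it before
			 * refreshing the cached handle.
			 */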
8698 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8699 			starget_printk(KERN_INFO, raid_device->starget,
8700 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
8701 			    (unsigned long long)raid_device->wwid);
8702 
8703 			/*
8704 			 * WARPDRIVE: The handles of the PDs might have changed
8705 			 * across the host reset so re-initialize the
8706 			 * required data for Direct IO
8707 			 */
8708 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
8709 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
8710 			if (raid_device->handle == handle) {
8711 				spin_unlock_irqrestore(&ioc->raid_device_lock,
8712 				    flags);
8713 				return;
8714 			}
8715 			pr_info("\thandle changed from(0x%04x)!!!\n",
8716 			    raid_device->handle);
8717 			raid_device->handle = handle;
8718 			if (sas_target_priv_data)
8719 				sas_target_priv_data->handle = handle;
8720 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8721 			return;
8722 		}
8723 	}
8724 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8725 }
8726 
8727 /**
 * _scsih_search_responding_raid_devices - search for responding raid volumes
8729  * @ioc: per adapter object
8730  *
8731  * After host reset, find out whether devices are still responding.
 * If not, remove them.
8733  */
8734 static void
8735 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8736 {
8737 	Mpi2RaidVolPage1_t volume_pg1;
8738 	Mpi2RaidVolPage0_t volume_pg0;
8739 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8740 	Mpi2ConfigReply_t mpi_reply;
8741 	u16 ioc_status;
8742 	u16 handle;
8743 	u8 phys_disk_num;
8744 
8745 	if (!ioc->ir_firmware)
8746 		return;
8747 
8748 	ioc_info(ioc, "search for raid volumes: start\n");
8749 
8750 	if (list_empty(&ioc->raid_device_list))
8751 		goto out;
8752 
8753 	handle = 0xFFFF;
8754 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8755 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8756 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8757 		    MPI2_IOCSTATUS_MASK;
8758 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8759 			break;
8760 		handle = le16_to_cpu(volume_pg1.DevHandle);
8761 
8762 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8763 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8764 		     sizeof(Mpi2RaidVolPage0_t)))
8765 			continue;
8766 
8767 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8768 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8769 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8770 			_scsih_mark_responding_raid_device(ioc,
8771 			    le64_to_cpu(volume_pg1.WWID), handle);
8772 	}
8773 
8774 	/* refresh the pd_handles */
8775 	if (!ioc->is_warpdrive) {
8776 		phys_disk_num = 0xFF;
8777 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8778 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8779 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8780 		    phys_disk_num))) {
8781 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8782 			    MPI2_IOCSTATUS_MASK;
8783 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8784 				break;
8785 			phys_disk_num = pd_pg0.PhysDiskNum;
8786 			handle = le16_to_cpu(pd_pg0.DevHandle);
8787 			set_bit(handle, ioc->pd_handles);
8788 		}
8789 	}
8790  out:
8791 	ioc_info(ioc, "search for responding raid volumes: complete\n");
8792 }
8793 
8794 /**
 * _scsih_mark_responding_expander - mark an expander as responding
8796  * @ioc: per adapter object
 * @expander_pg0: SAS Expander Config Page0
8798  *
8799  * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8801  */
8802 static void
8803 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8804 	Mpi2ExpanderPage0_t *expander_pg0)
8805 {
8806 	struct _sas_node *sas_expander = NULL;
8807 	unsigned long flags;
8808 	int i;
8809 	struct _enclosure_node *enclosure_dev = NULL;
8810 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8811 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8812 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8813 
8814 	if (enclosure_handle)
8815 		enclosure_dev =
8816 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8817 							enclosure_handle);
8818 
8819 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8820 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8821 		if (sas_expander->sas_address != sas_address)
8822 			continue;
8823 		sas_expander->responding = 1;
8824 
8825 		if (enclosure_dev) {
8826 			sas_expander->enclosure_logical_id =
8827 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8828 			sas_expander->enclosure_handle =
8829 			    le16_to_cpu(expander_pg0->EnclosureHandle);
8830 		}
8831 
8832 		if (sas_expander->handle == handle)
8833 			goto out;
		pr_info("\texpander(0x%016llx): handle changed from(0x%04x) to (0x%04x)!!!\n",
8836 		    (unsigned long long)sas_expander->sas_address,
8837 		    sas_expander->handle, handle);
8838 		sas_expander->handle = handle;
8839 		for (i = 0 ; i < sas_expander->num_phys ; i++)
8840 			sas_expander->phy[i].handle = handle;
8841 		goto out;
8842 	}
8843  out:
8844 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8845 }
8846 
8847 /**
 * _scsih_search_responding_expanders - search for responding expanders
8849  * @ioc: per adapter object
8850  *
8851  * After host reset, find out whether devices are still responding.
 * If not, remove them.
8853  */
8854 static void
8855 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8856 {
8857 	Mpi2ExpanderPage0_t expander_pg0;
8858 	Mpi2ConfigReply_t mpi_reply;
8859 	u16 ioc_status;
8860 	u64 sas_address;
8861 	u16 handle;
8862 
8863 	ioc_info(ioc, "search for expanders: start\n");
8864 
8865 	if (list_empty(&ioc->sas_expander_list))
8866 		goto out;
8867 
8868 	handle = 0xFFFF;
8869 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8870 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8871 
8872 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8873 		    MPI2_IOCSTATUS_MASK;
8874 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8875 			break;
8876 
8877 		handle = le16_to_cpu(expander_pg0.DevHandle);
8878 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
8879 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8880 			handle,
8881 		    (unsigned long long)sas_address);
8882 		_scsih_mark_responding_expander(ioc, &expander_pg0);
8883 	}
8884 
8885  out:
8886 	ioc_info(ioc, "search for expanders: complete\n");
8887 }
8888 
8889 /**
8890  * _scsih_remove_unresponding_devices - removing unresponding devices
8891  * @ioc: per adapter object
8892  */
8893 static void
8894 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8895 {
8896 	struct _sas_device *sas_device, *sas_device_next;
8897 	struct _sas_node *sas_expander, *sas_expander_next;
8898 	struct _raid_device *raid_device, *raid_device_next;
8899 	struct _pcie_device *pcie_device, *pcie_device_next;
8900 	struct list_head tmp_list;
8901 	unsigned long flags;
8902 	LIST_HEAD(head);
8903 
8904 	ioc_info(ioc, "removing unresponding devices: start\n");
8905 
8906 	/* removing unresponding end devices */
8907 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
8908 	/*
8909 	 * Iterate, pulling off devices marked as non-responding. We become the
8910 	 * owner for the reference the list had on any object we prune.
8911 	 */
8912 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8913 	list_for_each_entry_safe(sas_device, sas_device_next,
8914 	    &ioc->sas_device_list, list) {
8915 		if (!sas_device->responding)
8916 			list_move_tail(&sas_device->list, &head);
8917 		else
8918 			sas_device->responding = 0;
8919 	}
8920 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8921 
8922 	/*
8923 	 * Now, uninitialize and remove the unresponding devices we pruned.
8924 	 */
8925 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8926 		_scsih_remove_device(ioc, sas_device);
8927 		list_del_init(&sas_device->list);
8928 		sas_device_put(sas_device);
8929 	}
8930 
	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
8932 	INIT_LIST_HEAD(&head);
8933 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8934 	list_for_each_entry_safe(pcie_device, pcie_device_next,
8935 	    &ioc->pcie_device_list, list) {
8936 		if (!pcie_device->responding)
8937 			list_move_tail(&pcie_device->list, &head);
8938 		else
8939 			pcie_device->responding = 0;
8940 	}
8941 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8942 
8943 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8944 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8945 		list_del_init(&pcie_device->list);
8946 		pcie_device_put(pcie_device);
8947 	}
8948 
8949 	/* removing unresponding volumes */
8950 	if (ioc->ir_firmware) {
8951 		ioc_info(ioc, "removing unresponding devices: volumes\n");
8952 		list_for_each_entry_safe(raid_device, raid_device_next,
8953 		    &ioc->raid_device_list, list) {
8954 			if (!raid_device->responding)
8955 				_scsih_sas_volume_delete(ioc,
8956 				    raid_device->handle);
8957 			else
8958 				raid_device->responding = 0;
8959 		}
8960 	}
8961 
8962 	/* removing unresponding expanders */
8963 	ioc_info(ioc, "removing unresponding devices: expanders\n");
8964 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8965 	INIT_LIST_HEAD(&tmp_list);
8966 	list_for_each_entry_safe(sas_expander, sas_expander_next,
8967 	    &ioc->sas_expander_list, list) {
8968 		if (!sas_expander->responding)
8969 			list_move_tail(&sas_expander->list, &tmp_list);
8970 		else
8971 			sas_expander->responding = 0;
8972 	}
8973 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8974 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8975 	    list) {
8976 		_scsih_expander_node_remove(ioc, sas_expander);
8977 	}
8978 
8979 	ioc_info(ioc, "removing unresponding devices: complete\n");
8980 
8981 	/* unblock devices */
8982 	_scsih_ublock_io_all_device(ioc);
8983 }
8984 
8985 static void
8986 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8987 	struct _sas_node *sas_expander, u16 handle)
8988 {
8989 	Mpi2ExpanderPage1_t expander_pg1;
8990 	Mpi2ConfigReply_t mpi_reply;
8991 	int i;
8992 
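	/*
	 * Re-read Expander Page 1 for every phy and report the attached
	 * device handle and the link rate (upper nibble of
	 * NegotiatedLinkRate) back to the SAS transport layer.
	 */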
8993 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
8994 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8995 		    &expander_pg1, i, handle))) {
8996 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8997 				__FILE__, __LINE__, __func__);
8998 			return;
8999 		}
9000 
9001 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9002 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9003 		    expander_pg1.NegotiatedLinkRate >> 4);
9004 	}
9005 }
9006 
9007 /**
9008  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9009  * @ioc: per adapter object
9010  */
9011 static void
9012 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9013 {
9014 	Mpi2ExpanderPage0_t expander_pg0;
9015 	Mpi2SasDevicePage0_t sas_device_pg0;
9016 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9017 	Mpi2RaidVolPage1_t volume_pg1;
9018 	Mpi2RaidVolPage0_t volume_pg0;
9019 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9020 	Mpi2EventIrConfigElement_t element;
9021 	Mpi2ConfigReply_t mpi_reply;
9022 	u8 phys_disk_num;
9023 	u16 ioc_status;
9024 	u16 handle, parent_handle;
9025 	u64 sas_address;
9026 	struct _sas_device *sas_device;
9027 	struct _pcie_device *pcie_device;
9028 	struct _sas_node *expander_device;
9029 	static struct _raid_device *raid_device;
9030 	u8 retry_count;
9031 	unsigned long flags;
9032 
9033 	ioc_info(ioc, "scan devices: start\n");
9034 
9035 	_scsih_sas_host_refresh(ioc);
9036 
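	/*
	 * Rebuild the topology in dependency order: expanders first, then
	 * (for IR firmware) hidden phys disks and volumes, then SAS end
	 * devices, and finally PCIe/NVMe end devices.
	 */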
9037 	ioc_info(ioc, "\tscan devices: expanders start\n");
9038 
9039 	/* expanders */
9040 	handle = 0xFFFF;
9041 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9042 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9043 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9044 		    MPI2_IOCSTATUS_MASK;
9045 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9046 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9047 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9048 			break;
9049 		}
9050 		handle = le16_to_cpu(expander_pg0.DevHandle);
9051 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
9052 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9053 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
9054 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9055 		if (expander_device)
9056 			_scsih_refresh_expander_links(ioc, expander_device,
9057 			    handle);
9058 		else {
9059 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9060 				 handle,
9061 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9062 			_scsih_expander_add(ioc, handle);
9063 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9064 				 handle,
9065 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9066 		}
9067 	}
9068 
9069 	ioc_info(ioc, "\tscan devices: expanders complete\n");
9070 
9071 	if (!ioc->ir_firmware)
9072 		goto skip_to_sas;
9073 
9074 	ioc_info(ioc, "\tscan devices: phys disk start\n");
9075 
9076 	/* phys disk */
9077 	phys_disk_num = 0xFF;
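	/*
	 * Walk RAID physical disks by PhysDiskNum (0xFF yields the first
	 * entry).  Each member not already known is re-linked in the
	 * transport layer at a default 1.5 Gb/s rate and re-added as a
	 * RAID component (is_pd set).
	 */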
9078 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9079 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9080 	    phys_disk_num))) {
9081 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9082 		    MPI2_IOCSTATUS_MASK;
9083 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9084 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9085 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9086 			break;
9087 		}
9088 		phys_disk_num = pd_pg0.PhysDiskNum;
9089 		handle = le16_to_cpu(pd_pg0.DevHandle);
9090 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9091 		if (sas_device) {
9092 			sas_device_put(sas_device);
9093 			continue;
9094 		}
9095 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9096 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9097 		    handle) != 0)
9098 			continue;
9099 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9100 		    MPI2_IOCSTATUS_MASK;
9101 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9102 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9103 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9104 			break;
9105 		}
9106 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9107 		if (!_scsih_get_sas_address(ioc, parent_handle,
9108 		    &sas_address)) {
9109 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9110 				 handle,
9111 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9112 			mpt3sas_transport_update_links(ioc, sas_address,
9113 			    handle, sas_device_pg0.PhyNum,
9114 			    MPI2_SAS_NEG_LINK_RATE_1_5);
9115 			set_bit(handle, ioc->pd_handles);
9116 			retry_count = 0;
9117 			/* This will retry adding the end device.
9118 			 * _scsih_add_device() will decide on retries and
9119 			 * return "1" when it should be retried
9120 			 */
9121 			while (_scsih_add_device(ioc, handle, retry_count++,
9122 			    1)) {
9123 				ssleep(1);
9124 			}
9125 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9126 				 handle,
9127 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9128 		}
9129 	}
9130 
9131 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
9132 
9133 	ioc_info(ioc, "\tscan devices: volumes start\n");
9134 
9135 	/* volumes */
9136 	handle = 0xFFFF;
9137 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9138 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9139 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9140 		    MPI2_IOCSTATUS_MASK;
9141 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9142 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9143 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9144 			break;
9145 		}
9146 		handle = le16_to_cpu(volume_pg1.DevHandle);
9147 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9148 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
9149 		    le64_to_cpu(volume_pg1.WWID));
9150 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9151 		if (raid_device)
9152 			continue;
9153 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9154 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9155 		     sizeof(Mpi2RaidVolPage0_t)))
9156 			continue;
9157 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9158 		    MPI2_IOCSTATUS_MASK;
9159 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9160 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9161 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9162 			break;
9163 		}
9164 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9165 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9166 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9167 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9168 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9169 			element.VolDevHandle = volume_pg1.DevHandle;
9170 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9171 				 volume_pg1.DevHandle);
9172 			_scsih_sas_volume_add(ioc, &element);
9173 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9174 				 volume_pg1.DevHandle);
9175 		}
9176 	}
9177 
9178 	ioc_info(ioc, "\tscan devices: volumes complete\n");
9179 
9180  skip_to_sas:
9181 
9182 	ioc_info(ioc, "\tscan devices: end devices start\n");
9183 
9184 	/* sas devices */
9185 	handle = 0xFFFF;
9186 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9187 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9188 	    handle))) {
9189 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9190 		    MPI2_IOCSTATUS_MASK;
9191 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9192 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9193 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9194 			break;
9195 		}
9196 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9197 		if (!(_scsih_is_end_device(
9198 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
9199 			continue;
9200 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
9201 		    le64_to_cpu(sas_device_pg0.SASAddress));
9202 		if (sas_device) {
9203 			sas_device_put(sas_device);
9204 			continue;
9205 		}
9206 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9207 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9208 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9209 				 handle,
9210 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9211 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9212 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9213 			retry_count = 0;
9214 			/* This will retry adding the end device.
9215 			 * _scsih_add_device() will decide on retries and
9216 			 * return "1" when it should be retried
9217 			 */
9218 			while (_scsih_add_device(ioc, handle, retry_count++,
9219 			    0)) {
9220 				ssleep(1);
9221 			}
9222 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9223 				 handle,
9224 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9225 		}
9226 	}
9227 	ioc_info(ioc, "\tscan devices: end devices complete\n");
9228 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9229 
9230 	/* pcie devices */
9231 	handle = 0xFFFF;
9232 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9233 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9234 		handle))) {
9235 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9236 				& MPI2_IOCSTATUS_MASK;
9237 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9238 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9239 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9240 			break;
9241 		}
9242 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9243 		if (!(_scsih_is_nvme_pciescsi_device(
9244 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9245 			continue;
9246 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9247 				le64_to_cpu(pcie_device_pg0.WWID));
9248 		if (pcie_device) {
9249 			pcie_device_put(pcie_device);
9250 			continue;
9251 		}
9252 		retry_count = 0;
9253 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9254 		_scsih_pcie_add_device(ioc, handle);
9255 
9256 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9257 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9258 	}
	ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
9260 	ioc_info(ioc, "scan devices: complete\n");
9261 }
9262 
9263 /**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
9265  * @ioc: per adapter object
9266  *
9267  * The handler for doing any required cleanup or initialization.
9268  */
9269 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9270 {
9271 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9272 }
9273 
9274 /**
9275  * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9276  * @ioc: per adapter object
9277  *
9278  * The handler for doing any required cleanup or initialization.
9279  */
9280 void
9281 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9282 {
9283 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9284 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9285 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9286 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9287 		complete(&ioc->scsih_cmds.done);
9288 	}
9289 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9290 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
9291 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9292 		complete(&ioc->tm_cmds.done);
9293 	}
9294 
9295 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9296 	memset(ioc->device_remove_in_progress, 0,
9297 	       ioc->device_remove_in_progress_sz);
9298 	_scsih_fw_event_cleanup_queue(ioc);
9299 	_scsih_flush_running_cmds(ioc);
9300 }
9301 
9302 /**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
9304  * @ioc: per adapter object
9305  *
9306  * The handler for doing any required cleanup or initialization.
9307  */
9308 void
9309 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9310 {
9311 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9312 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9313 					   !ioc->sas_hba.num_phys)) {
9314 		_scsih_prep_device_scan(ioc);
9315 		_scsih_create_enclosure_list_after_reset(ioc);
9316 		_scsih_search_responding_sas_devices(ioc);
9317 		_scsih_search_responding_pcie_devices(ioc);
9318 		_scsih_search_responding_raid_devices(ioc);
9319 		_scsih_search_responding_expanders(ioc);
9320 		_scsih_error_recovery_delete_devices(ioc);
9321 	}
9322 }
9323 
9324 /**
9325  * _mpt3sas_fw_work - delayed task for processing firmware events
9326  * @ioc: per adapter object
9327  * @fw_event: The fw_event_work object
9328  * Context: user.
9329  */
9330 static void
9331 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9332 {
9333 	_scsih_fw_event_del_from_list(ioc, fw_event);
9334 
9335 	/* the queue is being flushed so ignore this event */
9336 	if (ioc->remove_host || ioc->pci_error_recovery) {
9337 		fw_event_work_put(fw_event);
9338 		return;
9339 	}
9340 
9341 	switch (fw_event->event) {
9342 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9343 		mpt3sas_process_trigger_data(ioc,
9344 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9345 			fw_event->event_data);
9346 		break;
9347 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9348 		while (scsi_host_in_recovery(ioc->shost) ||
9349 					 ioc->shost_recovery) {
9350 			/*
9351 			 * If we're unloading, bail. Otherwise, this can become
9352 			 * an infinite loop.
9353 			 */
9354 			if (ioc->remove_host)
9355 				goto out;
9356 			ssleep(1);
9357 		}
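		/*
		 * Once the host is out of recovery, prune devices that did
		 * not respond to the post-reset scan, then rescan for
		 * anything new or whose handle changed.
		 */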
9358 		_scsih_remove_unresponding_devices(ioc);
9359 		_scsih_scan_for_devices_after_reset(ioc);
9360 		break;
9361 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9362 		ioc->start_scan = 0;
9363 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9364 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9365 			    missing_delay[1]);
9366 		dewtprintk(ioc,
9367 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9368 		break;
9369 	case MPT3SAS_TURN_ON_PFA_LED:
9370 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9371 		break;
9372 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9373 		_scsih_sas_topology_change_event(ioc, fw_event);
9374 		break;
9375 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9376 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9377 			_scsih_sas_device_status_change_event_debug(ioc,
9378 			    (Mpi2EventDataSasDeviceStatusChange_t *)
9379 			    fw_event->event_data);
9380 		break;
9381 	case MPI2_EVENT_SAS_DISCOVERY:
9382 		_scsih_sas_discovery_event(ioc, fw_event);
9383 		break;
9384 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9385 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9386 		break;
9387 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9388 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9389 		break;
9390 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9391 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9392 		    fw_event);
9393 		break;
9394 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9395 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9396 		break;
9397 	case MPI2_EVENT_IR_VOLUME:
9398 		_scsih_sas_ir_volume_event(ioc, fw_event);
9399 		break;
9400 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9401 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9402 		break;
9403 	case MPI2_EVENT_IR_OPERATION_STATUS:
9404 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9405 		break;
9406 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9407 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9408 		break;
9409 	case MPI2_EVENT_PCIE_ENUMERATION:
9410 		_scsih_pcie_enumeration_event(ioc, fw_event);
9411 		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		return;
9416 	}
9417 out:
9418 	fw_event_work_put(fw_event);
9419 }
9420 
9421 /**
 * _firmware_event_work - work queue callback for processing firmware events
9423  * @work: The fw_event_work object
9424  * Context: user.
9425  *
 * wrapper for the work thread handling firmware events
9427  */
9428 
9429 static void
9430 _firmware_event_work(struct work_struct *work)
9431 {
9432 	struct fw_event_work *fw_event = container_of(work,
9433 	    struct fw_event_work, work);
9434 
9435 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
9436 }
9437 
9438 /**
9439  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9440  * @ioc: per adapter object
9441  * @msix_index: MSIX table index supplied by the OS
9442  * @reply: reply message frame(lower 32bit addr)
9443  * Context: interrupt.
9444  *
9445  * This function merely adds a new work task into ioc->firmware_event_thread.
9446  * The tasks are worked from _firmware_event_work in user context.
9447  *
9448  * Return: 1 meaning mf should be freed from _base_interrupt
9449  *         0 means the mf is freed from this function.
9450  */
9451 u8
9452 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9453 	u32 reply)
9454 {
9455 	struct fw_event_work *fw_event;
9456 	Mpi2EventNotificationReply_t *mpi_reply;
9457 	u16 event;
9458 	u16 sz;
9459 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9460 
9461 	/* events turned off due to host reset */
9462 	if (ioc->pci_error_recovery)
9463 		return 1;
9464 
9465 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9466 
9467 	if (unlikely(!mpi_reply)) {
9468 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9469 			__FILE__, __LINE__, __func__);
9470 		return 1;
9471 	}
9472 
9473 	event = le16_to_cpu(mpi_reply->Event);
9474 
9475 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9476 		mpt3sas_trigger_event(ioc, event, 0);
9477 
9478 	switch (event) {
9479 	/* handle these */
9480 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9481 	{
9482 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9483 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9484 		    mpi_reply->EventData;
9485 
9486 		if (baen_data->Primitive !=
9487 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9488 			return 1;
9489 
9490 		if (ioc->broadcast_aen_busy) {
9491 			ioc->broadcast_aen_pending++;
9492 			return 1;
9493 		} else
9494 			ioc->broadcast_aen_busy = 1;
9495 		break;
9496 	}
9497 
9498 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9499 		_scsih_check_topo_delete_events(ioc,
9500 		    (Mpi2EventDataSasTopologyChangeList_t *)
9501 		    mpi_reply->EventData);
9502 		break;
9503 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
9505 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9506 		    mpi_reply->EventData);
9507 		break;
9508 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9509 		_scsih_check_ir_config_unhide_events(ioc,
9510 		    (Mpi2EventDataIrConfigChangeList_t *)
9511 		    mpi_reply->EventData);
9512 		break;
9513 	case MPI2_EVENT_IR_VOLUME:
9514 		_scsih_check_volume_delete_events(ioc,
9515 		    (Mpi2EventDataIrVolume_t *)
9516 		    mpi_reply->EventData);
9517 		break;
9518 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9519 	{
9520 		Mpi2EventDataLogEntryAdded_t *log_entry;
9521 		u32 *log_code;
9522 
9523 		if (!ioc->is_warpdrive)
9524 			break;
9525 
9526 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9527 		    mpi_reply->EventData;
9528 		log_code = (u32 *)log_entry->LogData;
9529 
9530 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9531 		    != MPT2_WARPDRIVE_LOGENTRY)
9532 			break;
9533 
9534 		switch (le32_to_cpu(*log_code)) {
9535 		case MPT2_WARPDRIVE_LC_SSDT:
9536 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9537 			break;
9538 		case MPT2_WARPDRIVE_LC_SSDLW:
9539 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9540 			break;
9541 		case MPT2_WARPDRIVE_LC_SSDLF:
9542 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9543 			break;
9544 		case MPT2_WARPDRIVE_LC_BRMF:
9545 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9546 			break;
9547 		}
9548 
9549 		break;
9550 	}
9551 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9552 		_scsih_sas_device_status_change_event(ioc,
9553 		    (Mpi2EventDataSasDeviceStatusChange_t *)
9554 		    mpi_reply->EventData);
9555 		break;
9556 	case MPI2_EVENT_IR_OPERATION_STATUS:
9557 	case MPI2_EVENT_SAS_DISCOVERY:
9558 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9559 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9560 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9561 	case MPI2_EVENT_PCIE_ENUMERATION:
9562 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9563 		break;
9564 
9565 	case MPI2_EVENT_TEMP_THRESHOLD:
9566 		_scsih_temp_threshold_events(ioc,
9567 			(Mpi2EventDataTemperature_t *)
9568 			mpi_reply->EventData);
9569 		break;
9570 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9571 		ActiveCableEventData =
9572 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9573 		switch (ActiveCableEventData->ReasonCode) {
9574 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9575 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9576 				   ActiveCableEventData->ReceptacleID);
9577 			pr_notice("cannot be powered and devices connected\n");
9578 			pr_notice("to this active cable will not be seen\n");
9579 			pr_notice("This active cable requires %d mW of power\n",
9580 			     ActiveCableEventData->ActiveCablePowerRequirement);
9581 			break;
9582 
9583 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9584 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9585 				   ActiveCableEventData->ReceptacleID);
9586 			pr_notice(
9587 			    "is not running at optimal speed(12 Gb/s rate)\n");
9588 			break;
9589 		}
9590 
9591 		break;
9592 
9593 	default: /* ignore the rest */
9594 		return 1;
9595 	}
9596 
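	/*
	 * EventDataLength is in 32-bit dwords; convert it to bytes so the
	 * full event payload is copied into the deferred work item.
	 */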
9597 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9598 	fw_event = alloc_fw_event_work(sz);
9599 	if (!fw_event) {
9600 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9601 			__FILE__, __LINE__, __func__);
9602 		return 1;
9603 	}
9604 
9605 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9606 	fw_event->ioc = ioc;
9607 	fw_event->VF_ID = mpi_reply->VF_ID;
9608 	fw_event->VP_ID = mpi_reply->VP_ID;
9609 	fw_event->event = event;
9610 	_scsih_fw_event_add(ioc, fw_event);
9611 	fw_event_work_put(fw_event);
9612 	return 1;
9613 }
9614 
9615 /**
9616  * _scsih_expander_node_remove - removing expander device from list.
9617  * @ioc: per adapter object
 * @sas_expander: the sas_node object
9619  *
9620  * Removing object and freeing associated memory from the
9621  * ioc->sas_expander_list.
9622  */
9623 static void
9624 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9625 	struct _sas_node *sas_expander)
9626 {
9627 	struct _sas_port *mpt3sas_port, *next;
9628 	unsigned long flags;
9629 
9630 	/* remove sibling ports attached to this expander */
9631 	list_for_each_entry_safe(mpt3sas_port, next,
9632 	   &sas_expander->sas_port_list, port_list) {
9633 		if (ioc->shost_recovery)
9634 			return;
9635 		if (mpt3sas_port->remote_identify.device_type ==
9636 		    SAS_END_DEVICE)
9637 			mpt3sas_device_remove_by_sas_address(ioc,
9638 			    mpt3sas_port->remote_identify.sas_address);
9639 		else if (mpt3sas_port->remote_identify.device_type ==
9640 		    SAS_EDGE_EXPANDER_DEVICE ||
9641 		    mpt3sas_port->remote_identify.device_type ==
9642 		    SAS_FANOUT_EXPANDER_DEVICE)
9643 			mpt3sas_expander_remove(ioc,
9644 			    mpt3sas_port->remote_identify.sas_address);
9645 	}
9646 
9647 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9648 	    sas_expander->sas_address_parent);
9649 
9650 	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9651 		 sas_expander->handle, (unsigned long long)
9652 		 sas_expander->sas_address);
9653 
9654 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9655 	list_del(&sas_expander->list);
9656 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9657 
9658 	kfree(sas_expander->phy);
9659 	kfree(sas_expander);
9660 }
9661 
9662 /**
9663  * _scsih_ir_shutdown - IR shutdown notification
9664  * @ioc: per adapter object
9665  *
9666  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9667  * the host system is shutting down.
9668  */
9669 static void
9670 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9671 {
9672 	Mpi2RaidActionRequest_t *mpi_request;
9673 	Mpi2RaidActionReply_t *mpi_reply;
9674 	u16 smid;
9675 
9676 	/* is IR firmware build loaded ? */
9677 	if (!ioc->ir_firmware)
9678 		return;
9679 
9680 	/* are there any volumes ? */
9681 	if (list_empty(&ioc->raid_device_list))
9682 		return;
9683 
9684 	mutex_lock(&ioc->scsih_cmds.mutex);
9685 
9686 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9687 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9688 		goto out;
9689 	}
9690 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9691 
9692 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9693 	if (!smid) {
9694 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9695 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9696 		goto out;
9697 	}
9698 
9699 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9700 	ioc->scsih_cmds.smid = smid;
9701 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9702 
9703 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9704 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9705 
9706 	if (!ioc->hide_ir_msg)
9707 		ioc_info(ioc, "IR shutdown (sending)\n");
9708 	init_completion(&ioc->scsih_cmds.done);
9709 	ioc->put_smid_default(ioc, smid);
9710 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9711 
9712 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9713 		ioc_err(ioc, "%s: timeout\n", __func__);
9714 		goto out;
9715 	}
9716 
9717 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9718 		mpi_reply = ioc->scsih_cmds.reply;
9719 		if (!ioc->hide_ir_msg)
9720 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9721 				 le16_to_cpu(mpi_reply->IOCStatus),
9722 				 le32_to_cpu(mpi_reply->IOCLogInfo));
9723 	}
9724 
9725  out:
9726 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9727 	mutex_unlock(&ioc->scsih_cmds.mutex);
9728 }
9729 
9730 /**
 * scsih_remove - detach and remove the host
9732  * @pdev: PCI device struct
9733  *
9734  * Routine called when unloading the driver.
9735  */
9736 static void scsih_remove(struct pci_dev *pdev)
9737 {
9738 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9739 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9740 	struct _sas_port *mpt3sas_port, *next_port;
9741 	struct _raid_device *raid_device, *next;
9742 	struct MPT3SAS_TARGET *sas_target_priv_data;
9743 	struct _pcie_device *pcie_device, *pcienext;
9744 	struct workqueue_struct	*wq;
9745 	unsigned long flags;
9746 	Mpi2ConfigReply_t mpi_reply;
9747 
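	/*
	 * Teardown order: stop accepting new events, flush outstanding
	 * commands, drain and destroy the firmware-event workqueue,
	 * restore IOC Page 1 on Aero controllers, notify the IR firmware,
	 * and finally unregister the transport objects.
	 */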
9748 	ioc->remove_host = 1;
9749 
9750 	mpt3sas_wait_for_commands_to_complete(ioc);
9751 	_scsih_flush_running_cmds(ioc);
9752 
9753 	_scsih_fw_event_cleanup_queue(ioc);
9754 
9755 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9756 	wq = ioc->firmware_event_thread;
9757 	ioc->firmware_event_thread = NULL;
9758 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9759 	if (wq)
9760 		destroy_workqueue(wq);
9761 	/*
9762 	 * Copy back the unmodified ioc page1. so that on next driver load,
9763 	 * current modified changes on ioc page1 won't take effect.
9764 	 */
9765 	if (ioc->is_aero_ioc)
9766 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9767 				&ioc->ioc_pg1_copy);
9768 	/* release all the volumes */
9769 	_scsih_ir_shutdown(ioc);
9770 	sas_remove_host(shost);
9771 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9772 	    list) {
9773 		if (raid_device->starget) {
9774 			sas_target_priv_data =
9775 			    raid_device->starget->hostdata;
9776 			sas_target_priv_data->deleted = 1;
9777 			scsi_remove_target(&raid_device->starget->dev);
9778 		}
9779 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9780 			 raid_device->handle, (u64)raid_device->wwid);
9781 		_scsih_raid_device_remove(ioc, raid_device);
9782 	}
9783 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9784 		list) {
9785 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9786 		list_del_init(&pcie_device->list);
9787 		pcie_device_put(pcie_device);
9788 	}
9789 
9790 	/* free ports attached to the sas_host */
9791 	list_for_each_entry_safe(mpt3sas_port, next_port,
9792 	   &ioc->sas_hba.sas_port_list, port_list) {
9793 		if (mpt3sas_port->remote_identify.device_type ==
9794 		    SAS_END_DEVICE)
9795 			mpt3sas_device_remove_by_sas_address(ioc,
9796 			    mpt3sas_port->remote_identify.sas_address);
9797 		else if (mpt3sas_port->remote_identify.device_type ==
9798 		    SAS_EDGE_EXPANDER_DEVICE ||
9799 		    mpt3sas_port->remote_identify.device_type ==
9800 		    SAS_FANOUT_EXPANDER_DEVICE)
9801 			mpt3sas_expander_remove(ioc,
9802 			    mpt3sas_port->remote_identify.sas_address);
9803 	}
9804 
9805 	/* free phys attached to the sas_host */
9806 	if (ioc->sas_hba.num_phys) {
9807 		kfree(ioc->sas_hba.phy);
9808 		ioc->sas_hba.phy = NULL;
9809 		ioc->sas_hba.num_phys = 0;
9810 	}
9811 
9812 	mpt3sas_base_detach(ioc);
9813 	spin_lock(&gioc_lock);
9814 	list_del(&ioc->list);
9815 	spin_unlock(&gioc_lock);
9816 	scsi_host_put(shost);
9817 }
9818 
9819 /**
9820  * scsih_shutdown - routine call during system shutdown
9821  * @pdev: PCI device struct
9822  */
9823 static void
9824 scsih_shutdown(struct pci_dev *pdev)
9825 {
9826 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9827 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9828 	struct workqueue_struct	*wq;
9829 	unsigned long flags;
9830 	Mpi2ConfigReply_t mpi_reply;
9831 
9832 	ioc->remove_host = 1;
9833 
9834 	mpt3sas_wait_for_commands_to_complete(ioc);
9835 	_scsih_flush_running_cmds(ioc);
9836 
9837 	_scsih_fw_event_cleanup_queue(ioc);
9838 
9839 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9840 	wq = ioc->firmware_event_thread;
9841 	ioc->firmware_event_thread = NULL;
9842 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9843 	if (wq)
9844 		destroy_workqueue(wq);
9845 	/*
9846 	 * Copy back the unmodified ioc page1 so that on next driver load,
9847 	 * current modified changes on ioc page1 won't take effect.
9848 	 */
9849 	if (ioc->is_aero_ioc)
9850 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9851 				&ioc->ioc_pg1_copy);
9852 
9853 	_scsih_ir_shutdown(ioc);
9854 	mpt3sas_base_detach(ioc);
9855 }
9856 
9857 
9858 /**
9859  * _scsih_probe_boot_devices - reports 1st device
9860  * @ioc: per adapter object
9861  *
 * If specified in BIOS page 2, this routine reports the 1st device
 * to scsi-ml or the sas transport for persistent boot device
 * purposes.  Please refer to _scsih_determine_boot_device().
9865  */
9866 static void
9867 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9868 {
9869 	u32 channel;
9870 	void *device;
9871 	struct _sas_device *sas_device;
9872 	struct _raid_device *raid_device;
9873 	struct _pcie_device *pcie_device;
9874 	u16 handle;
9875 	u64 sas_address_parent;
9876 	u64 sas_address;
9877 	unsigned long flags;
9878 	int rc;
9879 	int tid;
9880 
	/* no BIOS, return immediately */
9882 	if (!ioc->bios_pg3.BiosVersion)
9883 		return;
9884 
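	/*
	 * Boot device precedence (from BIOS page 2): the requested boot
	 * device, then the requested alternate, then the current boot
	 * device.
	 */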
9885 	device = NULL;
9886 	if (ioc->req_boot_device.device) {
9887 		device =  ioc->req_boot_device.device;
9888 		channel = ioc->req_boot_device.channel;
9889 	} else if (ioc->req_alt_boot_device.device) {
9890 		device =  ioc->req_alt_boot_device.device;
9891 		channel = ioc->req_alt_boot_device.channel;
9892 	} else if (ioc->current_boot_device.device) {
9893 		device =  ioc->current_boot_device.device;
9894 		channel = ioc->current_boot_device.channel;
9895 	}
9896 
9897 	if (!device)
9898 		return;
9899 
9900 	if (channel == RAID_CHANNEL) {
9901 		raid_device = device;
9902 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9903 		    raid_device->id, 0);
9904 		if (rc)
9905 			_scsih_raid_device_remove(ioc, raid_device);
9906 	} else if (channel == PCIE_CHANNEL) {
9907 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9908 		pcie_device = device;
9909 		tid = pcie_device->id;
9910 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9911 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9912 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9913 		if (rc)
9914 			_scsih_pcie_device_remove(ioc, pcie_device);
9915 	} else {
9916 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
9917 		sas_device = device;
9918 		handle = sas_device->handle;
9919 		sas_address_parent = sas_device->sas_address_parent;
9920 		sas_address = sas_device->sas_address;
9921 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
9922 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9923 
9924 		if (ioc->hide_drives)
9925 			return;
9926 		if (!mpt3sas_transport_port_add(ioc, handle,
9927 		    sas_address_parent)) {
9928 			_scsih_sas_device_remove(ioc, sas_device);
9929 		} else if (!sas_device->starget) {
9930 			if (!ioc->is_driver_loading) {
9931 				mpt3sas_transport_port_remove(ioc,
9932 				    sas_address,
9933 				    sas_address_parent);
9934 				_scsih_sas_device_remove(ioc, sas_device);
9935 			}
9936 		}
9937 	}
9938 }
9939 
9940 /**
9941  * _scsih_probe_raid - reporting raid volumes to scsi-ml
9942  * @ioc: per adapter object
9943  *
9944  * Called during initial loading of the driver.
9945  */
9946 static void
9947 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9948 {
9949 	struct _raid_device *raid_device, *raid_next;
9950 	int rc;
9951 
9952 	list_for_each_entry_safe(raid_device, raid_next,
9953 	    &ioc->raid_device_list, list) {
9954 		if (raid_device->starget)
9955 			continue;
9956 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9957 		    raid_device->id, 0);
9958 		if (rc)
9959 			_scsih_raid_device_remove(ioc, raid_device);
9960 	}
9961 }
9962 
9963 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9964 {
9965 	struct _sas_device *sas_device = NULL;
9966 	unsigned long flags;
9967 
9968 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9969 	if (!list_empty(&ioc->sas_device_init_list)) {
9970 		sas_device = list_first_entry(&ioc->sas_device_init_list,
9971 				struct _sas_device, list);
9972 		sas_device_get(sas_device);
9973 	}
9974 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9975 
9976 	return sas_device;
9977 }
9978 
9979 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9980 		struct _sas_device *sas_device)
9981 {
9982 	unsigned long flags;
9983 
9984 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9985 
9986 	/*
9987 	 * Since we dropped the lock during the call to port_add(), we need to
9988 	 * be careful here that somebody else didn't move or delete this item
9989 	 * while we were busy with other things.
9990 	 *
9991 	 * If it was on the list, we need a put() for the reference the list
9992 	 * had. Either way, we need a get() for the destination list.
9993 	 */
9994 	if (!list_empty(&sas_device->list)) {
9995 		list_del_init(&sas_device->list);
9996 		sas_device_put(sas_device);
9997 	}
9998 
9999 	sas_device_get(sas_device);
10000 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
10001 
10002 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10003 }
10004 
10005 /**
10006  * _scsih_probe_sas - reporting sas devices to sas transport
10007  * @ioc: per adapter object
10008  *
10009  * Called during initial loading of the driver.
10010  */
10011 static void
10012 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10013 {
10014 	struct _sas_device *sas_device;
10015 
10016 	if (ioc->hide_drives)
10017 		return;
10018 
10019 	while ((sas_device = get_next_sas_device(ioc))) {
10020 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10021 		    sas_device->sas_address_parent)) {
10022 			_scsih_sas_device_remove(ioc, sas_device);
10023 			sas_device_put(sas_device);
10024 			continue;
10025 		} else if (!sas_device->starget) {
10026 			/*
			 * When async scanning is enabled, it's not possible to
10028 			 * remove devices while scanning is turned on due to an
10029 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10030 			 * sysfs_addrm_start()
10031 			 */
10032 			if (!ioc->is_driver_loading) {
10033 				mpt3sas_transport_port_remove(ioc,
10034 				    sas_device->sas_address,
10035 				    sas_device->sas_address_parent);
10036 				_scsih_sas_device_remove(ioc, sas_device);
10037 				sas_device_put(sas_device);
10038 				continue;
10039 			}
10040 		}
10041 		sas_device_make_active(ioc, sas_device);
10042 		sas_device_put(sas_device);
10043 	}
10044 }
10045 
10046 /**
10047  * get_next_pcie_device - Get the next pcie device
10048  * @ioc: per adapter object
10049  *
 * Get the next pcie device from the pcie_device_init_list.
 *
 * Return: pcie device structure if the pcie_device_init_list is not empty,
 * otherwise returns NULL.
10054  */
10055 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10056 {
10057 	struct _pcie_device *pcie_device = NULL;
10058 	unsigned long flags;
10059 
10060 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10061 	if (!list_empty(&ioc->pcie_device_init_list)) {
10062 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10063 				struct _pcie_device, list);
10064 		pcie_device_get(pcie_device);
10065 	}
10066 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10067 
10068 	return pcie_device;
10069 }
10070 
10071 /**
10072  * pcie_device_make_active - Add pcie device to pcie_device_list list
10073  * @ioc: per adapter object
10074  * @pcie_device: pcie device object
10075  *
 * Add the pcie device, which has been registered with the SCSI Transport
 * Layer, to the pcie_device_list.
10078  */
10079 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10080 		struct _pcie_device *pcie_device)
10081 {
10082 	unsigned long flags;
10083 
10084 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10085 
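	/*
	 * If the device is still on a list (normally pcie_device_init_list),
	 * drop that list's reference before taking a new reference for the
	 * destination pcie_device_list, mirroring sas_device_make_active().
	 */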
10086 	if (!list_empty(&pcie_device->list)) {
10087 		list_del_init(&pcie_device->list);
10088 		pcie_device_put(pcie_device);
10089 	}
10090 	pcie_device_get(pcie_device);
10091 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10092 
10093 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10094 }
10095 
10096 /**
10097  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10098  * @ioc: per adapter object
10099  *
10100  * Called during initial loading of the driver.
10101  */
10102 static void
10103 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10104 {
10105 	struct _pcie_device *pcie_device;
10106 	int rc;
10107 
10108 	/* PCIe Device List */
10109 	while ((pcie_device = get_next_pcie_device(ioc))) {
10110 		if (pcie_device->starget) {
10111 			pcie_device_put(pcie_device);
10112 			continue;
10113 		}
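		/*
		 * Devices the firmware reports as blocked are kept on the
		 * internal pcie_device_list but are not reported to the SCSI
		 * midlayer.
		 */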
10114 		if (pcie_device->access_status ==
10115 		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10116 			pcie_device_make_active(ioc, pcie_device);
10117 			pcie_device_put(pcie_device);
10118 			continue;
10119 		}
10120 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10121 			pcie_device->id, 0);
10122 		if (rc) {
10123 			_scsih_pcie_device_remove(ioc, pcie_device);
10124 			pcie_device_put(pcie_device);
10125 			continue;
10126 		} else if (!pcie_device->starget) {
10127 			/*
			 * When async scanning is enabled, it's not possible to
10129 			 * remove devices while scanning is turned on due to an
10130 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10131 			 * sysfs_addrm_start()
10132 			 */
10133 			if (!ioc->is_driver_loading) {
10134 			/* TODO-- Need to find out whether this condition will
10135 			 * occur or not
10136 			 */
10137 				_scsih_pcie_device_remove(ioc, pcie_device);
10138 				pcie_device_put(pcie_device);
10139 				continue;
10140 			}
10141 		}
10142 		pcie_device_make_active(ioc, pcie_device);
10143 		pcie_device_put(pcie_device);
10144 	}
10145 }
10146 
10147 /**
10148  * _scsih_probe_devices - probing for devices
10149  * @ioc: per adapter object
10150  *
10151  * Called during initial loading of the driver.
10152  */
10153 static void
10154 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10155 {
10156 	u16 volume_mapping_flags;
10157 
10158 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10159 		return;  /* return when IOC doesn't support initiator mode */
10160 
10161 	_scsih_probe_boot_devices(ioc);
10162 
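	/*
	 * With IR (RAID) firmware, IOC Page 8 indicates whether volumes are
	 * mapped at the low or the high end of the mapping range; probe RAID
	 * volumes and bare SAS devices in the corresponding order.
	 */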
10163 	if (ioc->ir_firmware) {
10164 		volume_mapping_flags =
10165 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10166 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10167 		if (volume_mapping_flags ==
10168 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10169 			_scsih_probe_raid(ioc);
10170 			_scsih_probe_sas(ioc);
10171 		} else {
10172 			_scsih_probe_sas(ioc);
10173 			_scsih_probe_raid(ioc);
10174 		}
10175 	} else {
10176 		_scsih_probe_sas(ioc);
10177 		_scsih_probe_pcie(ioc);
10178 	}
10179 }
10180 
10181 /**
10182  * scsih_scan_start - scsi lld callback for .scan_start
10183  * @shost: SCSI host pointer
10184  *
10185  * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implementation, we will kick off
10187  * firmware discovery.
10188  */
10189 static void
10190 scsih_scan_start(struct Scsi_Host *shost)
10191 {
10192 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10193 	int rc;
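
	/*
	 * Enable diag buffers as requested by the diag_buffer_enable module
	 * parameter; otherwise fall back to the host trace buffer size
	 * advertised in Manufacturing Page 11.
	 */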
10194 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10195 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10196 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
10197 		mpt3sas_enable_diag_buffer(ioc, 1);
10198 
10199 	if (disable_discovery > 0)
10200 		return;
10201 
10202 	ioc->start_scan = 1;
10203 	rc = mpt3sas_port_enable(ioc);
10204 
10205 	if (rc != 0)
10206 		ioc_info(ioc, "port enable: FAILED\n");
10207 }
10208 
10209 /**
10210  * scsih_scan_finished - scsi lld callback for .scan_finished
10211  * @shost: SCSI host pointer
10212  * @time: elapsed time of the scan in jiffies
10213  *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our
 * implementation, we wait for firmware discovery to complete, then return 1.
10217  */
10218 static int
10219 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10220 {
10221 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10222 
10223 	if (disable_discovery > 0) {
10224 		ioc->is_driver_loading = 0;
10225 		ioc->wait_for_discovery_to_complete = 0;
10226 		return 1;
10227 	}
10228 
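	/* Allow firmware discovery (port enable) up to 300 seconds. */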
10229 	if (time >= (300 * HZ)) {
10230 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10231 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10232 		ioc->is_driver_loading = 0;
10233 		return 1;
10234 	}
10235 
10236 	if (ioc->start_scan)
10237 		return 0;
10238 
10239 	if (ioc->start_scan_failed) {
10240 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10241 			 ioc->start_scan_failed);
10242 		ioc->is_driver_loading = 0;
10243 		ioc->wait_for_discovery_to_complete = 0;
10244 		ioc->remove_host = 1;
10245 		return 1;
10246 	}
10247 
10248 	ioc_info(ioc, "port enable: SUCCESS\n");
10249 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10250 
10251 	if (ioc->wait_for_discovery_to_complete) {
10252 		ioc->wait_for_discovery_to_complete = 0;
10253 		_scsih_probe_devices(ioc);
10254 	}
10255 	mpt3sas_base_start_watchdog(ioc);
10256 	ioc->is_driver_loading = 0;
10257 	return 1;
10258 }
10259 
10260 /* shost template for SAS 2.0 HBA devices */
10261 static struct scsi_host_template mpt2sas_driver_template = {
10262 	.module				= THIS_MODULE,
10263 	.name				= "Fusion MPT SAS Host",
10264 	.proc_name			= MPT2SAS_DRIVER_NAME,
10265 	.queuecommand			= scsih_qcmd,
10266 	.target_alloc			= scsih_target_alloc,
10267 	.slave_alloc			= scsih_slave_alloc,
10268 	.slave_configure		= scsih_slave_configure,
10269 	.target_destroy			= scsih_target_destroy,
10270 	.slave_destroy			= scsih_slave_destroy,
10271 	.scan_finished			= scsih_scan_finished,
10272 	.scan_start			= scsih_scan_start,
10273 	.change_queue_depth		= scsih_change_queue_depth,
10274 	.eh_abort_handler		= scsih_abort,
10275 	.eh_device_reset_handler	= scsih_dev_reset,
10276 	.eh_target_reset_handler	= scsih_target_reset,
10277 	.eh_host_reset_handler		= scsih_host_reset,
10278 	.bios_param			= scsih_bios_param,
10279 	.can_queue			= 1,
10280 	.this_id			= -1,
10281 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
10282 	.max_sectors			= 32767,
10283 	.cmd_per_lun			= 7,
10284 	.shost_attrs			= mpt3sas_host_attrs,
10285 	.sdev_attrs			= mpt3sas_dev_attrs,
10286 	.track_queue_depth		= 1,
10287 	.cmd_size			= sizeof(struct scsiio_tracker),
10288 };
10289 
10290 /* raid transport support for SAS 2.0 HBA devices */
10291 static struct raid_function_template mpt2sas_raid_functions = {
10292 	.cookie		= &mpt2sas_driver_template,
10293 	.is_raid	= scsih_is_raid,
10294 	.get_resync	= scsih_get_resync,
10295 	.get_state	= scsih_get_state,
10296 };
10297 
10298 /* shost template for SAS 3.0 HBA devices */
10299 static struct scsi_host_template mpt3sas_driver_template = {
10300 	.module				= THIS_MODULE,
10301 	.name				= "Fusion MPT SAS Host",
10302 	.proc_name			= MPT3SAS_DRIVER_NAME,
10303 	.queuecommand			= scsih_qcmd,
10304 	.target_alloc			= scsih_target_alloc,
10305 	.slave_alloc			= scsih_slave_alloc,
10306 	.slave_configure		= scsih_slave_configure,
10307 	.target_destroy			= scsih_target_destroy,
10308 	.slave_destroy			= scsih_slave_destroy,
10309 	.scan_finished			= scsih_scan_finished,
10310 	.scan_start			= scsih_scan_start,
10311 	.change_queue_depth		= scsih_change_queue_depth,
10312 	.eh_abort_handler		= scsih_abort,
10313 	.eh_device_reset_handler	= scsih_dev_reset,
10314 	.eh_target_reset_handler	= scsih_target_reset,
10315 	.eh_host_reset_handler		= scsih_host_reset,
10316 	.bios_param			= scsih_bios_param,
10317 	.can_queue			= 1,
10318 	.this_id			= -1,
10319 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
10320 	.max_sectors			= 32767,
10321 	.max_segment_size		= 0xffffffff,
10322 	.cmd_per_lun			= 7,
10323 	.shost_attrs			= mpt3sas_host_attrs,
10324 	.sdev_attrs			= mpt3sas_dev_attrs,
10325 	.track_queue_depth		= 1,
10326 	.cmd_size			= sizeof(struct scsiio_tracker),
10327 };
10328 
10329 /* raid transport support for SAS 3.0 HBA devices */
10330 static struct raid_function_template mpt3sas_raid_functions = {
10331 	.cookie		= &mpt3sas_driver_template,
10332 	.is_raid	= scsih_is_raid,
10333 	.get_resync	= scsih_get_resync,
10334 	.get_state	= scsih_get_state,
10335 };
10336 
10337 /**
10338  * _scsih_determine_hba_mpi_version - determine in which MPI version class
10339  *					this device belongs to.
10340  * @pdev: PCI device struct
10341  *
 * Return: MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
10345  */
10346 static u16
10347 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10348 {
10349 
10350 	switch (pdev->device) {
10351 	case MPI2_MFGPAGE_DEVID_SSS6200:
10352 	case MPI2_MFGPAGE_DEVID_SAS2004:
10353 	case MPI2_MFGPAGE_DEVID_SAS2008:
10354 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
10355 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
10356 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
10357 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
10358 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
10359 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
10360 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
10361 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
10362 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
10363 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
10364 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
10365 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
10366 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
10367 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
10368 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10369 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10370 		return MPI2_VERSION;
10371 	case MPI25_MFGPAGE_DEVID_SAS3004:
10372 	case MPI25_MFGPAGE_DEVID_SAS3008:
10373 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
10374 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
10375 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
10376 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
10377 		return MPI25_VERSION;
10378 	case MPI26_MFGPAGE_DEVID_SAS3216:
10379 	case MPI26_MFGPAGE_DEVID_SAS3224:
10380 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
10381 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
10382 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
10383 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
10384 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
10385 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
10386 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
10387 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
10388 	case MPI26_MFGPAGE_DEVID_SAS3508:
10389 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
10390 	case MPI26_MFGPAGE_DEVID_SAS3408:
10391 	case MPI26_MFGPAGE_DEVID_SAS3516:
10392 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
10393 	case MPI26_MFGPAGE_DEVID_SAS3416:
10394 	case MPI26_MFGPAGE_DEVID_SAS3616:
10395 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10396 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10397 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10398 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10399 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10400 		return MPI26_VERSION;
10401 	}
10402 	return 0;
10403 }
10404 
10405 /**
10406  * _scsih_probe - attach and add scsi host
10407  * @pdev: PCI device struct
10408  * @id: pci device id
10409  *
10410  * Return: 0 success, anything else error.
10411  */
10412 static int
10413 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10414 {
10415 	struct MPT3SAS_ADAPTER *ioc;
10416 	struct Scsi_Host *shost = NULL;
10417 	int rv;
10418 	u16 hba_mpi_version;
10419 
10420 	/* Determine in which MPI version class this pci device belongs */
10421 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10422 	if (hba_mpi_version == 0)
10423 		return -ENODEV;
10424 
	/* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
	 * return -ENODEV for HBAs of other generations.
	 */
10428 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10429 		return -ENODEV;
10430 
	/* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
	 * return -ENODEV for HBAs of other generations.
	 */
10434 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10435 		|| hba_mpi_version ==  MPI26_VERSION)))
10436 		return -ENODEV;
10437 
10438 	switch (hba_mpi_version) {
10439 	case MPI2_VERSION:
10440 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10441 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
		/* Use mpt2sas driver host template for SAS 2.0 HBAs */
10443 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10444 		  sizeof(struct MPT3SAS_ADAPTER));
10445 		if (!shost)
10446 			return -ENODEV;
10447 		ioc = shost_priv(shost);
10448 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10449 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10450 		ioc->id = mpt2_ids++;
10451 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10452 		switch (pdev->device) {
10453 		case MPI2_MFGPAGE_DEVID_SSS6200:
10454 			ioc->is_warpdrive = 1;
10455 			ioc->hide_ir_msg = 1;
10456 			break;
10457 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10458 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10459 			ioc->is_mcpu_endpoint = 1;
10460 			break;
10461 		default:
10462 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10463 			break;
10464 		}
10465 		break;
10466 	case MPI25_VERSION:
10467 	case MPI26_VERSION:
		/* Use mpt3sas driver host template for SAS 3.0 HBAs */
10469 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10470 		  sizeof(struct MPT3SAS_ADAPTER));
10471 		if (!shost)
10472 			return -ENODEV;
10473 		ioc = shost_priv(shost);
10474 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10475 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10476 		ioc->id = mpt3_ids++;
10477 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10478 		switch (pdev->device) {
10479 		case MPI26_MFGPAGE_DEVID_SAS3508:
10480 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10481 		case MPI26_MFGPAGE_DEVID_SAS3408:
10482 		case MPI26_MFGPAGE_DEVID_SAS3516:
10483 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10484 		case MPI26_MFGPAGE_DEVID_SAS3416:
10485 		case MPI26_MFGPAGE_DEVID_SAS3616:
10486 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10487 			ioc->is_gen35_ioc = 1;
10488 			break;
10489 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10490 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10491 			dev_info(&pdev->dev,
10492 			    "HBA is in Configurable Secure mode\n");
10493 			/* fall through */
10494 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10495 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10496 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10497 			break;
10498 		default:
10499 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10500 		}
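		/*
		 * SAS 3.0 C0 (and later) and all MPI 2.6 IOCs support
		 * multiple reply post host index registers (combined reply
		 * queues); record how many index registers this generation
		 * provides.
		 */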
10501 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10502 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10503 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10504 			ioc->combined_reply_queue = 1;
10505 			if (ioc->is_gen35_ioc)
10506 				ioc->combined_reply_index_count =
10507 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10508 			else
10509 				ioc->combined_reply_index_count =
10510 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10511 		}
10512 		break;
10513 	default:
10514 		return -ENODEV;
10515 	}
10516 
10517 	INIT_LIST_HEAD(&ioc->list);
10518 	spin_lock(&gioc_lock);
10519 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10520 	spin_unlock(&gioc_lock);
10521 	ioc->shost = shost;
10522 	ioc->pdev = pdev;
10523 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10524 	ioc->tm_cb_idx = tm_cb_idx;
10525 	ioc->ctl_cb_idx = ctl_cb_idx;
10526 	ioc->base_cb_idx = base_cb_idx;
10527 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10528 	ioc->transport_cb_idx = transport_cb_idx;
10529 	ioc->scsih_cb_idx = scsih_cb_idx;
10530 	ioc->config_cb_idx = config_cb_idx;
10531 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10532 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10533 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10534 	ioc->logging_level = logging_level;
10535 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10536 	/*
10537 	 * Enable MEMORY MOVE support flag.
10538 	 */
10539 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10540 
10541 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10542 
10543 	/* misc semaphores and spin locks */
10544 	mutex_init(&ioc->reset_in_progress_mutex);
10545 	/* initializing pci_access_mutex lock */
10546 	mutex_init(&ioc->pci_access_mutex);
10547 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10548 	spin_lock_init(&ioc->scsi_lookup_lock);
10549 	spin_lock_init(&ioc->sas_device_lock);
10550 	spin_lock_init(&ioc->sas_node_lock);
10551 	spin_lock_init(&ioc->fw_event_lock);
10552 	spin_lock_init(&ioc->raid_device_lock);
10553 	spin_lock_init(&ioc->pcie_device_lock);
10554 	spin_lock_init(&ioc->diag_trigger_lock);
10555 
10556 	INIT_LIST_HEAD(&ioc->sas_device_list);
10557 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
10558 	INIT_LIST_HEAD(&ioc->sas_expander_list);
10559 	INIT_LIST_HEAD(&ioc->enclosure_list);
10560 	INIT_LIST_HEAD(&ioc->pcie_device_list);
10561 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10562 	INIT_LIST_HEAD(&ioc->fw_event_list);
10563 	INIT_LIST_HEAD(&ioc->raid_device_list);
10564 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10565 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
10566 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
10567 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10568 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10569 	INIT_LIST_HEAD(&ioc->reply_queue_list);
10570 
10571 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10572 
10573 	/* init shost parameters */
10574 	shost->max_cmd_len = 32;
10575 	shost->max_lun = max_lun;
10576 	shost->transportt = mpt3sas_transport_template;
10577 	shost->unique_id = ioc->id;
10578 
10579 	if (ioc->is_mcpu_endpoint) {
10580 		/* mCPU MPI support 64K max IO */
10581 		shost->max_sectors = 128;
10582 		ioc_info(ioc, "The max_sectors value is set to %d\n",
10583 			 shost->max_sectors);
10584 	} else {
10585 		if (max_sectors != 0xFFFF) {
10586 			if (max_sectors < 64) {
10587 				shost->max_sectors = 64;
10588 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10589 					 max_sectors);
10590 			} else if (max_sectors > 32767) {
10591 				shost->max_sectors = 32767;
				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
10593 					 max_sectors);
10594 			} else {
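				/* round odd values down to an even sector count */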
10595 				shost->max_sectors = max_sectors & 0xFFFE;
10596 				ioc_info(ioc, "The max_sectors value is set to %d\n",
10597 					 shost->max_sectors);
10598 			}
10599 		}
10600 	}
10601 	/* register EEDP capabilities with SCSI layer */
10602 	if (prot_mask > 0)
10603 		scsi_host_set_prot(shost, prot_mask);
10604 	else
10605 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10606 				   | SHOST_DIF_TYPE2_PROTECTION
10607 				   | SHOST_DIF_TYPE3_PROTECTION);
10608 
10609 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10610 
10611 	/* event thread */
10612 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10613 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
10614 	ioc->firmware_event_thread = alloc_ordered_workqueue(
10615 	    ioc->firmware_event_name, 0);
10616 	if (!ioc->firmware_event_thread) {
10617 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10618 			__FILE__, __LINE__, __func__);
10619 		rv = -ENODEV;
10620 		goto out_thread_fail;
10621 	}
10622 
10623 	ioc->is_driver_loading = 1;
10624 	if ((mpt3sas_base_attach(ioc))) {
10625 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10626 			__FILE__, __LINE__, __func__);
10627 		rv = -ENODEV;
10628 		goto out_attach_fail;
10629 	}
10630 
10631 	if (ioc->is_warpdrive) {
10632 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
10633 			ioc->hide_drives = 0;
10634 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
10635 			ioc->hide_drives = 1;
10636 		else {
10637 			if (mpt3sas_get_num_volumes(ioc))
10638 				ioc->hide_drives = 1;
10639 			else
10640 				ioc->hide_drives = 0;
10641 		}
10642 	} else
10643 		ioc->hide_drives = 0;
10644 
10645 	rv = scsi_add_host(shost, &pdev->dev);
10646 	if (rv) {
10647 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10648 			__FILE__, __LINE__, __func__);
10649 		goto out_add_shost_fail;
10650 	}
10651 
10652 	scsi_scan_host(shost);
10653 	return 0;
10654 out_add_shost_fail:
10655 	mpt3sas_base_detach(ioc);
10656  out_attach_fail:
10657 	destroy_workqueue(ioc->firmware_event_thread);
10658  out_thread_fail:
10659 	spin_lock(&gioc_lock);
10660 	list_del(&ioc->list);
10661 	spin_unlock(&gioc_lock);
10662 	scsi_host_put(shost);
10663 	return rv;
10664 }
10665 
10666 #ifdef CONFIG_PM
10667 /**
10668  * scsih_suspend - power management suspend main entry point
10669  * @pdev: PCI device struct
10670  * @state: PM state change to (usually PCI_D3)
10671  *
10672  * Return: 0 success, anything else error.
10673  */
10674 static int
10675 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10676 {
10677 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10678 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10679 	pci_power_t device_state;
10680 
10681 	mpt3sas_base_stop_watchdog(ioc);
10682 	flush_scheduled_work();
10683 	scsi_block_requests(shost);
10684 	device_state = pci_choose_state(pdev, state);
10685 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10686 		 pdev, pci_name(pdev), device_state);
10687 
10688 	pci_save_state(pdev);
10689 	mpt3sas_base_free_resources(ioc);
10690 	pci_set_power_state(pdev, device_state);
10691 	return 0;
10692 }
10693 
10694 /**
10695  * scsih_resume - power management resume main entry point
10696  * @pdev: PCI device struct
10697  *
10698  * Return: 0 success, anything else error.
10699  */
10700 static int
10701 scsih_resume(struct pci_dev *pdev)
10702 {
10703 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10704 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10705 	pci_power_t device_state = pdev->current_state;
10706 	int r;
10707 
10708 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10709 		 pdev, pci_name(pdev), device_state);
10710 
10711 	pci_set_power_state(pdev, PCI_D0);
10712 	pci_enable_wake(pdev, PCI_D0, 0);
10713 	pci_restore_state(pdev);
10714 	ioc->pdev = pdev;
10715 	r = mpt3sas_base_map_resources(ioc);
10716 	if (r)
10717 		return r;
10718 
10719 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10720 	scsi_unblock_requests(shost);
10721 	mpt3sas_base_start_watchdog(ioc);
10722 	return 0;
10723 }
10724 #endif /* CONFIG_PM */
10725 
10726 /**
10727  * scsih_pci_error_detected - Called when a PCI error is detected.
10728  * @pdev: PCI device struct
10729  * @state: PCI channel state
10730  *
10731  * Description: Called when a PCI error is detected.
10732  *
10733  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
 * PCI_ERS_RESULT_DISCONNECT.
10735 static pci_ers_result_t
10736 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10737 {
10738 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10739 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10740 
10741 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10742 
10743 	switch (state) {
10744 	case pci_channel_io_normal:
10745 		return PCI_ERS_RESULT_CAN_RECOVER;
10746 	case pci_channel_io_frozen:
10747 		/* Fatal error, prepare for slot reset */
10748 		ioc->pci_error_recovery = 1;
10749 		scsi_block_requests(ioc->shost);
10750 		mpt3sas_base_stop_watchdog(ioc);
10751 		mpt3sas_base_free_resources(ioc);
10752 		return PCI_ERS_RESULT_NEED_RESET;
10753 	case pci_channel_io_perm_failure:
10754 		/* Permanent error, prepare for device removal */
10755 		ioc->pci_error_recovery = 1;
10756 		mpt3sas_base_stop_watchdog(ioc);
10757 		_scsih_flush_running_cmds(ioc);
10758 		return PCI_ERS_RESULT_DISCONNECT;
10759 	}
10760 	return PCI_ERS_RESULT_NEED_RESET;
10761 }
10762 
10763 /**
10764  * scsih_pci_slot_reset - Called when PCI slot has been reset.
10765  * @pdev: PCI device struct
10766  *
10767  * Description: This routine is called by the pci error recovery
10768  * code after the PCI slot has been reset, just before we
10769  * should resume normal operations.
10770  */
10771 static pci_ers_result_t
10772 scsih_pci_slot_reset(struct pci_dev *pdev)
10773 {
10774 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10775 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10776 	int rc;
10777 
10778 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
10779 
10780 	ioc->pci_error_recovery = 0;
10781 	ioc->pdev = pdev;
10782 	pci_restore_state(pdev);
10783 	rc = mpt3sas_base_map_resources(ioc);
10784 	if (rc)
10785 		return PCI_ERS_RESULT_DISCONNECT;
10786 
10787 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10788 
10789 	ioc_warn(ioc, "hard reset: %s\n",
10790 		 (rc == 0) ? "success" : "failed");
10791 
10792 	if (!rc)
10793 		return PCI_ERS_RESULT_RECOVERED;
10794 	else
10795 		return PCI_ERS_RESULT_DISCONNECT;
10796 }
10797 
10798 /**
10799  * scsih_pci_resume() - resume normal ops after PCI reset
10800  * @pdev: pointer to PCI device
10801  *
 * Called when the error recovery driver tells us that it's
10803  * OK to resume normal operation. Use completion to allow
10804  * halted scsi ops to resume.
10805  */
10806 static void
10807 scsih_pci_resume(struct pci_dev *pdev)
10808 {
10809 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10810 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10811 
10812 	ioc_info(ioc, "PCI error: resume callback!!\n");
10813 
10814 	mpt3sas_base_start_watchdog(ioc);
10815 	scsi_unblock_requests(ioc->shost);
10816 }
10817 
10818 /**
10819  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10820  * @pdev: pointer to PCI device
10821  */
10822 static pci_ers_result_t
10823 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10824 {
10825 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10826 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10827 
10828 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10829 
10830 	/* TODO - dump whatever for debugging purposes */
10831 
	/* This is called only if scsih_pci_error_detected returns
10833 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10834 	 * works, no need to reset slot.
10835 	 */
10836 	return PCI_ERS_RESULT_RECOVERED;
10837 }
10838 
10839 /**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
10841  * @sdev: scsi device struct
10842  *
10843  * This is called when a user indicates they would like to enable
 * NCQ command priorities. This works only on SATA devices.
10845  */
10846 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10847 {
10848 	unsigned char *buf;
10849 	bool ncq_prio_supp = false;
10850 
10851 	if (!scsi_device_supports_vpd(sdev))
10852 		return ncq_prio_supp;
10853 
10854 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10855 	if (!buf)
10856 		return ncq_prio_supp;
10857 
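	/*
	 * VPD page 0x89 (ATA Information) embeds the ATA IDENTIFY DEVICE
	 * data at byte offset 60; byte 213 bit 4 corresponds to IDENTIFY
	 * word 76 bit 12, the NCQ priority capability bit.
	 */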
10858 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10859 		ncq_prio_supp = (buf[213] >> 4) & 1;
10860 
10861 	kfree(buf);
10862 	return ncq_prio_supp;
10863 }
10864 /*
10865  * The pci device ids are defined in mpi/mpi2_cnfg.h.
10866  */
10867 static const struct pci_device_id mpt3sas_pci_table[] = {
10868 	/* Spitfire ~ 2004 */
10869 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10870 		PCI_ANY_ID, PCI_ANY_ID },
10871 	/* Falcon ~ 2008 */
10872 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10873 		PCI_ANY_ID, PCI_ANY_ID },
10874 	/* Liberator ~ 2108 */
10875 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10876 		PCI_ANY_ID, PCI_ANY_ID },
10877 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10878 		PCI_ANY_ID, PCI_ANY_ID },
10879 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10880 		PCI_ANY_ID, PCI_ANY_ID },
10881 	/* Meteor ~ 2116 */
10882 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10883 		PCI_ANY_ID, PCI_ANY_ID },
10884 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10885 		PCI_ANY_ID, PCI_ANY_ID },
10886 	/* Thunderbolt ~ 2208 */
10887 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10888 		PCI_ANY_ID, PCI_ANY_ID },
10889 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10890 		PCI_ANY_ID, PCI_ANY_ID },
10891 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10892 		PCI_ANY_ID, PCI_ANY_ID },
10893 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10894 		PCI_ANY_ID, PCI_ANY_ID },
10895 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10896 		PCI_ANY_ID, PCI_ANY_ID },
10897 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10898 		PCI_ANY_ID, PCI_ANY_ID },
10899 	/* Mustang ~ 2308 */
10900 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10901 		PCI_ANY_ID, PCI_ANY_ID },
10902 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10903 		PCI_ANY_ID, PCI_ANY_ID },
10904 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10905 		PCI_ANY_ID, PCI_ANY_ID },
10906 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
10907 		PCI_ANY_ID, PCI_ANY_ID },
10908 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
10909 		PCI_ANY_ID, PCI_ANY_ID },
10910 	/* SSS6200 */
10911 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10912 		PCI_ANY_ID, PCI_ANY_ID },
10913 	/* Fury ~ 3004 and 3008 */
10914 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10915 		PCI_ANY_ID, PCI_ANY_ID },
10916 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10917 		PCI_ANY_ID, PCI_ANY_ID },
10918 	/* Invader ~ 3108 */
10919 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10920 		PCI_ANY_ID, PCI_ANY_ID },
10921 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10922 		PCI_ANY_ID, PCI_ANY_ID },
10923 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10924 		PCI_ANY_ID, PCI_ANY_ID },
10925 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10926 		PCI_ANY_ID, PCI_ANY_ID },
10927 	/* Cutlass ~ 3216 and 3224 */
10928 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10929 		PCI_ANY_ID, PCI_ANY_ID },
10930 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10931 		PCI_ANY_ID, PCI_ANY_ID },
10932 	/* Intruder ~ 3316 and 3324 */
10933 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10934 		PCI_ANY_ID, PCI_ANY_ID },
10935 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10936 		PCI_ANY_ID, PCI_ANY_ID },
10937 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10938 		PCI_ANY_ID, PCI_ANY_ID },
10939 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10940 		PCI_ANY_ID, PCI_ANY_ID },
10941 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10942 		PCI_ANY_ID, PCI_ANY_ID },
10943 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10944 		PCI_ANY_ID, PCI_ANY_ID },
10945 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10946 		PCI_ANY_ID, PCI_ANY_ID },
10947 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10948 		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408 */
10950 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10951 		PCI_ANY_ID, PCI_ANY_ID },
10952 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10953 		PCI_ANY_ID, PCI_ANY_ID },
10954 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10955 		PCI_ANY_ID, PCI_ANY_ID },
10956 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10957 		PCI_ANY_ID, PCI_ANY_ID },
10958 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10959 		PCI_ANY_ID, PCI_ANY_ID },
10960 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10961 		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616 */
10963 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10964 		PCI_ANY_ID, PCI_ANY_ID },
10965 
10966 	/* Aero SI 0x00E1 Configurable Secure
10967 	 * 0x00E2 Hard Secure
10968 	 */
10969 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10970 		PCI_ANY_ID, PCI_ANY_ID },
10971 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10972 		PCI_ANY_ID, PCI_ANY_ID },
10973 
10974 	/* Atlas PCIe Switch Management Port */
10975 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
10976 		PCI_ANY_ID, PCI_ANY_ID },
10977 
10978 	/* Sea SI 0x00E5 Configurable Secure
10979 	 * 0x00E6 Hard Secure
10980 	 */
10981 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10982 		PCI_ANY_ID, PCI_ANY_ID },
10983 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10984 		PCI_ANY_ID, PCI_ANY_ID },
10985 
10986 	{0}     /* Terminating entry */
10987 };
10988 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10989 
10990 static struct pci_error_handlers _mpt3sas_err_handler = {
10991 	.error_detected	= scsih_pci_error_detected,
10992 	.mmio_enabled	= scsih_pci_mmio_enabled,
10993 	.slot_reset	= scsih_pci_slot_reset,
10994 	.resume		= scsih_pci_resume,
10995 };
10996 
10997 static struct pci_driver mpt3sas_driver = {
10998 	.name		= MPT3SAS_DRIVER_NAME,
10999 	.id_table	= mpt3sas_pci_table,
11000 	.probe		= _scsih_probe,
11001 	.remove		= scsih_remove,
11002 	.shutdown	= scsih_shutdown,
11003 	.err_handler	= &_mpt3sas_err_handler,
11004 #ifdef CONFIG_PM
11005 	.suspend	= scsih_suspend,
11006 	.resume		= scsih_resume,
11007 #endif
11008 };
11009 
11010 /**
11011  * scsih_init - main entry point for this driver.
11012  *
11013  * Return: 0 success, anything else error.
11014  */
11015 static int
11016 scsih_init(void)
11017 {
11018 	mpt2_ids = 0;
11019 	mpt3_ids = 0;
11020 
11021 	mpt3sas_base_initialize_callback_handler();
11022 
	/* queuecommand callback handler */
11024 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11025 
11026 	/* task management callback handler */
11027 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11028 
11029 	/* base internal commands callback handler */
11030 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11031 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11032 	    mpt3sas_port_enable_done);
11033 
11034 	/* transport internal commands callback handler */
11035 	transport_cb_idx = mpt3sas_base_register_callback_handler(
11036 	    mpt3sas_transport_done);
11037 
11038 	/* scsih internal commands callback handler */
11039 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11040 
11041 	/* configuration page API internal commands callback handler */
11042 	config_cb_idx = mpt3sas_base_register_callback_handler(
11043 	    mpt3sas_config_done);
11044 
11045 	/* ctl module callback handler */
11046 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11047 
11048 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11049 	    _scsih_tm_tr_complete);
11050 
11051 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11052 	    _scsih_tm_volume_tr_complete);
11053 
11054 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11055 	    _scsih_sas_control_complete);
11056 
11057 	return 0;
11058 }
11059 
11060 /**
11061  * scsih_exit - exit point for this driver (when it is a module).
11064  */
11065 static void
11066 scsih_exit(void)
11067 {
11068 
11069 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11070 	mpt3sas_base_release_callback_handler(tm_cb_idx);
11071 	mpt3sas_base_release_callback_handler(base_cb_idx);
11072 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11073 	mpt3sas_base_release_callback_handler(transport_cb_idx);
11074 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
11075 	mpt3sas_base_release_callback_handler(config_cb_idx);
11076 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
11077 
11078 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11079 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11080 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11081 
	/* raid transport support */
11083 	if (hbas_to_enumerate != 1)
11084 		raid_class_release(mpt3sas_raid_template);
11085 	if (hbas_to_enumerate != 2)
11086 		raid_class_release(mpt2sas_raid_template);
11087 	sas_release_transport(mpt3sas_transport_template);
11088 }
11089 
11090 /**
11091  * _mpt3sas_init - main entry point for this driver.
11092  *
11093  * Return: 0 success, anything else error.
11094  */
11095 static int __init
11096 _mpt3sas_init(void)
11097 {
11098 	int error;
11099 
11100 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11101 					MPT3SAS_DRIVER_VERSION);
11102 
11103 	mpt3sas_transport_template =
11104 	    sas_attach_transport(&mpt3sas_transport_functions);
11105 	if (!mpt3sas_transport_template)
11106 		return -ENODEV;
11107 
	/* No need to attach the mpt3sas raid functions template
	 * if the hbas_to_enumerate value is one.
	 */
11111 	if (hbas_to_enumerate != 1) {
11112 		mpt3sas_raid_template =
11113 				raid_class_attach(&mpt3sas_raid_functions);
11114 		if (!mpt3sas_raid_template) {
11115 			sas_release_transport(mpt3sas_transport_template);
11116 			return -ENODEV;
11117 		}
11118 	}
11119 
	/* No need to attach the mpt2sas raid functions template
	 * if the hbas_to_enumerate value is two.
	 */
11123 	if (hbas_to_enumerate != 2) {
11124 		mpt2sas_raid_template =
11125 				raid_class_attach(&mpt2sas_raid_functions);
11126 		if (!mpt2sas_raid_template) {
11127 			sas_release_transport(mpt3sas_transport_template);
11128 			return -ENODEV;
11129 		}
11130 	}
11131 
11132 	error = scsih_init();
11133 	if (error) {
11134 		scsih_exit();
11135 		return error;
11136 	}
11137 
11138 	mpt3sas_ctl_init(hbas_to_enumerate);
11139 
11140 	error = pci_register_driver(&mpt3sas_driver);
11141 	if (error)
11142 		scsih_exit();
11143 
11144 	return error;
11145 }
11146 
11147 /**
11148  * _mpt3sas_exit - exit point for this driver (when it is a module).
11150  */
11151 static void __exit
11152 _mpt3sas_exit(void)
11153 {
11154 	pr_info("mpt3sas version %s unloading\n",
11155 				MPT3SAS_DRIVER_VERSION);
11156 
11157 	mpt3sas_ctl_exit(hbas_to_enumerate);
11158 
11159 	pci_unregister_driver(&mpt3sas_driver);
11160 
11161 	scsih_exit();
11162 }
11163 
11164 module_init(_mpt3sas_init);
11165 module_exit(_mpt3sas_exit);
11166