1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <asm/unaligned.h>
58 
59 #include "mpt3sas_base.h"
60 
61 #define RAID_CHANNEL 1
62 
63 #define PCIE_CHANNEL 2
64 
65 /* forward proto's */
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 	struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
69 
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 	struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 	u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 	struct _pcie_device *pcie_device);
77 static void
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 
81 /* global parameters */
82 LIST_HEAD(mpt3sas_ioc_list);
83 /* global ioc lock for list operations */
84 DEFINE_SPINLOCK(gioc_lock);
85 
86 MODULE_AUTHOR(MPT3SAS_AUTHOR);
87 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90 MODULE_ALIAS("mpt2sas");
91 
92 /* local parameters */
93 static u8 scsi_io_cb_idx = -1;
94 static u8 tm_cb_idx = -1;
95 static u8 ctl_cb_idx = -1;
96 static u8 base_cb_idx = -1;
97 static u8 port_enable_cb_idx = -1;
98 static u8 transport_cb_idx = -1;
99 static u8 scsih_cb_idx = -1;
100 static u8 config_cb_idx = -1;
101 static int mpt2_ids;
102 static int mpt3_ids;
103 
static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
106 static u8 tm_sas_control_cb_idx = -1;
107 
108 /* command line options */
109 static u32 logging_level;
110 MODULE_PARM_DESC(logging_level,
111 	" bits for enabling additional logging info (default=0)");
112 
113 
114 static ushort max_sectors = 0xFFFF;
115 module_param(max_sectors, ushort, 0444);
116 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
117 
118 
119 static int missing_delay[2] = {-1, -1};
120 module_param_array(missing_delay, int, NULL, 0444);
121 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
122 
/* scsi mid-layer global parameter is max_report_luns, which is 511 */
124 #define MPT3SAS_MAX_LUN (16895)
125 static u64 max_lun = MPT3SAS_MAX_LUN;
126 module_param(max_lun, ullong, 0444);
127 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
128 
129 static ushort hbas_to_enumerate;
130 module_param(hbas_to_enumerate, ushort, 0444);
131 MODULE_PARM_DESC(hbas_to_enumerate,
132 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 		  1 - enumerates only SAS 2.0 generation HBAs\n \
134 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
135 
/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Any combination of these bits can be set.
 */
143 static int diag_buffer_enable = -1;
144 module_param(diag_buffer_enable, int, 0444);
145 MODULE_PARM_DESC(diag_buffer_enable,
146 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
147 static int disable_discovery = -1;
148 module_param(disable_discovery, int, 0444);
149 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
150 
151 
152 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153 static int prot_mask = -1;
154 module_param(prot_mask, int, 0444);
155 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
156 
157 static bool enable_sdev_max_qd;
158 module_param(enable_sdev_max_qd, bool, 0444);
159 MODULE_PARM_DESC(enable_sdev_max_qd,
160 	"Enable sdev max qd as can_queue, def=disabled(0)");
161 
162 static int multipath_on_hba = -1;
163 module_param(multipath_on_hba, int, 0);
164 MODULE_PARM_DESC(multipath_on_hba,
165 	"Multipath support to add same target device\n\t\t"
166 	"as many times as it is visible to HBA from various paths\n\t\t"
167 	"(by default:\n\t\t"
168 	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
169 	"\t SAS 3.5 HBA - This will be enabled)");
170 
171 /* raid transport support */
172 static struct raid_template *mpt3sas_raid_template;
173 static struct raid_template *mpt2sas_raid_template;
174 
175 
176 /**
177  * struct sense_info - common structure for obtaining sense keys
178  * @skey: sense key
179  * @asc: additional sense code
180  * @ascq: additional sense code qualifier
181  */
182 struct sense_info {
183 	u8 skey;
184 	u8 asc;
185 	u8 ascq;
186 };
187 
188 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
189 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
190 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
191 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
192 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
193 /**
194  * struct fw_event_work - firmware event struct
195  * @list: link list framework
196  * @work: work object (ioc->fault_reset_work_q)
197  * @ioc: per adapter object
198  * @device_handle: device handle
199  * @VF_ID: virtual function id
200  * @VP_ID: virtual port id
201  * @ignore: flag meaning this event has been marked to ignore
202  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
203  * @refcount: kref for this event
204  * @event_data: reply event data payload follows
205  *
 * This object is stored on ioc->fw_event_list.
207  */
208 struct fw_event_work {
209 	struct list_head	list;
210 	struct work_struct	work;
211 
212 	struct MPT3SAS_ADAPTER *ioc;
213 	u16			device_handle;
214 	u8			VF_ID;
215 	u8			VP_ID;
216 	u8			ignore;
217 	u16			event;
218 	struct kref		refcount;
219 	char			event_data[] __aligned(4);
220 };
221 
222 static void fw_event_work_free(struct kref *r)
223 {
224 	kfree(container_of(r, struct fw_event_work, refcount));
225 }
226 
227 static void fw_event_work_get(struct fw_event_work *fw_work)
228 {
229 	kref_get(&fw_work->refcount);
230 }
231 
232 static void fw_event_work_put(struct fw_event_work *fw_work)
233 {
234 	kref_put(&fw_work->refcount, fw_event_work_free);
235 }
236 
237 static struct fw_event_work *alloc_fw_event_work(int len)
238 {
239 	struct fw_event_work *fw_event;
240 
241 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
242 	if (!fw_event)
243 		return NULL;
244 
245 	kref_init(&fw_event->refcount);
246 	return fw_event;
247 }
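
/*
 * Illustrative sketch (not a call site from this driver): allocating a
 * firmware event work item and balancing its kref with the helpers above.
 * The payload type used for sizing is only an example.
 *
 *	struct fw_event_work *fw_event;
 *
 *	fw_event = alloc_fw_event_work(sizeof(Mpi2EventDataSasDiscovery_t));
 *	if (!fw_event)
 *		return;
 *	fw_event_work_get(fw_event);	(extra reference while queued)
 *	fw_event_work_put(fw_event);	(drop the queued reference)
 *	fw_event_work_put(fw_event);	(drop the initial kref_init reference)
 */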
248 
249 /**
250  * struct _scsi_io_transfer - scsi io transfer
251  * @handle: sas device handle (assigned by firmware)
252  * @is_raid: flag set for hidden raid components
 * @dir: DMA data direction (DMA_TO_DEVICE, DMA_FROM_DEVICE, ...)
254  * @data_length: data transfer length
255  * @data_dma: dma pointer to data
256  * @sense: sense data
257  * @lun: lun number
258  * @cdb_length: cdb length
259  * @cdb: cdb contents
260  * @timeout: timeout for this command
261  * @VF_ID: virtual function id
262  * @VP_ID: virtual port id
263  * @valid_reply: flag set for reply message
264  * @sense_length: sense length
265  * @ioc_status: ioc status
266  * @scsi_state: scsi state
 * @scsi_status: scsi status
268  * @log_info: log information
269  * @transfer_length: data length transfer when there is a reply message
270  *
271  * Used for sending internal scsi commands to devices within this module.
272  * Refer to _scsi_send_scsi_io().
273  */
274 struct _scsi_io_transfer {
275 	u16	handle;
276 	u8	is_raid;
277 	enum dma_data_direction dir;
278 	u32	data_length;
279 	dma_addr_t data_dma;
280 	u8	sense[SCSI_SENSE_BUFFERSIZE];
281 	u32	lun;
282 	u8	cdb_length;
283 	u8	cdb[32];
284 	u8	timeout;
285 	u8	VF_ID;
286 	u8	VP_ID;
287 	u8	valid_reply;
  /* the following fields are only valid when 'valid_reply = 1' */
289 	u32	sense_length;
290 	u16	ioc_status;
291 	u8	scsi_state;
292 	u8	scsi_status;
293 	u32	log_info;
294 	u32	transfer_length;
295 };
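
/*
 * Minimal sketch, with hypothetical values, of how a caller might fill a
 * struct _scsi_io_transfer before handing it to _scsi_send_scsi_io() for a
 * standard INQUIRY; the reply-side fields are filled in by the completion
 * path when valid_reply is set.
 *
 *	struct _scsi_io_transfer transfer;
 *
 *	memset(&transfer, 0, sizeof(struct _scsi_io_transfer));
 *	transfer.handle = handle;
 *	transfer.dir = DMA_FROM_DEVICE;
 *	transfer.data_length = 36;
 *	transfer.data_dma = data_dma;
 *	transfer.cdb_length = 6;
 *	transfer.cdb[0] = INQUIRY;
 *	transfer.cdb[4] = 36;
 *	transfer.timeout = 10;
 */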
296 
297 /**
298  * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: value string written to the logging_level module parameter
 * @kp: kernel_param structure for logging_level
301  *
302  * Note: The logging levels are defined in mpt3sas_debug.h.
303  */
304 static int
305 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
306 {
307 	int ret = param_set_int(val, kp);
308 	struct MPT3SAS_ADAPTER *ioc;
309 
310 	if (ret)
311 		return ret;
312 
313 	pr_info("setting logging_level(0x%08x)\n", logging_level);
314 	spin_lock(&gioc_lock);
315 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
316 		ioc->logging_level = logging_level;
317 	spin_unlock(&gioc_lock);
318 	return 0;
319 }
320 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
321 	&logging_level, 0644);
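
/*
 * Because the parameter is registered above with mode 0644, logging_level
 * can also be changed at runtime through sysfs, for example (value is
 * illustrative):
 *
 *	echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 *
 * _scsih_set_debug_level() then propagates the new value to every IOC on
 * mpt3sas_ioc_list.
 */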
322 
323 /**
324  * _scsih_srch_boot_sas_address - search based on sas_address
325  * @sas_address: sas address
326  * @boot_device: boot device object from bios page 2
327  *
328  * Return: 1 when there's a match, 0 means no match.
329  */
330 static inline int
331 _scsih_srch_boot_sas_address(u64 sas_address,
332 	Mpi2BootDeviceSasWwid_t *boot_device)
333 {
334 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
335 }
336 
337 /**
338  * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in the IDENTIFY frame
340  * @boot_device: boot device object from bios page 2
341  *
342  * Return: 1 when there's a match, 0 means no match.
343  */
344 static inline int
345 _scsih_srch_boot_device_name(u64 device_name,
346 	Mpi2BootDeviceDeviceName_t *boot_device)
347 {
348 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
349 }
350 
351 /**
352  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
353  * @enclosure_logical_id: enclosure logical id
354  * @slot_number: slot number
355  * @boot_device: boot device object from bios page 2
356  *
357  * Return: 1 when there's a match, 0 means no match.
358  */
359 static inline int
360 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
361 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
362 {
363 	return (enclosure_logical_id == le64_to_cpu(boot_device->
364 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
365 	    SlotNumber)) ? 1 : 0;
366 }
367 
368 /**
369  * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
370  *			  port number from port list
371  * @ioc: per adapter object
372  * @port_id: port number
 * @bypass_dirty_port_flag: when set, return the matching hba port entry
 *			even if that entry is marked as dirty.
 *
 * Search for the hba port entry corresponding to the provided port number;
 * if found, return the port object, otherwise return NULL.
378  */
379 struct hba_port *
380 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
381 	u8 port_id, u8 bypass_dirty_port_flag)
382 {
383 	struct hba_port *port, *port_next;
384 
385 	/*
386 	 * When multipath_on_hba is disabled then
387 	 * search the hba_port entry using default
388 	 * port id i.e. 255
389 	 */
390 	if (!ioc->multipath_on_hba)
391 		port_id = MULTIPATH_DISABLED_PORT_ID;
392 
393 	list_for_each_entry_safe(port, port_next,
394 	    &ioc->port_table_list, list) {
395 		if (port->port_id != port_id)
396 			continue;
397 		if (bypass_dirty_port_flag)
398 			return port;
399 		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
400 			continue;
401 		return port;
402 	}
403 
404 	/*
405 	 * Allocate hba_port object for default port id (i.e. 255)
406 	 * when multipath_on_hba is disabled for the HBA.
407 	 * And add this object to port_table_list.
408 	 */
409 	if (!ioc->multipath_on_hba) {
410 		port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
411 		if (!port)
412 			return NULL;
413 
414 		port->port_id = port_id;
415 		ioc_info(ioc,
416 		   "hba_port entry: %p, port: %d is added to hba_port list\n",
417 		   port, port->port_id);
418 		list_add_tail(&port->list,
419 		    &ioc->port_table_list);
420 		return port;
421 	}
422 	return NULL;
423 }
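
/*
 * Usage sketch (not lifted from a specific call site): callers typically
 * translate the PhysicalPort field of an already-read SAS device page 0
 * into an hba_port before matching or adding a device.
 *
 *	struct hba_port *port;
 *
 *	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
 *	if (!port)
 *		return;
 */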
424 
425 /**
426  * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
427  * @ioc: per adapter object
428  * @port: hba_port object
429  * @phy: phy number
430  *
431  * Return virtual_phy object corresponding to phy number.
432  */
433 struct virtual_phy *
434 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
435 	struct hba_port *port, u32 phy)
436 {
437 	struct virtual_phy *vphy, *vphy_next;
438 
439 	if (!port->vphys_mask)
440 		return NULL;
441 
442 	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
443 		if (vphy->phy_mask & (1 << phy))
444 			return vphy;
445 	}
446 	return NULL;
447 }
448 
449 /**
450  * _scsih_is_boot_device - search for matching boot device.
451  * @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
453  * @enclosure_logical_id: enclosure logical id
454  * @slot: slot number
455  * @form: specifies boot device form
456  * @boot_device: boot device object from bios page 2
457  *
458  * Return: 1 when there's a match, 0 means no match.
459  */
460 static int
461 _scsih_is_boot_device(u64 sas_address, u64 device_name,
462 	u64 enclosure_logical_id, u16 slot, u8 form,
463 	Mpi2BiosPage2BootDevice_t *boot_device)
464 {
465 	int rc = 0;
466 
467 	switch (form) {
468 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
469 		if (!sas_address)
470 			break;
471 		rc = _scsih_srch_boot_sas_address(
472 		    sas_address, &boot_device->SasWwid);
473 		break;
474 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
475 		if (!enclosure_logical_id)
476 			break;
477 		rc = _scsih_srch_boot_encl_slot(
478 		    enclosure_logical_id,
479 		    slot, &boot_device->EnclosureSlot);
480 		break;
481 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
482 		if (!device_name)
483 			break;
484 		rc = _scsih_srch_boot_device_name(
485 		    device_name, &boot_device->DeviceName);
486 		break;
487 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
488 		break;
489 	}
490 
491 	return rc;
492 }
493 
494 /**
495  * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
497  * @handle: device handle
498  * @sas_address: sas address
499  *
500  * Return: 0 success, non-zero when failure
501  */
502 static int
503 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
504 	u64 *sas_address)
505 {
506 	Mpi2SasDevicePage0_t sas_device_pg0;
507 	Mpi2ConfigReply_t mpi_reply;
508 	u32 ioc_status;
509 
510 	*sas_address = 0;
511 
512 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
513 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
514 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
515 			__FILE__, __LINE__, __func__);
516 		return -ENXIO;
517 	}
518 
519 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
520 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For devices directly attached to the HBA (handle within the
		 * HBA phy range) that are not vSES devices, report the HBA's
		 * own SAS address; a vSES reports its own SAS address.
		 */
524 		if ((handle <= ioc->sas_hba.num_phys) &&
525 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
526 		   MPI2_SAS_DEVICE_INFO_SEP)))
527 			*sas_address = ioc->sas_hba.sas_address;
528 		else
529 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
530 		return 0;
531 	}
532 
533 	/* we hit this because the given parent handle doesn't exist */
534 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
535 		return -ENXIO;
536 
537 	/* else error case */
538 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
539 		handle, ioc_status, __FILE__, __LINE__, __func__);
540 	return -EIO;
541 }
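
/*
 * Minimal caller sketch (the handle value comes from the caller): resolve
 * the SAS address for a firmware device handle and bail out on error.
 *
 *	u64 sas_address;
 *
 *	if (_scsih_get_sas_address(ioc, handle, &sas_address))
 *		return;
 *	(sas_address now holds the device's SAS address, or the HBA's SAS
 *	 address for direct attached non-vSES devices)
 */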
542 
543 /**
544  * _scsih_determine_boot_device - determine boot device.
545  * @ioc: per adapter object
546  * @device: sas_device or pcie_device object
547  * @channel: SAS or PCIe channel
548  *
 * Determines whether this device should be the first device reported to
 * scsi-ml or the sas transport, for the purpose of persistent boot devices.
 * There are primary, alternate, and current entries in bios page 2. The order
 * of priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data is used later in _scsih_probe_boot_devices().
555  */
556 static void
557 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
558 	u32 channel)
559 {
560 	struct _sas_device *sas_device;
561 	struct _pcie_device *pcie_device;
562 	struct _raid_device *raid_device;
563 	u64 sas_address;
564 	u64 device_name;
565 	u64 enclosure_logical_id;
566 	u16 slot;
567 
568 	 /* only process this function when driver loads */
569 	if (!ioc->is_driver_loading)
570 		return;
571 
572 	 /* no Bios, return immediately */
573 	if (!ioc->bios_pg3.BiosVersion)
574 		return;
575 
576 	if (channel == RAID_CHANNEL) {
577 		raid_device = device;
578 		sas_address = raid_device->wwid;
579 		device_name = 0;
580 		enclosure_logical_id = 0;
581 		slot = 0;
582 	} else if (channel == PCIE_CHANNEL) {
583 		pcie_device = device;
584 		sas_address = pcie_device->wwid;
585 		device_name = 0;
586 		enclosure_logical_id = 0;
587 		slot = 0;
588 	} else {
589 		sas_device = device;
590 		sas_address = sas_device->sas_address;
591 		device_name = sas_device->device_name;
592 		enclosure_logical_id = sas_device->enclosure_logical_id;
593 		slot = sas_device->slot;
594 	}
595 
596 	if (!ioc->req_boot_device.device) {
597 		if (_scsih_is_boot_device(sas_address, device_name,
598 		    enclosure_logical_id, slot,
599 		    (ioc->bios_pg2.ReqBootDeviceForm &
600 		    MPI2_BIOSPAGE2_FORM_MASK),
601 		    &ioc->bios_pg2.RequestedBootDevice)) {
602 			dinitprintk(ioc,
603 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
604 					     __func__, (u64)sas_address));
605 			ioc->req_boot_device.device = device;
606 			ioc->req_boot_device.channel = channel;
607 		}
608 	}
609 
610 	if (!ioc->req_alt_boot_device.device) {
611 		if (_scsih_is_boot_device(sas_address, device_name,
612 		    enclosure_logical_id, slot,
613 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
614 		    MPI2_BIOSPAGE2_FORM_MASK),
615 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
616 			dinitprintk(ioc,
617 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
618 					     __func__, (u64)sas_address));
619 			ioc->req_alt_boot_device.device = device;
620 			ioc->req_alt_boot_device.channel = channel;
621 		}
622 	}
623 
624 	if (!ioc->current_boot_device.device) {
625 		if (_scsih_is_boot_device(sas_address, device_name,
626 		    enclosure_logical_id, slot,
627 		    (ioc->bios_pg2.CurrentBootDeviceForm &
628 		    MPI2_BIOSPAGE2_FORM_MASK),
629 		    &ioc->bios_pg2.CurrentBootDevice)) {
630 			dinitprintk(ioc,
631 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
632 					     __func__, (u64)sas_address));
633 			ioc->current_boot_device.device = device;
634 			ioc->current_boot_device.channel = channel;
635 		}
636 	}
637 }
638 
639 static struct _sas_device *
640 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
641 		struct MPT3SAS_TARGET *tgt_priv)
642 {
643 	struct _sas_device *ret;
644 
645 	assert_spin_locked(&ioc->sas_device_lock);
646 
647 	ret = tgt_priv->sas_dev;
648 	if (ret)
649 		sas_device_get(ret);
650 
651 	return ret;
652 }
653 
654 static struct _sas_device *
655 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
656 		struct MPT3SAS_TARGET *tgt_priv)
657 {
658 	struct _sas_device *ret;
659 	unsigned long flags;
660 
661 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
662 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
663 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
664 
665 	return ret;
666 }
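
/*
 * Reference counting sketch, assuming a valid tgt_priv: every successful
 * lookup returns a sas_device with an elevated reference count, which the
 * caller must drop with sas_device_put() once it is done with the object.
 *
 *	struct _sas_device *sas_device;
 *
 *	sas_device = mpt3sas_get_sdev_from_target(ioc, tgt_priv);
 *	if (!sas_device)
 *		return;
 *	... use sas_device ...
 *	sas_device_put(sas_device);
 */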
667 
668 static struct _pcie_device *
669 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
670 	struct MPT3SAS_TARGET *tgt_priv)
671 {
672 	struct _pcie_device *ret;
673 
674 	assert_spin_locked(&ioc->pcie_device_lock);
675 
676 	ret = tgt_priv->pcie_dev;
677 	if (ret)
678 		pcie_device_get(ret);
679 
680 	return ret;
681 }
682 
683 /**
684  * mpt3sas_get_pdev_from_target - pcie device search
685  * @ioc: per adapter object
686  * @tgt_priv: starget private object
687  *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * it before returning the pcie_device object.
 *
 * This searches for the pcie_device from the target, then returns the
 * pcie_device object.
692  */
693 static struct _pcie_device *
694 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
695 	struct MPT3SAS_TARGET *tgt_priv)
696 {
697 	struct _pcie_device *ret;
698 	unsigned long flags;
699 
700 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
701 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
702 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
703 
704 	return ret;
705 }
706 
707 
708 /**
709  * __mpt3sas_get_sdev_by_rphy - sas device search
710  * @ioc: per adapter object
711  * @rphy: sas_rphy pointer
712  *
 * Context: Calling function should acquire ioc->sas_device_lock.
 *
 * This searches for the sas_device matching the rphy object,
 * then returns the sas_device object.
718  */
719 struct _sas_device *
720 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
721 	struct sas_rphy *rphy)
722 {
723 	struct _sas_device *sas_device;
724 
725 	assert_spin_locked(&ioc->sas_device_lock);
726 
727 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
728 		if (sas_device->rphy != rphy)
729 			continue;
730 		sas_device_get(sas_device);
731 		return sas_device;
732 	}
733 
734 	sas_device = NULL;
735 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
736 		if (sas_device->rphy != rphy)
737 			continue;
738 		sas_device_get(sas_device);
739 		return sas_device;
740 	}
741 
742 	return NULL;
743 }
744 
745 /**
 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to
 *				provided sas address from sas_device_list
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 * Context: Calling function should acquire ioc->sas_device_lock.
 *
 * Search for the _sas_device object corresponding to the provided sas address;
 * if found, return the _sas_device object, otherwise return NULL.
753  */
754 struct _sas_device *
755 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
756 	u64 sas_address, struct hba_port *port)
757 {
758 	struct _sas_device *sas_device;
759 
760 	if (!port)
761 		return NULL;
762 
763 	assert_spin_locked(&ioc->sas_device_lock);
764 
765 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
766 		if (sas_device->sas_address != sas_address)
767 			continue;
768 		if (sas_device->port != port)
769 			continue;
770 		sas_device_get(sas_device);
771 		return sas_device;
772 	}
773 
774 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
775 		if (sas_device->sas_address != sas_address)
776 			continue;
777 		if (sas_device->port != port)
778 			continue;
779 		sas_device_get(sas_device);
780 		return sas_device;
781 	}
782 
783 	return NULL;
784 }
785 
786 /**
787  * mpt3sas_get_sdev_by_addr - sas device search
788  * @ioc: per adapter object
789  * @sas_address: sas address
790  * @port: hba port entry
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning the sas_device object.
 *
 * This searches for the sas_device based on sas_address & port,
 * then returns the sas_device object.
795  */
796 struct _sas_device *
797 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
798 	u64 sas_address, struct hba_port *port)
799 {
800 	struct _sas_device *sas_device;
801 	unsigned long flags;
802 
803 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
804 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
805 	    sas_address, port);
806 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
807 
808 	return sas_device;
809 }
810 
811 static struct _sas_device *
812 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
813 {
814 	struct _sas_device *sas_device;
815 
816 	assert_spin_locked(&ioc->sas_device_lock);
817 
818 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
819 		if (sas_device->handle == handle)
820 			goto found_device;
821 
822 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
823 		if (sas_device->handle == handle)
824 			goto found_device;
825 
826 	return NULL;
827 
828 found_device:
829 	sas_device_get(sas_device);
830 	return sas_device;
831 }
832 
833 /**
834  * mpt3sas_get_sdev_by_handle - sas device search
835  * @ioc: per adapter object
836  * @handle: sas device handle (assigned by firmware)
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning the sas_device object.
 *
 * This searches for the sas_device based on the handle, then returns the
 * sas_device object.
841  */
842 struct _sas_device *
843 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
844 {
845 	struct _sas_device *sas_device;
846 	unsigned long flags;
847 
848 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
849 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
850 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
851 
852 	return sas_device;
853 }
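
/*
 * Usage sketch for the handle based lookup (the handle is assumed to come
 * from a firmware event): the returned object carries a reference that
 * must be released with sas_device_put().
 *
 *	struct _sas_device *sas_device;
 *
 *	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
 *	if (sas_device) {
 *		pr_info("sas_addr(0x%016llx)\n",
 *		    (unsigned long long)sas_device->sas_address);
 *		sas_device_put(sas_device);
 *	}
 */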
854 
855 /**
856  * _scsih_display_enclosure_chassis_info - display device location info
857  * @ioc: per adapter object
858  * @sas_device: per sas device object
859  * @sdev: scsi device struct
860  * @starget: scsi target struct
861  */
862 static void
863 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
864 	struct _sas_device *sas_device, struct scsi_device *sdev,
865 	struct scsi_target *starget)
866 {
867 	if (sdev) {
868 		if (sas_device->enclosure_handle != 0)
869 			sdev_printk(KERN_INFO, sdev,
870 			    "enclosure logical id (0x%016llx), slot(%d) \n",
871 			    (unsigned long long)
872 			    sas_device->enclosure_logical_id,
873 			    sas_device->slot);
874 		if (sas_device->connector_name[0] != '\0')
875 			sdev_printk(KERN_INFO, sdev,
876 			    "enclosure level(0x%04x), connector name( %s)\n",
877 			    sas_device->enclosure_level,
878 			    sas_device->connector_name);
879 		if (sas_device->is_chassis_slot_valid)
880 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
881 			    sas_device->chassis_slot);
882 	} else if (starget) {
883 		if (sas_device->enclosure_handle != 0)
884 			starget_printk(KERN_INFO, starget,
885 			    "enclosure logical id(0x%016llx), slot(%d) \n",
886 			    (unsigned long long)
887 			    sas_device->enclosure_logical_id,
888 			    sas_device->slot);
889 		if (sas_device->connector_name[0] != '\0')
890 			starget_printk(KERN_INFO, starget,
891 			    "enclosure level(0x%04x), connector name( %s)\n",
892 			    sas_device->enclosure_level,
893 			    sas_device->connector_name);
894 		if (sas_device->is_chassis_slot_valid)
895 			starget_printk(KERN_INFO, starget,
896 			    "chassis slot(0x%04x)\n",
897 			    sas_device->chassis_slot);
898 	} else {
899 		if (sas_device->enclosure_handle != 0)
900 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
901 				 (u64)sas_device->enclosure_logical_id,
902 				 sas_device->slot);
903 		if (sas_device->connector_name[0] != '\0')
904 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
905 				 sas_device->enclosure_level,
906 				 sas_device->connector_name);
907 		if (sas_device->is_chassis_slot_valid)
908 			ioc_info(ioc, "chassis slot(0x%04x)\n",
909 				 sas_device->chassis_slot);
910 	}
911 }
912 
913 /**
914  * _scsih_sas_device_remove - remove sas_device from list.
915  * @ioc: per adapter object
916  * @sas_device: the sas_device object
917  * Context: This function will acquire ioc->sas_device_lock.
918  *
919  * If sas_device is on the list, remove it and decrement its reference count.
920  */
921 static void
922 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
923 	struct _sas_device *sas_device)
924 {
925 	unsigned long flags;
926 
927 	if (!sas_device)
928 		return;
929 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
930 		 sas_device->handle, (u64)sas_device->sas_address);
931 
932 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
933 
934 	/*
935 	 * The lock serializes access to the list, but we still need to verify
936 	 * that nobody removed the entry while we were waiting on the lock.
937 	 */
938 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
939 	if (!list_empty(&sas_device->list)) {
940 		list_del_init(&sas_device->list);
941 		sas_device_put(sas_device);
942 	}
943 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
944 }
945 
946 /**
947  * _scsih_device_remove_by_handle - removing device object by handle
948  * @ioc: per adapter object
949  * @handle: device handle
950  */
951 static void
952 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
953 {
954 	struct _sas_device *sas_device;
955 	unsigned long flags;
956 
957 	if (ioc->shost_recovery)
958 		return;
959 
960 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
961 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
962 	if (sas_device) {
963 		list_del_init(&sas_device->list);
964 		sas_device_put(sas_device);
965 	}
966 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
967 	if (sas_device) {
968 		_scsih_remove_device(ioc, sas_device);
969 		sas_device_put(sas_device);
970 	}
971 }
972 
973 /**
974  * mpt3sas_device_remove_by_sas_address - removing device object by
975  *					sas address & port number
976  * @ioc: per adapter object
977  * @sas_address: device sas_address
978  * @port: hba port entry
979  *
980  * Return nothing.
981  */
982 void
983 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
984 	u64 sas_address, struct hba_port *port)
985 {
986 	struct _sas_device *sas_device;
987 	unsigned long flags;
988 
989 	if (ioc->shost_recovery)
990 		return;
991 
992 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
993 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
994 	if (sas_device) {
995 		list_del_init(&sas_device->list);
996 		sas_device_put(sas_device);
997 	}
998 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
999 	if (sas_device) {
1000 		_scsih_remove_device(ioc, sas_device);
1001 		sas_device_put(sas_device);
1002 	}
1003 }
1004 
1005 /**
1006  * _scsih_sas_device_add - insert sas_device to the list.
1007  * @ioc: per adapter object
1008  * @sas_device: the sas_device object
1009  * Context: This function will acquire ioc->sas_device_lock.
1010  *
1011  * Adding new object to the ioc->sas_device_list.
1012  */
1013 static void
1014 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1015 	struct _sas_device *sas_device)
1016 {
1017 	unsigned long flags;
1018 
1019 	dewtprintk(ioc,
1020 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1021 			    __func__, sas_device->handle,
1022 			    (u64)sas_device->sas_address));
1023 
1024 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1025 	    NULL, NULL));
1026 
1027 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1028 	sas_device_get(sas_device);
1029 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
1030 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1031 
1032 	if (ioc->hide_drives) {
1033 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
1034 		return;
1035 	}
1036 
1037 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1038 	     sas_device->sas_address_parent, sas_device->port)) {
1039 		_scsih_sas_device_remove(ioc, sas_device);
1040 	} else if (!sas_device->starget) {
1041 		/*
		 * When async scanning is enabled, it's not possible to remove
1043 		 * devices while scanning is turned on due to an oops in
1044 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1045 		 */
1046 		if (!ioc->is_driver_loading) {
1047 			mpt3sas_transport_port_remove(ioc,
1048 			    sas_device->sas_address,
1049 			    sas_device->sas_address_parent,
1050 			    sas_device->port);
1051 			_scsih_sas_device_remove(ioc, sas_device);
1052 		}
1053 	} else
1054 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
1055 }
1056 
1057 /**
1058  * _scsih_sas_device_init_add - insert sas_device to the list.
1059  * @ioc: per adapter object
1060  * @sas_device: the sas_device object
1061  * Context: This function will acquire ioc->sas_device_lock.
1062  *
1063  * Adding new object at driver load time to the ioc->sas_device_init_list.
1064  */
1065 static void
1066 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1067 	struct _sas_device *sas_device)
1068 {
1069 	unsigned long flags;
1070 
1071 	dewtprintk(ioc,
1072 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1073 			    __func__, sas_device->handle,
1074 			    (u64)sas_device->sas_address));
1075 
1076 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1077 	    NULL, NULL));
1078 
1079 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1080 	sas_device_get(sas_device);
1081 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1082 	_scsih_determine_boot_device(ioc, sas_device, 0);
1083 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1084 }
1085 
1086 
1087 static struct _pcie_device *
1088 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1089 {
1090 	struct _pcie_device *pcie_device;
1091 
1092 	assert_spin_locked(&ioc->pcie_device_lock);
1093 
1094 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1095 		if (pcie_device->wwid == wwid)
1096 			goto found_device;
1097 
1098 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1099 		if (pcie_device->wwid == wwid)
1100 			goto found_device;
1101 
1102 	return NULL;
1103 
1104 found_device:
1105 	pcie_device_get(pcie_device);
1106 	return pcie_device;
1107 }
1108 
1109 
1110 /**
1111  * mpt3sas_get_pdev_by_wwid - pcie device search
1112  * @ioc: per adapter object
1113  * @wwid: wwid
1114  *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * it before returning the pcie_device object.
 *
 * This searches for the pcie_device based on wwid, then returns the
 * pcie_device object.
1119  */
1120 static struct _pcie_device *
1121 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1122 {
1123 	struct _pcie_device *pcie_device;
1124 	unsigned long flags;
1125 
1126 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1127 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1128 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1129 
1130 	return pcie_device;
1131 }
1132 
1133 
1134 static struct _pcie_device *
1135 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1136 	int channel)
1137 {
1138 	struct _pcie_device *pcie_device;
1139 
1140 	assert_spin_locked(&ioc->pcie_device_lock);
1141 
1142 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1143 		if (pcie_device->id == id && pcie_device->channel == channel)
1144 			goto found_device;
1145 
1146 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1147 		if (pcie_device->id == id && pcie_device->channel == channel)
1148 			goto found_device;
1149 
1150 	return NULL;
1151 
1152 found_device:
1153 	pcie_device_get(pcie_device);
1154 	return pcie_device;
1155 }
1156 
1157 static struct _pcie_device *
1158 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1159 {
1160 	struct _pcie_device *pcie_device;
1161 
1162 	assert_spin_locked(&ioc->pcie_device_lock);
1163 
1164 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1165 		if (pcie_device->handle == handle)
1166 			goto found_device;
1167 
1168 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1169 		if (pcie_device->handle == handle)
1170 			goto found_device;
1171 
1172 	return NULL;
1173 
1174 found_device:
1175 	pcie_device_get(pcie_device);
1176 	return pcie_device;
1177 }
1178 
1179 
1180 /**
1181  * mpt3sas_get_pdev_by_handle - pcie device search
1182  * @ioc: per adapter object
1183  * @handle: Firmware device handle
1184  *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * it before returning the pcie_device object.
 *
 * This searches for the pcie_device based on handle, then returns the
 * pcie_device object.
1190  */
1191 struct _pcie_device *
1192 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1193 {
1194 	struct _pcie_device *pcie_device;
1195 	unsigned long flags;
1196 
1197 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1198 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1199 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1200 
1201 	return pcie_device;
1202 }
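
/*
 * Lookup sketch mirroring the SAS helpers above (the handle is assumed to
 * come from a firmware event): pcie_device objects are reference counted
 * the same way, so each successful lookup is paired with pcie_device_put().
 *
 *	struct _pcie_device *pcie_device;
 *
 *	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
 *	if (!pcie_device)
 *		return;
 *	... use pcie_device ...
 *	pcie_device_put(pcie_device);
 */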
1203 
1204 /**
1205  * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1206  * @ioc: per adapter object
1207  * Context: This function will acquire ioc->pcie_device_lock
1208  *
 * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
 * reported among all available NVMe drives.
 * The minimum max_shutdown_latency is six seconds.
1212  */
1213 static void
1214 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1215 {
1216 	struct _pcie_device *pcie_device;
1217 	unsigned long flags;
1218 	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1219 
1220 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1221 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1222 		if (pcie_device->shutdown_latency) {
1223 			if (shutdown_latency < pcie_device->shutdown_latency)
1224 				shutdown_latency =
1225 					pcie_device->shutdown_latency;
1226 		}
1227 	}
1228 	ioc->max_shutdown_latency = shutdown_latency;
1229 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1230 }
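
/*
 * Worked example with hypothetical latencies: if three NVMe drives report
 * RTD3 Entry Latencies of 4, 10 and 8 seconds, the loop above leaves
 * shutdown_latency at 10, so ioc->max_shutdown_latency becomes 10.  If every
 * drive reports less than IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT, the value stays
 * at that six second floor.
 */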
1231 
1232 /**
1233  * _scsih_pcie_device_remove - remove pcie_device from list.
1234  * @ioc: per adapter object
1235  * @pcie_device: the pcie_device object
1236  * Context: This function will acquire ioc->pcie_device_lock.
1237  *
1238  * If pcie_device is on the list, remove it and decrement its reference count.
1239  */
1240 static void
1241 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1242 	struct _pcie_device *pcie_device)
1243 {
1244 	unsigned long flags;
1245 	int was_on_pcie_device_list = 0;
1246 	u8 update_latency = 0;
1247 
1248 	if (!pcie_device)
1249 		return;
1250 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1251 		 pcie_device->handle, (u64)pcie_device->wwid);
1252 	if (pcie_device->enclosure_handle != 0)
1253 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1254 			 (u64)pcie_device->enclosure_logical_id,
1255 			 pcie_device->slot);
1256 	if (pcie_device->connector_name[0] != '\0')
1257 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1258 			 pcie_device->enclosure_level,
1259 			 pcie_device->connector_name);
1260 
1261 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1262 	if (!list_empty(&pcie_device->list)) {
1263 		list_del_init(&pcie_device->list);
1264 		was_on_pcie_device_list = 1;
1265 	}
1266 	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1267 		update_latency = 1;
1268 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1269 	if (was_on_pcie_device_list) {
1270 		kfree(pcie_device->serial_number);
1271 		pcie_device_put(pcie_device);
1272 	}
1273 
1274 	/*
1275 	 * This device's RTD3 Entry Latency matches IOC's
1276 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1277 	 * from the available drives as current drive is getting removed.
1278 	 */
1279 	if (update_latency)
1280 		_scsih_set_nvme_max_shutdown_latency(ioc);
1281 }
1282 
1283 
1284 /**
1285  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1286  * @ioc: per adapter object
1287  * @handle: device handle
1288  */
1289 static void
1290 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1291 {
1292 	struct _pcie_device *pcie_device;
1293 	unsigned long flags;
1294 	int was_on_pcie_device_list = 0;
1295 	u8 update_latency = 0;
1296 
1297 	if (ioc->shost_recovery)
1298 		return;
1299 
1300 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1301 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1302 	if (pcie_device) {
1303 		if (!list_empty(&pcie_device->list)) {
1304 			list_del_init(&pcie_device->list);
1305 			was_on_pcie_device_list = 1;
1306 			pcie_device_put(pcie_device);
1307 		}
1308 		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1309 			update_latency = 1;
1310 	}
1311 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1312 	if (was_on_pcie_device_list) {
1313 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1314 		pcie_device_put(pcie_device);
1315 	}
1316 
1317 	/*
1318 	 * This device's RTD3 Entry Latency matches IOC's
1319 	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1320 	 * from the available drives as current drive is getting removed.
1321 	 */
1322 	if (update_latency)
1323 		_scsih_set_nvme_max_shutdown_latency(ioc);
1324 }
1325 
1326 /**
1327  * _scsih_pcie_device_add - add pcie_device object
1328  * @ioc: per adapter object
1329  * @pcie_device: pcie_device object
1330  *
 * This is added to the pcie_device_list linked list.
1332  */
1333 static void
1334 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1335 	struct _pcie_device *pcie_device)
1336 {
1337 	unsigned long flags;
1338 
1339 	dewtprintk(ioc,
1340 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1341 			    __func__,
1342 			    pcie_device->handle, (u64)pcie_device->wwid));
1343 	if (pcie_device->enclosure_handle != 0)
1344 		dewtprintk(ioc,
1345 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1346 				    __func__,
1347 				    (u64)pcie_device->enclosure_logical_id,
1348 				    pcie_device->slot));
1349 	if (pcie_device->connector_name[0] != '\0')
1350 		dewtprintk(ioc,
1351 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1352 				    __func__, pcie_device->enclosure_level,
1353 				    pcie_device->connector_name));
1354 
1355 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1356 	pcie_device_get(pcie_device);
1357 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1358 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1359 
1360 	if (pcie_device->access_status ==
1361 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1362 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1363 		return;
1364 	}
1365 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1366 		_scsih_pcie_device_remove(ioc, pcie_device);
1367 	} else if (!pcie_device->starget) {
1368 		if (!ioc->is_driver_loading) {
1369 /*TODO-- Need to find out whether this condition will occur or not*/
1370 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1371 		}
1372 	} else
1373 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1374 }
1375 
/**
1377  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1378  * @ioc: per adapter object
1379  * @pcie_device: the pcie_device object
1380  * Context: This function will acquire ioc->pcie_device_lock.
1381  *
1382  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1383  */
1384 static void
1385 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1386 				struct _pcie_device *pcie_device)
1387 {
1388 	unsigned long flags;
1389 
1390 	dewtprintk(ioc,
1391 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1392 			    __func__,
1393 			    pcie_device->handle, (u64)pcie_device->wwid));
1394 	if (pcie_device->enclosure_handle != 0)
1395 		dewtprintk(ioc,
1396 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1397 				    __func__,
1398 				    (u64)pcie_device->enclosure_logical_id,
1399 				    pcie_device->slot));
1400 	if (pcie_device->connector_name[0] != '\0')
1401 		dewtprintk(ioc,
1402 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1403 				    __func__, pcie_device->enclosure_level,
1404 				    pcie_device->connector_name));
1405 
1406 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1407 	pcie_device_get(pcie_device);
1408 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1409 	if (pcie_device->access_status !=
1410 	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1411 		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1412 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1413 }
1414 /**
1415  * _scsih_raid_device_find_by_id - raid device search
1416  * @ioc: per adapter object
1417  * @id: sas device target id
1418  * @channel: sas device channel
1419  * Context: Calling function should acquire ioc->raid_device_lock
1420  *
1421  * This searches for raid_device based on target id, then return raid_device
1422  * object.
1423  */
1424 static struct _raid_device *
1425 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1426 {
1427 	struct _raid_device *raid_device, *r;
1428 
1429 	r = NULL;
1430 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1431 		if (raid_device->id == id && raid_device->channel == channel) {
1432 			r = raid_device;
1433 			goto out;
1434 		}
1435 	}
1436 
1437  out:
1438 	return r;
1439 }
1440 
1441 /**
1442  * mpt3sas_raid_device_find_by_handle - raid device search
1443  * @ioc: per adapter object
1444  * @handle: sas device handle (assigned by firmware)
1445  * Context: Calling function should acquire ioc->raid_device_lock
1446  *
1447  * This searches for raid_device based on handle, then return raid_device
1448  * object.
1449  */
1450 struct _raid_device *
1451 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1452 {
1453 	struct _raid_device *raid_device, *r;
1454 
1455 	r = NULL;
1456 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1457 		if (raid_device->handle != handle)
1458 			continue;
1459 		r = raid_device;
1460 		goto out;
1461 	}
1462 
1463  out:
1464 	return r;
1465 }
1466 
1467 /**
1468  * _scsih_raid_device_find_by_wwid - raid device search
1469  * @ioc: per adapter object
 * @wwid: world wide identifier of the raid volume
1471  * Context: Calling function should acquire ioc->raid_device_lock
1472  *
1473  * This searches for raid_device based on wwid, then return raid_device
1474  * object.
1475  */
1476 static struct _raid_device *
1477 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1478 {
1479 	struct _raid_device *raid_device, *r;
1480 
1481 	r = NULL;
1482 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1483 		if (raid_device->wwid != wwid)
1484 			continue;
1485 		r = raid_device;
1486 		goto out;
1487 	}
1488 
1489  out:
1490 	return r;
1491 }
1492 
1493 /**
1494  * _scsih_raid_device_add - add raid_device object
1495  * @ioc: per adapter object
1496  * @raid_device: raid_device object
1497  *
 * This is added to the raid_device_list linked list.
1499  */
1500 static void
1501 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1502 	struct _raid_device *raid_device)
1503 {
1504 	unsigned long flags;
1505 
1506 	dewtprintk(ioc,
1507 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1508 			    __func__,
1509 			    raid_device->handle, (u64)raid_device->wwid));
1510 
1511 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1512 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1513 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1514 }
1515 
1516 /**
1517  * _scsih_raid_device_remove - delete raid_device object
1518  * @ioc: per adapter object
1519  * @raid_device: raid_device object
1520  *
1521  */
1522 static void
1523 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1524 	struct _raid_device *raid_device)
1525 {
1526 	unsigned long flags;
1527 
1528 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1529 	list_del(&raid_device->list);
1530 	kfree(raid_device);
1531 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1532 }
1533 
1534 /**
1535  * mpt3sas_scsih_expander_find_by_handle - expander device search
1536  * @ioc: per adapter object
1537  * @handle: expander handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_node_lock
1539  *
1540  * This searches for expander device based on handle, then returns the
1541  * sas_node object.
1542  */
1543 struct _sas_node *
1544 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1545 {
1546 	struct _sas_node *sas_expander, *r;
1547 
1548 	r = NULL;
1549 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1550 		if (sas_expander->handle != handle)
1551 			continue;
1552 		r = sas_expander;
1553 		goto out;
1554 	}
1555  out:
1556 	return r;
1557 }
1558 
1559 /**
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1561  * @ioc: per adapter object
1562  * @handle: enclosure handle (assigned by firmware)
1563  * Context: Calling function should acquire ioc->sas_device_lock
1564  *
1565  * This searches for enclosure device based on handle, then returns the
1566  * enclosure object.
1567  */
1568 static struct _enclosure_node *
1569 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1570 {
1571 	struct _enclosure_node *enclosure_dev, *r;
1572 
1573 	r = NULL;
1574 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1575 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1576 			continue;
1577 		r = enclosure_dev;
1578 		goto out;
1579 	}
1580 out:
1581 	return r;
1582 }
1583 /**
1584  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1585  * @ioc: per adapter object
1586  * @sas_address: sas address
1587  * @port: hba port entry
1588  * Context: Calling function should acquire ioc->sas_node_lock.
1589  *
1590  * This searches for expander device based on sas_address & port number,
1591  * then returns the sas_node object.
1592  */
1593 struct _sas_node *
1594 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1595 	u64 sas_address, struct hba_port *port)
1596 {
1597 	struct _sas_node *sas_expander, *r = NULL;
1598 
1599 	if (!port)
1600 		return r;
1601 
1602 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1603 		if (sas_expander->sas_address != sas_address)
1604 			continue;
1605 		if (sas_expander->port != port)
1606 			continue;
1607 		r = sas_expander;
1608 		goto out;
1609 	}
1610  out:
1611 	return r;
1612 }
1613 
1614 /**
1615  * _scsih_expander_node_add - insert expander device to the list.
1616  * @ioc: per adapter object
1617  * @sas_expander: the sas_device object
1618  * Context: This function will acquire ioc->sas_node_lock.
1619  *
1620  * Adding new object to the ioc->sas_expander_list.
1621  */
1622 static void
1623 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1624 	struct _sas_node *sas_expander)
1625 {
1626 	unsigned long flags;
1627 
1628 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1629 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1630 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1631 }
1632 
1633 /**
1634  * _scsih_is_end_device - determines if device is an end device
1635  * @device_info: bitfield providing information about the device.
1636  * Context: none
1637  *
1638  * Return: 1 if end device.
1639  */
1640 static int
1641 _scsih_is_end_device(u32 device_info)
1642 {
1643 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1644 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1645 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1646 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1647 		return 1;
1648 	else
1649 		return 0;
1650 }
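
/*
 * Typical check (sketch): DeviceInfo comes from SAS device page 0 and is
 * byte swapped before the helper is applied.
 *
 *	u32 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
 *
 *	if (!_scsih_is_end_device(device_info))
 *		return;
 */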
1651 
1652 /**
1653  * _scsih_is_nvme_pciescsi_device - determines if
 *			device is a pcie nvme/scsi device
 * @device_info: bitfield providing information about the device.
 * Context: none
 *
 * Return: 1 if the device is a pcie nvme or scsi device, else 0.
1659  */
1660 static int
1661 _scsih_is_nvme_pciescsi_device(u32 device_info)
1662 {
1663 	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1664 	    == MPI26_PCIE_DEVINFO_NVME) ||
1665 	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1666 	    == MPI26_PCIE_DEVINFO_SCSI))
1667 		return 1;
1668 	else
1669 		return 0;
1670 }
1671 
1672 /**
1673  * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1674  * @ioc: per adapter object
1675  * @id: target id
1676  * @channel: channel
1677  * Context: This function will acquire ioc->scsi_lookup_lock.
1678  *
1679  * This will search for a matching channel:id in the scsi_lookup array,
1680  * returning 1 if found.
1681  */
1682 static u8
1683 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1684 	int channel)
1685 {
1686 	int smid;
1687 	struct scsi_cmnd *scmd;
1688 
1689 	for (smid = 1;
1690 	     smid <= ioc->shost->can_queue; smid++) {
1691 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1692 		if (!scmd)
1693 			continue;
1694 		if (scmd->device->id == id &&
1695 		    scmd->device->channel == channel)
1696 			return 1;
1697 	}
1698 	return 0;
1699 }
1700 
1701 /**
1702  * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1703  * @ioc: per adapter object
1704  * @id: target id
1705  * @lun: lun number
1706  * @channel: channel
1707  * Context: This function will acquire ioc->scsi_lookup_lock.
1708  *
1709  * This will search for a matching channel:id:lun in the scsi_lookup array,
1710  * returning 1 if found.
1711  */
1712 static u8
1713 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1714 	unsigned int lun, int channel)
1715 {
1716 	int smid;
1717 	struct scsi_cmnd *scmd;
1718 
1719 	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1720 
1721 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1722 		if (!scmd)
1723 			continue;
1724 		if (scmd->device->id == id &&
1725 		    scmd->device->channel == channel &&
1726 		    scmd->device->lun == lun)
1727 			return 1;
1728 	}
1729 	return 0;
1730 }
1731 
1732 /**
1733  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1734  * @ioc: per adapter object
1735  * @smid: system request message index
1736  *
 * Return: the scmd pointer stored for the given smid, or NULL if the smid
 * does not correspond to an outstanding SCSI IO request.
1739  */
1740 struct scsi_cmnd *
1741 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1742 {
1743 	struct scsi_cmnd *scmd = NULL;
1744 	struct scsiio_tracker *st;
1745 	Mpi25SCSIIORequest_t *mpi_request;
1746 
1747 	if (smid > 0  &&
1748 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1749 		u32 unique_tag = smid - 1;
1750 
1751 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1752 
		/*
		 * If a SCSI IO request is outstanding at the driver level
		 * then the DevHandle field must be non-zero. If DevHandle is
		 * zero then this smid is free at the driver level, so return
		 * NULL.
		 */
1759 		if (!mpi_request->DevHandle)
1760 			return scmd;
1761 
1762 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1763 		if (scmd) {
1764 			st = scsi_cmd_priv(scmd);
1765 			if (st->cb_idx == 0xFF || st->smid == 0)
1766 				scmd = NULL;
1767 		}
1768 	}
1769 	return scmd;
1770 }
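
/*
 * Usage sketch (illustrative): smid N maps to block layer tag N - 1, so a
 * caller scanning for outstanding I/O simply walks the smid range, as the
 * lookup helpers above do:
 *
 *	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
 *		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 *		if (!scmd)
 *			continue;
 *		...inspect scmd->device...
 *	}
 */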
1771 
1772 /**
1773  * scsih_change_queue_depth - setting device queue depth
1774  * @sdev: scsi device struct
1775  * @qdepth: requested queue depth
1776  *
1777  * Return: queue depth.
1778  */
1779 static int
1780 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1781 {
1782 	struct Scsi_Host *shost = sdev->host;
1783 	int max_depth;
1784 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1785 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1786 	struct MPT3SAS_TARGET *sas_target_priv_data;
1787 	struct _sas_device *sas_device;
1788 	unsigned long flags;
1789 
1790 	max_depth = shost->can_queue;
1791 
1792 	/*
1793 	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1794 	 * is disabled.
1795 	 */
1796 	if (ioc->enable_sdev_max_qd)
1797 		goto not_sata;
1798 
1799 	sas_device_priv_data = sdev->hostdata;
1800 	if (!sas_device_priv_data)
1801 		goto not_sata;
1802 	sas_target_priv_data = sas_device_priv_data->sas_target;
1803 	if (!sas_target_priv_data)
1804 		goto not_sata;
1805 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1806 		goto not_sata;
1807 
1808 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1809 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1810 	if (sas_device) {
1811 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1812 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1813 
1814 		sas_device_put(sas_device);
1815 	}
1816 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1817 
1818  not_sata:
1819 
1820 	if (!sdev->tagged_supported)
1821 		max_depth = 1;
1822 	if (qdepth > max_depth)
1823 		qdepth = max_depth;
1824 	scsi_change_queue_depth(sdev, qdepth);
1825 	sdev_printk(KERN_INFO, sdev,
1826 	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1827 	    sdev->queue_depth, sdev->tagged_supported,
1828 	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1829 	return sdev->queue_depth;
1830 }
1831 
1832 /**
1833  * mpt3sas_scsih_change_queue_depth - setting device queue depth
1834  * @sdev: scsi device struct
1835  * @qdepth: requested queue depth
1836  *
1837  * Returns nothing.
1838  */
1839 void
1840 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1841 {
1842 	struct Scsi_Host *shost = sdev->host;
1843 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1844 
1845 	if (ioc->enable_sdev_max_qd)
1846 		qdepth = shost->can_queue;
1847 
1848 	scsih_change_queue_depth(sdev, qdepth);
1849 }
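
/*
 * Usage sketch (illustrative): callers such as scsih_slave_configure() pick
 * a protocol specific depth and funnel it through this wrapper, e.g.:
 *
 *	mpt3sas_scsih_change_queue_depth(sdev, MPT3SAS_SAS_QUEUE_DEPTH);
 */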
1850 
1851 /**
1852  * scsih_target_alloc - target add routine
1853  * @starget: scsi target struct
1854  *
1855  * Return: 0 if ok. Any other return is assumed to be an error and
1856  * the device is ignored.
1857  */
1858 static int
1859 scsih_target_alloc(struct scsi_target *starget)
1860 {
1861 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1862 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1863 	struct MPT3SAS_TARGET *sas_target_priv_data;
1864 	struct _sas_device *sas_device;
1865 	struct _raid_device *raid_device;
1866 	struct _pcie_device *pcie_device;
1867 	unsigned long flags;
1868 	struct sas_rphy *rphy;
1869 
1870 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1871 				       GFP_KERNEL);
1872 	if (!sas_target_priv_data)
1873 		return -ENOMEM;
1874 
1875 	starget->hostdata = sas_target_priv_data;
1876 	sas_target_priv_data->starget = starget;
1877 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1878 
1879 	/* RAID volumes */
1880 	if (starget->channel == RAID_CHANNEL) {
1881 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1882 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1883 		    starget->channel);
1884 		if (raid_device) {
1885 			sas_target_priv_data->handle = raid_device->handle;
1886 			sas_target_priv_data->sas_address = raid_device->wwid;
1887 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1888 			if (ioc->is_warpdrive)
1889 				sas_target_priv_data->raid_device = raid_device;
1890 			raid_device->starget = starget;
1891 		}
1892 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1893 		return 0;
1894 	}
1895 
1896 	/* PCIe devices */
1897 	if (starget->channel == PCIE_CHANNEL) {
1898 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1899 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1900 			starget->channel);
1901 		if (pcie_device) {
1902 			sas_target_priv_data->handle = pcie_device->handle;
1903 			sas_target_priv_data->sas_address = pcie_device->wwid;
1904 			sas_target_priv_data->port = NULL;
1905 			sas_target_priv_data->pcie_dev = pcie_device;
1906 			pcie_device->starget = starget;
1907 			pcie_device->id = starget->id;
1908 			pcie_device->channel = starget->channel;
1909 			sas_target_priv_data->flags |=
1910 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1911 			if (pcie_device->fast_path)
1912 				sas_target_priv_data->flags |=
1913 					MPT_TARGET_FASTPATH_IO;
1914 		}
1915 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1916 		return 0;
1917 	}
1918 
1919 	/* sas/sata devices */
1920 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1921 	rphy = dev_to_rphy(starget->dev.parent);
1922 	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1923 
1924 	if (sas_device) {
1925 		sas_target_priv_data->handle = sas_device->handle;
1926 		sas_target_priv_data->sas_address = sas_device->sas_address;
1927 		sas_target_priv_data->port = sas_device->port;
1928 		sas_target_priv_data->sas_dev = sas_device;
1929 		sas_device->starget = starget;
1930 		sas_device->id = starget->id;
1931 		sas_device->channel = starget->channel;
1932 		if (test_bit(sas_device->handle, ioc->pd_handles))
1933 			sas_target_priv_data->flags |=
1934 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1935 		if (sas_device->fast_path)
1936 			sas_target_priv_data->flags |=
1937 					MPT_TARGET_FASTPATH_IO;
1938 	}
1939 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1940 
1941 	return 0;
1942 }
1943 
1944 /**
1945  * scsih_target_destroy - target destroy routine
1946  * @starget: scsi target struct
1947  */
1948 static void
1949 scsih_target_destroy(struct scsi_target *starget)
1950 {
1951 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1952 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1953 	struct MPT3SAS_TARGET *sas_target_priv_data;
1954 	struct _sas_device *sas_device;
1955 	struct _raid_device *raid_device;
1956 	struct _pcie_device *pcie_device;
1957 	unsigned long flags;
1958 
1959 	sas_target_priv_data = starget->hostdata;
1960 	if (!sas_target_priv_data)
1961 		return;
1962 
1963 	if (starget->channel == RAID_CHANNEL) {
1964 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1965 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1966 		    starget->channel);
1967 		if (raid_device) {
1968 			raid_device->starget = NULL;
1969 			raid_device->sdev = NULL;
1970 		}
1971 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1972 		goto out;
1973 	}
1974 
1975 	if (starget->channel == PCIE_CHANNEL) {
1976 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1977 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1978 							sas_target_priv_data);
1979 		if (pcie_device && (pcie_device->starget == starget) &&
1980 			(pcie_device->id == starget->id) &&
1981 			(pcie_device->channel == starget->channel))
1982 			pcie_device->starget = NULL;
1983 
1984 		if (pcie_device) {
1985 			/*
1986 			 * Corresponding get() is in _scsih_target_alloc()
1987 			 */
1988 			sas_target_priv_data->pcie_dev = NULL;
1989 			pcie_device_put(pcie_device);
1990 			pcie_device_put(pcie_device);
1991 		}
1992 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1993 		goto out;
1994 	}
1995 
1996 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1997 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1998 	if (sas_device && (sas_device->starget == starget) &&
1999 	    (sas_device->id == starget->id) &&
2000 	    (sas_device->channel == starget->channel))
2001 		sas_device->starget = NULL;
2002 
2003 	if (sas_device) {
2004 		/*
2005 		 * Corresponding get() is in _scsih_target_alloc()
2006 		 */
2007 		sas_target_priv_data->sas_dev = NULL;
		sas_device_put(sas_device);
		sas_device_put(sas_device);
2011 	}
2012 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2013 
2014  out:
2015 	kfree(sas_target_priv_data);
2016 	starget->hostdata = NULL;
2017 }
2018 
2019 /**
2020  * scsih_slave_alloc - device add routine
2021  * @sdev: scsi device struct
2022  *
2023  * Return: 0 if ok. Any other return is assumed to be an error and
2024  * the device is ignored.
2025  */
2026 static int
2027 scsih_slave_alloc(struct scsi_device *sdev)
2028 {
2029 	struct Scsi_Host *shost;
2030 	struct MPT3SAS_ADAPTER *ioc;
2031 	struct MPT3SAS_TARGET *sas_target_priv_data;
2032 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2033 	struct scsi_target *starget;
2034 	struct _raid_device *raid_device;
2035 	struct _sas_device *sas_device;
2036 	struct _pcie_device *pcie_device;
2037 	unsigned long flags;
2038 
2039 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2040 				       GFP_KERNEL);
2041 	if (!sas_device_priv_data)
2042 		return -ENOMEM;
2043 
2044 	sas_device_priv_data->lun = sdev->lun;
2045 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2046 
2047 	starget = scsi_target(sdev);
2048 	sas_target_priv_data = starget->hostdata;
2049 	sas_target_priv_data->num_luns++;
2050 	sas_device_priv_data->sas_target = sas_target_priv_data;
2051 	sdev->hostdata = sas_device_priv_data;
2052 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2053 		sdev->no_uld_attach = 1;
2054 
2055 	shost = dev_to_shost(&starget->dev);
2056 	ioc = shost_priv(shost);
2057 	if (starget->channel == RAID_CHANNEL) {
2058 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2059 		raid_device = _scsih_raid_device_find_by_id(ioc,
2060 		    starget->id, starget->channel);
2061 		if (raid_device)
2062 			raid_device->sdev = sdev; /* raid is single lun */
2063 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2064 	}
2065 	if (starget->channel == PCIE_CHANNEL) {
2066 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2067 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2068 				sas_target_priv_data->sas_address);
2069 		if (pcie_device && (pcie_device->starget == NULL)) {
2070 			sdev_printk(KERN_INFO, sdev,
2071 			    "%s : pcie_device->starget set to starget @ %d\n",
2072 			    __func__, __LINE__);
2073 			pcie_device->starget = starget;
2074 		}
2075 
2076 		if (pcie_device)
2077 			pcie_device_put(pcie_device);
2078 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2079 
2080 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2081 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2082 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2083 		    sas_target_priv_data->sas_address,
2084 		    sas_target_priv_data->port);
2085 		if (sas_device && (sas_device->starget == NULL)) {
2086 			sdev_printk(KERN_INFO, sdev,
2087 			"%s : sas_device->starget set to starget @ %d\n",
2088 			     __func__, __LINE__);
2089 			sas_device->starget = starget;
2090 		}
2091 
2092 		if (sas_device)
2093 			sas_device_put(sas_device);
2094 
2095 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2096 	}
2097 
2098 	return 0;
2099 }
2100 
2101 /**
2102  * scsih_slave_destroy - device destroy routine
2103  * @sdev: scsi device struct
2104  */
2105 static void
2106 scsih_slave_destroy(struct scsi_device *sdev)
2107 {
2108 	struct MPT3SAS_TARGET *sas_target_priv_data;
2109 	struct scsi_target *starget;
2110 	struct Scsi_Host *shost;
2111 	struct MPT3SAS_ADAPTER *ioc;
2112 	struct _sas_device *sas_device;
2113 	struct _pcie_device *pcie_device;
2114 	unsigned long flags;
2115 
2116 	if (!sdev->hostdata)
2117 		return;
2118 
2119 	starget = scsi_target(sdev);
2120 	sas_target_priv_data = starget->hostdata;
2121 	sas_target_priv_data->num_luns--;
2122 
2123 	shost = dev_to_shost(&starget->dev);
2124 	ioc = shost_priv(shost);
2125 
2126 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2127 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2128 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2129 				sas_target_priv_data);
2130 		if (pcie_device && !sas_target_priv_data->num_luns)
2131 			pcie_device->starget = NULL;
2132 
2133 		if (pcie_device)
2134 			pcie_device_put(pcie_device);
2135 
2136 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2137 
2138 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2139 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2140 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
2141 				sas_target_priv_data);
2142 		if (sas_device && !sas_target_priv_data->num_luns)
2143 			sas_device->starget = NULL;
2144 
2145 		if (sas_device)
2146 			sas_device_put(sas_device);
2147 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2148 	}
2149 
2150 	kfree(sdev->hostdata);
2151 	sdev->hostdata = NULL;
2152 }
2153 
2154 /**
2155  * _scsih_display_sata_capabilities - sata capabilities
2156  * @ioc: per adapter object
2157  * @handle: device handle
2158  * @sdev: scsi device struct
2159  */
2160 static void
2161 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2162 	u16 handle, struct scsi_device *sdev)
2163 {
2164 	Mpi2ConfigReply_t mpi_reply;
2165 	Mpi2SasDevicePage0_t sas_device_pg0;
2166 	u32 ioc_status;
2167 	u16 flags;
2168 	u32 device_info;
2169 
2170 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2171 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2172 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2173 			__FILE__, __LINE__, __func__);
2174 		return;
2175 	}
2176 
2177 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2178 	    MPI2_IOCSTATUS_MASK;
2179 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2180 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2181 			__FILE__, __LINE__, __func__);
2182 		return;
2183 	}
2184 
2185 	flags = le16_to_cpu(sas_device_pg0.Flags);
2186 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2187 
2188 	sdev_printk(KERN_INFO, sdev,
2189 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2190 	    "sw_preserve(%s)\n",
2191 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2192 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2193 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2194 	    "n",
2195 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2196 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2197 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2198 }
2199 
/*
 * raid transport support -
 * Enabled for SLES11 and newer. In older kernels the driver will panic when
 * the driver is unloaded and then loaded again - I believe that the
 * subroutine raid_class_release() is not cleaning up properly.
 */
2206 
2207 /**
2208  * scsih_is_raid - return boolean indicating device is raid volume
2209  * @dev: the device struct object
2210  */
2211 static int
2212 scsih_is_raid(struct device *dev)
2213 {
2214 	struct scsi_device *sdev = to_scsi_device(dev);
2215 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2216 
2217 	if (ioc->is_warpdrive)
2218 		return 0;
2219 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2220 }
2221 
2222 static int
2223 scsih_is_nvme(struct device *dev)
2224 {
2225 	struct scsi_device *sdev = to_scsi_device(dev);
2226 
2227 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2228 }
2229 
2230 /**
2231  * scsih_get_resync - get raid volume resync percent complete
2232  * @dev: the device struct object
2233  */
2234 static void
2235 scsih_get_resync(struct device *dev)
2236 {
2237 	struct scsi_device *sdev = to_scsi_device(dev);
2238 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
2240 	unsigned long flags;
2241 	Mpi2RaidVolPage0_t vol_pg0;
2242 	Mpi2ConfigReply_t mpi_reply;
2243 	u32 volume_status_flags;
2244 	u8 percent_complete;
2245 	u16 handle;
2246 
2247 	percent_complete = 0;
2248 	handle = 0;
2249 	if (ioc->is_warpdrive)
2250 		goto out;
2251 
2252 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2253 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2254 	    sdev->channel);
2255 	if (raid_device) {
2256 		handle = raid_device->handle;
2257 		percent_complete = raid_device->percent_complete;
2258 	}
2259 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2260 
2261 	if (!handle)
2262 		goto out;
2263 
2264 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2265 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2266 	     sizeof(Mpi2RaidVolPage0_t))) {
2267 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2268 			__FILE__, __LINE__, __func__);
2269 		percent_complete = 0;
2270 		goto out;
2271 	}
2272 
2273 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2274 	if (!(volume_status_flags &
2275 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2276 		percent_complete = 0;
2277 
2278  out:
2279 
2280 	switch (ioc->hba_mpi_version_belonged) {
2281 	case MPI2_VERSION:
2282 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2283 		break;
2284 	case MPI25_VERSION:
2285 	case MPI26_VERSION:
2286 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2287 		break;
2288 	}
2289 }
2290 
2291 /**
 * scsih_get_state - get raid volume state
2293  * @dev: the device struct object
2294  */
2295 static void
2296 scsih_get_state(struct device *dev)
2297 {
2298 	struct scsi_device *sdev = to_scsi_device(dev);
2299 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
2301 	unsigned long flags;
2302 	Mpi2RaidVolPage0_t vol_pg0;
2303 	Mpi2ConfigReply_t mpi_reply;
2304 	u32 volstate;
2305 	enum raid_state state = RAID_STATE_UNKNOWN;
2306 	u16 handle = 0;
2307 
2308 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2309 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2310 	    sdev->channel);
2311 	if (raid_device)
2312 		handle = raid_device->handle;
2313 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2314 
2315 	if (!raid_device)
2316 		goto out;
2317 
2318 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2319 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2320 	     sizeof(Mpi2RaidVolPage0_t))) {
2321 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2322 			__FILE__, __LINE__, __func__);
2323 		goto out;
2324 	}
2325 
2326 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2327 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2328 		state = RAID_STATE_RESYNCING;
2329 		goto out;
2330 	}
2331 
2332 	switch (vol_pg0.VolumeState) {
2333 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2334 	case MPI2_RAID_VOL_STATE_ONLINE:
2335 		state = RAID_STATE_ACTIVE;
2336 		break;
2337 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2338 		state = RAID_STATE_DEGRADED;
2339 		break;
2340 	case MPI2_RAID_VOL_STATE_FAILED:
2341 	case MPI2_RAID_VOL_STATE_MISSING:
2342 		state = RAID_STATE_OFFLINE;
2343 		break;
2344 	}
2345  out:
2346 	switch (ioc->hba_mpi_version_belonged) {
2347 	case MPI2_VERSION:
2348 		raid_set_state(mpt2sas_raid_template, dev, state);
2349 		break;
2350 	case MPI25_VERSION:
2351 	case MPI26_VERSION:
2352 		raid_set_state(mpt3sas_raid_template, dev, state);
2353 		break;
2354 	}
2355 }
2356 
2357 /**
2358  * _scsih_set_level - set raid level
 * @ioc: per adapter object
2360  * @sdev: scsi device struct
2361  * @volume_type: volume type
2362  */
2363 static void
2364 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2365 	struct scsi_device *sdev, u8 volume_type)
2366 {
2367 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2368 
2369 	switch (volume_type) {
2370 	case MPI2_RAID_VOL_TYPE_RAID0:
2371 		level = RAID_LEVEL_0;
2372 		break;
2373 	case MPI2_RAID_VOL_TYPE_RAID10:
2374 		level = RAID_LEVEL_10;
2375 		break;
2376 	case MPI2_RAID_VOL_TYPE_RAID1E:
2377 		level = RAID_LEVEL_1E;
2378 		break;
2379 	case MPI2_RAID_VOL_TYPE_RAID1:
2380 		level = RAID_LEVEL_1;
2381 		break;
2382 	}
2383 
2384 	switch (ioc->hba_mpi_version_belonged) {
2385 	case MPI2_VERSION:
2386 		raid_set_level(mpt2sas_raid_template,
2387 			&sdev->sdev_gendev, level);
2388 		break;
2389 	case MPI25_VERSION:
2390 	case MPI26_VERSION:
2391 		raid_set_level(mpt3sas_raid_template,
2392 			&sdev->sdev_gendev, level);
2393 		break;
2394 	}
2395 }
2396 
2397 
2398 /**
2399  * _scsih_get_volume_capabilities - volume capabilities
2400  * @ioc: per adapter object
2401  * @raid_device: the raid_device object
2402  *
2403  * Return: 0 for success, else 1
2404  */
2405 static int
2406 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2407 	struct _raid_device *raid_device)
2408 {
2409 	Mpi2RaidVolPage0_t *vol_pg0;
2410 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2411 	Mpi2SasDevicePage0_t sas_device_pg0;
2412 	Mpi2ConfigReply_t mpi_reply;
2413 	u16 sz;
2414 	u8 num_pds;
2415 
2416 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2417 	    &num_pds)) || !num_pds) {
2418 		dfailprintk(ioc,
2419 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2420 				     __FILE__, __LINE__, __func__));
2421 		return 1;
2422 	}
2423 
2424 	raid_device->num_pds = num_pds;
2425 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2426 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2427 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2428 	if (!vol_pg0) {
2429 		dfailprintk(ioc,
2430 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2431 				     __FILE__, __LINE__, __func__));
2432 		return 1;
2433 	}
2434 
2435 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2436 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2437 		dfailprintk(ioc,
2438 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2439 				     __FILE__, __LINE__, __func__));
2440 		kfree(vol_pg0);
2441 		return 1;
2442 	}
2443 
2444 	raid_device->volume_type = vol_pg0->VolumeType;
2445 
2446 	/* figure out what the underlying devices are by
2447 	 * obtaining the device_info bits for the 1st device
2448 	 */
2449 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2450 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2451 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2452 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2453 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2454 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2455 			raid_device->device_info =
2456 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2457 		}
2458 	}
2459 
2460 	kfree(vol_pg0);
2461 	return 0;
2462 }
2463 
2464 /**
2465  * _scsih_enable_tlr - setting TLR flags
2466  * @ioc: per adapter object
2467  * @sdev: scsi device struct
2468  *
 * Enable Transaction Layer Retries for tape devices when
 * VPD page 0x90 is present.
 */
2473 static void
2474 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2475 {
2476 
2477 	/* only for TAPE */
2478 	if (sdev->type != TYPE_TAPE)
2479 		return;
2480 
2481 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2482 		return;
2483 
2484 	sas_enable_tlr(sdev);
2485 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2486 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
}
2490 
2491 /**
2492  * scsih_slave_configure - device configure routine.
2493  * @sdev: scsi device struct
2494  *
2495  * Return: 0 if ok. Any other return is assumed to be an error and
2496  * the device is ignored.
2497  */
2498 static int
2499 scsih_slave_configure(struct scsi_device *sdev)
2500 {
2501 	struct Scsi_Host *shost = sdev->host;
2502 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2503 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2504 	struct MPT3SAS_TARGET *sas_target_priv_data;
2505 	struct _sas_device *sas_device;
2506 	struct _pcie_device *pcie_device;
2507 	struct _raid_device *raid_device;
2508 	unsigned long flags;
2509 	int qdepth;
2510 	u8 ssp_target = 0;
2511 	char *ds = "";
2512 	char *r_level = "";
2513 	u16 handle, volume_handle = 0;
2514 	u64 volume_wwid = 0;
2515 
2516 	qdepth = 1;
2517 	sas_device_priv_data = sdev->hostdata;
2518 	sas_device_priv_data->configured_lun = 1;
2519 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2520 	sas_target_priv_data = sas_device_priv_data->sas_target;
2521 	handle = sas_target_priv_data->handle;
2522 
2523 	/* raid volume handling */
2524 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2525 
2526 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2527 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2528 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2529 		if (!raid_device) {
2530 			dfailprintk(ioc,
2531 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2532 					     __FILE__, __LINE__, __func__));
2533 			return 1;
2534 		}
2535 
2536 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2537 			dfailprintk(ioc,
2538 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2539 					     __FILE__, __LINE__, __func__));
2540 			return 1;
2541 		}
2542 
2543 		/*
2544 		 * WARPDRIVE: Initialize the required data for Direct IO
2545 		 */
2546 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2547 
2548 		/* RAID Queue Depth Support
2549 		 * IS volume = underlying qdepth of drive type, either
2550 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2551 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2552 		 */
2553 		if (raid_device->device_info &
2554 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2555 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2556 			ds = "SSP";
2557 		} else {
2558 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2559 			if (raid_device->device_info &
2560 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2561 				ds = "SATA";
2562 			else
2563 				ds = "STP";
2564 		}
2565 
2566 		switch (raid_device->volume_type) {
2567 		case MPI2_RAID_VOL_TYPE_RAID0:
2568 			r_level = "RAID0";
2569 			break;
2570 		case MPI2_RAID_VOL_TYPE_RAID1E:
2571 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2572 			if (ioc->manu_pg10.OEMIdentifier &&
2573 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2574 			    MFG10_GF0_R10_DISPLAY) &&
2575 			    !(raid_device->num_pds % 2))
2576 				r_level = "RAID10";
2577 			else
2578 				r_level = "RAID1E";
2579 			break;
2580 		case MPI2_RAID_VOL_TYPE_RAID1:
2581 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2582 			r_level = "RAID1";
2583 			break;
2584 		case MPI2_RAID_VOL_TYPE_RAID10:
2585 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2586 			r_level = "RAID10";
2587 			break;
2588 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2589 		default:
2590 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2591 			r_level = "RAIDX";
2592 			break;
2593 		}
2594 
2595 		if (!ioc->hide_ir_msg)
2596 			sdev_printk(KERN_INFO, sdev,
2597 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2598 			    " pd_count(%d), type(%s)\n",
2599 			    r_level, raid_device->handle,
2600 			    (unsigned long long)raid_device->wwid,
2601 			    raid_device->num_pds, ds);
2602 
2603 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2604 			blk_queue_max_hw_sectors(sdev->request_queue,
2605 						MPT3SAS_RAID_MAX_SECTORS);
2606 			sdev_printk(KERN_INFO, sdev,
2607 					"Set queue's max_sector to: %u\n",
2608 						MPT3SAS_RAID_MAX_SECTORS);
2609 		}
2610 
2611 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2612 
2613 		/* raid transport support */
2614 		if (!ioc->is_warpdrive)
2615 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2616 		return 0;
2617 	}
2618 
2619 	/* non-raid handling */
2620 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2621 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2622 		    &volume_handle)) {
2623 			dfailprintk(ioc,
2624 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2625 					     __FILE__, __LINE__, __func__));
2626 			return 1;
2627 		}
2628 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2629 		    volume_handle, &volume_wwid)) {
2630 			dfailprintk(ioc,
2631 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2632 					     __FILE__, __LINE__, __func__));
2633 			return 1;
2634 		}
2635 	}
2636 
2637 	/* PCIe handling */
2638 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2639 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2640 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2641 				sas_device_priv_data->sas_target->sas_address);
2642 		if (!pcie_device) {
2643 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2644 			dfailprintk(ioc,
2645 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2646 					     __FILE__, __LINE__, __func__));
2647 			return 1;
2648 		}
2649 
2650 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2651 		ds = "NVMe";
2652 		sdev_printk(KERN_INFO, sdev,
2653 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2654 			ds, handle, (unsigned long long)pcie_device->wwid,
2655 			pcie_device->port_num);
2656 		if (pcie_device->enclosure_handle != 0)
2657 			sdev_printk(KERN_INFO, sdev,
2658 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2659 			ds,
2660 			(unsigned long long)pcie_device->enclosure_logical_id,
2661 			pcie_device->slot);
2662 		if (pcie_device->connector_name[0] != '\0')
2663 			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x), "
2665 				"connector name( %s)\n", ds,
2666 				pcie_device->enclosure_level,
2667 				pcie_device->connector_name);
2668 
2669 		if (pcie_device->nvme_mdts)
2670 			blk_queue_max_hw_sectors(sdev->request_queue,
2671 					pcie_device->nvme_mdts/512);
2672 
2673 		pcie_device_put(pcie_device);
2674 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2675 		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		/*
		 * Enable the QUEUE_FLAG_NOMERGES flag so that IOs won't be
		 * merged, which avoids the holes that a merge operation can
		 * create.
		 */
2680 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2681 				sdev->request_queue);
2682 		blk_queue_virt_boundary(sdev->request_queue,
2683 				ioc->page_size - 1);
2684 		return 0;
2685 	}
2686 
2687 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2688 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2689 	   sas_device_priv_data->sas_target->sas_address,
2690 	   sas_device_priv_data->sas_target->port);
2691 	if (!sas_device) {
2692 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2693 		dfailprintk(ioc,
2694 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2695 				     __FILE__, __LINE__, __func__));
2696 		return 1;
2697 	}
2698 
2699 	sas_device->volume_handle = volume_handle;
2700 	sas_device->volume_wwid = volume_wwid;
2701 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2702 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2703 		ssp_target = 1;
2704 		if (sas_device->device_info &
2705 				MPI2_SAS_DEVICE_INFO_SEP) {
2706 			sdev_printk(KERN_WARNING, sdev,
2707 			"set ignore_delay_remove for handle(0x%04x)\n",
2708 			sas_device_priv_data->sas_target->handle);
2709 			sas_device_priv_data->ignore_delay_remove = 1;
2710 			ds = "SES";
2711 		} else
2712 			ds = "SSP";
2713 	} else {
2714 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2715 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2716 			ds = "STP";
2717 		else if (sas_device->device_info &
2718 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2719 			ds = "SATA";
2720 	}
2721 
2722 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2723 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2724 	    ds, handle, (unsigned long long)sas_device->sas_address,
2725 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2726 
2727 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2728 
2729 	sas_device_put(sas_device);
2730 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2731 
2732 	if (!ssp_target)
2733 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2734 
2735 
2736 	mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2737 
2738 	if (ssp_target) {
2739 		sas_read_port_mode_page(sdev);
2740 		_scsih_enable_tlr(ioc, sdev);
2741 	}
2742 
2743 	return 0;
2744 }
2745 
2746 /**
2747  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2748  * @sdev: scsi device struct
2749  * @bdev: pointer to block device context
2750  * @capacity: device size (in 512 byte sectors)
2751  * @params: three element array to place output:
2752  *              params[0] number of heads (max 255)
2753  *              params[1] number of sectors (max 63)
2754  *              params[2] number of cylinders
2755  */
2756 static int
2757 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2758 	sector_t capacity, int params[])
2759 {
2760 	int		heads;
2761 	int		sectors;
2762 	sector_t	cylinders;
2763 	ulong		dummy;
2764 
2765 	heads = 64;
2766 	sectors = 32;
2767 
2768 	dummy = heads * sectors;
2769 	cylinders = capacity;
2770 	sector_div(cylinders, dummy);
2771 
2772 	/*
2773 	 * Handle extended translation size for logical drives
2774 	 * > 1Gb
2775 	 */
2776 	if ((ulong)capacity >= 0x200000) {
2777 		heads = 255;
2778 		sectors = 63;
2779 		dummy = heads * sectors;
2780 		cylinders = capacity;
2781 		sector_div(cylinders, dummy);
2782 	}
2783 
2784 	/* return result */
2785 	params[0] = heads;
2786 	params[1] = sectors;
2787 	params[2] = cylinders;
2788 
2789 	return 0;
2790 }
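
/*
 * Worked example (illustrative): a 100 GiB disk has 209715200 512-byte
 * sectors, which is above the 0x200000-sector (1 GiB) threshold, so the
 * extended geometry applies: heads = 255, sectors = 63 and
 * cylinders = 209715200 / (255 * 63) = 13054 (integer division).
 */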
2791 
2792 /**
2793  * _scsih_response_code - translation of device response code
2794  * @ioc: per adapter object
2795  * @response_code: response code returned by the device
2796  */
2797 static void
2798 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2799 {
2800 	char *desc;
2801 
2802 	switch (response_code) {
2803 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2804 		desc = "task management request completed";
2805 		break;
2806 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2807 		desc = "invalid frame";
2808 		break;
2809 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2810 		desc = "task management request not supported";
2811 		break;
2812 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2813 		desc = "task management request failed";
2814 		break;
2815 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2816 		desc = "task management request succeeded";
2817 		break;
2818 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2819 		desc = "invalid lun";
2820 		break;
2821 	case 0xA:
2822 		desc = "overlapped tag attempted";
2823 		break;
2824 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2825 		desc = "task queued, however not sent to target";
2826 		break;
2827 	default:
2828 		desc = "unknown";
2829 		break;
2830 	}
2831 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2832 }
2833 
2834 /**
2835  * _scsih_tm_done - tm completion routine
2836  * @ioc: per adapter object
2837  * @smid: system request message index
2838  * @msix_index: MSIX table index supplied by the OS
2839  * @reply: reply message frame(lower 32bit addr)
2840  * Context: none.
2841  *
 * The callback handler when using mpt3sas_scsih_issue_tm().
2843  *
2844  * Return: 1 meaning mf should be freed from _base_interrupt
2845  *         0 means the mf is freed from this function.
2846  */
2847 static u8
2848 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2849 {
2850 	MPI2DefaultReply_t *mpi_reply;
2851 
2852 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2853 		return 1;
2854 	if (ioc->tm_cmds.smid != smid)
2855 		return 1;
2856 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2857 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2858 	if (mpi_reply) {
2859 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2860 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2861 	}
2862 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2863 	complete(&ioc->tm_cmds.done);
2864 	return 1;
2865 }
2866 
2867 /**
2868  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2869  * @ioc: per adapter object
2870  * @handle: device handle
2871  *
 * During a task management request we need to freeze the device queue.
2873  */
2874 void
2875 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2876 {
2877 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2878 	struct scsi_device *sdev;
2879 	u8 skip = 0;
2880 
2881 	shost_for_each_device(sdev, ioc->shost) {
2882 		if (skip)
2883 			continue;
2884 		sas_device_priv_data = sdev->hostdata;
2885 		if (!sas_device_priv_data)
2886 			continue;
2887 		if (sas_device_priv_data->sas_target->handle == handle) {
2888 			sas_device_priv_data->sas_target->tm_busy = 1;
2889 			skip = 1;
2890 			ioc->ignore_loginfos = 1;
2891 		}
2892 	}
2893 }
2894 
2895 /**
2896  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2897  * @ioc: per adapter object
2898  * @handle: device handle
2899  *
 * During a task management request we need to freeze the device queue.
2901  */
2902 void
2903 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2904 {
2905 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2906 	struct scsi_device *sdev;
2907 	u8 skip = 0;
2908 
2909 	shost_for_each_device(sdev, ioc->shost) {
2910 		if (skip)
2911 			continue;
2912 		sas_device_priv_data = sdev->hostdata;
2913 		if (!sas_device_priv_data)
2914 			continue;
2915 		if (sas_device_priv_data->sas_target->handle == handle) {
2916 			sas_device_priv_data->sas_target->tm_busy = 0;
2917 			skip = 1;
2918 			ioc->ignore_loginfos = 0;
2919 		}
2920 	}
2921 }
2922 
2923 /**
2924  * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2925  * @ioc: per adapter object
2926  * @channel: the channel assigned by the OS
2927  * @id: the id assigned by the OS
2928  * @lun: lun number
2929  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2930  * @smid_task: smid assigned to the task
2931  *
 * Look at whether the TM has aborted the timed out SCSI command; if the
 * TM has aborted the IO then return SUCCESS, else return FAILED.
2934  */
2935 static int
2936 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2937 	uint id, uint lun, u8 type, u16 smid_task)
2938 {
2939 
2940 	if (smid_task <= ioc->shost->can_queue) {
2941 		switch (type) {
2942 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2943 			if (!(_scsih_scsi_lookup_find_by_target(ioc,
2944 			    id, channel)))
2945 				return SUCCESS;
2946 			break;
2947 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2948 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2949 			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2950 			    lun, channel)))
2951 				return SUCCESS;
2952 			break;
2953 		default:
2954 			return SUCCESS;
2955 		}
2956 	} else if (smid_task == ioc->scsih_cmds.smid) {
2957 		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2958 		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2959 			return SUCCESS;
2960 	} else if (smid_task == ioc->ctl_cmds.smid) {
2961 		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2962 		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2963 			return SUCCESS;
2964 	}
2965 
2966 	return FAILED;
2967 }
2968 
2969 /**
2970  * scsih_tm_post_processing - post processing of target & LUN reset
2971  * @ioc: per adapter object
2972  * @handle: device handle
2973  * @channel: the channel assigned by the OS
2974  * @id: the id assigned by the OS
2975  * @lun: lun number
2976  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2977  * @smid_task: smid assigned to the task
2978  *
 * Post processing of target & LUN reset. Due to interrupt latency it is
 * possible that the interrupt for the aborted IO has not been received
 * yet. So before returning a failure status, poll the reply descriptor
 * pools for the reply of the timed out SCSI command.
 *
 * Return: FAILED if the reply for the timed out IO is not received,
 * otherwise SUCCESS.
2985  */
2986 static int
2987 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2988 	uint channel, uint id, uint lun, u8 type, u16 smid_task)
2989 {
2990 	int rc;
2991 
2992 	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2993 	if (rc == SUCCESS)
2994 		return rc;
2995 
2996 	ioc_info(ioc,
2997 	    "Poll ReplyDescriptor queues for completion of"
2998 	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
2999 	    smid_task, type, handle);
3000 
3001 	/*
3002 	 * Due to interrupt latency issues, driver may receive interrupt for
3003 	 * TM first and then for aborted SCSI IO command. So, poll all the
3004 	 * ReplyDescriptor pools before returning the FAILED status to SML.
3005 	 */
3006 	mpt3sas_base_mask_interrupts(ioc);
3007 	mpt3sas_base_sync_reply_irqs(ioc, 1);
3008 	mpt3sas_base_unmask_interrupts(ioc);
3009 
3010 	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3011 }
3012 
3013 /**
3014  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3015  * @ioc: per adapter struct
3016  * @handle: device handle
3017  * @channel: the channel assigned by the OS
3018  * @id: the id assigned by the OS
3019  * @lun: lun number
3020  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3021  * @smid_task: smid assigned to the task
3022  * @msix_task: MSIX table index supplied by the OS
3023  * @timeout: timeout in seconds
3024  * @tr_method: Target Reset Method
3025  * Context: user
3026  *
3027  * A generic API for sending task management requests to firmware.
3028  *
 * The callback index is set inside ioc->tm_cb_idx.
 * The caller is responsible for checking for outstanding commands.
3031  *
3032  * Return: SUCCESS or FAILED.
3033  */
3034 int
3035 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3036 	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3037 	u8 timeout, u8 tr_method)
3038 {
3039 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3040 	Mpi2SCSITaskManagementReply_t *mpi_reply;
3041 	Mpi25SCSIIORequest_t *request;
3042 	u16 smid = 0;
3043 	u32 ioc_state;
3044 	int rc;
3045 	u8 issue_reset = 0;
3046 
3047 	lockdep_assert_held(&ioc->tm_cmds.mutex);
3048 
3049 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3050 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3051 		return FAILED;
3052 	}
3053 
3054 	if (ioc->shost_recovery || ioc->remove_host ||
3055 	    ioc->pci_error_recovery) {
3056 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3057 		return FAILED;
3058 	}
3059 
3060 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3061 	if (ioc_state & MPI2_DOORBELL_USED) {
3062 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3063 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3064 		return (!rc) ? SUCCESS : FAILED;
3065 	}
3066 
3067 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3068 		mpt3sas_print_fault_code(ioc, ioc_state &
3069 		    MPI2_DOORBELL_DATA_MASK);
3070 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3071 		return (!rc) ? SUCCESS : FAILED;
3072 	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3073 	    MPI2_IOC_STATE_COREDUMP) {
3074 		mpt3sas_print_coredump_info(ioc, ioc_state &
3075 		    MPI2_DOORBELL_DATA_MASK);
3076 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3077 		return (!rc) ? SUCCESS : FAILED;
3078 	}
3079 
3080 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3081 	if (!smid) {
3082 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3083 		return FAILED;
3084 	}
3085 
3086 	dtmprintk(ioc,
3087 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3088 			   handle, type, smid_task, timeout, tr_method));
3089 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
3090 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3091 	ioc->tm_cmds.smid = smid;
3092 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3093 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3094 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3095 	mpi_request->DevHandle = cpu_to_le16(handle);
3096 	mpi_request->TaskType = type;
3097 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3098 	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3099 		mpi_request->MsgFlags = tr_method;
3100 	mpi_request->TaskMID = cpu_to_le16(smid_task);
3101 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3102 	mpt3sas_scsih_set_tm_flag(ioc, handle);
3103 	init_completion(&ioc->tm_cmds.done);
3104 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
3105 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3106 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3107 		mpt3sas_check_cmd_timeout(ioc,
3108 		    ioc->tm_cmds.status, mpi_request,
3109 		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3110 		if (issue_reset) {
3111 			rc = mpt3sas_base_hard_reset_handler(ioc,
3112 					FORCE_BIG_HAMMER);
3113 			rc = (!rc) ? SUCCESS : FAILED;
3114 			goto out;
3115 		}
3116 	}
3117 
3118 	/* sync IRQs in case those were busy during flush. */
3119 	mpt3sas_base_sync_reply_irqs(ioc, 0);
3120 
3121 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3122 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3123 		mpi_reply = ioc->tm_cmds.reply;
3124 		dtmprintk(ioc,
3125 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3126 				   le16_to_cpu(mpi_reply->IOCStatus),
3127 				   le32_to_cpu(mpi_reply->IOCLogInfo),
3128 				   le32_to_cpu(mpi_reply->TerminationCount)));
3129 		if (ioc->logging_level & MPT_DEBUG_TM) {
3130 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
3131 			if (mpi_reply->IOCStatus)
3132 				_debug_dump_mf(mpi_request,
3133 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3134 		}
3135 	}
3136 
3137 	switch (type) {
3138 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3139 		rc = SUCCESS;
		/*
		 * If the DevHandle field in smid_task's entry of the request
		 * pool doesn't match the device handle on which this task
		 * abort TM was received, then the TM has successfully aborted
		 * the timed out command, since smid_task's entry in the
		 * request pool is memset to zero once the timed out command
		 * is returned to the SML. If the command was not aborted,
		 * then smid_task's entry won't be cleared; it will still have
		 * the same DevHandle value on which this task abort TM was
		 * received, and the driver will return the TM status as
		 * FAILED.
		 */
3151 		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3152 		if (le16_to_cpu(request->DevHandle) != handle)
3153 			break;
3154 
		ioc_info(ioc, "Task abort tm failed: handle(0x%04x), "
3156 		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3157 		    handle, timeout, tr_method, smid_task, msix_task);
3158 		rc = FAILED;
3159 		break;
3160 
3161 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3162 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3163 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3164 		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3165 		    type, smid_task);
3166 		break;
3167 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3168 		rc = SUCCESS;
3169 		break;
3170 	default:
3171 		rc = FAILED;
3172 		break;
3173 	}
3174 
3175 out:
3176 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
3177 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3178 	return rc;
3179 }
3180 
3181 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3182 		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3183 		u16 msix_task, u8 timeout, u8 tr_method)
3184 {
3185 	int ret;
3186 
3187 	mutex_lock(&ioc->tm_cmds.mutex);
3188 	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3189 			smid_task, msix_task, timeout, tr_method);
3190 	mutex_unlock(&ioc->tm_cmds.mutex);
3191 
3192 	return ret;
3193 }
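
/*
 * Usage sketch (illustrative): issuing a LUN reset for a device whose
 * firmware handle is already known. The _locked_ variant takes
 * ioc->tm_cmds.mutex itself; callers that already hold the mutex must call
 * mpt3sas_scsih_issue_tm() directly (values below are illustrative):
 *
 *	rc = mpt3sas_scsih_issue_locked_tm(ioc, handle, sdev->channel,
 *	    sdev->id, sdev->lun,
 *	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30, 0);
 *	if (rc != SUCCESS)
 *		...escalate, e.g. to a target or host reset...
 */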
3194 
3195 /**
3196  * _scsih_tm_display_info - displays info about the device
3197  * @ioc: per adapter struct
3198  * @scmd: pointer to scsi command object
3199  *
3200  * Called by task management callback handlers.
3201  */
3202 static void
3203 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3204 {
3205 	struct scsi_target *starget = scmd->device->sdev_target;
3206 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3207 	struct _sas_device *sas_device = NULL;
3208 	struct _pcie_device *pcie_device = NULL;
3209 	unsigned long flags;
3210 	char *device_str = NULL;
3211 
3212 	if (!priv_target)
3213 		return;
3214 	if (ioc->hide_ir_msg)
3215 		device_str = "WarpDrive";
3216 	else
3217 		device_str = "volume";
3218 
3219 	scsi_print_command(scmd);
3220 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3221 		starget_printk(KERN_INFO, starget,
3222 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
3223 			device_str, priv_target->handle,
3224 		    device_str, (unsigned long long)priv_target->sas_address);
3225 
3226 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3227 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3228 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3229 		if (pcie_device) {
3230 			starget_printk(KERN_INFO, starget,
3231 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3232 				pcie_device->handle,
3233 				(unsigned long long)pcie_device->wwid,
3234 				pcie_device->port_num);
3235 			if (pcie_device->enclosure_handle != 0)
3236 				starget_printk(KERN_INFO, starget,
3237 					"enclosure logical id(0x%016llx), slot(%d)\n",
3238 					(unsigned long long)
3239 					pcie_device->enclosure_logical_id,
3240 					pcie_device->slot);
3241 			if (pcie_device->connector_name[0] != '\0')
3242 				starget_printk(KERN_INFO, starget,
3243 					"enclosure level(0x%04x), connector name( %s)\n",
3244 					pcie_device->enclosure_level,
3245 					pcie_device->connector_name);
3246 			pcie_device_put(pcie_device);
3247 		}
3248 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3249 
3250 	} else {
3251 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
3252 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3253 		if (sas_device) {
3254 			if (priv_target->flags &
3255 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3256 				starget_printk(KERN_INFO, starget,
3257 				    "volume handle(0x%04x), "
3258 				    "volume wwid(0x%016llx)\n",
3259 				    sas_device->volume_handle,
3260 				   (unsigned long long)sas_device->volume_wwid);
3261 			}
3262 			starget_printk(KERN_INFO, starget,
3263 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3264 			    sas_device->handle,
3265 			    (unsigned long long)sas_device->sas_address,
3266 			    sas_device->phy);
3267 
3268 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
3269 			    NULL, starget);
3270 
3271 			sas_device_put(sas_device);
3272 		}
3273 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3274 	}
3275 }
3276 
3277 /**
3278  * scsih_abort - eh threads main abort routine
3279  * @scmd: pointer to scsi command object
3280  *
3281  * Return: SUCCESS if command aborted else FAILED
3282  */
3283 static int
3284 scsih_abort(struct scsi_cmnd *scmd)
3285 {
3286 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3287 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3288 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3289 	u16 handle;
3290 	int r;
3291 
3292 	u8 timeout = 30;
3293 	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort! "
3295 	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3296 	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3297 	    (scmd->request->timeout / HZ) * 1000);
3298 	_scsih_tm_display_info(ioc, scmd);
3299 
3300 	sas_device_priv_data = scmd->device->hostdata;
3301 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3302 	    ioc->remove_host) {
3303 		sdev_printk(KERN_INFO, scmd->device,
3304 		    "device been deleted! scmd(0x%p)\n", scmd);
3305 		scmd->result = DID_NO_CONNECT << 16;
3306 		scmd->scsi_done(scmd);
3307 		r = SUCCESS;
3308 		goto out;
3309 	}
3310 
3311 	/* check for completed command */
3312 	if (st == NULL || st->cb_idx == 0xFF) {
3313 		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3314 		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
3315 		scmd->result = DID_RESET << 16;
3316 		r = SUCCESS;
3317 		goto out;
3318 	}
3319 
3320 	/* for hidden raid components and volumes this is not supported */
3321 	if (sas_device_priv_data->sas_target->flags &
3322 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
3323 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3324 		scmd->result = DID_RESET << 16;
3325 		r = FAILED;
3326 		goto out;
3327 	}
3328 
3329 	mpt3sas_halt_firmware(ioc);
3330 
3331 	handle = sas_device_priv_data->sas_target->handle;
3332 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3333 	if (pcie_device && (!ioc->tm_custom_handling) &&
3334 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3335 		timeout = ioc->nvme_abort_timeout;
3336 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3337 		scmd->device->id, scmd->device->lun,
3338 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3339 		st->smid, st->msix_io, timeout, 0);
3340 	/* Command must be cleared after abort */
3341 	if (r == SUCCESS && st->cb_idx != 0xFF)
3342 		r = FAILED;
3343  out:
3344 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3345 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3346 	if (pcie_device)
3347 		pcie_device_put(pcie_device);
3348 	return r;
3349 }
3350 
3351 /**
3352  * scsih_dev_reset - eh threads main device reset routine
3353  * @scmd: pointer to scsi command object
3354  *
3355  * Return: SUCCESS if command aborted else FAILED
3356  */
3357 static int
3358 scsih_dev_reset(struct scsi_cmnd *scmd)
3359 {
3360 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3361 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3362 	struct _sas_device *sas_device = NULL;
3363 	struct _pcie_device *pcie_device = NULL;
3364 	u16	handle;
3365 	u8	tr_method = 0;
3366 	u8	tr_timeout = 30;
3367 	int r;
3368 
3369 	struct scsi_target *starget = scmd->device->sdev_target;
3370 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3371 
3372 	sdev_printk(KERN_INFO, scmd->device,
3373 	    "attempting device reset! scmd(0x%p)\n", scmd);
3374 	_scsih_tm_display_info(ioc, scmd);
3375 
3376 	sas_device_priv_data = scmd->device->hostdata;
3377 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3378 	    ioc->remove_host) {
3379 		sdev_printk(KERN_INFO, scmd->device,
3380 		    "device been deleted! scmd(0x%p)\n", scmd);
3381 		scmd->result = DID_NO_CONNECT << 16;
3382 		scmd->scsi_done(scmd);
3383 		r = SUCCESS;
3384 		goto out;
3385 	}
3386 
3387 	/* for hidden raid components obtain the volume_handle */
3388 	handle = 0;
3389 	if (sas_device_priv_data->sas_target->flags &
3390 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3391 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3392 				target_priv_data);
3393 		if (sas_device)
3394 			handle = sas_device->volume_handle;
3395 	} else
3396 		handle = sas_device_priv_data->sas_target->handle;
3397 
3398 	if (!handle) {
3399 		scmd->result = DID_RESET << 16;
3400 		r = FAILED;
3401 		goto out;
3402 	}
3403 
3404 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3405 
3406 	if (pcie_device && (!ioc->tm_custom_handling) &&
3407 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3408 		tr_timeout = pcie_device->reset_timeout;
3409 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3410 	} else
3411 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3412 
3413 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3414 		scmd->device->id, scmd->device->lun,
3415 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3416 		tr_timeout, tr_method);
3417 	/* Check for busy commands after reset */
3418 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
3419 		r = FAILED;
3420  out:
3421 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3422 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3423 
3424 	if (sas_device)
3425 		sas_device_put(sas_device);
3426 	if (pcie_device)
3427 		pcie_device_put(pcie_device);
3428 
3429 	return r;
3430 }
3431 
3432 /**
 * scsih_target_reset - eh thread's main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the target was reset, else FAILED
3437  */
3438 static int
3439 scsih_target_reset(struct scsi_cmnd *scmd)
3440 {
3441 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3442 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3443 	struct _sas_device *sas_device = NULL;
3444 	struct _pcie_device *pcie_device = NULL;
3445 	u16	handle;
3446 	u8	tr_method = 0;
3447 	u8	tr_timeout = 30;
3448 	int r;
3449 	struct scsi_target *starget = scmd->device->sdev_target;
3450 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3451 
3452 	starget_printk(KERN_INFO, starget,
3453 	    "attempting target reset! scmd(0x%p)\n", scmd);
3454 	_scsih_tm_display_info(ioc, scmd);
3455 
3456 	sas_device_priv_data = scmd->device->hostdata;
3457 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3458 	    ioc->remove_host) {
3459 		starget_printk(KERN_INFO, starget,
		    "target has been deleted! scmd(0x%p)\n", scmd);
3461 		scmd->result = DID_NO_CONNECT << 16;
3462 		scmd->scsi_done(scmd);
3463 		r = SUCCESS;
3464 		goto out;
3465 	}
3466 
3467 	/* for hidden raid components obtain the volume_handle */
3468 	handle = 0;
3469 	if (sas_device_priv_data->sas_target->flags &
3470 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3471 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3472 				target_priv_data);
3473 		if (sas_device)
3474 			handle = sas_device->volume_handle;
3475 	} else
3476 		handle = sas_device_priv_data->sas_target->handle;
3477 
3478 	if (!handle) {
3479 		scmd->result = DID_RESET << 16;
3480 		r = FAILED;
3481 		goto out;
3482 	}
3483 
3484 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3485 
3486 	if (pcie_device && (!ioc->tm_custom_handling) &&
3487 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3488 		tr_timeout = pcie_device->reset_timeout;
3489 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3490 	} else
3491 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3492 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3493 		scmd->device->id, 0,
3494 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
		tr_timeout, tr_method);
3496 	/* Check for busy commands after reset */
3497 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3498 		r = FAILED;
3499  out:
3500 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3501 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3502 
3503 	if (sas_device)
3504 		sas_device_put(sas_device);
3505 	if (pcie_device)
3506 		pcie_device_put(pcie_device);
3507 	return r;
3508 }
3509 
3510 
3511 /**
 * scsih_host_reset - eh thread's main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the host was reset, else FAILED
3516  */
3517 static int
3518 scsih_host_reset(struct scsi_cmnd *scmd)
3519 {
3520 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3521 	int r, retval;
3522 
3523 	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3524 	scsi_print_command(scmd);
3525 
3526 	if (ioc->is_driver_loading || ioc->remove_host) {
3527 		ioc_info(ioc, "Blocking the host reset\n");
3528 		r = FAILED;
3529 		goto out;
3530 	}
3531 
3532 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3533 	r = (retval < 0) ? FAILED : SUCCESS;
3534 out:
3535 	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3536 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3537 
3538 	return r;
3539 }
3540 
3541 /**
3542  * _scsih_fw_event_add - insert and queue up fw_event
3543  * @ioc: per adapter object
3544  * @fw_event: object describing the event
3545  * Context: This function will acquire ioc->fw_event_lock.
3546  *
 * This adds the firmware event object to the linked list, then queues
 * it up to be processed from user context.
3549  */
3550 static void
3551 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3552 {
3553 	unsigned long flags;
3554 
3555 	if (ioc->firmware_event_thread == NULL)
3556 		return;
3557 
3558 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3559 	fw_event_work_get(fw_event);
3560 	INIT_LIST_HEAD(&fw_event->list);
3561 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3562 	INIT_WORK(&fw_event->work, _firmware_event_work);
3563 	fw_event_work_get(fw_event);
3564 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3565 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3566 }
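
/*
 * Illustrative sketch of how the callers in this file pair an event
 * allocation with _scsih_fw_event_add() (descriptive only, not extra
 * driver code; the event value below is just a placeholder):
 *
 *	fw_event = alloc_fw_event_work(0);	// allocation holds one reference
 *	if (!fw_event)
 *		return;
 *	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
 *	fw_event->ioc = ioc;
 *	_scsih_fw_event_add(ioc, fw_event);	// +1 ref for the list, +1 for the work
 *	fw_event_work_put(fw_event);		// caller drops its allocation reference
 *
 * The list reference is dropped by _scsih_fw_event_del_from_list() (or
 * by the cleanup path) and the work reference is dropped once the event
 * has been processed from user context.
 */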
3567 
3568 /**
3569  * _scsih_fw_event_del_from_list - delete fw_event from the list
3570  * @ioc: per adapter object
3571  * @fw_event: object describing the event
3572  * Context: This function will acquire ioc->fw_event_lock.
3573  *
3574  * If the fw_event is on the fw_event_list, remove it and do a put.
3575  */
3576 static void
3577 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3578 	*fw_event)
3579 {
3580 	unsigned long flags;
3581 
3582 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3583 	if (!list_empty(&fw_event->list)) {
3584 		list_del_init(&fw_event->list);
3585 		fw_event_work_put(fw_event);
3586 	}
3587 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3588 }
3589 
3590 
/**
3592  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3593  * @ioc: per adapter object
3594  * @event_data: trigger event data
3595  */
3596 void
3597 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3598 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3599 {
3600 	struct fw_event_work *fw_event;
3601 	u16 sz;
3602 
3603 	if (ioc->is_driver_loading)
3604 		return;
3605 	sz = sizeof(*event_data);
3606 	fw_event = alloc_fw_event_work(sz);
3607 	if (!fw_event)
3608 		return;
3609 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3610 	fw_event->ioc = ioc;
3611 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3612 	_scsih_fw_event_add(ioc, fw_event);
3613 	fw_event_work_put(fw_event);
3614 }
3615 
3616 /**
3617  * _scsih_error_recovery_delete_devices - remove devices not responding
3618  * @ioc: per adapter object
3619  */
3620 static void
3621 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3622 {
3623 	struct fw_event_work *fw_event;
3624 
3625 	if (ioc->is_driver_loading)
3626 		return;
3627 	fw_event = alloc_fw_event_work(0);
3628 	if (!fw_event)
3629 		return;
3630 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3631 	fw_event->ioc = ioc;
3632 	_scsih_fw_event_add(ioc, fw_event);
3633 	fw_event_work_put(fw_event);
3634 }
3635 
3636 /**
3637  * mpt3sas_port_enable_complete - port enable completed (fake event)
3638  * @ioc: per adapter object
3639  */
3640 void
3641 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3642 {
3643 	struct fw_event_work *fw_event;
3644 
3645 	fw_event = alloc_fw_event_work(0);
3646 	if (!fw_event)
3647 		return;
3648 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3649 	fw_event->ioc = ioc;
3650 	_scsih_fw_event_add(ioc, fw_event);
3651 	fw_event_work_put(fw_event);
3652 }
3653 
3654 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3655 {
3656 	unsigned long flags;
3657 	struct fw_event_work *fw_event = NULL;
3658 
3659 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3660 	if (!list_empty(&ioc->fw_event_list)) {
3661 		fw_event = list_first_entry(&ioc->fw_event_list,
3662 				struct fw_event_work, list);
3663 		list_del_init(&fw_event->list);
3664 	}
3665 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3666 
3667 	return fw_event;
3668 }
3669 
3670 /**
3671  * _scsih_fw_event_cleanup_queue - cleanup event queue
3672  * @ioc: per adapter object
3673  *
 * Walk the firmware event queue, either cancelling pending work items
 * or waiting for outstanding events to complete.
3676  *
3677  * Context: task, can sleep
3678  */
3679 static void
3680 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3681 {
3682 	struct fw_event_work *fw_event;
3683 
3684 	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3685 	    !ioc->firmware_event_thread)
3686 		return;
3687 
3688 	ioc->fw_events_cleanup = 1;
3689 	while ((fw_event = dequeue_next_fw_event(ioc)) ||
3690 	     (fw_event = ioc->current_event)) {
3691 		/*
3692 		 * Wait on the fw_event to complete. If this returns 1, then
3693 		 * the event was never executed, and we need a put for the
3694 		 * reference the work had on the fw_event.
3695 		 *
3696 		 * If it did execute, we wait for it to finish, and the put will
3697 		 * happen from _firmware_event_work()
3698 		 */
3699 		if (cancel_work_sync(&fw_event->work))
3700 			fw_event_work_put(fw_event);
3701 
3702 		fw_event_work_put(fw_event);
3703 	}
3704 	ioc->fw_events_cleanup = 0;
3705 }
3706 
3707 /**
3708  * _scsih_internal_device_block - block the sdev device
3709  * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Make sure the device is blocked without error; if not,
 * print an error.
3714  */
3715 static void
3716 _scsih_internal_device_block(struct scsi_device *sdev,
3717 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3718 {
3719 	int r = 0;
3720 
3721 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3722 	    sas_device_priv_data->sas_target->handle);
3723 	sas_device_priv_data->block = 1;
3724 
3725 	r = scsi_internal_device_block_nowait(sdev);
3726 	if (r == -EINVAL)
3727 		sdev_printk(KERN_WARNING, sdev,
3728 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3729 		    r, sas_device_priv_data->sas_target->handle);
3730 }
3731 
3732 /**
3733  * _scsih_internal_device_unblock - unblock the sdev device
3734  * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Make sure the device is unblocked without error; if not, retry
 * by blocking and then unblocking.
 */
3740 static void
3741 _scsih_internal_device_unblock(struct scsi_device *sdev,
3742 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3743 {
3744 	int r = 0;
3745 
3746 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3747 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3748 	sas_device_priv_data->block = 0;
3749 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3750 	if (r == -EINVAL) {
3751 		/* The device has been set to SDEV_RUNNING by SD layer during
3752 		 * device addition but the request queue is still stopped by
3753 		 * our earlier block call. We need to perform a block again
3754 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3755 
3756 		sdev_printk(KERN_WARNING, sdev,
3757 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3758 		    "performing a block followed by an unblock\n",
3759 		    r, sas_device_priv_data->sas_target->handle);
3760 		sas_device_priv_data->block = 1;
3761 		r = scsi_internal_device_block_nowait(sdev);
3762 		if (r)
3763 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3764 			    "failed with return(%d) for handle(0x%04x)\n",
3765 			    r, sas_device_priv_data->sas_target->handle);
3766 
3767 		sas_device_priv_data->block = 0;
3768 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3769 		if (r)
3770 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3771 			    " failed with return(%d) for handle(0x%04x)\n",
3772 			    r, sas_device_priv_data->sas_target->handle);
3773 	}
3774 }
3775 
3776 /**
3777  * _scsih_ublock_io_all_device - unblock every device
3778  * @ioc: per adapter object
3779  *
3780  * change the device state from block to running
3781  */
3782 static void
3783 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3784 {
3785 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3786 	struct scsi_device *sdev;
3787 
3788 	shost_for_each_device(sdev, ioc->shost) {
3789 		sas_device_priv_data = sdev->hostdata;
3790 		if (!sas_device_priv_data)
3791 			continue;
3792 		if (!sas_device_priv_data->block)
3793 			continue;
3794 
3795 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3796 			"device_running, handle(0x%04x)\n",
3797 		    sas_device_priv_data->sas_target->handle));
3798 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3799 	}
3800 }
3801 
3802 
3803 /**
 * _scsih_ublock_io_device - unblock devices matching a sas address
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 *
 * Change the state of matching devices from blocked back to running.
3810  */
3811 static void
3812 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3813 	u64 sas_address, struct hba_port *port)
3814 {
3815 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3816 	struct scsi_device *sdev;
3817 
3818 	shost_for_each_device(sdev, ioc->shost) {
3819 		sas_device_priv_data = sdev->hostdata;
3820 		if (!sas_device_priv_data)
3821 			continue;
3822 		if (sas_device_priv_data->sas_target->sas_address
3823 		    != sas_address)
3824 			continue;
3825 		if (sas_device_priv_data->sas_target->port != port)
3826 			continue;
3827 		if (sas_device_priv_data->block)
3828 			_scsih_internal_device_unblock(sdev,
3829 				sas_device_priv_data);
3830 	}
3831 }
3832 
3833 /**
3834  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3835  * @ioc: per adapter object
3836  *
3837  * During device pull we need to appropriately set the sdev state.
3838  */
3839 static void
3840 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3841 {
3842 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3843 	struct scsi_device *sdev;
3844 
3845 	shost_for_each_device(sdev, ioc->shost) {
3846 		sas_device_priv_data = sdev->hostdata;
3847 		if (!sas_device_priv_data)
3848 			continue;
3849 		if (sas_device_priv_data->block)
3850 			continue;
3851 		if (sas_device_priv_data->ignore_delay_remove) {
3852 			sdev_printk(KERN_INFO, sdev,
3853 			"%s skip device_block for SES handle(0x%04x)\n",
3854 			__func__, sas_device_priv_data->sas_target->handle);
3855 			continue;
3856 		}
3857 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3858 	}
3859 }
3860 
3861 /**
3862  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3863  * @ioc: per adapter object
3864  * @handle: device handle
3865  *
3866  * During device pull we need to appropriately set the sdev state.
3867  */
3868 static void
3869 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3870 {
3871 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3872 	struct scsi_device *sdev;
3873 	struct _sas_device *sas_device;
3874 
3875 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3876 
3877 	shost_for_each_device(sdev, ioc->shost) {
3878 		sas_device_priv_data = sdev->hostdata;
3879 		if (!sas_device_priv_data)
3880 			continue;
3881 		if (sas_device_priv_data->sas_target->handle != handle)
3882 			continue;
3883 		if (sas_device_priv_data->block)
3884 			continue;
3885 		if (sas_device && sas_device->pend_sas_rphy_add)
3886 			continue;
3887 		if (sas_device_priv_data->ignore_delay_remove) {
3888 			sdev_printk(KERN_INFO, sdev,
3889 			"%s skip device_block for SES handle(0x%04x)\n",
3890 			__func__, sas_device_priv_data->sas_target->handle);
3891 			continue;
3892 		}
3893 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3894 	}
3895 
3896 	if (sas_device)
3897 		sas_device_put(sas_device);
3898 }
3899 
3900 /**
 * _scsih_block_io_to_children_attached_to_ex - block devices behind an expander
 * @ioc: per adapter object
 * @sas_expander: the sas_node object
 *
 * This routine sets the sdev state to SDEV_BLOCK for all devices
 * attached to this expander.  It is called when the expander is
 * pulled.
3908  */
3909 static void
3910 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3911 	struct _sas_node *sas_expander)
3912 {
3913 	struct _sas_port *mpt3sas_port;
3914 	struct _sas_device *sas_device;
3915 	struct _sas_node *expander_sibling;
3916 	unsigned long flags;
3917 
3918 	if (!sas_expander)
3919 		return;
3920 
3921 	list_for_each_entry(mpt3sas_port,
3922 	   &sas_expander->sas_port_list, port_list) {
3923 		if (mpt3sas_port->remote_identify.device_type ==
3924 		    SAS_END_DEVICE) {
3925 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3926 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3927 			    mpt3sas_port->remote_identify.sas_address,
3928 			    mpt3sas_port->hba_port);
3929 			if (sas_device) {
3930 				set_bit(sas_device->handle,
3931 						ioc->blocking_handles);
3932 				sas_device_put(sas_device);
3933 			}
3934 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3935 		}
3936 	}
3937 
3938 	list_for_each_entry(mpt3sas_port,
3939 	   &sas_expander->sas_port_list, port_list) {
3940 
3941 		if (mpt3sas_port->remote_identify.device_type ==
3942 		    SAS_EDGE_EXPANDER_DEVICE ||
3943 		    mpt3sas_port->remote_identify.device_type ==
3944 		    SAS_FANOUT_EXPANDER_DEVICE) {
3945 			expander_sibling =
3946 			    mpt3sas_scsih_expander_find_by_sas_address(
3947 			    ioc, mpt3sas_port->remote_identify.sas_address,
3948 			    mpt3sas_port->hba_port);
3949 			_scsih_block_io_to_children_attached_to_ex(ioc,
3950 			    expander_sibling);
3951 		}
3952 	}
3953 }
3954 
3955 /**
 * _scsih_block_io_to_children_attached_directly - block directly attached devices
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine sets the sdev state to SDEV_BLOCK for all directly
 * attached devices during device pull.
3962  */
3963 static void
3964 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3965 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3966 {
3967 	int i;
3968 	u16 handle;
3969 	u16 reason_code;
3970 
3971 	for (i = 0; i < event_data->NumEntries; i++) {
3972 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3973 		if (!handle)
3974 			continue;
3975 		reason_code = event_data->PHY[i].PhyStatus &
3976 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3977 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3978 			_scsih_block_io_device(ioc, handle);
3979 	}
3980 }
3981 
3982 /**
 * _scsih_block_io_to_pcie_children_attached_directly - block attached PCIe devices
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine sets the sdev state to SDEV_BLOCK for all directly
 * attached devices during device pull/reconnect.
3989  */
3990 static void
3991 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3992 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3993 {
3994 	int i;
3995 	u16 handle;
3996 	u16 reason_code;
3997 
3998 	for (i = 0; i < event_data->NumEntries; i++) {
3999 		handle =
4000 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4001 		if (!handle)
4002 			continue;
4003 		reason_code = event_data->PortEntry[i].PortStatus;
4004 		if (reason_code ==
4005 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4006 			_scsih_block_io_device(ioc, handle);
4007 	}
4008 }

/**
4010  * _scsih_tm_tr_send - send task management request
4011  * @ioc: per adapter object
4012  * @handle: device handle
4013  * Context: interrupt time.
4014  *
 * This code initiates the device removal handshake protocol
 * with controller firmware.  This function will issue a target reset
 * using the high priority request queue.  It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from the target reset
 * completion.
 *
 * This is designed to send multiple task management requests at the
 * same time to the fifo. If the fifo is full, we will append the
 * request, and process it in a future completion.
4023  */
4024 static void
4025 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4026 {
4027 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4028 	u16 smid;
4029 	struct _sas_device *sas_device = NULL;
4030 	struct _pcie_device *pcie_device = NULL;
4031 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4032 	u64 sas_address = 0;
4033 	unsigned long flags;
4034 	struct _tr_list *delayed_tr;
4035 	u32 ioc_state;
4036 	u8 tr_method = 0;
4037 	struct hba_port *port = NULL;
4038 
4039 	if (ioc->pci_error_recovery) {
4040 		dewtprintk(ioc,
4041 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4042 				    __func__, handle));
4043 		return;
4044 	}
4045 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4046 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4047 		dewtprintk(ioc,
4048 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4049 				    __func__, handle));
4050 		return;
4051 	}
4052 
4053 	/* if PD, then return */
4054 	if (test_bit(handle, ioc->pd_handles))
4055 		return;
4056 
4057 	clear_bit(handle, ioc->pend_os_device_add);
4058 
4059 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
4060 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4061 	if (sas_device && sas_device->starget &&
4062 	    sas_device->starget->hostdata) {
4063 		sas_target_priv_data = sas_device->starget->hostdata;
4064 		sas_target_priv_data->deleted = 1;
4065 		sas_address = sas_device->sas_address;
4066 		port = sas_device->port;
4067 	}
4068 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4069 	if (!sas_device) {
4070 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4071 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4072 		if (pcie_device && pcie_device->starget &&
4073 			pcie_device->starget->hostdata) {
4074 			sas_target_priv_data = pcie_device->starget->hostdata;
4075 			sas_target_priv_data->deleted = 1;
4076 			sas_address = pcie_device->wwid;
4077 		}
4078 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4079 		if (pcie_device && (!ioc->tm_custom_handling) &&
4080 		    (!(mpt3sas_scsih_is_pcie_scsi_device(
4081 		    pcie_device->device_info))))
4082 			tr_method =
4083 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4084 		else
4085 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4086 	}
4087 	if (sas_target_priv_data) {
4088 		dewtprintk(ioc,
4089 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4090 				    handle, (u64)sas_address));
4091 		if (sas_device) {
4092 			if (sas_device->enclosure_handle != 0)
4093 				dewtprintk(ioc,
4094 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4095 						    (u64)sas_device->enclosure_logical_id,
4096 						    sas_device->slot));
4097 			if (sas_device->connector_name[0] != '\0')
4098 				dewtprintk(ioc,
4099 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4100 						    sas_device->enclosure_level,
4101 						    sas_device->connector_name));
4102 		} else if (pcie_device) {
4103 			if (pcie_device->enclosure_handle != 0)
4104 				dewtprintk(ioc,
4105 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4106 						    (u64)pcie_device->enclosure_logical_id,
4107 						    pcie_device->slot));
4108 			if (pcie_device->connector_name[0] != '\0')
4109 				dewtprintk(ioc,
4110 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4111 						    pcie_device->enclosure_level,
4112 						    pcie_device->connector_name));
4113 		}
4114 		_scsih_ublock_io_device(ioc, sas_address, port);
4115 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4116 	}
4117 
4118 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4119 	if (!smid) {
4120 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4121 		if (!delayed_tr)
4122 			goto out;
4123 		INIT_LIST_HEAD(&delayed_tr->list);
4124 		delayed_tr->handle = handle;
4125 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4126 		dewtprintk(ioc,
4127 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4128 				    handle));
4129 		goto out;
4130 	}
4131 
4132 	dewtprintk(ioc,
4133 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4134 			    handle, smid, ioc->tm_tr_cb_idx));
4135 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4136 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4137 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4138 	mpi_request->DevHandle = cpu_to_le16(handle);
4139 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4140 	mpi_request->MsgFlags = tr_method;
4141 	set_bit(handle, ioc->device_remove_in_progress);
4142 	ioc->put_smid_hi_priority(ioc, smid, 0);
4143 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4144 
4145 out:
4146 	if (sas_device)
4147 		sas_device_put(sas_device);
4148 	if (pcie_device)
4149 		pcie_device_put(pcie_device);
4150 }
4151 
4152 /**
 * _scsih_tm_tr_complete - target reset completion routine
4154  * @ioc: per adapter object
4155  * @smid: system request message index
4156  * @msix_index: MSIX table index supplied by the OS
4157  * @reply: reply message frame(lower 32bit addr)
4158  * Context: interrupt time.
4159  *
 * This is the target reset completion routine.
 * This is part of the code that initiates the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE).
4164  *
4165  * Return: 1 meaning mf should be freed from _base_interrupt
4166  *         0 means the mf is freed from this function.
4167  */
4168 static u8
4169 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4170 	u32 reply)
4171 {
4172 	u16 handle;
4173 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4174 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4175 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4176 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4177 	u16 smid_sas_ctrl;
4178 	u32 ioc_state;
4179 	struct _sc_list *delayed_sc;
4180 
4181 	if (ioc->pci_error_recovery) {
4182 		dewtprintk(ioc,
4183 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4184 				    __func__));
4185 		return 1;
4186 	}
4187 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4188 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4189 		dewtprintk(ioc,
4190 			   ioc_info(ioc, "%s: host is not operational\n",
4191 				    __func__));
4192 		return 1;
4193 	}
4194 	if (unlikely(!mpi_reply)) {
4195 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4196 			__FILE__, __LINE__, __func__);
4197 		return 1;
4198 	}
4199 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4200 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4201 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4202 		dewtprintk(ioc,
4203 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4204 				   handle,
4205 				   le16_to_cpu(mpi_reply->DevHandle), smid));
4206 		return 0;
4207 	}
4208 
4209 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4210 	dewtprintk(ioc,
4211 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4212 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4213 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4214 			    le32_to_cpu(mpi_reply->TerminationCount)));
4215 
4216 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4217 	if (!smid_sas_ctrl) {
4218 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4219 		if (!delayed_sc)
4220 			return _scsih_check_for_pending_tm(ioc, smid);
4221 		INIT_LIST_HEAD(&delayed_sc->list);
4222 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4223 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4224 		dewtprintk(ioc,
4225 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4226 				    handle));
4227 		return _scsih_check_for_pending_tm(ioc, smid);
4228 	}
4229 
4230 	dewtprintk(ioc,
4231 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4232 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4233 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4234 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4235 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4236 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4237 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
4238 	ioc->put_smid_default(ioc, smid_sas_ctrl);
4239 
4240 	return _scsih_check_for_pending_tm(ioc, smid);
4241 }
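
/*
 * Summary sketch of the device removal handshake implemented by
 * _scsih_tm_tr_send() and _scsih_tm_tr_complete() above (descriptive
 * only, not extra driver code):
 *
 *	_scsih_tm_tr_send(ioc, handle)
 *	    -> high priority SCSI task management, TASKTYPE_TARGET_RESET
 *	       (queued on ioc->delayed_tr_list if no hi-priority smid is free)
 *	_scsih_tm_tr_complete()
 *	    -> SAS IO unit control, MPI2_SAS_OP_REMOVE_DEVICE
 *	       (queued on ioc->delayed_sc_list if no internal smid is free)
 *	_scsih_sas_control_complete()
 *	    -> clears the handle in ioc->device_remove_in_progress
 *
 * Each completion also drains the corresponding delayed list through
 * _scsih_check_for_pending_tm() or
 * mpt3sas_check_for_pending_internal_cmds().
 */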
4242 
/**
 * _scsih_allow_scmd_to_device - check whether scmd needs to be
 *				 issued to the IOC or not
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Return: true if scmd can be issued to the IOC, otherwise false.
4249  */
4250 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4251 	struct scsi_cmnd *scmd)
4252 {
4253 
4254 	if (ioc->pci_error_recovery)
4255 		return false;
4256 
4257 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4258 		if (ioc->remove_host)
4259 			return false;
4260 
4261 		return true;
4262 	}
4263 
4264 	if (ioc->remove_host) {
4265 
4266 		switch (scmd->cmnd[0]) {
4267 		case SYNCHRONIZE_CACHE:
4268 		case START_STOP:
4269 			return true;
4270 		default:
4271 			return false;
4272 		}
4273 	}
4274 
4275 	return true;
4276 }
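
/*
 * Example of the policy above (descriptive only): on a non-MPI2
 * generation HBA that is being removed (ioc->remove_host set), only
 * SYNCHRONIZE_CACHE and START_STOP are still forwarded to the IOC;
 * every other opcode is failed back by scsih_qcmd() with
 * DID_NO_CONNECT.
 */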
4277 
4278 /**
4279  * _scsih_sas_control_complete - completion routine
4280  * @ioc: per adapter object
4281  * @smid: system request message index
4282  * @msix_index: MSIX table index supplied by the OS
4283  * @reply: reply message frame(lower 32bit addr)
4284  * Context: interrupt time.
4285  *
4286  * This is the sas iounit control completion routine.
 * This is part of the code that initiates the device removal
 * handshake protocol with controller firmware.
4289  *
4290  * Return: 1 meaning mf should be freed from _base_interrupt
4291  *         0 means the mf is freed from this function.
4292  */
4293 static u8
4294 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4295 	u8 msix_index, u32 reply)
4296 {
4297 	Mpi2SasIoUnitControlReply_t *mpi_reply =
4298 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4299 
4300 	if (likely(mpi_reply)) {
4301 		dewtprintk(ioc,
4302 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4303 				    le16_to_cpu(mpi_reply->DevHandle), smid,
4304 				    le16_to_cpu(mpi_reply->IOCStatus),
4305 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
4306 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
4307 		     MPI2_IOCSTATUS_SUCCESS) {
4308 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4309 			    ioc->device_remove_in_progress);
4310 		}
4311 	} else {
4312 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4313 			__FILE__, __LINE__, __func__);
4314 	}
4315 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4316 }
4317 
4318 /**
4319  * _scsih_tm_tr_volume_send - send target reset request for volumes
4320  * @ioc: per adapter object
4321  * @handle: device handle
4322  * Context: interrupt time.
4323  *
 * This is designed to send multiple task management requests at the
 * same time to the fifo. If the fifo is full, we will append the
 * request, and process it in a future completion.
4327  */
4328 static void
4329 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4330 {
4331 	Mpi2SCSITaskManagementRequest_t *mpi_request;
4332 	u16 smid;
4333 	struct _tr_list *delayed_tr;
4334 
4335 	if (ioc->pci_error_recovery) {
4336 		dewtprintk(ioc,
4337 			   ioc_info(ioc, "%s: host reset in progress!\n",
4338 				    __func__));
4339 		return;
4340 	}
4341 
4342 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4343 	if (!smid) {
4344 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4345 		if (!delayed_tr)
4346 			return;
4347 		INIT_LIST_HEAD(&delayed_tr->list);
4348 		delayed_tr->handle = handle;
4349 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4350 		dewtprintk(ioc,
4351 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4352 				    handle));
4353 		return;
4354 	}
4355 
4356 	dewtprintk(ioc,
4357 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4358 			    handle, smid, ioc->tm_tr_volume_cb_idx));
4359 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4360 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4361 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4362 	mpi_request->DevHandle = cpu_to_le16(handle);
4363 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4364 	ioc->put_smid_hi_priority(ioc, smid, 0);
4365 }
4366 
4367 /**
4368  * _scsih_tm_volume_tr_complete - target reset completion
4369  * @ioc: per adapter object
4370  * @smid: system request message index
4371  * @msix_index: MSIX table index supplied by the OS
4372  * @reply: reply message frame(lower 32bit addr)
4373  * Context: interrupt time.
4374  *
4375  * Return: 1 meaning mf should be freed from _base_interrupt
4376  *         0 means the mf is freed from this function.
4377  */
4378 static u8
4379 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4380 	u8 msix_index, u32 reply)
4381 {
4382 	u16 handle;
4383 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4384 	Mpi2SCSITaskManagementReply_t *mpi_reply =
4385 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4386 
4387 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
4388 		dewtprintk(ioc,
4389 			   ioc_info(ioc, "%s: host reset in progress!\n",
4390 				    __func__));
4391 		return 1;
4392 	}
4393 	if (unlikely(!mpi_reply)) {
4394 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4395 			__FILE__, __LINE__, __func__);
4396 		return 1;
4397 	}
4398 
4399 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4400 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
4401 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4402 		dewtprintk(ioc,
4403 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4404 				   handle, le16_to_cpu(mpi_reply->DevHandle),
4405 				   smid));
4406 		return 0;
4407 	}
4408 
4409 	dewtprintk(ioc,
4410 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4411 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4412 			    le32_to_cpu(mpi_reply->IOCLogInfo),
4413 			    le32_to_cpu(mpi_reply->TerminationCount)));
4414 
4415 	return _scsih_check_for_pending_tm(ioc, smid);
4416 }
4417 
4418 /**
4419  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4420  * @ioc: per adapter object
4421  * @smid: system request message index
4422  * @event: Event ID
4423  * @event_context: used to track events uniquely
4424  *
4425  * Context - processed in interrupt context.
4426  */
4427 static void
4428 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4429 				U32 event_context)
4430 {
4431 	Mpi2EventAckRequest_t *ack_request;
4432 	int i = smid - ioc->internal_smid;
4433 	unsigned long flags;
4434 
4435 	/* Without releasing the smid just update the
4436 	 * call back index and reuse the same smid for
4437 	 * processing this delayed request
4438 	 */
4439 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4440 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4441 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4442 
4443 	dewtprintk(ioc,
4444 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4445 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
4446 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4447 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4448 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4449 	ack_request->Event = event;
4450 	ack_request->EventContext = event_context;
4451 	ack_request->VF_ID = 0;  /* TODO */
4452 	ack_request->VP_ID = 0;
4453 	ioc->put_smid_default(ioc, smid);
4454 }
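
/*
 * Illustrative note on the smid reuse above (not extra driver code):
 * the internal smid that just completed is not freed; only its callback
 * index is rewritten before the same message frame is resubmitted:
 *
 *	i = smid - ioc->internal_smid;
 *	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;  // retarget completion
 *	// ...rebuild the request in the frame returned by
 *	// mpt3sas_base_get_msg_frame(ioc, smid)...
 *	ioc->put_smid_default(ioc, smid);
 *
 * _scsih_issue_delayed_sas_io_unit_ctrl() below follows the same
 * pattern with ioc->tm_sas_control_cb_idx.
 */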
4455 
4456 /**
4457  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4458  *				sas_io_unit_ctrl messages
4459  * @ioc: per adapter object
4460  * @smid: system request message index
4461  * @handle: device handle
4462  *
4463  * Context - processed in interrupt context.
4464  */
4465 static void
4466 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4467 					u16 smid, u16 handle)
4468 {
4469 	Mpi2SasIoUnitControlRequest_t *mpi_request;
4470 	u32 ioc_state;
4471 	int i = smid - ioc->internal_smid;
4472 	unsigned long flags;
4473 
4474 	if (ioc->remove_host) {
4475 		dewtprintk(ioc,
4476 			   ioc_info(ioc, "%s: host has been removed\n",
4477 				    __func__));
4478 		return;
4479 	} else if (ioc->pci_error_recovery) {
4480 		dewtprintk(ioc,
4481 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4482 				    __func__));
4483 		return;
4484 	}
4485 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4486 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4487 		dewtprintk(ioc,
4488 			   ioc_info(ioc, "%s: host is not operational\n",
4489 				    __func__));
4490 		return;
4491 	}
4492 
4493 	/* Without releasing the smid just update the
4494 	 * call back index and reuse the same smid for
4495 	 * processing this delayed request
4496 	 */
4497 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4498 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4499 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4500 
4501 	dewtprintk(ioc,
4502 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4503 			    handle, smid, ioc->tm_sas_control_cb_idx));
4504 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4505 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4506 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4507 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4508 	mpi_request->DevHandle = cpu_to_le16(handle);
4509 	ioc->put_smid_default(ioc, smid);
4510 }
4511 
4512 /**
 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4514  * @ioc: per adapter object
4515  * @smid: system request message index
4516  *
4517  * Context: Executed in interrupt context
4518  *
 * This will check the delayed internal message list, and process the
 * next request.
4521  *
4522  * Return: 1 meaning mf should be freed from _base_interrupt
4523  *         0 means the mf is freed from this function.
4524  */
4525 u8
4526 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4527 {
4528 	struct _sc_list *delayed_sc;
4529 	struct _event_ack_list *delayed_event_ack;
4530 
4531 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4532 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4533 						struct _event_ack_list, list);
4534 		_scsih_issue_delayed_event_ack(ioc, smid,
4535 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4536 		list_del(&delayed_event_ack->list);
4537 		kfree(delayed_event_ack);
4538 		return 0;
4539 	}
4540 
4541 	if (!list_empty(&ioc->delayed_sc_list)) {
4542 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4543 						struct _sc_list, list);
4544 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4545 						 delayed_sc->handle);
4546 		list_del(&delayed_sc->list);
4547 		kfree(delayed_sc);
4548 		return 0;
4549 	}
4550 	return 1;
4551 }
4552 
4553 /**
4554  * _scsih_check_for_pending_tm - check for pending task management
4555  * @ioc: per adapter object
4556  * @smid: system request message index
4557  *
 * This will check the delayed target reset list, and feed the
 * next request.
4560  *
4561  * Return: 1 meaning mf should be freed from _base_interrupt
4562  *         0 means the mf is freed from this function.
4563  */
4564 static u8
4565 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4566 {
4567 	struct _tr_list *delayed_tr;
4568 
4569 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4570 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4571 		    struct _tr_list, list);
4572 		mpt3sas_base_free_smid(ioc, smid);
4573 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4574 		list_del(&delayed_tr->list);
4575 		kfree(delayed_tr);
4576 		return 0;
4577 	}
4578 
4579 	if (!list_empty(&ioc->delayed_tr_list)) {
4580 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4581 		    struct _tr_list, list);
4582 		mpt3sas_base_free_smid(ioc, smid);
4583 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4584 		list_del(&delayed_tr->list);
4585 		kfree(delayed_tr);
4586 		return 0;
4587 	}
4588 
4589 	return 1;
4590 }
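
/*
 * Drain order note (descriptive only): _scsih_check_for_pending_tm()
 * services ioc->delayed_tr_volume_list before ioc->delayed_tr_list, so
 * a queued volume target reset is replayed ahead of any queued device
 * target reset.  In both branches the just-completed smid is freed with
 * mpt3sas_base_free_smid() and 0 is returned so that _base_interrupt
 * does not free the message frame a second time.
 */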
4591 
4592 /**
4593  * _scsih_check_topo_delete_events - sanity check on topo events
4594  * @ioc: per adapter object
4595  * @event_data: the event data payload
4596  *
 * This routine was added to better handle cable breaker events.
4598  *
4599  * This handles the case where driver receives multiple expander
4600  * add and delete events in a single shot.  When there is a delete event
4601  * the routine will void any pending add events waiting in the event queue.
4602  */
4603 static void
4604 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4605 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4606 {
4607 	struct fw_event_work *fw_event;
4608 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4609 	u16 expander_handle;
4610 	struct _sas_node *sas_expander;
4611 	unsigned long flags;
4612 	int i, reason_code;
4613 	u16 handle;
4614 
4615 	for (i = 0 ; i < event_data->NumEntries; i++) {
4616 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4617 		if (!handle)
4618 			continue;
4619 		reason_code = event_data->PHY[i].PhyStatus &
4620 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4621 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4622 			_scsih_tm_tr_send(ioc, handle);
4623 	}
4624 
4625 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4626 	if (expander_handle < ioc->sas_hba.num_phys) {
4627 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4628 		return;
4629 	}
4630 	if (event_data->ExpStatus ==
4631 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4632 		/* put expander attached devices into blocking state */
4633 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4634 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4635 		    expander_handle);
4636 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4637 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4638 		do {
4639 			handle = find_first_bit(ioc->blocking_handles,
4640 			    ioc->facts.MaxDevHandle);
4641 			if (handle < ioc->facts.MaxDevHandle)
4642 				_scsih_block_io_device(ioc, handle);
4643 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4644 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4645 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4646 
4647 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4648 		return;
4649 
4650 	/* mark ignore flag for pending events */
4651 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4652 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4653 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4654 		    fw_event->ignore)
4655 			continue;
4656 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4657 				   fw_event->event_data;
4658 		if (local_event_data->ExpStatus ==
4659 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4660 		    local_event_data->ExpStatus ==
4661 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4662 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4663 			    expander_handle) {
4664 				dewtprintk(ioc,
4665 					   ioc_info(ioc, "setting ignoring flag\n"));
4666 				fw_event->ignore = 1;
4667 			}
4668 		}
4669 	}
4670 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4671 }
4672 
4673 /**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo events
4676  * @ioc: per adapter object
4677  * @event_data: the event data payload
4678  *
4679  * This handles the case where driver receives multiple switch
4680  * or device add and delete events in a single shot.  When there
4681  * is a delete event the routine will void any pending add
4682  * events waiting in the event queue.
4683  */
4684 static void
4685 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4686 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4687 {
4688 	struct fw_event_work *fw_event;
4689 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4690 	unsigned long flags;
4691 	int i, reason_code;
4692 	u16 handle, switch_handle;
4693 
4694 	for (i = 0; i < event_data->NumEntries; i++) {
4695 		handle =
4696 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4697 		if (!handle)
4698 			continue;
4699 		reason_code = event_data->PortEntry[i].PortStatus;
4700 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4701 			_scsih_tm_tr_send(ioc, handle);
4702 	}
4703 
4704 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4705 	if (!switch_handle) {
4706 		_scsih_block_io_to_pcie_children_attached_directly(
4707 							ioc, event_data);
4708 		return;
4709 	}
	/* TODO: We are not supporting cascaded PCIe Switch removal yet */
4711 	if ((event_data->SwitchStatus
4712 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4713 		(event_data->SwitchStatus ==
4714 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4715 		_scsih_block_io_to_pcie_children_attached_directly(
4716 							ioc, event_data);
4717 
4718 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4719 		return;
4720 
4721 	/* mark ignore flag for pending events */
4722 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4723 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4724 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4725 			fw_event->ignore)
4726 			continue;
4727 		local_event_data =
4728 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4729 			fw_event->event_data;
4730 		if (local_event_data->SwitchStatus ==
4731 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4732 		    local_event_data->SwitchStatus ==
4733 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4734 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4735 				switch_handle) {
4736 				dewtprintk(ioc,
4737 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4738 				fw_event->ignore = 1;
4739 			}
4740 		}
4741 	}
4742 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4743 }
4744 
4745 /**
4746  * _scsih_set_volume_delete_flag - setting volume delete flag
4747  * @ioc: per adapter object
4748  * @handle: device handle
4749  *
4750  * This returns nothing.
4751  */
4752 static void
4753 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4754 {
4755 	struct _raid_device *raid_device;
4756 	struct MPT3SAS_TARGET *sas_target_priv_data;
4757 	unsigned long flags;
4758 
4759 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4760 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4761 	if (raid_device && raid_device->starget &&
4762 	    raid_device->starget->hostdata) {
4763 		sas_target_priv_data =
4764 		    raid_device->starget->hostdata;
4765 		sas_target_priv_data->deleted = 1;
4766 		dewtprintk(ioc,
4767 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4768 				    handle, (u64)raid_device->wwid));
4769 	}
4770 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4771 }
4772 
4773 /**
4774  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4775  * @handle: input handle
4776  * @a: handle for volume a
4777  * @b: handle for volume b
4778  *
 * IR firmware only supports two raid volumes.  The purpose of this
 * routine is to set the volume handle in either a or b, when the given
 * handle is non-zero and has not already been stored in a or b.
4782  */
4783 static void
4784 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4785 {
4786 	if (!handle || handle == *a || handle == *b)
4787 		return;
4788 	if (!*a)
4789 		*a = handle;
4790 	else if (!*b)
4791 		*b = handle;
4792 }
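
/*
 * Worked example for the helper above (handle values are hypothetical):
 *
 *	u16 a = 0, b = 0;
 *
 *	_scsih_set_volume_handle_for_tr(0x000a, &a, &b);  // a = 0x000a
 *	_scsih_set_volume_handle_for_tr(0x000a, &a, &b);  // duplicate, ignored
 *	_scsih_set_volume_handle_for_tr(0x000b, &a, &b);  // b = 0x000b
 *	_scsih_set_volume_handle_for_tr(0x000c, &a, &b);  // both slots used, dropped
 *	_scsih_set_volume_handle_for_tr(0, &a, &b);       // zero handle, ignored
 */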
4793 
4794 /**
4795  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4796  * @ioc: per adapter object
4797  * @event_data: the event data payload
4798  * Context: interrupt time.
4799  *
4800  * This routine will send target reset to volume, followed by target
4801  * resets to the PDs. This is called when a PD has been removed, or
4802  * volume has been deleted or removed. When the target reset is sent
4803  * to volume, the PD target resets need to be queued to start upon
4804  * completion of the volume target reset.
4805  */
4806 static void
4807 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4808 	Mpi2EventDataIrConfigChangeList_t *event_data)
4809 {
4810 	Mpi2EventIrConfigElement_t *element;
4811 	int i;
4812 	u16 handle, volume_handle, a, b;
4813 	struct _tr_list *delayed_tr;
4814 
4815 	a = 0;
4816 	b = 0;
4817 
4818 	if (ioc->is_warpdrive)
4819 		return;
4820 
4821 	/* Volume Resets for Deleted or Removed */
4822 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4823 	for (i = 0; i < event_data->NumElements; i++, element++) {
4824 		if (le32_to_cpu(event_data->Flags) &
4825 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4826 			continue;
4827 		if (element->ReasonCode ==
4828 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4829 		    element->ReasonCode ==
4830 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4831 			volume_handle = le16_to_cpu(element->VolDevHandle);
4832 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4833 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4834 		}
4835 	}
4836 
4837 	/* Volume Resets for UNHIDE events */
4838 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4839 	for (i = 0; i < event_data->NumElements; i++, element++) {
4840 		if (le32_to_cpu(event_data->Flags) &
4841 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4842 			continue;
4843 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4844 			volume_handle = le16_to_cpu(element->VolDevHandle);
4845 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4846 		}
4847 	}
4848 
4849 	if (a)
4850 		_scsih_tm_tr_volume_send(ioc, a);
4851 	if (b)
4852 		_scsih_tm_tr_volume_send(ioc, b);
4853 
4854 	/* PD target resets */
4855 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4856 	for (i = 0; i < event_data->NumElements; i++, element++) {
4857 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4858 			continue;
4859 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4860 		volume_handle = le16_to_cpu(element->VolDevHandle);
4861 		clear_bit(handle, ioc->pd_handles);
4862 		if (!volume_handle)
4863 			_scsih_tm_tr_send(ioc, handle);
4864 		else if (volume_handle == a || volume_handle == b) {
4865 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4866 			BUG_ON(!delayed_tr);
4867 			INIT_LIST_HEAD(&delayed_tr->list);
4868 			delayed_tr->handle = handle;
4869 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4870 			dewtprintk(ioc,
4871 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4872 					    handle));
4873 		} else
4874 			_scsih_tm_tr_send(ioc, handle);
4875 	}
4876 }
4877 
4878 
4879 /**
4880  * _scsih_check_volume_delete_events - set delete flag for volumes
4881  * @ioc: per adapter object
4882  * @event_data: the event data payload
4883  * Context: interrupt time.
4884  *
 * This will handle the case when the cable connected to the entire volume
 * is pulled.  We will take care of setting the deleted flag so normal
 * IO will not be sent.
4888  */
4889 static void
4890 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4891 	Mpi2EventDataIrVolume_t *event_data)
4892 {
4893 	u32 state;
4894 
4895 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4896 		return;
4897 	state = le32_to_cpu(event_data->NewValue);
4898 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4899 	    MPI2_RAID_VOL_STATE_FAILED)
4900 		_scsih_set_volume_delete_flag(ioc,
4901 		    le16_to_cpu(event_data->VolDevHandle));
4902 }
4903 
4904 /**
4905  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4906  * @ioc: per adapter object
4907  * @event_data: the temp threshold event data
4908  * Context: interrupt time.
4909  */
4910 static void
4911 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4912 	Mpi2EventDataTemperature_t *event_data)
4913 {
	u32 doorbell;

	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4916 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4917 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4918 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4919 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4920 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4921 			event_data->SensorNum);
4922 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4923 			event_data->CurrentTemperature);
4924 		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4925 			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4926 			if ((doorbell & MPI2_IOC_STATE_MASK) ==
4927 			    MPI2_IOC_STATE_FAULT) {
4928 				mpt3sas_print_fault_code(ioc,
4929 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4930 			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4931 			    MPI2_IOC_STATE_COREDUMP) {
4932 				mpt3sas_print_coredump_info(ioc,
4933 				    doorbell & MPI2_DOORBELL_DATA_MASK);
4934 			}
4935 		}
4936 	}
4937 }
4938 
4939 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4940 {
4941 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4942 
4943 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4944 		return 0;
4945 
4946 	if (pending)
4947 		return test_and_set_bit(0, &priv->ata_command_pending);
4948 
4949 	clear_bit(0, &priv->ata_command_pending);
4950 	return 0;
4951 }
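
/*
 * Usage sketch for the SATL guard above (descriptive only; the real
 * caller is scsih_qcmd() later in this file): only one ATA_12/ATA_16
 * pass-through may be outstanding per device, so submission relies on
 * the atomic bit instead of taking a lock:
 *
 *	do {
 *		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
 *			return SCSI_MLQUEUE_DEVICE_BUSY;
 *	} while (_scsih_set_satl_pending(scmd, true));
 *
 * The I/O completion and flush paths clear the bit again with
 * _scsih_set_satl_pending(scmd, false).
 */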
4952 
4953 /**
4954  * _scsih_flush_running_cmds - completing outstanding commands.
4955  * @ioc: per adapter object
4956  *
 * Flush out all outstanding scmd commands following a host reset; each
 * command is completed back to the midlayer with either DID_RESET or
 * DID_NO_CONNECT.
4959  */
4960 static void
4961 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4962 {
4963 	struct scsi_cmnd *scmd;
4964 	struct scsiio_tracker *st;
4965 	u16 smid;
4966 	int count = 0;
4967 
4968 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4969 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4970 		if (!scmd)
4971 			continue;
4972 		count++;
4973 		_scsih_set_satl_pending(scmd, false);
4974 		st = scsi_cmd_priv(scmd);
4975 		mpt3sas_base_clear_st(ioc, st);
4976 		scsi_dma_unmap(scmd);
4977 		if (ioc->pci_error_recovery || ioc->remove_host)
4978 			scmd->result = DID_NO_CONNECT << 16;
4979 		else
4980 			scmd->result = DID_RESET << 16;
4981 		scmd->scsi_done(scmd);
4982 	}
4983 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4984 }
4985 
4986 /**
4987  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4988  * @ioc: per adapter object
4989  * @scmd: pointer to scsi command object
4990  * @mpi_request: pointer to the SCSI_IO request message frame
4991  *
 * Supports DIF protection types 1, 2 and 3.
4993  */
4994 static void
4995 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4996 	Mpi25SCSIIORequest_t *mpi_request)
4997 {
4998 	u16 eedp_flags;
4999 	unsigned char prot_op = scsi_get_prot_op(scmd);
5000 	unsigned char prot_type = scsi_get_prot_type(scmd);
5001 	Mpi25SCSIIORequest_t *mpi_request_3v =
5002 	   (Mpi25SCSIIORequest_t *)mpi_request;
5003 
5004 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
5005 		return;
5006 
5007 	if (prot_op ==  SCSI_PROT_READ_STRIP)
5008 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5009 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
5010 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
5011 	else
5012 		return;
5013 
5014 	switch (prot_type) {
5015 	case SCSI_PROT_DIF_TYPE1:
5016 	case SCSI_PROT_DIF_TYPE2:
5017 
		/*
		 * enable ref/guard checking
		 * auto increment ref tag
		 */
5022 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
5023 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
5024 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5025 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5026 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
5027 		break;
5028 
5029 	case SCSI_PROT_DIF_TYPE3:
5030 
		/*
		 * enable guard checking
		 */
5034 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5035 
5036 		break;
5037 	}
5038 
5039 	mpi_request_3v->EEDPBlockSize =
5040 	    cpu_to_le16(scmd->device->sector_size);
5041 
5042 	if (ioc->is_gen35_ioc)
5043 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5044 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5045 }
5046 
5047 /**
5048  * _scsih_eedp_error_handling - return sense code for EEDP errors
5049  * @scmd: pointer to scsi command object
5050  * @ioc_status: ioc status
5051  */
5052 static void
5053 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5054 {
5055 	u8 ascq;
5056 
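	/*
	 * Map the IOC EEDP status to the matching ILLEGAL REQUEST asc/ascq
	 * pair: 0x10/0x01 guard check failed, 0x10/0x02 application tag
	 * check failed, 0x10/0x03 reference tag check failed.
	 */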
5057 	switch (ioc_status) {
5058 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5059 		ascq = 0x01;
5060 		break;
5061 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5062 		ascq = 0x02;
5063 		break;
5064 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5065 		ascq = 0x03;
5066 		break;
5067 	default:
5068 		ascq = 0x00;
5069 		break;
5070 	}
5071 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
5072 	    ascq);
5073 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
5074 	    SAM_STAT_CHECK_CONDITION;
5075 }
5076 
5077 /**
5078  * scsih_qcmd - main scsi request entry point
5079  * @shost: SCSI host pointer
5080  * @scmd: pointer to scsi command object
5081  *
5082  * The callback index is set inside `ioc->scsi_io_cb_idx`.
5083  *
5084  * Return: 0 on success.  If there's a failure, return either:
5085  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5086  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5087  */
5088 static int
5089 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5090 {
5091 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5092 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5093 	struct MPT3SAS_TARGET *sas_target_priv_data;
5094 	struct _raid_device *raid_device;
5095 	struct request *rq = scmd->request;
5096 	int class;
5097 	Mpi25SCSIIORequest_t *mpi_request;
5098 	struct _pcie_device *pcie_device = NULL;
5099 	u32 mpi_control;
5100 	u16 smid;
5101 	u16 handle;
5102 
5103 	if (ioc->logging_level & MPT_DEBUG_SCSI)
5104 		scsi_print_command(scmd);
5105 
5106 	sas_device_priv_data = scmd->device->hostdata;
5107 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5108 		scmd->result = DID_NO_CONNECT << 16;
5109 		scmd->scsi_done(scmd);
5110 		return 0;
5111 	}
5112 
5113 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5114 		scmd->result = DID_NO_CONNECT << 16;
5115 		scmd->scsi_done(scmd);
5116 		return 0;
5117 	}
5118 
5119 	sas_target_priv_data = sas_device_priv_data->sas_target;
5120 
5121 	/* invalid device handle */
5122 	handle = sas_target_priv_data->handle;
5123 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5124 		scmd->result = DID_NO_CONNECT << 16;
5125 		scmd->scsi_done(scmd);
5126 		return 0;
5127 	}
5128 
5129 
5130 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5131 		/* host recovery or link resets sent via IOCTLs */
5132 		return SCSI_MLQUEUE_HOST_BUSY;
5133 	} else if (sas_target_priv_data->deleted) {
5134 		/* device has been deleted */
5135 		scmd->result = DID_NO_CONNECT << 16;
5136 		scmd->scsi_done(scmd);
5137 		return 0;
5138 	} else if (sas_target_priv_data->tm_busy ||
5139 		   sas_device_priv_data->block) {
5140 		/* device busy with task management */
5141 		return SCSI_MLQUEUE_DEVICE_BUSY;
5142 	}
5143 
5144 	/*
5145 	 * Bug work around for firmware SATL handling.  The loop
5146 	 * is based on atomic operations and ensures consistency
5147 	 * since we're lockless at this point
5148 	 */
5149 	do {
5150 		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5151 			return SCSI_MLQUEUE_DEVICE_BUSY;
5152 	} while (_scsih_set_satl_pending(scmd, true));
5153 
5154 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5155 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
5156 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5157 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5158 	else
5159 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5160 
5161 	/* set tags */
5162 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5163 	/* NCQ Prio supported, make sure control indicated high priority */
5164 	if (sas_device_priv_data->ncq_prio_enable) {
5165 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5166 		if (class == IOPRIO_CLASS_RT)
5167 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5168 	}
5169 	/* Make sure Device is not raid volume.
5170 	 * We do not expose raid functionality to upper layer for warpdrive.
5171 	 */
5172 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5173 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
5174 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5175 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5176 
5177 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5178 	if (!smid) {
5179 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5180 		_scsih_set_satl_pending(scmd, false);
5181 		goto out;
5182 	}
5183 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5184 	memset(mpi_request, 0, ioc->request_sz);
5185 	_scsih_setup_eedp(ioc, scmd, mpi_request);
5186 
5187 	if (scmd->cmd_len == 32)
5188 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5189 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5190 	if (sas_device_priv_data->sas_target->flags &
5191 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
5192 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5193 	else
5194 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5195 	mpi_request->DevHandle = cpu_to_le16(handle);
5196 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5197 	mpi_request->Control = cpu_to_le32(mpi_control);
5198 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5199 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5200 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5201 	mpi_request->SenseBufferLowAddress =
5202 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5203 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5204 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5205 	    mpi_request->LUN);
5206 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5207 
5208 	if (mpi_request->DataLength) {
5209 		pcie_device = sas_target_priv_data->pcie_dev;
5210 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5211 			mpt3sas_base_free_smid(ioc, smid);
5212 			_scsih_set_satl_pending(scmd, false);
5213 			goto out;
5214 		}
5215 	} else
5216 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5217 
5218 	raid_device = sas_target_priv_data->raid_device;
5219 	if (raid_device && raid_device->direct_io_enabled)
5220 		mpt3sas_setup_direct_io(ioc, scmd,
5221 			raid_device, mpi_request);
5222 
5223 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5224 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5225 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5226 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5227 			ioc->put_smid_fast_path(ioc, smid, handle);
5228 		} else
5229 			ioc->put_smid_scsi_io(ioc, smid,
5230 			    le16_to_cpu(mpi_request->DevHandle));
5231 	} else
5232 		ioc->put_smid_default(ioc, smid);
5233 	return 0;
5234 
5235  out:
5236 	return SCSI_MLQUEUE_HOST_BUSY;
5237 }
5238 
5239 /**
5240  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5241  * @sense_buffer: sense data returned by target
5242  * @data: normalized skey/asc/ascq
5243  */
5244 static void
5245 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5246 {
5247 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5248 		/* descriptor format */
5249 		data->skey = sense_buffer[1] & 0x0F;
5250 		data->asc = sense_buffer[2];
5251 		data->ascq = sense_buffer[3];
5252 	} else {
5253 		/* fixed format */
5254 		data->skey = sense_buffer[2] & 0x0F;
5255 		data->asc = sense_buffer[12];
5256 		data->ascq = sense_buffer[13];
5257 	}
5258 }
5259 
5260 /**
 * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
5262  * @ioc: per adapter object
5263  * @scmd: pointer to scsi command object
5264  * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
5266  *
5267  * scsi_status - SCSI Status code returned from target device
5268  * scsi_state - state info associated with SCSI_IO determined by ioc
5269  * ioc_status - ioc supplied status info
5270  */
5271 static void
5272 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5273 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5274 {
5275 	u32 response_info;
5276 	u8 *response_bytes;
5277 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5278 	    MPI2_IOCSTATUS_MASK;
5279 	u8 scsi_state = mpi_reply->SCSIState;
5280 	u8 scsi_status = mpi_reply->SCSIStatus;
5281 	char *desc_ioc_state = NULL;
5282 	char *desc_scsi_status = NULL;
5283 	char *desc_scsi_state = ioc->tmp_string;
5284 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5285 	struct _sas_device *sas_device = NULL;
5286 	struct _pcie_device *pcie_device = NULL;
5287 	struct scsi_target *starget = scmd->device->sdev_target;
5288 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5289 	char *device_str = NULL;
5290 
5291 	if (!priv_target)
5292 		return;
5293 	if (ioc->hide_ir_msg)
5294 		device_str = "WarpDrive";
5295 	else
5296 		device_str = "volume";
5297 
5298 	if (log_info == 0x31170000)
5299 		return;
5300 
5301 	switch (ioc_status) {
5302 	case MPI2_IOCSTATUS_SUCCESS:
5303 		desc_ioc_state = "success";
5304 		break;
5305 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5306 		desc_ioc_state = "invalid function";
5307 		break;
5308 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5309 		desc_ioc_state = "scsi recovered error";
5310 		break;
5311 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5312 		desc_ioc_state = "scsi invalid dev handle";
5313 		break;
5314 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5315 		desc_ioc_state = "scsi device not there";
5316 		break;
5317 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5318 		desc_ioc_state = "scsi data overrun";
5319 		break;
5320 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5321 		desc_ioc_state = "scsi data underrun";
5322 		break;
5323 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5324 		desc_ioc_state = "scsi io data error";
5325 		break;
5326 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5327 		desc_ioc_state = "scsi protocol error";
5328 		break;
5329 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5330 		desc_ioc_state = "scsi task terminated";
5331 		break;
5332 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5333 		desc_ioc_state = "scsi residual mismatch";
5334 		break;
5335 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5336 		desc_ioc_state = "scsi task mgmt failed";
5337 		break;
5338 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5339 		desc_ioc_state = "scsi ioc terminated";
5340 		break;
5341 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5342 		desc_ioc_state = "scsi ext terminated";
5343 		break;
5344 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5345 		desc_ioc_state = "eedp guard error";
5346 		break;
5347 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5348 		desc_ioc_state = "eedp ref tag error";
5349 		break;
5350 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5351 		desc_ioc_state = "eedp app tag error";
5352 		break;
5353 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5354 		desc_ioc_state = "insufficient power";
5355 		break;
5356 	default:
5357 		desc_ioc_state = "unknown";
5358 		break;
5359 	}
5360 
5361 	switch (scsi_status) {
5362 	case MPI2_SCSI_STATUS_GOOD:
5363 		desc_scsi_status = "good";
5364 		break;
5365 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
5366 		desc_scsi_status = "check condition";
5367 		break;
5368 	case MPI2_SCSI_STATUS_CONDITION_MET:
5369 		desc_scsi_status = "condition met";
5370 		break;
5371 	case MPI2_SCSI_STATUS_BUSY:
5372 		desc_scsi_status = "busy";
5373 		break;
5374 	case MPI2_SCSI_STATUS_INTERMEDIATE:
5375 		desc_scsi_status = "intermediate";
5376 		break;
5377 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5378 		desc_scsi_status = "intermediate condmet";
5379 		break;
5380 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5381 		desc_scsi_status = "reservation conflict";
5382 		break;
5383 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5384 		desc_scsi_status = "command terminated";
5385 		break;
5386 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5387 		desc_scsi_status = "task set full";
5388 		break;
5389 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
5390 		desc_scsi_status = "aca active";
5391 		break;
5392 	case MPI2_SCSI_STATUS_TASK_ABORTED:
5393 		desc_scsi_status = "task aborted";
5394 		break;
5395 	default:
5396 		desc_scsi_status = "unknown";
5397 		break;
5398 	}
5399 
5400 	desc_scsi_state[0] = '\0';
5401 	if (!scsi_state)
5402 		desc_scsi_state = " ";
5403 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5404 		strcat(desc_scsi_state, "response info ");
5405 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5406 		strcat(desc_scsi_state, "state terminated ");
5407 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5408 		strcat(desc_scsi_state, "no status ");
5409 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5410 		strcat(desc_scsi_state, "autosense failed ");
5411 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5412 		strcat(desc_scsi_state, "autosense valid ");
5413 
5414 	scsi_print_command(scmd);
5415 
5416 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5417 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5418 			 device_str, (u64)priv_target->sas_address);
5419 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5420 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5421 		if (pcie_device) {
5422 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5423 				 (u64)pcie_device->wwid, pcie_device->port_num);
5424 			if (pcie_device->enclosure_handle != 0)
5425 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5426 					 (u64)pcie_device->enclosure_logical_id,
5427 					 pcie_device->slot);
5428 			if (pcie_device->connector_name[0])
5429 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5430 					 pcie_device->enclosure_level,
5431 					 pcie_device->connector_name);
5432 			pcie_device_put(pcie_device);
5433 		}
5434 	} else {
5435 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5436 		if (sas_device) {
5437 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5438 				 (u64)sas_device->sas_address, sas_device->phy);
5439 
5440 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
5441 			    NULL, NULL);
5442 
5443 			sas_device_put(sas_device);
5444 		}
5445 	}
5446 
5447 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5448 		 le16_to_cpu(mpi_reply->DevHandle),
5449 		 desc_ioc_state, ioc_status, smid);
5450 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5451 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5452 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5453 		 le16_to_cpu(mpi_reply->TaskTag),
5454 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5455 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5456 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5457 
5458 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5459 		struct sense_info data;
5460 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5461 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5462 			 data.skey, data.asc, data.ascq,
5463 			 le32_to_cpu(mpi_reply->SenseCount));
5464 	}
5465 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5466 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5467 		response_bytes = (u8 *)&response_info;
5468 		_scsih_response_code(ioc, response_bytes[0]);
5469 	}
5470 }
5471 
5472 /**
5473  * _scsih_turn_on_pfa_led - illuminate PFA LED
5474  * @ioc: per adapter object
5475  * @handle: device handle
5476  * Context: process
5477  */
5478 static void
5479 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5480 {
5481 	Mpi2SepReply_t mpi_reply;
5482 	Mpi2SepRequest_t mpi_request;
5483 	struct _sas_device *sas_device;
5484 
5485 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5486 	if (!sas_device)
5487 		return;
5488 
5489 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5490 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5491 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5492 	mpi_request.SlotStatus =
5493 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5494 	mpi_request.DevHandle = cpu_to_le16(handle);
5495 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5496 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5497 	    &mpi_request)) != 0) {
5498 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5499 			__FILE__, __LINE__, __func__);
5500 		goto out;
5501 	}
5502 	sas_device->pfa_led_on = 1;
5503 
5504 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5505 		dewtprintk(ioc,
5506 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5507 				    le16_to_cpu(mpi_reply.IOCStatus),
5508 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5509 		goto out;
5510 	}
5511 out:
5512 	sas_device_put(sas_device);
5513 }
5514 
5515 /**
5516  * _scsih_turn_off_pfa_led - turn off Fault LED
5517  * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to be turned off
5519  * Context: process
5520  */
5521 static void
5522 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5523 	struct _sas_device *sas_device)
5524 {
5525 	Mpi2SepReply_t mpi_reply;
5526 	Mpi2SepRequest_t mpi_request;
5527 
5528 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5529 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5530 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5531 	mpi_request.SlotStatus = 0;
5532 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5533 	mpi_request.DevHandle = 0;
5534 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5535 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5536 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5537 		&mpi_request)) != 0) {
5538 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5539 			__FILE__, __LINE__, __func__);
5540 		return;
5541 	}
5542 
5543 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5544 		dewtprintk(ioc,
5545 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5546 				    le16_to_cpu(mpi_reply.IOCStatus),
5547 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5548 		return;
5549 	}
5550 }
5551 
5552 /**
5553  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5554  * @ioc: per adapter object
5555  * @handle: device handle
5556  * Context: interrupt.
5557  */
5558 static void
5559 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5560 {
5561 	struct fw_event_work *fw_event;
5562 
5563 	fw_event = alloc_fw_event_work(0);
5564 	if (!fw_event)
5565 		return;
5566 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5567 	fw_event->device_handle = handle;
5568 	fw_event->ioc = ioc;
5569 	_scsih_fw_event_add(ioc, fw_event);
5570 	fw_event_work_put(fw_event);
5571 }
5572 
5573 /**
5574  * _scsih_smart_predicted_fault - process smart errors
5575  * @ioc: per adapter object
5576  * @handle: device handle
5577  * Context: interrupt.
5578  */
5579 static void
5580 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5581 {
5582 	struct scsi_target *starget;
5583 	struct MPT3SAS_TARGET *sas_target_priv_data;
5584 	Mpi2EventNotificationReply_t *event_reply;
5585 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5586 	struct _sas_device *sas_device;
5587 	ssize_t sz;
5588 	unsigned long flags;
5589 
5590 	/* only handle non-raid devices */
5591 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5592 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5593 	if (!sas_device)
5594 		goto out_unlock;
5595 
5596 	starget = sas_device->starget;
5597 	sas_target_priv_data = starget->hostdata;
5598 
5599 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5600 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5601 		goto out_unlock;
5602 
5603 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5604 
5605 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5606 
5607 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5608 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5609 
5610 	/* insert into event log */
5611 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5612 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5613 	event_reply = kzalloc(sz, GFP_ATOMIC);
5614 	if (!event_reply) {
5615 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5616 			__FILE__, __LINE__, __func__);
5617 		goto out;
5618 	}
5619 
5620 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5621 	event_reply->Event =
5622 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5623 	event_reply->MsgLength = sz/4;
5624 	event_reply->EventDataLength =
5625 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5626 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5627 	    event_reply->EventData;
5628 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
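	/* ASC 0x5D: failure prediction threshold exceeded */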
5629 	event_data->ASC = 0x5D;
5630 	event_data->DevHandle = cpu_to_le16(handle);
5631 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5632 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5633 	kfree(event_reply);
5634 out:
5635 	if (sas_device)
5636 		sas_device_put(sas_device);
5637 	return;
5638 
5639 out_unlock:
5640 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5641 	goto out;
5642 }
5643 
5644 /**
5645  * _scsih_io_done - scsi request callback
5646  * @ioc: per adapter object
5647  * @smid: system request message index
5648  * @msix_index: MSIX table index supplied by the OS
5649  * @reply: reply message frame(lower 32bit addr)
5650  *
 * Callback handler when using scsih_qcmd().
5652  *
5653  * Return: 1 meaning mf should be freed from _base_interrupt
5654  *         0 means the mf is freed from this function.
5655  */
5656 static u8
5657 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5658 {
5659 	Mpi25SCSIIORequest_t *mpi_request;
5660 	Mpi2SCSIIOReply_t *mpi_reply;
5661 	struct scsi_cmnd *scmd;
5662 	struct scsiio_tracker *st;
5663 	u16 ioc_status;
5664 	u32 xfer_cnt;
5665 	u8 scsi_state;
5666 	u8 scsi_status;
5667 	u32 log_info;
5668 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5669 	u32 response_code = 0;
5670 
5671 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5672 
5673 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5674 	if (scmd == NULL)
5675 		return 1;
5676 
5677 	_scsih_set_satl_pending(scmd, false);
5678 
5679 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5680 
5681 	if (mpi_reply == NULL) {
5682 		scmd->result = DID_OK << 16;
5683 		goto out;
5684 	}
5685 
5686 	sas_device_priv_data = scmd->device->hostdata;
5687 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5688 	     sas_device_priv_data->sas_target->deleted) {
5689 		scmd->result = DID_NO_CONNECT << 16;
5690 		goto out;
5691 	}
5692 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5693 
5694 	/*
5695 	 * WARPDRIVE: If direct_io is set then it is directIO,
5696 	 * the failed direct I/O should be redirected to volume
5697 	 */
5698 	st = scsi_cmd_priv(scmd);
5699 	if (st->direct_io &&
5700 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5701 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5702 		st->direct_io = 0;
5703 		st->scmd = scmd;
5704 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5705 		mpi_request->DevHandle =
5706 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5707 		ioc->put_smid_scsi_io(ioc, smid,
5708 		    sas_device_priv_data->sas_target->handle);
5709 		return 0;
5710 	}
5711 	/* turning off TLR */
5712 	scsi_state = mpi_reply->SCSIState;
5713 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5714 		response_code =
5715 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5716 	if (!sas_device_priv_data->tlr_snoop_check) {
5717 		sas_device_priv_data->tlr_snoop_check++;
5718 		if ((!ioc->is_warpdrive &&
5719 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5720 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5721 		    && sas_is_tlr_enabled(scmd->device) &&
5722 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5723 			sas_disable_tlr(scmd->device);
5724 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5725 		}
5726 	}
5727 
5728 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5729 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5730 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5731 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5732 	else
5733 		log_info = 0;
5734 	ioc_status &= MPI2_IOCSTATUS_MASK;
5735 	scsi_status = mpi_reply->SCSIStatus;
5736 
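	/*
	 * Firmware reports a zero-byte transfer with BUSY, RESERVATION
	 * CONFLICT or TASK SET FULL as a data underrun; treat it as a
	 * successful IOC status so the SCSI status is passed up as-is.
	 */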
5737 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5738 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5739 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5740 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5741 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5742 	}
5743 
5744 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5745 		struct sense_info data;
5746 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5747 		    smid);
5748 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5749 		    le32_to_cpu(mpi_reply->SenseCount));
5750 		memcpy(scmd->sense_buffer, sense_data, sz);
5751 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5752 		/* failure prediction threshold exceeded */
5753 		if (data.asc == 0x5D)
5754 			_scsih_smart_predicted_fault(ioc,
5755 			    le16_to_cpu(mpi_reply->DevHandle));
5756 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5757 
5758 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5759 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5760 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5761 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5762 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5763 	}
5764 	switch (ioc_status) {
5765 	case MPI2_IOCSTATUS_BUSY:
5766 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5767 		scmd->result = SAM_STAT_BUSY;
5768 		break;
5769 
5770 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5771 		scmd->result = DID_NO_CONNECT << 16;
5772 		break;
5773 
5774 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5775 		if (sas_device_priv_data->block) {
5776 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5777 			goto out;
5778 		}
5779 		if (log_info == 0x31110630) {
5780 			if (scmd->retries > 2) {
5781 				scmd->result = DID_NO_CONNECT << 16;
5782 				scsi_device_set_state(scmd->device,
5783 				    SDEV_OFFLINE);
5784 			} else {
5785 				scmd->result = DID_SOFT_ERROR << 16;
5786 				scmd->device->expecting_cc_ua = 1;
5787 			}
5788 			break;
5789 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5790 			scmd->result = DID_RESET << 16;
5791 			break;
5792 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5793 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5794 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5795 			scmd->result = DID_RESET << 16;
5796 			break;
5797 		}
5798 		scmd->result = DID_SOFT_ERROR << 16;
5799 		break;
5800 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5801 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5802 		scmd->result = DID_RESET << 16;
5803 		break;
5804 
5805 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5806 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5807 			scmd->result = DID_SOFT_ERROR << 16;
5808 		else
5809 			scmd->result = (DID_OK << 16) | scsi_status;
5810 		break;
5811 
5812 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5813 		scmd->result = (DID_OK << 16) | scsi_status;
5814 
5815 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5816 			break;
5817 
5818 		if (xfer_cnt < scmd->underflow) {
5819 			if (scsi_status == SAM_STAT_BUSY)
5820 				scmd->result = SAM_STAT_BUSY;
5821 			else
5822 				scmd->result = DID_SOFT_ERROR << 16;
5823 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5824 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5825 			scmd->result = DID_SOFT_ERROR << 16;
5826 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5827 			scmd->result = DID_RESET << 16;
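		/*
		 * A REPORT LUNS that transferred no data is converted into
		 * an ILLEGAL REQUEST check condition (asc 0x20, invalid
		 * command operation code) instead of being completed with
		 * zero-length data.
		 */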
5828 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5829 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5830 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5831 			scmd->result = (DRIVER_SENSE << 24) |
5832 			    SAM_STAT_CHECK_CONDITION;
5833 			scmd->sense_buffer[0] = 0x70;
5834 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5835 			scmd->sense_buffer[12] = 0x20;
5836 			scmd->sense_buffer[13] = 0;
5837 		}
5838 		break;
5839 
5840 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5841 		scsi_set_resid(scmd, 0);
5842 		fallthrough;
5843 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5844 	case MPI2_IOCSTATUS_SUCCESS:
5845 		scmd->result = (DID_OK << 16) | scsi_status;
5846 		if (response_code ==
5847 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5848 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5849 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5850 			scmd->result = DID_SOFT_ERROR << 16;
5851 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5852 			scmd->result = DID_RESET << 16;
5853 		break;
5854 
5855 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5856 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5857 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5858 		_scsih_eedp_error_handling(scmd, ioc_status);
5859 		break;
5860 
5861 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5862 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5863 	case MPI2_IOCSTATUS_INVALID_SGL:
5864 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5865 	case MPI2_IOCSTATUS_INVALID_FIELD:
5866 	case MPI2_IOCSTATUS_INVALID_STATE:
5867 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5868 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5869 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5870 	default:
5871 		scmd->result = DID_SOFT_ERROR << 16;
5872 		break;
5873 
5874 	}
5875 
5876 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5877 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5878 
5879  out:
5880 
5881 	scsi_dma_unmap(scmd);
5882 	mpt3sas_base_free_smid(ioc, smid);
5883 	scmd->scsi_done(scmd);
5884 	return 0;
5885 }
5886 
5887 /**
5888  * _scsih_update_vphys_after_reset - update the Port's
5889  *			vphys_list after reset
5890  * @ioc: per adapter object
5891  *
5892  * Returns nothing.
5893  */
5894 static void
5895 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5896 {
5897 	u16 sz, ioc_status;
5898 	int i;
5899 	Mpi2ConfigReply_t mpi_reply;
5900 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5901 	u16 attached_handle;
5902 	u64 attached_sas_addr;
5903 	u8 found = 0, port_id;
5904 	Mpi2SasPhyPage0_t phy_pg0;
5905 	struct hba_port *port, *port_next, *mport;
5906 	struct virtual_phy *vphy, *vphy_next;
5907 	struct _sas_device *sas_device;
5908 
5909 	/*
5910 	 * Mark all the vphys objects as dirty.
5911 	 */
5912 	list_for_each_entry_safe(port, port_next,
5913 	    &ioc->port_table_list, list) {
5914 		if (!port->vphys_mask)
5915 			continue;
5916 		list_for_each_entry_safe(vphy, vphy_next,
5917 		    &port->vphys_list, list) {
5918 			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5919 		}
5920 	}
5921 
5922 	/*
5923 	 * Read SASIOUnitPage0 to get each HBA Phy's data.
5924 	 */
5925 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5926 	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5927 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5928 	if (!sas_iounit_pg0) {
5929 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5930 		    __FILE__, __LINE__, __func__);
5931 		return;
5932 	}
5933 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5934 	    sas_iounit_pg0, sz)) != 0)
5935 		goto out;
5936 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5937 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5938 		goto out;
5939 	/*
5940 	 * Loop over each HBA Phy.
5941 	 */
5942 	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5943 		/*
5944 		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5945 		 */
5946 		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5947 		    MPI2_SAS_NEG_LINK_RATE_1_5)
5948 			continue;
5949 		/*
		 * Check whether the Phy is connected to a SEP device or not.
		 * If it is a SEP device then read the Phy's SASPHYPage0 data
		 * to determine whether the Phy is a virtual Phy or not. If it
		 * is a virtual phy then it is confirmed that the attached
		 * remote device is the HBA's vSES device.
5955 		 */
5956 		if (!(le32_to_cpu(
5957 		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5958 		    MPI2_SAS_DEVICE_INFO_SEP))
5959 			continue;
5960 
5961 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5962 		    i))) {
5963 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5964 			    __FILE__, __LINE__, __func__);
5965 			continue;
5966 		}
5967 
5968 		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
5969 		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
5970 			continue;
5971 		/*
5972 		 * Get the vSES device's SAS Address.
5973 		 */
5974 		attached_handle = le16_to_cpu(
5975 		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
5976 		if (_scsih_get_sas_address(ioc, attached_handle,
5977 		    &attached_sas_addr) != 0) {
5978 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5979 			    __FILE__, __LINE__, __func__);
5980 			continue;
5981 		}
5982 
5983 		found = 0;
5984 		port = port_next = NULL;
5985 		/*
5986 		 * Loop over each virtual_phy object from
5987 		 * each port's vphys_list.
5988 		 */
5989 		list_for_each_entry_safe(port,
5990 		    port_next, &ioc->port_table_list, list) {
5991 			if (!port->vphys_mask)
5992 				continue;
5993 			list_for_each_entry_safe(vphy, vphy_next,
5994 			    &port->vphys_list, list) {
5995 				/*
5996 				 * Continue with next virtual_phy object
5997 				 * if the object is not marked as dirty.
5998 				 */
5999 				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6000 					continue;
6001 
6002 				/*
6003 				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equal to
				 * the current Phy's vSES device SAS Address.
6006 				 */
6007 				if (vphy->sas_address != attached_sas_addr)
6008 					continue;
6009 				/*
6010 				 * Enable current Phy number bit in object's
6011 				 * phy_mask field.
6012 				 */
6013 				if (!(vphy->phy_mask & (1 << i)))
6014 					vphy->phy_mask = (1 << i);
6015 				/*
6016 				 * Get hba_port object from hba_port table
6017 				 * corresponding to current phy's Port ID.
6018 				 * if there is no hba_port object corresponding
6019 				 * to Phy's Port ID then create a new hba_port
6020 				 * object & add to hba_port table.
6021 				 */
6022 				port_id = sas_iounit_pg0->PhyData[i].Port;
6023 				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6024 				if (!mport) {
6025 					mport = kzalloc(
6026 					    sizeof(struct hba_port), GFP_KERNEL);
6027 					if (!mport)
6028 						break;
6029 					mport->port_id = port_id;
6030 					ioc_info(ioc,
6031 					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6032 					    __func__, mport, mport->port_id);
6033 					list_add_tail(&mport->list,
6034 						&ioc->port_table_list);
6035 				}
6036 				/*
6037 				 * If mport & port pointers are not pointing to
6038 				 * same hba_port object then it means that vSES
6039 				 * device's Port ID got changed after reset and
6040 				 * hence move current virtual_phy object from
6041 				 * port's vphys_list to mport's vphys_list.
6042 				 */
6043 				if (port != mport) {
6044 					if (!mport->vphys_mask)
6045 						INIT_LIST_HEAD(
6046 						    &mport->vphys_list);
6047 					mport->vphys_mask |= (1 << i);
6048 					port->vphys_mask &= ~(1 << i);
6049 					list_move(&vphy->list,
6050 					    &mport->vphys_list);
6051 					sas_device = mpt3sas_get_sdev_by_addr(
6052 					    ioc, attached_sas_addr, port);
6053 					if (sas_device)
6054 						sas_device->port = mport;
6055 				}
6056 				/*
				 * Earlier, while updating the hba_port table,
				 * it was determined that there is no other
				 * direct attached device with mport's Port ID,
				 * hence mport was marked as dirty. Only the
				 * vSES device has this Port ID, so unmark the
				 * mport as dirty.
6063 				 */
6064 				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6065 					mport->sas_address = 0;
6066 					mport->phy_mask = 0;
6067 					mport->flags &=
6068 					    ~HBA_PORT_FLAG_DIRTY_PORT;
6069 				}
6070 				/*
6071 				 * Unmark current virtual_phy object as dirty.
6072 				 */
6073 				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6074 				found = 1;
6075 				break;
6076 			}
6077 			if (found)
6078 				break;
6079 		}
6080 	}
6081 out:
6082 	kfree(sas_iounit_pg0);
6083 }
6084 
6085 /**
6086  * _scsih_get_port_table_after_reset - Construct temporary port table
6087  * @ioc: per adapter object
6088  * @port_table: address where port table needs to be constructed
6089  *
 * Return: number of HBA port entries available after reset.
6091  */
6092 static int
6093 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6094 	struct hba_port *port_table)
6095 {
6096 	u16 sz, ioc_status;
6097 	int i, j;
6098 	Mpi2ConfigReply_t mpi_reply;
6099 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6100 	u16 attached_handle;
6101 	u64 attached_sas_addr;
6102 	u8 found = 0, port_count = 0, port_id;
6103 
6104 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6105 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
6106 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6107 	if (!sas_iounit_pg0) {
6108 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6109 		    __FILE__, __LINE__, __func__);
6110 		return port_count;
6111 	}
6112 
6113 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6114 	    sas_iounit_pg0, sz)) != 0)
6115 		goto out;
6116 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6117 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6118 		goto out;
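	/*
	 * Group HBA phys that report the same Port ID and attached SAS
	 * address into a single port entry; each such phy sets its bit in
	 * that entry's phy_mask.
	 */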
6119 	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6120 		found = 0;
6121 		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6122 		    MPI2_SAS_NEG_LINK_RATE_1_5)
6123 			continue;
6124 		attached_handle =
6125 		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6126 		if (_scsih_get_sas_address(
6127 		    ioc, attached_handle, &attached_sas_addr) != 0) {
6128 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6129 			    __FILE__, __LINE__, __func__);
6130 			continue;
6131 		}
6132 
6133 		for (j = 0; j < port_count; j++) {
6134 			port_id = sas_iounit_pg0->PhyData[i].Port;
6135 			if (port_table[j].port_id == port_id &&
6136 			    port_table[j].sas_address == attached_sas_addr) {
6137 				port_table[j].phy_mask |= (1 << i);
6138 				found = 1;
6139 				break;
6140 			}
6141 		}
6142 
6143 		if (found)
6144 			continue;
6145 
6146 		port_id = sas_iounit_pg0->PhyData[i].Port;
6147 		port_table[port_count].port_id = port_id;
6148 		port_table[port_count].phy_mask = (1 << i);
6149 		port_table[port_count].sas_address = attached_sas_addr;
6150 		port_count++;
6151 	}
6152 out:
6153 	kfree(sas_iounit_pg0);
6154 	return port_count;
6155 }
6156 
6157 enum hba_port_matched_codes {
6158 	NOT_MATCHED = 0,
6159 	MATCHED_WITH_ADDR_AND_PHYMASK,
6160 	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6161 	MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6162 	MATCHED_WITH_ADDR,
6163 };
6164 
6165 /**
6166  * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6167  *					from HBA port table
6168  * @ioc: per adapter object
 * @port_entry: hba port entry from temporary port table which needs to be
 *		searched for a matched entry in the HBA port table
 * @matched_port_entry: save matched hba port entry here
 * @count: count of matched entries
 *
 * Return: type of matched entry found.
6175  */
6176 static enum hba_port_matched_codes
6177 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6178 	struct hba_port *port_entry,
6179 	struct hba_port **matched_port_entry, int *count)
6180 {
6181 	struct hba_port *port_table_entry, *matched_port = NULL;
6182 	enum hba_port_matched_codes matched_code = NOT_MATCHED;
6183 	int lcount = 0;
6184 	*matched_port_entry = NULL;
6185 
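	/*
	 * Walk the dirty entries of the existing HBA port table and record
	 * the best match in decreasing order of precedence: address and
	 * full phy mask, address and partial phy mask and Port ID, address
	 * and partial phy mask, and finally address only (while counting
	 * how many entries match by address alone).
	 */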
6186 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6187 		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6188 			continue;
6189 
6190 		if ((port_table_entry->sas_address == port_entry->sas_address)
6191 		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6192 			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6193 			matched_port = port_table_entry;
6194 			break;
6195 		}
6196 
6197 		if ((port_table_entry->sas_address == port_entry->sas_address)
6198 		    && (port_table_entry->phy_mask & port_entry->phy_mask)
6199 		    && (port_table_entry->port_id == port_entry->port_id)) {
6200 			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6201 			matched_port = port_table_entry;
6202 			continue;
6203 		}
6204 
6205 		if ((port_table_entry->sas_address == port_entry->sas_address)
6206 		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6207 			if (matched_code ==
6208 			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6209 				continue;
6210 			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6211 			matched_port = port_table_entry;
6212 			continue;
6213 		}
6214 
6215 		if (port_table_entry->sas_address == port_entry->sas_address) {
6216 			if (matched_code ==
6217 			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6218 				continue;
6219 			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6220 				continue;
6221 			matched_code = MATCHED_WITH_ADDR;
6222 			matched_port = port_table_entry;
6223 			lcount++;
6224 		}
6225 	}
6226 
6227 	*matched_port_entry = matched_port;
6228 	if (matched_code ==  MATCHED_WITH_ADDR)
6229 		*count = lcount;
6230 	return matched_code;
6231 }
6232 
6233 /**
 * _scsih_del_phy_part_of_anther_port - remove phy if it
 *				is a part of another port
 * @ioc: per adapter object
 * @port_table: port table after reset
 * @index: hba port entry index
 * @port_count: number of ports available after host reset
 * @offset: HBA phy bit offset
6241  *
6242  */
6243 static void
6244 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6245 	struct hba_port *port_table,
6246 	int index, u8 port_count, int offset)
6247 {
6248 	struct _sas_node *sas_node = &ioc->sas_hba;
6249 	u32 i, found = 0;
6250 
6251 	for (i = 0; i < port_count; i++) {
6252 		if (i == index)
6253 			continue;
6254 
6255 		if (port_table[i].phy_mask & (1 << offset)) {
6256 			mpt3sas_transport_del_phy_from_an_existing_port(
6257 			    ioc, sas_node, &sas_node->phy[offset]);
6258 			found = 1;
6259 			break;
6260 		}
6261 	}
6262 	if (!found)
6263 		port_table[index].phy_mask |= (1 << offset);
6264 }
6265 
6266 /**
6267  * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6268  *						right port
 * @ioc: per adapter object
 * @hba_port_entry: hba port table entry
 * @port_table: temporary port table
 * @index: hba port entry index
 * @port_count: number of ports available after host reset
6274  *
6275  */
6276 static void
6277 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6278 	struct hba_port *hba_port_entry, struct hba_port *port_table,
6279 	int index, int port_count)
6280 {
6281 	u32 phy_mask, offset = 0;
6282 	struct _sas_node *sas_node = &ioc->sas_hba;
6283 
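	/*
	 * The XOR of the old and new phy masks yields the phys whose port
	 * membership changed across the reset.
	 */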
6284 	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6285 
6286 	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6287 		if (phy_mask & (1 << offset)) {
6288 			if (!(port_table[index].phy_mask & (1 << offset))) {
6289 				_scsih_del_phy_part_of_anther_port(
6290 				    ioc, port_table, index, port_count,
6291 				    offset);
6292 				continue;
6293 			}
6294 			if (sas_node->phy[offset].phy_belongs_to_port)
6295 				mpt3sas_transport_del_phy_from_an_existing_port(
6296 				    ioc, sas_node, &sas_node->phy[offset]);
6297 			mpt3sas_transport_add_phy_to_an_existing_port(
6298 			    ioc, sas_node, &sas_node->phy[offset],
6299 			    hba_port_entry->sas_address,
6300 			    hba_port_entry);
6301 		}
6302 	}
6303 }
6304 
6305 /**
6306  * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6307  * @ioc: per adapter object
6308  *
6309  * Returns nothing.
6310  */
6311 static void
6312 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6313 {
6314 	struct hba_port *port, *port_next;
6315 	struct virtual_phy *vphy, *vphy_next;
6316 
6317 	list_for_each_entry_safe(port, port_next,
6318 	    &ioc->port_table_list, list) {
6319 		if (!port->vphys_mask)
6320 			continue;
6321 		list_for_each_entry_safe(vphy, vphy_next,
6322 		    &port->vphys_list, list) {
6323 			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6324 				drsprintk(ioc, ioc_info(ioc,
6325 				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6326 				    vphy, port->port_id,
6327 				    vphy->phy_mask));
6328 				port->vphys_mask &= ~vphy->phy_mask;
6329 				list_del(&vphy->list);
6330 				kfree(vphy);
6331 			}
6332 		}
6333 		if (!port->vphys_mask && !port->sas_address)
6334 			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6335 	}
6336 }
6337 
6338 /**
6339  * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6340  *					after host reset
 * @ioc: per adapter object
6342  *
6343  */
6344 static void
6345 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6346 {
6347 	struct hba_port *port, *port_next;
6348 
6349 	list_for_each_entry_safe(port, port_next,
6350 	    &ioc->port_table_list, list) {
6351 		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6352 		    port->flags & HBA_PORT_FLAG_NEW_PORT)
6353 			continue;
6354 
6355 		drsprintk(ioc, ioc_info(ioc,
6356 		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6357 		    port, port->port_id, port->phy_mask));
6358 		list_del(&port->list);
6359 		kfree(port);
6360 	}
6361 }
6362 
6363 /**
6364  * _scsih_sas_port_refresh - Update HBA port table after host reset
6365  * @ioc: per adapter object
6366  */
6367 static void
6368 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6369 {
6370 	u32 port_count = 0;
6371 	struct hba_port *port_table;
6372 	struct hba_port *port_table_entry;
6373 	struct hba_port *port_entry = NULL;
6374 	int i, j, count = 0, lcount = 0;
6375 	int ret;
6376 	u64 sas_addr;
6377 
6378 	drsprintk(ioc, ioc_info(ioc,
6379 	    "updating ports for sas_host(0x%016llx)\n",
6380 	    (unsigned long long)ioc->sas_hba.sas_address));
6381 
6382 	port_table = kcalloc(ioc->sas_hba.num_phys,
6383 	    sizeof(struct hba_port), GFP_KERNEL);
6384 	if (!port_table)
6385 		return;
6386 
6387 	port_count = _scsih_get_port_table_after_reset(ioc, port_table);
	if (!port_count) {
		kfree(port_table);
		return;
	}
6390 
6391 	drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6392 	for (j = 0; j < port_count; j++)
6393 		drsprintk(ioc, ioc_info(ioc,
6394 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6395 		    port_table[j].port_id,
6396 		    port_table[j].phy_mask, port_table[j].sas_address));
6397 
6398 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6399 		port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6400 
6401 	drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6402 	port_table_entry = NULL;
6403 	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6404 		drsprintk(ioc, ioc_info(ioc,
6405 		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6406 		    port_table_entry->port_id,
6407 		    port_table_entry->phy_mask,
6408 		    port_table_entry->sas_address));
6409 	}
6410 
6411 	for (j = 0; j < port_count; j++) {
6412 		ret = _scsih_look_and_get_matched_port_entry(ioc,
6413 		    &port_table[j], &port_entry, &count);
6414 		if (!port_entry) {
6415 			drsprintk(ioc, ioc_info(ioc,
6416 			    "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6417 			    port_table[j].sas_address,
6418 			    port_table[j].port_id));
6419 			continue;
6420 		}
6421 
6422 		switch (ret) {
6423 		case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6424 		case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6425 			_scsih_add_or_del_phys_from_existing_port(ioc,
6426 			    port_entry, port_table, j, port_count);
6427 			break;
6428 		case MATCHED_WITH_ADDR:
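			/*
			 * Address-only match: if more than one old entry or
			 * more than one new entry shares this SAS address,
			 * the mapping is ambiguous, so leave the old entry
			 * untouched.
			 */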
6429 			sas_addr = port_table[j].sas_address;
6430 			for (i = 0; i < port_count; i++) {
6431 				if (port_table[i].sas_address == sas_addr)
6432 					lcount++;
6433 			}
6434 
6435 			if (count > 1 || lcount > 1)
6436 				port_entry = NULL;
6437 			else
6438 				_scsih_add_or_del_phys_from_existing_port(ioc,
6439 				    port_entry, port_table, j, port_count);
6440 		}
6441 
6442 		if (!port_entry)
6443 			continue;
6444 
6445 		if (port_entry->port_id != port_table[j].port_id)
6446 			port_entry->port_id = port_table[j].port_id;
6447 		port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6448 		port_entry->phy_mask = port_table[j].phy_mask;
6449 	}
6450 
	kfree(port_table);
6452 }
6453 
6454 /**
6455  * _scsih_alloc_vphy - allocate virtual_phy object
6456  * @ioc: per adapter object
6457  * @port_id: Port ID number
6458  * @phy_num: HBA Phy number
6459  *
 * Return: the existing or newly allocated virtual_phy object, or NULL on
 * failure.
6461  */
6462 static struct virtual_phy *
6463 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6464 {
6465 	struct virtual_phy *vphy;
6466 	struct hba_port *port;
6467 
6468 	port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6469 	if (!port)
6470 		return NULL;
6471 
6472 	vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6473 	if (!vphy) {
6474 		vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6475 		if (!vphy)
6476 			return NULL;
6477 
6478 		/*
6479 		 * Enable bit corresponding to HBA phy number on its
6480 		 * parent hba_port object's vphys_mask field.
6481 		 */
6482 		port->vphys_mask |= (1 << phy_num);
6483 		vphy->phy_mask |= (1 << phy_num);
6484 
6485 		INIT_LIST_HEAD(&port->vphys_list);
6486 		list_add_tail(&vphy->list, &port->vphys_list);
6487 
6488 		ioc_info(ioc,
6489 		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6490 		    vphy, port->port_id, phy_num);
6491 	}
6492 	return vphy;
6493 }
6494 
6495 /**
6496  * _scsih_sas_host_refresh - refreshing sas host object contents
6497  * @ioc: per adapter object
6498  * Context: user
6499  *
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may change from the previous setting, so this
 * code keeps the handles updated if they have changed.
6503  */
6504 static void
6505 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6506 {
6507 	u16 sz;
6508 	u16 ioc_status;
6509 	int i;
6510 	Mpi2ConfigReply_t mpi_reply;
6511 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6512 	u16 attached_handle;
6513 	u8 link_rate, port_id;
6514 	struct hba_port *port;
6515 	Mpi2SasPhyPage0_t phy_pg0;
6516 
6517 	dtmprintk(ioc,
6518 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6519 			   (u64)ioc->sas_hba.sas_address));
6520 
6521 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6522 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
6523 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6524 	if (!sas_iounit_pg0) {
6525 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6526 			__FILE__, __LINE__, __func__);
6527 		return;
6528 	}
6529 
6530 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6531 	    sas_iounit_pg0, sz)) != 0)
6532 		goto out;
6533 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6534 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6535 		goto out;
6536 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6537 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6538 		if (i == 0)
6539 			ioc->sas_hba.handle = le16_to_cpu(
6540 			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6541 		port_id = sas_iounit_pg0->PhyData[i].Port;
6542 		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6543 			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6544 			if (!port)
6545 				goto out;
6546 
6547 			port->port_id = port_id;
6548 			ioc_info(ioc,
6549 			    "hba_port entry: %p, port: %d is added to hba_port list\n",
6550 			    port, port->port_id);
6551 			if (ioc->shost_recovery)
6552 				port->flags = HBA_PORT_FLAG_NEW_PORT;
6553 			list_add_tail(&port->list, &ioc->port_table_list);
6554 		}
6555 		/*
6556 		 * Check whether current Phy belongs to HBA vSES device or not.
6557 		 */
6558 		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6559 		    MPI2_SAS_DEVICE_INFO_SEP &&
6560 		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
6561 			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6562 			    &phy_pg0, i))) {
6563 				ioc_err(ioc,
6564 				    "failure at %s:%d/%s()!\n",
6565 				     __FILE__, __LINE__, __func__);
6566 				goto out;
6567 			}
6568 			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6569 			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6570 				continue;
6571 			/*
6572 			 * Allocate a virtual_phy object for vSES device, if
6573 			 * this vSES device is hot added.
6574 			 */
6575 			if (!_scsih_alloc_vphy(ioc, port_id, i))
6576 				goto out;
6577 			ioc->sas_hba.phy[i].hba_vphy = 1;
6578 		}
6579 
6580 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6581 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
6582 		    AttachedDevHandle);
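		/*
		 * A device is attached, so report the link to the transport
		 * layer at no less than 1.5 Gbps.
		 */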
6583 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6584 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6585 		ioc->sas_hba.phy[i].port =
6586 		    mpt3sas_get_port_by_id(ioc, port_id, 0);
6587 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6588 		    attached_handle, i, link_rate,
6589 		    ioc->sas_hba.phy[i].port);
6590 	}
6591  out:
6592 	kfree(sas_iounit_pg0);
6593 }
6594 
6595 /**
6596  * _scsih_sas_host_add - create sas host object
6597  * @ioc: per adapter object
6598  *
6599  * Creating host side data object, stored in ioc->sas_hba
6600  */
6601 static void
6602 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6603 {
6604 	int i;
6605 	Mpi2ConfigReply_t mpi_reply;
6606 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6607 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6608 	Mpi2SasPhyPage0_t phy_pg0;
6609 	Mpi2SasDevicePage0_t sas_device_pg0;
6610 	Mpi2SasEnclosurePage0_t enclosure_pg0;
6611 	u16 ioc_status;
6612 	u16 sz;
6613 	u8 device_missing_delay;
6614 	u8 num_phys, port_id;
6615 	struct hba_port *port;
6616 
6617 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6618 	if (!num_phys) {
6619 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6620 			__FILE__, __LINE__, __func__);
6621 		return;
6622 	}
6623 	ioc->sas_hba.phy = kcalloc(num_phys,
6624 	    sizeof(struct _sas_phy), GFP_KERNEL);
6625 	if (!ioc->sas_hba.phy) {
6626 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6627 			__FILE__, __LINE__, __func__);
6628 		goto out;
6629 	}
6630 	ioc->sas_hba.num_phys = num_phys;
6631 
6632 	/* sas_iounit page 0 */
6633 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6634 	    sizeof(Mpi2SasIOUnit0PhyData_t));
6635 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6636 	if (!sas_iounit_pg0) {
6637 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6638 			__FILE__, __LINE__, __func__);
6639 		return;
6640 	}
6641 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6642 	    sas_iounit_pg0, sz))) {
6643 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6644 			__FILE__, __LINE__, __func__);
6645 		goto out;
6646 	}
6647 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6648 	    MPI2_IOCSTATUS_MASK;
6649 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6650 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6651 			__FILE__, __LINE__, __func__);
6652 		goto out;
6653 	}
6654 
6655 	/* sas_iounit page 1 */
6656 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6657 	    sizeof(Mpi2SasIOUnit1PhyData_t));
6658 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6659 	if (!sas_iounit_pg1) {
6660 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6661 			__FILE__, __LINE__, __func__);
6662 		goto out;
6663 	}
6664 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6665 	    sas_iounit_pg1, sz))) {
6666 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6667 			__FILE__, __LINE__, __func__);
6668 		goto out;
6669 	}
6670 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6671 	    MPI2_IOCSTATUS_MASK;
6672 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6673 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6674 			__FILE__, __LINE__, __func__);
6675 		goto out;
6676 	}
6677 
6678 	ioc->io_missing_delay =
6679 	    sas_iounit_pg1->IODeviceMissingDelay;
6680 	device_missing_delay =
6681 	    sas_iounit_pg1->ReportDeviceMissingDelay;
6682 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6683 		ioc->device_missing_delay = (device_missing_delay &
6684 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6685 	else
6686 		ioc->device_missing_delay = device_missing_delay &
6687 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6688 
6689 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6690 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6691 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6692 		    i))) {
6693 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6694 				__FILE__, __LINE__, __func__);
6695 			goto out;
6696 		}
6697 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6698 		    MPI2_IOCSTATUS_MASK;
6699 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6700 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6701 				__FILE__, __LINE__, __func__);
6702 			goto out;
6703 		}
6704 
6705 		if (i == 0)
6706 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6707 			    PhyData[0].ControllerDevHandle);
6708 
6709 		port_id = sas_iounit_pg0->PhyData[i].Port;
6710 		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6711 			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6712 			if (!port)
6713 				goto out;
6714 
6715 			port->port_id = port_id;
6716 			ioc_info(ioc,
6717 			   "hba_port entry: %p, port: %d is added to hba_port list\n",
6718 			   port, port->port_id);
6719 			list_add_tail(&port->list,
6720 			    &ioc->port_table_list);
6721 		}
6722 
6723 		/*
6724 		 * Check whether current Phy belongs to HBA vSES device or not.
6725 		 */
6726 		if ((le32_to_cpu(phy_pg0.PhyInfo) &
6727 		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6728 		    (phy_pg0.NegotiatedLinkRate >> 4) >=
6729 		    MPI2_SAS_NEG_LINK_RATE_1_5) {
6730 			/*
6731 			 * Allocate a virtual_phy object for vSES device.
6732 			 */
6733 			if (!_scsih_alloc_vphy(ioc, port_id, i))
6734 				goto out;
6735 			ioc->sas_hba.phy[i].hba_vphy = 1;
6736 		}
6737 
6738 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6739 		ioc->sas_hba.phy[i].phy_id = i;
6740 		ioc->sas_hba.phy[i].port =
6741 		    mpt3sas_get_port_by_id(ioc, port_id, 0);
6742 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6743 		    phy_pg0, ioc->sas_hba.parent_dev);
6744 	}
6745 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6746 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6747 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6748 			__FILE__, __LINE__, __func__);
6749 		goto out;
6750 	}
6751 	ioc->sas_hba.enclosure_handle =
6752 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6753 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6754 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6755 		 ioc->sas_hba.handle,
6756 		 (u64)ioc->sas_hba.sas_address,
6757 		 ioc->sas_hba.num_phys);
6758 
6759 	if (ioc->sas_hba.enclosure_handle) {
6760 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6761 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6762 		   ioc->sas_hba.enclosure_handle)))
6763 			ioc->sas_hba.enclosure_logical_id =
6764 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6765 	}
6766 
6767  out:
6768 	kfree(sas_iounit_pg1);
6769 	kfree(sas_iounit_pg0);
6770 }
6771 
6772 /**
6773  * _scsih_expander_add -  creating expander object
6774  * @ioc: per adapter object
6775  * @handle: expander handle
6776  *
6777  * Creating expander object, stored in ioc->sas_expander_list.
6778  *
6779  * Return: 0 for success, else error.
6780  */
6781 static int
6782 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6783 {
6784 	struct _sas_node *sas_expander;
6785 	struct _enclosure_node *enclosure_dev;
6786 	Mpi2ConfigReply_t mpi_reply;
6787 	Mpi2ExpanderPage0_t expander_pg0;
6788 	Mpi2ExpanderPage1_t expander_pg1;
6789 	u32 ioc_status;
6790 	u16 parent_handle;
6791 	u64 sas_address, sas_address_parent = 0;
6792 	int i;
6793 	unsigned long flags;
6794 	struct _sas_port *mpt3sas_port = NULL;
6795 	u8 port_id;
6796 
6797 	int rc = 0;
6798 
6799 	if (!handle)
6800 		return -1;
6801 
6802 	if (ioc->shost_recovery || ioc->pci_error_recovery)
6803 		return -1;
6804 
6805 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6806 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6807 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6808 			__FILE__, __LINE__, __func__);
6809 		return -1;
6810 	}
6811 
6812 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6813 	    MPI2_IOCSTATUS_MASK;
6814 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6815 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6816 			__FILE__, __LINE__, __func__);
6817 		return -1;
6818 	}
6819 
6820 	/* handle out of order topology events */
6821 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6822 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6823 	    != 0) {
6824 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6825 			__FILE__, __LINE__, __func__);
6826 		return -1;
6827 	}
6828 
6829 	port_id = expander_pg0.PhysicalPort;
6830 	if (sas_address_parent != ioc->sas_hba.sas_address) {
6831 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
6832 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6833 		    sas_address_parent,
6834 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
6835 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
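		/* Parent expander is not in our list yet (out of order
		 * topology event), so add the parent first.
		 */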
6836 		if (!sas_expander) {
6837 			rc = _scsih_expander_add(ioc, parent_handle);
6838 			if (rc != 0)
6839 				return rc;
6840 		}
6841 	}
6842 
6843 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6844 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
6845 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6846 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6847 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6848 
6849 	if (sas_expander)
6850 		return 0;
6851 
6852 	sas_expander = kzalloc(sizeof(struct _sas_node),
6853 	    GFP_KERNEL);
6854 	if (!sas_expander) {
6855 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6856 			__FILE__, __LINE__, __func__);
6857 		return -1;
6858 	}
6859 
6860 	sas_expander->handle = handle;
6861 	sas_expander->num_phys = expander_pg0.NumPhys;
6862 	sas_expander->sas_address_parent = sas_address_parent;
6863 	sas_expander->sas_address = sas_address;
6864 	sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6865 	if (!sas_expander->port) {
6866 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6867 		    __FILE__, __LINE__, __func__);
6868 		rc = -1;
6869 		goto out_fail;
6870 	}
6871 
6872 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6873 		 handle, parent_handle,
6874 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
6875 
6876 	if (!sas_expander->num_phys) {
		rc = -1;
6877 		goto out_fail;
	}
6878 	sas_expander->phy = kcalloc(sas_expander->num_phys,
6879 	    sizeof(struct _sas_phy), GFP_KERNEL);
6880 	if (!sas_expander->phy) {
6881 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6882 			__FILE__, __LINE__, __func__);
6883 		rc = -1;
6884 		goto out_fail;
6885 	}
6886 
6887 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
6888 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6889 	    sas_address_parent, sas_expander->port);
6890 	if (!mpt3sas_port) {
6891 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6892 			__FILE__, __LINE__, __func__);
6893 		rc = -1;
6894 		goto out_fail;
6895 	}
6896 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6897 	sas_expander->rphy = mpt3sas_port->rphy;
6898 
6899 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
6900 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6901 		    &expander_pg1, i, handle))) {
6902 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903 				__FILE__, __LINE__, __func__);
6904 			rc = -1;
6905 			goto out_fail;
6906 		}
6907 		sas_expander->phy[i].handle = handle;
6908 		sas_expander->phy[i].phy_id = i;
6909 		sas_expander->phy[i].port =
6910 		    mpt3sas_get_port_by_id(ioc, port_id, 0);
6911 
6912 		if ((mpt3sas_transport_add_expander_phy(ioc,
6913 		    &sas_expander->phy[i], expander_pg1,
6914 		    sas_expander->parent_dev))) {
6915 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6916 				__FILE__, __LINE__, __func__);
6917 			rc = -1;
6918 			goto out_fail;
6919 		}
6920 	}
6921 
6922 	if (sas_expander->enclosure_handle) {
6923 		enclosure_dev =
6924 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6925 						sas_expander->enclosure_handle);
6926 		if (enclosure_dev)
6927 			sas_expander->enclosure_logical_id =
6928 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6929 	}
6930 
6931 	_scsih_expander_node_add(ioc, sas_expander);
6932 	return 0;
6933 
6934  out_fail:
6935 
6936 	if (mpt3sas_port)
6937 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6938 		    sas_address_parent, sas_expander->port);
6939 	kfree(sas_expander);
6940 	return rc;
6941 }
6942 
6943 /**
6944  * mpt3sas_expander_remove - removing expander object
6945  * @ioc: per adapter object
6946  * @sas_address: expander sas_address
 * @port: hba port entry
6947  */
6948 void
6949 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6950 	struct hba_port *port)
6951 {
6952 	struct _sas_node *sas_expander;
6953 	unsigned long flags;
6954 
6955 	if (ioc->shost_recovery)
6956 		return;
6957 
6958 	if (!port)
6959 		return;
6960 
6961 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6962 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6963 	    sas_address, port);
6964 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6965 	if (sas_expander)
6966 		_scsih_expander_node_remove(ioc, sas_expander);
6967 }
6968 
6969 /**
6970  * _scsih_done -  internal SCSI_IO callback handler.
6971  * @ioc: per adapter object
6972  * @smid: system request message index
6973  * @msix_index: MSIX table index supplied by the OS
6974  * @reply: reply message frame(lower 32bit addr)
6975  *
6976  * Callback handler when sending internal generated SCSI_IO.
6977  * The callback index passed is `ioc->scsih_cb_idx`
6978  *
6979  * Return: 1 meaning mf should be freed from _base_interrupt
6980  *         0 means the mf is freed from this function.
6981  */
6982 static u8
6983 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6984 {
6985 	MPI2DefaultReply_t *mpi_reply;
6986 
6987 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
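	/* Ignore replies that do not belong to the currently outstanding
	 * internal scsih command.
	 */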
6988 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6989 		return 1;
6990 	if (ioc->scsih_cmds.smid != smid)
6991 		return 1;
6992 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
6993 	if (mpi_reply) {
6994 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
6995 		    mpi_reply->MsgLength*4);
6996 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
6997 	}
6998 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
6999 	complete(&ioc->scsih_cmds.done);
7000 	return 1;
7001 }
7002 
7006 #define MPT3_MAX_LUNS (255)
7007 
7009 /**
7010  * _scsih_check_access_status - check access flags
7011  * @ioc: per adapter object
7012  * @sas_address: sas address
7013  * @handle: sas device handle
7014  * @access_status: errors returned during discovery of the device
7015  *
7016  * Return: 0 for success, else failure
7017  */
7018 static u8
7019 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7020 	u16 handle, u8 access_status)
7021 {
7022 	u8 rc = 1;
7023 	char *desc = NULL;
7024 
7025 	switch (access_status) {
7026 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7027 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7028 		rc = 0;
7029 		break;
7030 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7031 		desc = "sata capability failed";
7032 		break;
7033 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7034 		desc = "sata affiliation conflict";
7035 		break;
7036 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7037 		desc = "route not addressable";
7038 		break;
7039 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7040 		desc = "smp error not addressable";
7041 		break;
7042 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7043 		desc = "device blocked";
7044 		break;
7045 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7046 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7047 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7048 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7049 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7050 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7051 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7052 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7053 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7054 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7055 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7056 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7057 		desc = "sata initialization failed";
7058 		break;
7059 	default:
7060 		desc = "unknown";
7061 		break;
7062 	}
7063 
7064 	if (!rc)
7065 		return 0;
7066 
7067 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7068 		desc, (u64)sas_address, handle);
7069 	return rc;
7070 }
7071 
7072 /**
7073  * _scsih_check_device - checking device responsiveness
7074  * @ioc: per adapter object
7075  * @parent_sas_address: sas address of parent expander or sas host
7076  * @handle: attached device handle
7077  * @phy_number: phy number
7078  * @link_rate: new link rate
7079  */
7080 static void
7081 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7082 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7083 {
7084 	Mpi2ConfigReply_t mpi_reply;
7085 	Mpi2SasDevicePage0_t sas_device_pg0;
7086 	struct _sas_device *sas_device = NULL;
7087 	struct _enclosure_node *enclosure_dev = NULL;
7088 	u32 ioc_status;
7089 	unsigned long flags;
7090 	u64 sas_address;
7091 	struct scsi_target *starget;
7092 	struct MPT3SAS_TARGET *sas_target_priv_data;
7093 	u32 device_info;
7094 	struct hba_port *port;
7095 
7096 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7097 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7098 		return;
7099 
7100 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7101 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7102 		return;
7103 
7104 	/* wide port handling: we only need to handle the device once, for the
7105 	 * phy that is matched in sas device page zero
7106 	 */
7107 	if (phy_number != sas_device_pg0.PhyNum)
7108 		return;
7109 
7110 	/* check if this is an end device */
7111 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7112 	if (!(_scsih_is_end_device(device_info)))
7113 		return;
7114 
7115 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7116 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7117 	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7118 	if (!port)
7119 		goto out_unlock;
7120 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7121 	    sas_address, port);
7122 
7123 	if (!sas_device)
7124 		goto out_unlock;
7125 
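	/* Firmware assigned a new handle to this device; refresh the cached
	 * handle and enclosure information.
	 */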
7126 	if (unlikely(sas_device->handle != handle)) {
7127 		starget = sas_device->starget;
7128 		sas_target_priv_data = starget->hostdata;
7129 		starget_printk(KERN_INFO, starget,
7130 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
7131 			sas_device->handle, handle);
7132 		sas_target_priv_data->handle = handle;
7133 		sas_device->handle = handle;
7134 		if (le16_to_cpu(sas_device_pg0.Flags) &
7135 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7136 			sas_device->enclosure_level =
7137 				sas_device_pg0.EnclosureLevel;
7138 			memcpy(sas_device->connector_name,
7139 				sas_device_pg0.ConnectorName, 4);
7140 			sas_device->connector_name[4] = '\0';
7141 		} else {
7142 			sas_device->enclosure_level = 0;
7143 			sas_device->connector_name[0] = '\0';
7144 		}
7145 
7146 		sas_device->enclosure_handle =
7147 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
7148 		sas_device->is_chassis_slot_valid = 0;
7149 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7150 						sas_device->enclosure_handle);
7151 		if (enclosure_dev) {
7152 			sas_device->enclosure_logical_id =
7153 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7154 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7155 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7156 				sas_device->is_chassis_slot_valid = 1;
7157 				sas_device->chassis_slot =
7158 					enclosure_dev->pg0.ChassisSlot;
7159 			}
7160 		}
7161 	}
7162 
7163 	/* check if device is present */
7164 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7165 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7166 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7167 			handle);
7168 		goto out_unlock;
7169 	}
7170 
7171 	/* check if there were any issues with discovery */
7172 	if (_scsih_check_access_status(ioc, sas_address, handle,
7173 	    sas_device_pg0.AccessStatus))
7174 		goto out_unlock;
7175 
7176 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7177 	_scsih_ublock_io_device(ioc, sas_address, port);
7178 
7179 	if (sas_device)
7180 		sas_device_put(sas_device);
7181 	return;
7182 
7183 out_unlock:
7184 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7185 	if (sas_device)
7186 		sas_device_put(sas_device);
7187 }
7188 
7189 /**
7190  * _scsih_add_device -  creating sas device object
7191  * @ioc: per adapter object
7192  * @handle: sas device handle
7193  * @phy_num: phy number end device attached to
7194  * @is_pd: is this hidden raid component
7195  *
7196  * Creating end device object, stored in ioc->sas_device_list.
7197  *
7198  * Return: 0 for success, non-zero for failure.
7199  */
7200 static int
7201 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7202 	u8 is_pd)
7203 {
7204 	Mpi2ConfigReply_t mpi_reply;
7205 	Mpi2SasDevicePage0_t sas_device_pg0;
7206 	struct _sas_device *sas_device;
7207 	struct _enclosure_node *enclosure_dev = NULL;
7208 	u32 ioc_status;
7209 	u64 sas_address;
7210 	u32 device_info;
7211 	u8 port_id;
7212 
7213 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7214 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7215 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7216 			__FILE__, __LINE__, __func__);
7217 		return -1;
7218 	}
7219 
7220 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7221 	    MPI2_IOCSTATUS_MASK;
7222 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7223 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7224 			__FILE__, __LINE__, __func__);
7225 		return -1;
7226 	}
7227 
7228 	/* check if this is an end device */
7229 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7230 	if (!(_scsih_is_end_device(device_info)))
7231 		return -1;
7232 	set_bit(handle, ioc->pend_os_device_add);
7233 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7234 
7235 	/* check if device is present */
7236 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7237 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7238 		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
7239 			handle);
7240 		return -1;
7241 	}
7242 
7243 	/* check if there were any issues with discovery */
7244 	if (_scsih_check_access_status(ioc, sas_address, handle,
7245 	    sas_device_pg0.AccessStatus))
7246 		return -1;
7247 
7248 	port_id = sas_device_pg0.PhysicalPort;
7249 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
7250 	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7251 	if (sas_device) {
7252 		clear_bit(handle, ioc->pend_os_device_add);
7253 		sas_device_put(sas_device);
7254 		return -1;
7255 	}
7256 
7257 	if (sas_device_pg0.EnclosureHandle) {
7258 		enclosure_dev =
7259 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7260 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
7261 		if (enclosure_dev == NULL)
7262 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7263 				 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7264 	}
7265 
7266 	sas_device = kzalloc(sizeof(struct _sas_device),
7267 	    GFP_KERNEL);
7268 	if (!sas_device) {
7269 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7270 			__FILE__, __LINE__, __func__);
7271 		return 0;
7272 	}
7273 
7274 	kref_init(&sas_device->refcount);
7275 	sas_device->handle = handle;
7276 	if (_scsih_get_sas_address(ioc,
7277 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
7278 	    &sas_device->sas_address_parent) != 0)
7279 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7280 			__FILE__, __LINE__, __func__);
7281 	sas_device->enclosure_handle =
7282 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
7283 	if (sas_device->enclosure_handle != 0)
7284 		sas_device->slot =
7285 		    le16_to_cpu(sas_device_pg0.Slot);
7286 	sas_device->device_info = device_info;
7287 	sas_device->sas_address = sas_address;
7288 	sas_device->phy = sas_device_pg0.PhyNum;
7289 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7290 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7291 	sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7292 	if (!sas_device->port) {
7293 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7294 		    __FILE__, __LINE__, __func__);
7295 		goto out;
7296 	}
7297 
7298 	if (le16_to_cpu(sas_device_pg0.Flags)
7299 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7300 		sas_device->enclosure_level =
7301 			sas_device_pg0.EnclosureLevel;
7302 		memcpy(sas_device->connector_name,
7303 			sas_device_pg0.ConnectorName, 4);
7304 		sas_device->connector_name[4] = '\0';
7305 	} else {
7306 		sas_device->enclosure_level = 0;
7307 		sas_device->connector_name[0] = '\0';
7308 	}
7309 	/* get enclosure_logical_id & chassis_slot */
7310 	sas_device->is_chassis_slot_valid = 0;
7311 	if (enclosure_dev) {
7312 		sas_device->enclosure_logical_id =
7313 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7314 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7315 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7316 			sas_device->is_chassis_slot_valid = 1;
7317 			sas_device->chassis_slot =
7318 					enclosure_dev->pg0.ChassisSlot;
7319 		}
7320 	}
7321 
7322 	/* get device name */
7323 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7324 
7325 	if (ioc->wait_for_discovery_to_complete)
7326 		_scsih_sas_device_init_add(ioc, sas_device);
7327 	else
7328 		_scsih_sas_device_add(ioc, sas_device);
7329 
7330 out:
7331 	sas_device_put(sas_device);
7332 	return 0;
7333 }
7334 
7335 /**
7336  * _scsih_remove_device -  removing sas device object
7337  * @ioc: per adapter object
7338  * @sas_device: the sas_device object
7339  */
7340 static void
7341 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7342 	struct _sas_device *sas_device)
7343 {
7344 	struct MPT3SAS_TARGET *sas_target_priv_data;
7345 
7346 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7347 	     (sas_device->pfa_led_on)) {
7348 		_scsih_turn_off_pfa_led(ioc, sas_device);
7349 		sas_device->pfa_led_on = 0;
7350 	}
7351 
7352 	dewtprintk(ioc,
7353 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7354 			    __func__,
7355 			    sas_device->handle, (u64)sas_device->sas_address));
7356 
7357 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7358 	    NULL, NULL));
7359 
7360 	if (sas_device->starget && sas_device->starget->hostdata) {
7361 		sas_target_priv_data = sas_device->starget->hostdata;
7362 		sas_target_priv_data->deleted = 1;
7363 		_scsih_ublock_io_device(ioc, sas_device->sas_address,
7364 		    sas_device->port);
7365 		sas_target_priv_data->handle =
7366 		     MPT3SAS_INVALID_DEVICE_HANDLE;
7367 	}
7368 
7369 	if (!ioc->hide_drives)
7370 		mpt3sas_transport_port_remove(ioc,
7371 		    sas_device->sas_address,
7372 		    sas_device->sas_address_parent,
7373 		    sas_device->port);
7374 
7375 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7376 		 sas_device->handle, (u64)sas_device->sas_address);
7377 
7378 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7379 
7380 	dewtprintk(ioc,
7381 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7382 			    __func__,
7383 			    sas_device->handle, (u64)sas_device->sas_address));
7384 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7385 	    NULL, NULL));
7386 }
7387 
7388 /**
7389  * _scsih_sas_topology_change_event_debug - debug for topology event
7390  * @ioc: per adapter object
7391  * @event_data: event data payload
7392  * Context: user.
7393  */
7394 static void
7395 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7396 	Mpi2EventDataSasTopologyChangeList_t *event_data)
7397 {
7398 	int i;
7399 	u16 handle;
7400 	u16 reason_code;
7401 	u8 phy_number;
7402 	char *status_str = NULL;
7403 	u8 link_rate, prev_link_rate;
7404 
7405 	switch (event_data->ExpStatus) {
7406 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7407 		status_str = "add";
7408 		break;
7409 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7410 		status_str = "remove";
7411 		break;
7412 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7413 	case 0:
7414 		status_str =  "responding";
7415 		break;
7416 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7417 		status_str = "remove delay";
7418 		break;
7419 	default:
7420 		status_str = "unknown status";
7421 		break;
7422 	}
7423 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7424 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7425 	    "start_phy(%02d), count(%d)\n",
7426 	    le16_to_cpu(event_data->ExpanderDevHandle),
7427 	    le16_to_cpu(event_data->EnclosureHandle),
7428 	    event_data->StartPhyNum, event_data->NumEntries);
7429 	for (i = 0; i < event_data->NumEntries; i++) {
7430 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7431 		if (!handle)
7432 			continue;
7433 		phy_number = event_data->StartPhyNum + i;
7434 		reason_code = event_data->PHY[i].PhyStatus &
7435 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7436 		switch (reason_code) {
7437 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7438 			status_str = "target add";
7439 			break;
7440 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7441 			status_str = "target remove";
7442 			break;
7443 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7444 			status_str = "delay target remove";
7445 			break;
7446 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7447 			status_str = "link rate change";
7448 			break;
7449 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7450 			status_str = "target responding";
7451 			break;
7452 		default:
7453 			status_str = "unknown";
7454 			break;
7455 		}
7456 		link_rate = event_data->PHY[i].LinkRate >> 4;
7457 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7458 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7459 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7460 		    handle, status_str, link_rate, prev_link_rate);
7461 
7462 	}
7463 }
7464 
7465 /**
7466  * _scsih_sas_topology_change_event - handle topology changes
7467  * @ioc: per adapter object
7468  * @fw_event: The fw_event_work object
7469  * Context: user.
7470  *
7471  */
7472 static int
7473 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7474 	struct fw_event_work *fw_event)
7475 {
7476 	int i;
7477 	u16 parent_handle, handle;
7478 	u16 reason_code;
7479 	u8 phy_number, max_phys;
7480 	struct _sas_node *sas_expander;
7481 	u64 sas_address;
7482 	unsigned long flags;
7483 	u8 link_rate, prev_link_rate;
7484 	struct hba_port *port;
7485 	Mpi2EventDataSasTopologyChangeList_t *event_data =
7486 		(Mpi2EventDataSasTopologyChangeList_t *)
7487 		fw_event->event_data;
7488 
7489 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7490 		_scsih_sas_topology_change_event_debug(ioc, event_data);
7491 
7492 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7493 		return 0;
7494 
7495 	if (!ioc->sas_hba.num_phys)
7496 		_scsih_sas_host_add(ioc);
7497 	else
7498 		_scsih_sas_host_refresh(ioc);
7499 
7500 	if (fw_event->ignore) {
7501 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7502 		return 0;
7503 	}
7504 
7505 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7506 	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7507 
7508 	/* handle expander add */
7509 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7510 		if (_scsih_expander_add(ioc, parent_handle) != 0)
7511 			return 0;
7512 
7513 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7514 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7515 	    parent_handle);
7516 	if (sas_expander) {
7517 		sas_address = sas_expander->sas_address;
7518 		max_phys = sas_expander->num_phys;
7519 		port = sas_expander->port;
7520 	} else if (parent_handle < ioc->sas_hba.num_phys) {
7521 		sas_address = ioc->sas_hba.sas_address;
7522 		max_phys = ioc->sas_hba.num_phys;
7523 	} else {
7524 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7525 		return 0;
7526 	}
7527 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7528 
7529 	/* handle siblings events */
7530 	for (i = 0; i < event_data->NumEntries; i++) {
7531 		if (fw_event->ignore) {
7532 			dewtprintk(ioc,
7533 				   ioc_info(ioc, "ignoring expander event\n"));
7534 			return 0;
7535 		}
7536 		if (ioc->remove_host || ioc->pci_error_recovery)
7537 			return 0;
7538 		phy_number = event_data->StartPhyNum + i;
7539 		if (phy_number >= max_phys)
7540 			continue;
7541 		reason_code = event_data->PHY[i].PhyStatus &
7542 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7543 		if ((event_data->PHY[i].PhyStatus &
7544 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7545 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7546 				continue;
7547 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7548 		if (!handle)
7549 			continue;
7550 		link_rate = event_data->PHY[i].LinkRate >> 4;
7551 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7552 		switch (reason_code) {
7553 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7554 
7555 			if (ioc->shost_recovery)
7556 				break;
7557 
7558 			if (link_rate == prev_link_rate)
7559 				break;
7560 
7561 			mpt3sas_transport_update_links(ioc, sas_address,
7562 			    handle, phy_number, link_rate, port);
7563 
7564 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7565 				break;
7566 
7567 			_scsih_check_device(ioc, sas_address, handle,
7568 			    phy_number, link_rate);
7569 
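			/* If no OS device add is still pending for this
			 * handle, we are done; otherwise fall through and
			 * retry the target add.
			 */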
7570 			if (!test_bit(handle, ioc->pend_os_device_add))
7571 				break;
7572 
7573 			fallthrough;
7574 
7575 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7576 
7577 			if (ioc->shost_recovery)
7578 				break;
7579 
7580 			mpt3sas_transport_update_links(ioc, sas_address,
7581 			    handle, phy_number, link_rate, port);
7582 
7583 			_scsih_add_device(ioc, handle, phy_number, 0);
7584 
7585 			break;
7586 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7587 
7588 			_scsih_device_remove_by_handle(ioc, handle);
7589 			break;
7590 		}
7591 	}
7592 
7593 	/* handle expander removal */
7594 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7595 	    sas_expander)
7596 		mpt3sas_expander_remove(ioc, sas_address, port);
7597 
7598 	return 0;
7599 }
7600 
7601 /**
7602  * _scsih_sas_device_status_change_event_debug - debug for device event
7603  * @ioc: per adapter object
7604  * @event_data: event data payload
7605  * Context: user.
7606  */
7607 static void
7608 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7609 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7610 {
7611 	char *reason_str = NULL;
7612 
7613 	switch (event_data->ReasonCode) {
7614 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7615 		reason_str = "smart data";
7616 		break;
7617 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7618 		reason_str = "unsupported device discovered";
7619 		break;
7620 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7621 		reason_str = "internal device reset";
7622 		break;
7623 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7624 		reason_str = "internal task abort";
7625 		break;
7626 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7627 		reason_str = "internal task abort set";
7628 		break;
7629 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7630 		reason_str = "internal clear task set";
7631 		break;
7632 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7633 		reason_str = "internal query task";
7634 		break;
7635 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7636 		reason_str = "sata init failure";
7637 		break;
7638 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7639 		reason_str = "internal device reset complete";
7640 		break;
7641 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7642 		reason_str = "internal task abort complete";
7643 		break;
7644 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7645 		reason_str = "internal async notification";
7646 		break;
7647 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7648 		reason_str = "expander reduced functionality";
7649 		break;
7650 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7651 		reason_str = "expander reduced functionality complete";
7652 		break;
7653 	default:
7654 		reason_str = "unknown reason";
7655 		break;
7656 	}
7657 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7658 		 reason_str, le16_to_cpu(event_data->DevHandle),
7659 		 (u64)le64_to_cpu(event_data->SASAddress),
7660 		 le16_to_cpu(event_data->TaskTag));
7661 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7662 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7663 			event_data->ASC, event_data->ASCQ);
7664 	pr_cont("\n");
7665 }
7666 
7667 /**
7668  * _scsih_sas_device_status_change_event - handle device status change
7669  * @ioc: per adapter object
7670  * @event_data: The fw event
7671  * Context: user.
7672  */
7673 static void
7674 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7675 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7676 {
7677 	struct MPT3SAS_TARGET *target_priv_data;
7678 	struct _sas_device *sas_device;
7679 	u64 sas_address;
7680 	unsigned long flags;
7681 
7682 	/* In MPI Revision K (0xC), the internal device reset complete was
7683 	 * implemented, so avoid setting tm_busy flag for older firmware.
7684 	 */
7685 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7686 		return;
7687 
7688 	if (event_data->ReasonCode !=
7689 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7690 	   event_data->ReasonCode !=
7691 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7692 		return;
7693 
7694 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7695 	sas_address = le64_to_cpu(event_data->SASAddress);
7696 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7697 	    sas_address,
7698 	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7699 
7700 	if (!sas_device || !sas_device->starget)
7701 		goto out;
7702 
7703 	target_priv_data = sas_device->starget->hostdata;
7704 	if (!target_priv_data)
7705 		goto out;
7706 
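	/* Mark the target busy while an internal device reset is in
	 * progress, and clear the flag once the reset completes.
	 */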
7707 	if (event_data->ReasonCode ==
7708 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7709 		target_priv_data->tm_busy = 1;
7710 	else
7711 		target_priv_data->tm_busy = 0;
7712 
7713 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7714 		ioc_info(ioc,
7715 		    "%s tm_busy flag for handle(0x%04x)\n",
7716 		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7717 		    target_priv_data->handle);
7718 
7719 out:
7720 	if (sas_device)
7721 		sas_device_put(sas_device);
7722 
7723 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7724 }
7725 
7726 
7727 /**
7728  * _scsih_check_pcie_access_status - check access flags
7729  * @ioc: per adapter object
7730  * @wwid: wwid
7731  * @handle: sas device handle
7732  * @access_status: errors returned during discovery of the device
7733  *
7734  * Return: 0 for success, else failure
7735  */
7736 static u8
7737 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7738 	u16 handle, u8 access_status)
7739 {
7740 	u8 rc = 1;
7741 	char *desc = NULL;
7742 
7743 	switch (access_status) {
7744 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7745 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7746 		rc = 0;
7747 		break;
7748 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7749 		desc = "PCIe device capability failed";
7750 		break;
7751 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7752 		desc = "PCIe device blocked";
7753 		ioc_info(ioc,
7754 		    "Device with Access Status (%s): wwid(0x%016llx), "
7755 		    "handle(0x%04x) will only be added to the internal list\n",
7756 		    desc, (u64)wwid, handle);
7757 		rc = 0;
7758 		break;
7759 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7760 		desc = "PCIe device mem space access failed";
7761 		break;
7762 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7763 		desc = "PCIe device unsupported";
7764 		break;
7765 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7766 		desc = "PCIe device MSIx Required";
7767 		break;
7768 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7769 		desc = "PCIe device init fail max";
7770 		break;
7771 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7772 		desc = "PCIe device status unknown";
7773 		break;
7774 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7775 		desc = "nvme ready timeout";
7776 		break;
7777 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7778 		desc = "nvme device configuration unsupported";
7779 		break;
7780 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7781 		desc = "nvme identify failed";
7782 		break;
7783 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7784 		desc = "nvme qconfig failed";
7785 		break;
7786 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7787 		desc = "nvme qcreation failed";
7788 		break;
7789 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7790 		desc = "nvme eventcfg failed";
7791 		break;
7792 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7793 		desc = "nvme get feature stat failed";
7794 		break;
7795 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7796 		desc = "nvme idle timeout";
7797 		break;
7798 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7799 		desc = "nvme failure status";
7800 		break;
7801 	default:
7802 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7803 			access_status, (u64)wwid, handle);
7804 		return rc;
7805 	}
7806 
7807 	if (!rc)
7808 		return rc;
7809 
7810 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7811 		 desc, (u64)wwid, handle);
7812 	return rc;
7813 }
7814 
7815 /**
7816  * _scsih_pcie_device_remove_from_sml - removing pcie device
7817  * from SML and freeing up associated memory
7818  * @ioc: per adapter object
7819  * @pcie_device: the pcie_device object
7820  */
7821 static void
7822 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7823 	struct _pcie_device *pcie_device)
7824 {
7825 	struct MPT3SAS_TARGET *sas_target_priv_data;
7826 
7827 	dewtprintk(ioc,
7828 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7829 			    __func__,
7830 			    pcie_device->handle, (u64)pcie_device->wwid));
7831 	if (pcie_device->enclosure_handle != 0)
7832 		dewtprintk(ioc,
7833 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7834 				    __func__,
7835 				    (u64)pcie_device->enclosure_logical_id,
7836 				    pcie_device->slot));
7837 	if (pcie_device->connector_name[0] != '\0')
7838 		dewtprintk(ioc,
7839 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7840 				    __func__,
7841 				    pcie_device->enclosure_level,
7842 				    pcie_device->connector_name));
7843 
7844 	if (pcie_device->starget && pcie_device->starget->hostdata) {
7845 		sas_target_priv_data = pcie_device->starget->hostdata;
7846 		sas_target_priv_data->deleted = 1;
7847 		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7848 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7849 	}
7850 
7851 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7852 		 pcie_device->handle, (u64)pcie_device->wwid);
7853 	if (pcie_device->enclosure_handle != 0)
7854 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7855 			 (u64)pcie_device->enclosure_logical_id,
7856 			 pcie_device->slot);
7857 	if (pcie_device->connector_name[0] != '\0')
7858 		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7859 			 pcie_device->enclosure_level,
7860 			 pcie_device->connector_name);
7861 
7862 	if (pcie_device->starget && (pcie_device->access_status !=
7863 				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7864 		scsi_remove_target(&pcie_device->starget->dev);
7865 	dewtprintk(ioc,
7866 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7867 			    __func__,
7868 			    pcie_device->handle, (u64)pcie_device->wwid));
7869 	if (pcie_device->enclosure_handle != 0)
7870 		dewtprintk(ioc,
7871 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7872 				    __func__,
7873 				    (u64)pcie_device->enclosure_logical_id,
7874 				    pcie_device->slot));
7875 	if (pcie_device->connector_name[0] != '\0')
7876 		dewtprintk(ioc,
7877 			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7878 				    __func__,
7879 				    pcie_device->enclosure_level,
7880 				    pcie_device->connector_name));
7881 
7882 	kfree(pcie_device->serial_number);
7883 }
7884 
7885 
7886 /**
7887  * _scsih_pcie_check_device - checking device responsiveness
7888  * @ioc: per adapter object
7889  * @handle: attached device handle
7890  */
7891 static void
7892 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7893 {
7894 	Mpi2ConfigReply_t mpi_reply;
7895 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
7896 	u32 ioc_status;
7897 	struct _pcie_device *pcie_device;
7898 	u64 wwid;
7899 	unsigned long flags;
7900 	struct scsi_target *starget;
7901 	struct MPT3SAS_TARGET *sas_target_priv_data;
7902 	u32 device_info;
7903 
7904 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7905 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7906 		return;
7907 
7908 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7909 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7910 		return;
7911 
7912 	/* check if this is an end device */
7913 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7914 	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7915 		return;
7916 
7917 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
7918 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7919 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7920 
7921 	if (!pcie_device) {
7922 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7923 		return;
7924 	}
7925 
7926 	if (unlikely(pcie_device->handle != handle)) {
7927 		starget = pcie_device->starget;
7928 		sas_target_priv_data = starget->hostdata;
7929 		pcie_device->access_status = pcie_device_pg0.AccessStatus;
7930 		starget_printk(KERN_INFO, starget,
7931 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
7932 		    pcie_device->handle, handle);
7933 		sas_target_priv_data->handle = handle;
7934 		pcie_device->handle = handle;
7935 
7936 		if (le32_to_cpu(pcie_device_pg0.Flags) &
7937 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7938 			pcie_device->enclosure_level =
7939 			    pcie_device_pg0.EnclosureLevel;
7940 			memcpy(&pcie_device->connector_name[0],
7941 			    &pcie_device_pg0.ConnectorName[0], 4);
7942 		} else {
7943 			pcie_device->enclosure_level = 0;
7944 			pcie_device->connector_name[0] = '\0';
7945 		}
7946 	}
7947 
7948 	/* check if device is present */
7949 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7950 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7951 		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7952 			 handle);
7953 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7954 		pcie_device_put(pcie_device);
7955 		return;
7956 	}
7957 
7958 	/* check if there were any issues with discovery */
7959 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7960 	    pcie_device_pg0.AccessStatus)) {
7961 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7962 		pcie_device_put(pcie_device);
7963 		return;
7964 	}
7965 
7966 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7967 	pcie_device_put(pcie_device);
7968 
7969 	_scsih_ublock_io_device(ioc, wwid, NULL);
7970 
7971 	return;
7972 }
7973 
7974 /**
7975  * _scsih_pcie_add_device -  creating pcie device object
7976  * @ioc: per adapter object
7977  * @handle: pcie device handle
7978  *
7979  * Creating end device object, stored in ioc->pcie_device_list.
7980  *
7981  * Return: 1 means queue the event later, 0 means complete the event
7982  */
7983 static int
7984 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7985 {
7986 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
7987 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
7988 	Mpi2ConfigReply_t mpi_reply;
7989 	struct _pcie_device *pcie_device;
7990 	struct _enclosure_node *enclosure_dev;
7991 	u32 ioc_status;
7992 	u64 wwid;
7993 
7994 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7995 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
7996 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7997 			__FILE__, __LINE__, __func__);
7998 		return 0;
7999 	}
8000 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8001 	    MPI2_IOCSTATUS_MASK;
8002 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8003 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8004 			__FILE__, __LINE__, __func__);
8005 		return 0;
8006 	}
8007 
8008 	set_bit(handle, ioc->pend_os_device_add);
8009 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
8010 
8011 	/* check if device is present */
8012 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8013 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8014 		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
8015 			handle);
8016 		return 0;
8017 	}
8018 
8019 	/* check if there were any issues with discovery */
8020 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8021 	    pcie_device_pg0.AccessStatus))
8022 		return 0;
8023 
8024 	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8025 	    (pcie_device_pg0.DeviceInfo))))
8026 		return 0;
8027 
8028 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8029 	if (pcie_device) {
8030 		clear_bit(handle, ioc->pend_os_device_add);
8031 		pcie_device_put(pcie_device);
8032 		return 0;
8033 	}
8034 
8035 	/* PCIe Device Page 2 contains read-only information about a
8036 	 * specific NVMe device; therefore, this page is only
8037 	 * valid for NVMe devices and is skipped for pcie devices of type scsi.
8038 	 */
8039 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8040 		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8041 		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8042 		    &pcie_device_pg2, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE,
8043 		    handle)) {
8044 			ioc_err(ioc,
8045 			    "failure at %s:%d/%s()!\n", __FILE__,
8046 			    __LINE__, __func__);
8047 			return 0;
8048 		}
8049 
8050 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8051 					MPI2_IOCSTATUS_MASK;
8052 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8053 			ioc_err(ioc,
8054 			    "failure at %s:%d/%s()!\n", __FILE__,
8055 			    __LINE__, __func__);
8056 			return 0;
8057 		}
8058 	}
8059 
8060 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8061 	if (!pcie_device) {
8062 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8063 			__FILE__, __LINE__, __func__);
8064 		return 0;
8065 	}
8066 
8067 	kref_init(&pcie_device->refcount);
8068 	pcie_device->id = ioc->pcie_target_id++;
8069 	pcie_device->channel = PCIE_CHANNEL;
8070 	pcie_device->handle = handle;
8071 	pcie_device->access_status = pcie_device_pg0.AccessStatus;
8072 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8073 	pcie_device->wwid = wwid;
8074 	pcie_device->port_num = pcie_device_pg0.PortNum;
8075 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8076 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8077 
8078 	pcie_device->enclosure_handle =
8079 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8080 	if (pcie_device->enclosure_handle != 0)
8081 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8082 
8083 	if (le32_to_cpu(pcie_device_pg0.Flags) &
8084 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8085 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8086 		memcpy(&pcie_device->connector_name[0],
8087 		    &pcie_device_pg0.ConnectorName[0], 4);
8088 	} else {
8089 		pcie_device->enclosure_level = 0;
8090 		pcie_device->connector_name[0] = '\0';
8091 	}
8092 
8093 	/* get enclosure_logical_id */
8094 	if (pcie_device->enclosure_handle) {
8095 		enclosure_dev =
8096 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8097 						pcie_device->enclosure_handle);
8098 		if (enclosure_dev)
8099 			pcie_device->enclosure_logical_id =
8100 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8101 	}
8102 	/* TODO -- Add device name once FW supports it */
8103 	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8104 	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8105 		pcie_device->nvme_mdts =
8106 		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8107 		pcie_device->shutdown_latency =
8108 			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8109 		/*
8110 		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8111 		 * if drive's RTD3 Entry Latency is greater than IOC's
8112 		 * max_shutdown_latency.
8113 		 */
8114 		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8115 			ioc->max_shutdown_latency =
8116 				pcie_device->shutdown_latency;
8117 		if (pcie_device_pg2.ControllerResetTO)
8118 			pcie_device->reset_timeout =
8119 			    pcie_device_pg2.ControllerResetTO;
8120 		else
8121 			pcie_device->reset_timeout = 30;
8122 	} else
8123 		pcie_device->reset_timeout = 30;
8124 
8125 	if (ioc->wait_for_discovery_to_complete)
8126 		_scsih_pcie_device_init_add(ioc, pcie_device);
8127 	else
8128 		_scsih_pcie_device_add(ioc, pcie_device);
8129 
8130 	pcie_device_put(pcie_device);
8131 	return 0;
8132 }
8133 
8134 /**
8135  * _scsih_pcie_topology_change_event_debug - debug for topology
8136  * event
8137  * @ioc: per adapter object
8138  * @event_data: event data payload
8139  * Context: user.
8140  */
8141 static void
8142 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8143 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8144 {
8145 	int i;
8146 	u16 handle;
8147 	u16 reason_code;
8148 	u8 port_number;
8149 	char *status_str = NULL;
8150 	u8 link_rate, prev_link_rate;
8151 
8152 	switch (event_data->SwitchStatus) {
8153 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8154 		status_str = "add";
8155 		break;
8156 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8157 		status_str = "remove";
8158 		break;
8159 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8160 	case 0:
8161 		status_str =  "responding";
8162 		break;
8163 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8164 		status_str = "remove delay";
8165 		break;
8166 	default:
8167 		status_str = "unknown status";
8168 		break;
8169 	}
8170 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8171 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x) "
8172 		"start_port(%02d), count(%d)\n",
8173 		le16_to_cpu(event_data->SwitchDevHandle),
8174 		le16_to_cpu(event_data->EnclosureHandle),
8175 		event_data->StartPortNum, event_data->NumEntries);
8176 	for (i = 0; i < event_data->NumEntries; i++) {
8177 		handle =
8178 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8179 		if (!handle)
8180 			continue;
8181 		port_number = event_data->StartPortNum + i;
8182 		reason_code = event_data->PortEntry[i].PortStatus;
8183 		switch (reason_code) {
8184 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8185 			status_str = "target add";
8186 			break;
8187 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8188 			status_str = "target remove";
8189 			break;
8190 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8191 			status_str = "delay target remove";
8192 			break;
8193 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8194 			status_str = "link rate change";
8195 			break;
8196 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8197 			status_str = "target responding";
8198 			break;
8199 		default:
8200 			status_str = "unknown";
8201 			break;
8202 		}
8203 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
8204 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8205 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8206 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8207 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8208 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
8209 			handle, status_str, link_rate, prev_link_rate);
8210 	}
8211 }
8212 
8213 /**
8214  * _scsih_pcie_topology_change_event - handle PCIe topology
8215  *  changes
8216  * @ioc: per adapter object
8217  * @fw_event: The fw_event_work object
8218  * Context: user.
8219  *
8220  */
8221 static void
8222 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8223 	struct fw_event_work *fw_event)
8224 {
8225 	int i;
8226 	u16 handle;
8227 	u16 reason_code;
8228 	u8 link_rate, prev_link_rate;
8229 	unsigned long flags;
8230 	int rc;
8231 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8232 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8233 	struct _pcie_device *pcie_device;
8234 
8235 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8236 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
8237 
8238 	if (ioc->shost_recovery || ioc->remove_host ||
8239 		ioc->pci_error_recovery)
8240 		return;
8241 
8242 	if (fw_event->ignore) {
8243 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8244 		return;
8245 	}
8246 
8247 	/* handle siblings events */
8248 	for (i = 0; i < event_data->NumEntries; i++) {
8249 		if (fw_event->ignore) {
8250 			dewtprintk(ioc,
8251 				   ioc_info(ioc, "ignoring switch event\n"));
8252 			return;
8253 		}
8254 		if (ioc->remove_host || ioc->pci_error_recovery)
8255 			return;
8256 		reason_code = event_data->PortEntry[i].PortStatus;
8257 		handle =
8258 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8259 		if (!handle)
8260 			continue;
8261 
8262 		link_rate = event_data->PortEntry[i].CurrentPortInfo
8263 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8264 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8265 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8266 
8267 		switch (reason_code) {
8268 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8269 			if (ioc->shost_recovery)
8270 				break;
8271 			if (link_rate == prev_link_rate)
8272 				break;
8273 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8274 				break;
8275 
8276 			_scsih_pcie_check_device(ioc, handle);
8277 
8278 			/* The code after this point handles the case where a
8279 			 * device has been added, but it keeps returning BUSY
8280 			 * for some time.  Then, before the Device Missing
8281 			 * Delay expires and the device becomes READY, the
8282 			 * device is removed and added back.
8283 			 */
8284 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8285 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8286 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8287 
8288 			if (pcie_device) {
8289 				pcie_device_put(pcie_device);
8290 				break;
8291 			}
8292 
8293 			if (!test_bit(handle, ioc->pend_os_device_add))
8294 				break;
8295 
8296 			dewtprintk(ioc,
8297 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8298 					    handle));
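			/* Overwrite the reason code so this entry is handled
			 * as a device add by the case below.
			 */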
8299 			event_data->PortEntry[i].PortStatus &= 0xF0;
8300 			event_data->PortEntry[i].PortStatus |=
8301 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8302 			fallthrough;
8303 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8304 			if (ioc->shost_recovery)
8305 				break;
8306 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8307 				break;
8308 
8309 			rc = _scsih_pcie_add_device(ioc, handle);
8310 			if (!rc) {
8311 				/* mark entry vacant */
8312 				/* TODO This needs to be reviewed and fixed:
8313 				 * we don't have an entry to mark an event
8314 				 * as void, like vacant.
8315 				 */
8316 				event_data->PortEntry[i].PortStatus |=
8317 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8318 			}
8319 			break;
8320 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8321 			_scsih_pcie_device_remove_by_handle(ioc, handle);
8322 			break;
8323 		}
8324 	}
8325 }
8326 
8327 /**
8328  * _scsih_pcie_device_status_change_event_debug - debug for device event
8329  * @ioc: per adapter object
8330  * @event_data: event data payload
8331  * Context: user.
8332  */
8333 static void
8334 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8335 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8336 {
8337 	char *reason_str = NULL;
8338 
8339 	switch (event_data->ReasonCode) {
8340 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8341 		reason_str = "smart data";
8342 		break;
8343 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8344 		reason_str = "unsupported device discovered";
8345 		break;
8346 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8347 		reason_str = "internal device reset";
8348 		break;
8349 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8350 		reason_str = "internal task abort";
8351 		break;
8352 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8353 		reason_str = "internal abort task set";
8354 		break;
8355 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8356 		reason_str = "internal clear task set";
8357 		break;
8358 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8359 		reason_str = "internal query task";
8360 		break;
8361 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8362 		reason_str = "device init failure";
8363 		break;
8364 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8365 		reason_str = "internal device reset complete";
8366 		break;
8367 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8368 		reason_str = "internal task abort complete";
8369 		break;
8370 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8371 		reason_str = "internal async notification";
8372 		break;
8373 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8374 		reason_str = "pcie hot reset failed";
8375 		break;
8376 	default:
8377 		reason_str = "unknown reason";
8378 		break;
8379 	}
8380 
8381 	ioc_info(ioc, "PCIE device status change: (%s)\n"
8382 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8383 		 reason_str, le16_to_cpu(event_data->DevHandle),
8384 		 (u64)le64_to_cpu(event_data->WWID),
8385 		 le16_to_cpu(event_data->TaskTag));
8386 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8387 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8388 			event_data->ASC, event_data->ASCQ);
8389 	pr_cont("\n");
8390 }
8391 
8392 /**
8393  * _scsih_pcie_device_status_change_event - handle device status
8394  * change
8395  * @ioc: per adapter object
8396  * @fw_event: The fw_event_work object
8397  * Context: user.
8398  */
8399 static void
8400 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8401 	struct fw_event_work *fw_event)
8402 {
8403 	struct MPT3SAS_TARGET *target_priv_data;
8404 	struct _pcie_device *pcie_device;
8405 	u64 wwid;
8406 	unsigned long flags;
8407 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8408 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8409 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8410 		_scsih_pcie_device_status_change_event_debug(ioc,
8411 			event_data);
8412 
8413 	if (event_data->ReasonCode !=
8414 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8415 		event_data->ReasonCode !=
8416 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8417 		return;
8418 
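	/*
	 * Only firmware-internal device resets are tracked here: tm_busy is
	 * set on the target while the reset is outstanding and cleared again
	 * once the reset-complete event arrives.
	 */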
8419 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8420 	wwid = le64_to_cpu(event_data->WWID);
8421 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8422 
8423 	if (!pcie_device || !pcie_device->starget)
8424 		goto out;
8425 
8426 	target_priv_data = pcie_device->starget->hostdata;
8427 	if (!target_priv_data)
8428 		goto out;
8429 
8430 	if (event_data->ReasonCode ==
8431 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8432 		target_priv_data->tm_busy = 1;
8433 	else
8434 		target_priv_data->tm_busy = 0;
8435 out:
8436 	if (pcie_device)
8437 		pcie_device_put(pcie_device);
8438 
8439 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8440 }
8441 
8442 /**
8443  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8444  * event
8445  * @ioc: per adapter object
8446  * @event_data: event data payload
8447  * Context: user.
8448  */
8449 static void
8450 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8451 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8452 {
8453 	char *reason_str = NULL;
8454 
8455 	switch (event_data->ReasonCode) {
8456 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8457 		reason_str = "enclosure add";
8458 		break;
8459 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8460 		reason_str = "enclosure remove";
8461 		break;
8462 	default:
8463 		reason_str = "unknown reason";
8464 		break;
8465 	}
8466 
8467 	ioc_info(ioc, "enclosure status change: (%s)\n"
8468 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8469 		 reason_str,
8470 		 le16_to_cpu(event_data->EnclosureHandle),
8471 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8472 		 le16_to_cpu(event_data->StartSlot));
8473 }
8474 
8475 /**
8476  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8477  * @ioc: per adapter object
8478  * @fw_event: The fw_event_work object
8479  * Context: user.
8480  */
8481 static void
8482 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8483 	struct fw_event_work *fw_event)
8484 {
8485 	Mpi2ConfigReply_t mpi_reply;
8486 	struct _enclosure_node *enclosure_dev = NULL;
8487 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8488 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8489 	int rc;
8490 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8491 
8492 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8493 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8494 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
8495 		     fw_event->event_data);
8496 	if (ioc->shost_recovery)
8497 		return;
8498 
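	/*
	 * Look up any cached enclosure object for this handle.  An ADDED
	 * event allocates one and reads its Enclosure Page 0; a
	 * NOT_RESPONDING event frees the cached entry.
	 */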
8499 	if (enclosure_handle)
8500 		enclosure_dev =
8501 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8502 						enclosure_handle);
8503 	switch (event_data->ReasonCode) {
8504 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8505 		if (!enclosure_dev) {
8506 			enclosure_dev =
8507 				kzalloc(sizeof(struct _enclosure_node),
8508 					GFP_KERNEL);
8509 			if (!enclosure_dev) {
8510 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
8511 					 __FILE__, __LINE__, __func__);
8512 				return;
8513 			}
8514 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8515 				&enclosure_dev->pg0,
8516 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8517 				enclosure_handle);
8518 
8519 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8520 						MPI2_IOCSTATUS_MASK)) {
8521 				kfree(enclosure_dev);
8522 				return;
8523 			}
8524 
8525 			list_add_tail(&enclosure_dev->list,
8526 							&ioc->enclosure_list);
8527 		}
8528 		break;
8529 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8530 		if (enclosure_dev) {
8531 			list_del(&enclosure_dev->list);
8532 			kfree(enclosure_dev);
8533 		}
8534 		break;
8535 	default:
8536 		break;
8537 	}
8538 }
8539 
8540 /**
8541  * _scsih_sas_broadcast_primitive_event - handle broadcast events
8542  * @ioc: per adapter object
8543  * @fw_event: The fw_event_work object
8544  * Context: user.
8545  */
8546 static void
8547 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8548 	struct fw_event_work *fw_event)
8549 {
8550 	struct scsi_cmnd *scmd;
8551 	struct scsi_device *sdev;
8552 	struct scsiio_tracker *st;
8553 	u16 smid, handle;
8554 	u32 lun;
8555 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8556 	u32 termination_count;
8557 	u32 query_count;
8558 	Mpi2SCSITaskManagementReply_t *mpi_reply;
8559 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8560 		(Mpi2EventDataSasBroadcastPrimitive_t *)
8561 		fw_event->event_data;
8562 	u16 ioc_status;
8563 	unsigned long flags;
8564 	int r;
8565 	u8 max_retries = 0;
8566 	u8 task_abort_retries;
8567 
8568 	mutex_lock(&ioc->tm_cmds.mutex);
8569 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8570 		 __func__, event_data->PhyNum, event_data->PortWidth);
8571 
8572 	_scsih_block_io_all_device(ioc);
8573 
8574 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8575 	mpi_reply = ioc->tm_cmds.reply;
8576  broadcast_aen_retry:
8577 
8578 	/* sanity checks for retrying this loop */
8579 	if (max_retries++ == 5) {
8580 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8581 		goto out;
8582 	} else if (max_retries > 1)
8583 		dewtprintk(ioc,
8584 			   ioc_info(ioc, "%s: %d retry\n",
8585 				    __func__, max_retries - 1));
8586 
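	/*
	 * Walk every outstanding SCSI I/O and issue a QUERY_TASK TM to the
	 * owning SAS end device.  I/Os still owned by the IOC or target are
	 * left alone; anything else is cleaned up with ABORT_TASK below.
	 */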
8587 	termination_count = 0;
8588 	query_count = 0;
8589 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8590 		if (ioc->shost_recovery)
8591 			goto out;
8592 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8593 		if (!scmd)
8594 			continue;
8595 		st = scsi_cmd_priv(scmd);
8596 		sdev = scmd->device;
8597 		sas_device_priv_data = sdev->hostdata;
8598 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8599 			continue;
8600 		 /* skip hidden raid components */
8601 		if (sas_device_priv_data->sas_target->flags &
8602 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
8603 			continue;
8604 		 /* skip volumes */
8605 		if (sas_device_priv_data->sas_target->flags &
8606 		    MPT_TARGET_FLAGS_VOLUME)
8607 			continue;
8608 		 /* skip PCIe devices */
8609 		if (sas_device_priv_data->sas_target->flags &
8610 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
8611 			continue;
8612 
8613 		handle = sas_device_priv_data->sas_target->handle;
8614 		lun = sas_device_priv_data->lun;
8615 		query_count++;
8616 
8617 		if (ioc->shost_recovery)
8618 			goto out;
8619 
8620 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8621 		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8622 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8623 			st->msix_io, 30, 0);
8624 		if (r == FAILED) {
8625 			sdev_printk(KERN_WARNING, sdev,
8626 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
8627 			    "QUERY_TASK: scmd(%p)\n", scmd);
8628 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8629 			goto broadcast_aen_retry;
8630 		}
8631 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8632 		    & MPI2_IOCSTATUS_MASK;
8633 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8634 			sdev_printk(KERN_WARNING, sdev,
8635 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8636 				ioc_status, scmd);
8637 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8638 			goto broadcast_aen_retry;
8639 		}
8640 
8641 		/* see if IO is still owned by IOC and target */
8642 		if (mpi_reply->ResponseCode ==
8643 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8644 		     mpi_reply->ResponseCode ==
8645 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8646 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8647 			continue;
8648 		}
8649 		task_abort_retries = 0;
8650  tm_retry:
8651 		if (task_abort_retries++ == 60) {
8652 			dewtprintk(ioc,
8653 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8654 					    __func__));
8655 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8656 			goto broadcast_aen_retry;
8657 		}
8658 
8659 		if (ioc->shost_recovery)
8660 			goto out_no_lock;
8661 
8662 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8663 			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8664 			st->smid, st->msix_io, 30, 0);
8665 		if (r == FAILED || st->cb_idx != 0xFF) {
8666 			sdev_printk(KERN_WARNING, sdev,
8667 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED: "
8668 			    "scmd(%p)\n", scmd);
8669 			goto tm_retry;
8670 		}
8671 
8672 		if (task_abort_retries > 1)
8673 			sdev_printk(KERN_WARNING, sdev,
8674 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8675 			    " scmd(%p)\n",
8676 			    task_abort_retries - 1, scmd);
8677 
8678 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8679 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8680 	}
8681 
8682 	if (ioc->broadcast_aen_pending) {
8683 		dewtprintk(ioc,
8684 			   ioc_info(ioc,
8685 				    "%s: loop back due to pending AEN\n",
8686 				    __func__));
8687 		 ioc->broadcast_aen_pending = 0;
8688 		 goto broadcast_aen_retry;
8689 	}
8690 
8691  out:
8692 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8693  out_no_lock:
8694 
8695 	dewtprintk(ioc,
8696 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8697 			    __func__, query_count, termination_count));
8698 
8699 	ioc->broadcast_aen_busy = 0;
8700 	if (!ioc->shost_recovery)
8701 		_scsih_ublock_io_all_device(ioc);
8702 	mutex_unlock(&ioc->tm_cmds.mutex);
8703 }
8704 
8705 /**
8706  * _scsih_sas_discovery_event - handle discovery events
8707  * @ioc: per adapter object
8708  * @fw_event: The fw_event_work object
8709  * Context: user.
8710  */
8711 static void
8712 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8713 	struct fw_event_work *fw_event)
8714 {
8715 	Mpi2EventDataSasDiscovery_t *event_data =
8716 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8717 
8718 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8719 		ioc_info(ioc, "discovery event: (%s)",
8720 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8721 			 "start" : "stop");
8722 		if (event_data->DiscoveryStatus)
8723 			pr_cont("discovery_status(0x%08x)",
8724 				le32_to_cpu(event_data->DiscoveryStatus));
8725 		pr_cont("\n");
8726 	}
8727 
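	/*
	 * A discovery-start event seen before any HBA phys have been
	 * reported triggers the SAS host add; when discovery is disabled
	 * (disable_discovery > 0), wait out any reset in progress first.
	 */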
8728 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8729 	    !ioc->sas_hba.num_phys) {
8730 		if (disable_discovery > 0 && ioc->shost_recovery) {
8731 			/* Wait for the reset to complete */
8732 			while (ioc->shost_recovery)
8733 				ssleep(1);
8734 		}
8735 		_scsih_sas_host_add(ioc);
8736 	}
8737 }
8738 
8739 /**
8740  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8741  *						events
8742  * @ioc: per adapter object
8743  * @fw_event: The fw_event_work object
8744  * Context: user.
8745  */
8746 static void
8747 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8748 	struct fw_event_work *fw_event)
8749 {
8750 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8751 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8752 
8753 	switch (event_data->ReasonCode) {
8754 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8755 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8756 			 le16_to_cpu(event_data->DevHandle),
8757 			 (u64)le64_to_cpu(event_data->SASAddress),
8758 			 event_data->PhysicalPort);
8759 		break;
8760 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8761 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8762 			 le16_to_cpu(event_data->DevHandle),
8763 			 (u64)le64_to_cpu(event_data->SASAddress),
8764 			 event_data->PhysicalPort);
8765 		break;
8766 	default:
8767 		break;
8768 	}
8769 }
8770 
8771 /**
8772  * _scsih_pcie_enumeration_event - handle enumeration events
8773  * @ioc: per adapter object
8774  * @fw_event: The fw_event_work object
8775  * Context: user.
8776  */
8777 static void
8778 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8779 	struct fw_event_work *fw_event)
8780 {
8781 	Mpi26EventDataPCIeEnumeration_t *event_data =
8782 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8783 
8784 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8785 		return;
8786 
8787 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8788 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8789 		 "started" : "completed",
8790 		 event_data->Flags);
8791 	if (event_data->EnumerationStatus)
8792 		pr_cont("enumeration_status(0x%08x)",
8793 			le32_to_cpu(event_data->EnumerationStatus));
8794 	pr_cont("\n");
8795 }
8796 
8797 /**
8798  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8799  * @ioc: per adapter object
8800  * @handle: device handle for physical disk
8801  * @phys_disk_num: physical disk number
8802  *
8803  * Return: 0 for success, else failure.
8804  */
8805 static int
8806 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8807 {
8808 	Mpi2RaidActionRequest_t *mpi_request;
8809 	Mpi2RaidActionReply_t *mpi_reply;
8810 	u16 smid;
8811 	u8 issue_reset = 0;
8812 	int rc = 0;
8813 	u16 ioc_status;
8814 	u32 log_info;
8815 
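	/* Turning the fast path on is skipped on MPI2 (SAS 2.0) controllers. */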
8816 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8817 		return rc;
8818 
8819 	mutex_lock(&ioc->scsih_cmds.mutex);
8820 
8821 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8822 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8823 		rc = -EAGAIN;
8824 		goto out;
8825 	}
8826 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8827 
8828 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8829 	if (!smid) {
8830 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8831 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8832 		rc = -EAGAIN;
8833 		goto out;
8834 	}
8835 
8836 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8837 	ioc->scsih_cmds.smid = smid;
8838 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8839 
8840 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8841 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8842 	mpi_request->PhysDiskNum = phys_disk_num;
8843 
8844 	dewtprintk(ioc,
8845 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8846 			    handle, phys_disk_num));
8847 
8848 	init_completion(&ioc->scsih_cmds.done);
8849 	ioc->put_smid_default(ioc, smid);
8850 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8851 
8852 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8853 		mpt3sas_check_cmd_timeout(ioc,
8854 		    ioc->scsih_cmds.status, mpi_request,
8855 		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8856 		rc = -EFAULT;
8857 		goto out;
8858 	}
8859 
8860 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8861 
8862 		mpi_reply = ioc->scsih_cmds.reply;
8863 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8864 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8865 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
8866 		else
8867 			log_info = 0;
8868 		ioc_status &= MPI2_IOCSTATUS_MASK;
8869 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8870 			dewtprintk(ioc,
8871 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8872 					    ioc_status, log_info));
8873 			rc = -EFAULT;
8874 		} else
8875 			dewtprintk(ioc,
8876 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
8877 	}
8878 
8879  out:
8880 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8881 	mutex_unlock(&ioc->scsih_cmds.mutex);
8882 
8883 	if (issue_reset)
8884 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8885 	return rc;
8886 }
8887 
8888 /**
8889  * _scsih_reprobe_lun - reprobing lun
8890  * @sdev: scsi device struct
8891  * @no_uld_attach: sdev->no_uld_attach flag setting
8892  *
8893  */
8894 static void
8895 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8896 {
8897 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8898 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8899 	    sdev->no_uld_attach ? "hiding" : "exposing");
8900 	WARN_ON(scsi_device_reprobe(sdev));
8901 }
8902 
8903 /**
8904  * _scsih_sas_volume_add - add new volume
8905  * @ioc: per adapter object
8906  * @element: IR config element data
8907  * Context: user.
8908  */
8909 static void
8910 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8911 	Mpi2EventIrConfigElement_t *element)
8912 {
8913 	struct _raid_device *raid_device;
8914 	unsigned long flags;
8915 	u64 wwid;
8916 	u16 handle = le16_to_cpu(element->VolDevHandle);
8917 	int rc;
8918 
8919 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8920 	if (!wwid) {
8921 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8922 			__FILE__, __LINE__, __func__);
8923 		return;
8924 	}
8925 
8926 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8927 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8928 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8929 
8930 	if (raid_device)
8931 		return;
8932 
8933 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8934 	if (!raid_device) {
8935 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8936 			__FILE__, __LINE__, __func__);
8937 		return;
8938 	}
8939 
8940 	raid_device->id = ioc->sas_id++;
8941 	raid_device->channel = RAID_CHANNEL;
8942 	raid_device->handle = handle;
8943 	raid_device->wwid = wwid;
8944 	_scsih_raid_device_add(ioc, raid_device);
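	/*
	 * While the driver is still waiting for initial discovery to
	 * complete, the volume is only recorded as a potential boot device;
	 * otherwise it is exposed to the SCSI midlayer immediately.
	 */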
8945 	if (!ioc->wait_for_discovery_to_complete) {
8946 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8947 		    raid_device->id, 0);
8948 		if (rc)
8949 			_scsih_raid_device_remove(ioc, raid_device);
8950 	} else {
8951 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8952 		_scsih_determine_boot_device(ioc, raid_device, 1);
8953 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8954 	}
8955 }
8956 
8957 /**
8958  * _scsih_sas_volume_delete - delete volume
8959  * @ioc: per adapter object
8960  * @handle: volume device handle
8961  * Context: user.
8962  */
8963 static void
8964 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8965 {
8966 	struct _raid_device *raid_device;
8967 	unsigned long flags;
8968 	struct MPT3SAS_TARGET *sas_target_priv_data;
8969 	struct scsi_target *starget = NULL;
8970 
8971 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8972 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8973 	if (raid_device) {
8974 		if (raid_device->starget) {
8975 			starget = raid_device->starget;
8976 			sas_target_priv_data = starget->hostdata;
8977 			sas_target_priv_data->deleted = 1;
8978 		}
8979 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8980 			 raid_device->handle, (u64)raid_device->wwid);
8981 		list_del(&raid_device->list);
8982 		kfree(raid_device);
8983 	}
8984 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8985 	if (starget)
8986 		scsi_remove_target(&starget->dev);
8987 }
8988 
8989 /**
8990  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
8991  * @ioc: per adapter object
8992  * @element: IR config element data
8993  * Context: user.
8994  */
8995 static void
8996 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
8997 	Mpi2EventIrConfigElement_t *element)
8998 {
8999 	struct _sas_device *sas_device;
9000 	struct scsi_target *starget = NULL;
9001 	struct MPT3SAS_TARGET *sas_target_priv_data;
9002 	unsigned long flags;
9003 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9004 
9005 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9006 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9007 	if (sas_device) {
9008 		sas_device->volume_handle = 0;
9009 		sas_device->volume_wwid = 0;
9010 		clear_bit(handle, ioc->pd_handles);
9011 		if (sas_device->starget && sas_device->starget->hostdata) {
9012 			starget = sas_device->starget;
9013 			sas_target_priv_data = starget->hostdata;
9014 			sas_target_priv_data->flags &=
9015 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9016 		}
9017 	}
9018 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9019 	if (!sas_device)
9020 		return;
9021 
9022 	/* exposing raid component */
9023 	if (starget)
9024 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9025 
9026 	sas_device_put(sas_device);
9027 }
9028 
9029 /**
9030  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9031  * @ioc: per adapter object
9032  * @element: IR config element data
9033  * Context: user.
9034  */
9035 static void
9036 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9037 	Mpi2EventIrConfigElement_t *element)
9038 {
9039 	struct _sas_device *sas_device;
9040 	struct scsi_target *starget = NULL;
9041 	struct MPT3SAS_TARGET *sas_target_priv_data;
9042 	unsigned long flags;
9043 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9044 	u16 volume_handle = 0;
9045 	u64 volume_wwid = 0;
9046 
9047 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9048 	if (volume_handle)
9049 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9050 		    &volume_wwid);
9051 
9052 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9053 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
9054 	if (sas_device) {
9055 		set_bit(handle, ioc->pd_handles);
9056 		if (sas_device->starget && sas_device->starget->hostdata) {
9057 			starget = sas_device->starget;
9058 			sas_target_priv_data = starget->hostdata;
9059 			sas_target_priv_data->flags |=
9060 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
9061 			sas_device->volume_handle = volume_handle;
9062 			sas_device->volume_wwid = volume_wwid;
9063 		}
9064 	}
9065 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9066 	if (!sas_device)
9067 		return;
9068 
9069 	/* hiding raid component */
9070 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9071 
9072 	if (starget)
9073 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9074 
9075 	sas_device_put(sas_device);
9076 }
9077 
9078 /**
9079  * _scsih_sas_pd_delete - delete pd component
9080  * @ioc: per adapter object
9081  * @element: IR config element data
9082  * Context: user.
9083  */
9084 static void
9085 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9086 	Mpi2EventIrConfigElement_t *element)
9087 {
9088 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9089 
9090 	_scsih_device_remove_by_handle(ioc, handle);
9091 }
9092 
9093 /**
9094  * _scsih_sas_pd_add - add pd component
9095  * @ioc: per adapter object
9096  * @element: IR config element data
9097  * Context: user.
9098  */
9099 static void
9100 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9101 	Mpi2EventIrConfigElement_t *element)
9102 {
9103 	struct _sas_device *sas_device;
9104 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9105 	Mpi2ConfigReply_t mpi_reply;
9106 	Mpi2SasDevicePage0_t sas_device_pg0;
9107 	u32 ioc_status;
9108 	u64 sas_address;
9109 	u16 parent_handle;
9110 
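	/*
	 * Mark the handle as a hidden RAID component.  If the device is
	 * already known, just re-enable the fast path; otherwise read SAS
	 * Device Page 0 to refresh the parent link and add it as a hidden
	 * physical disk.
	 */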
9111 	set_bit(handle, ioc->pd_handles);
9112 
9113 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9114 	if (sas_device) {
9115 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9116 		sas_device_put(sas_device);
9117 		return;
9118 	}
9119 
9120 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9121 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9122 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9123 			__FILE__, __LINE__, __func__);
9124 		return;
9125 	}
9126 
9127 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9128 	    MPI2_IOCSTATUS_MASK;
9129 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9130 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9131 			__FILE__, __LINE__, __func__);
9132 		return;
9133 	}
9134 
9135 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9136 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9137 		mpt3sas_transport_update_links(ioc, sas_address, handle,
9138 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9139 		    mpt3sas_get_port_by_id(ioc,
9140 		    sas_device_pg0.PhysicalPort, 0));
9141 
9142 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9143 	_scsih_add_device(ioc, handle, 0, 1);
9144 }
9145 
9146 /**
9147  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9148  * @ioc: per adapter object
9149  * @event_data: event data payload
9150  * Context: user.
9151  */
9152 static void
9153 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9154 	Mpi2EventDataIrConfigChangeList_t *event_data)
9155 {
9156 	Mpi2EventIrConfigElement_t *element;
9157 	u8 element_type;
9158 	int i;
9159 	char *reason_str = NULL, *element_str = NULL;
9160 
9161 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9162 
9163 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9164 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9165 		 "foreign" : "native",
9166 		 event_data->NumElements);
9167 	for (i = 0; i < event_data->NumElements; i++, element++) {
9168 		switch (element->ReasonCode) {
9169 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9170 			reason_str = "add";
9171 			break;
9172 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9173 			reason_str = "remove";
9174 			break;
9175 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9176 			reason_str = "no change";
9177 			break;
9178 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9179 			reason_str = "hide";
9180 			break;
9181 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9182 			reason_str = "unhide";
9183 			break;
9184 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9185 			reason_str = "volume_created";
9186 			break;
9187 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9188 			reason_str = "volume_deleted";
9189 			break;
9190 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9191 			reason_str = "pd_created";
9192 			break;
9193 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9194 			reason_str = "pd_deleted";
9195 			break;
9196 		default:
9197 			reason_str = "unknown reason";
9198 			break;
9199 		}
9200 		element_type = le16_to_cpu(element->ElementFlags) &
9201 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9202 		switch (element_type) {
9203 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9204 			element_str = "volume";
9205 			break;
9206 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9207 			element_str = "phys disk";
9208 			break;
9209 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9210 			element_str = "hot spare";
9211 			break;
9212 		default:
9213 			element_str = "unknown element";
9214 			break;
9215 		}
9216 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
9217 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9218 		    reason_str, le16_to_cpu(element->VolDevHandle),
9219 		    le16_to_cpu(element->PhysDiskDevHandle),
9220 		    element->PhysDiskNum);
9221 	}
9222 }
9223 
9224 /**
9225  * _scsih_sas_ir_config_change_event - handle ir configuration change events
9226  * @ioc: per adapter object
9227  * @fw_event: The fw_event_work object
9228  * Context: user.
9229  */
9230 static void
9231 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9232 	struct fw_event_work *fw_event)
9233 {
9234 	Mpi2EventIrConfigElement_t *element;
9235 	int i;
9236 	u8 foreign_config;
9237 	Mpi2EventDataIrConfigChangeList_t *event_data =
9238 		(Mpi2EventDataIrConfigChangeList_t *)
9239 		fw_event->event_data;
9240 
9241 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9242 	     (!ioc->hide_ir_msg))
9243 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
9244 
9245 	foreign_config = (le32_to_cpu(event_data->Flags) &
9246 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9247 
9248 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
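	/*
	 * When host recovery is in progress on non-MPI2 controllers, only
	 * re-enable the fast path for newly hidden physical disks; the rest
	 * of the config change list is not processed.
	 */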
9249 	if (ioc->shost_recovery &&
9250 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9251 		for (i = 0; i < event_data->NumElements; i++, element++) {
9252 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9253 				_scsih_ir_fastpath(ioc,
9254 					le16_to_cpu(element->PhysDiskDevHandle),
9255 					element->PhysDiskNum);
9256 		}
9257 		return;
9258 	}
9259 
9260 	for (i = 0; i < event_data->NumElements; i++, element++) {
9261 
9262 		switch (element->ReasonCode) {
9263 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9264 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9265 			if (!foreign_config)
9266 				_scsih_sas_volume_add(ioc, element);
9267 			break;
9268 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9269 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9270 			if (!foreign_config)
9271 				_scsih_sas_volume_delete(ioc,
9272 				    le16_to_cpu(element->VolDevHandle));
9273 			break;
9274 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9275 			if (!ioc->is_warpdrive)
9276 				_scsih_sas_pd_hide(ioc, element);
9277 			break;
9278 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9279 			if (!ioc->is_warpdrive)
9280 				_scsih_sas_pd_expose(ioc, element);
9281 			break;
9282 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9283 			if (!ioc->is_warpdrive)
9284 				_scsih_sas_pd_add(ioc, element);
9285 			break;
9286 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9287 			if (!ioc->is_warpdrive)
9288 				_scsih_sas_pd_delete(ioc, element);
9289 			break;
9290 		}
9291 	}
9292 }
9293 
9294 /**
9295  * _scsih_sas_ir_volume_event - IR volume event
9296  * @ioc: per adapter object
9297  * @fw_event: The fw_event_work object
9298  * Context: user.
9299  */
9300 static void
9301 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9302 	struct fw_event_work *fw_event)
9303 {
9304 	u64 wwid;
9305 	unsigned long flags;
9306 	struct _raid_device *raid_device;
9307 	u16 handle;
9308 	u32 state;
9309 	int rc;
9310 	Mpi2EventDataIrVolume_t *event_data =
9311 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
9312 
9313 	if (ioc->shost_recovery)
9314 		return;
9315 
9316 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9317 		return;
9318 
9319 	handle = le16_to_cpu(event_data->VolDevHandle);
9320 	state = le32_to_cpu(event_data->NewValue);
9321 	if (!ioc->hide_ir_msg)
9322 		dewtprintk(ioc,
9323 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9324 				    __func__, handle,
9325 				    le32_to_cpu(event_data->PreviousValue),
9326 				    state));
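	/*
	 * A volume going MISSING or FAILED is deleted; a volume reported in
	 * an operational state that the driver does not know about yet is
	 * treated as a hot add.
	 */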
9327 	switch (state) {
9328 	case MPI2_RAID_VOL_STATE_MISSING:
9329 	case MPI2_RAID_VOL_STATE_FAILED:
9330 		_scsih_sas_volume_delete(ioc, handle);
9331 		break;
9332 
9333 	case MPI2_RAID_VOL_STATE_ONLINE:
9334 	case MPI2_RAID_VOL_STATE_DEGRADED:
9335 	case MPI2_RAID_VOL_STATE_OPTIMAL:
9336 
9337 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9338 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9339 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9340 
9341 		if (raid_device)
9342 			break;
9343 
9344 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9345 		if (!wwid) {
9346 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9347 				__FILE__, __LINE__, __func__);
9348 			break;
9349 		}
9350 
9351 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9352 		if (!raid_device) {
9353 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9354 				__FILE__, __LINE__, __func__);
9355 			break;
9356 		}
9357 
9358 		raid_device->id = ioc->sas_id++;
9359 		raid_device->channel = RAID_CHANNEL;
9360 		raid_device->handle = handle;
9361 		raid_device->wwid = wwid;
9362 		_scsih_raid_device_add(ioc, raid_device);
9363 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9364 		    raid_device->id, 0);
9365 		if (rc)
9366 			_scsih_raid_device_remove(ioc, raid_device);
9367 		break;
9368 
9369 	case MPI2_RAID_VOL_STATE_INITIALIZING:
9370 	default:
9371 		break;
9372 	}
9373 }
9374 
9375 /**
9376  * _scsih_sas_ir_physical_disk_event - PD event
9377  * @ioc: per adapter object
9378  * @fw_event: The fw_event_work object
9379  * Context: user.
9380  */
9381 static void
9382 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9383 	struct fw_event_work *fw_event)
9384 {
9385 	u16 handle, parent_handle;
9386 	u32 state;
9387 	struct _sas_device *sas_device;
9388 	Mpi2ConfigReply_t mpi_reply;
9389 	Mpi2SasDevicePage0_t sas_device_pg0;
9390 	u32 ioc_status;
9391 	Mpi2EventDataIrPhysicalDisk_t *event_data =
9392 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
9393 	u64 sas_address;
9394 
9395 	if (ioc->shost_recovery)
9396 		return;
9397 
9398 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9399 		return;
9400 
9401 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9402 	state = le32_to_cpu(event_data->NewValue);
9403 
9404 	if (!ioc->hide_ir_msg)
9405 		dewtprintk(ioc,
9406 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9407 				    __func__, handle,
9408 				    le32_to_cpu(event_data->PreviousValue),
9409 				    state));
9410 
9411 	switch (state) {
9412 	case MPI2_RAID_PD_STATE_ONLINE:
9413 	case MPI2_RAID_PD_STATE_DEGRADED:
9414 	case MPI2_RAID_PD_STATE_REBUILDING:
9415 	case MPI2_RAID_PD_STATE_OPTIMAL:
9416 	case MPI2_RAID_PD_STATE_HOT_SPARE:
9417 
9418 		if (!ioc->is_warpdrive)
9419 			set_bit(handle, ioc->pd_handles);
9420 
9421 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9422 		if (sas_device) {
9423 			sas_device_put(sas_device);
9424 			return;
9425 		}
9426 
9427 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9428 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9429 		    handle))) {
9430 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9431 				__FILE__, __LINE__, __func__);
9432 			return;
9433 		}
9434 
9435 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9436 		    MPI2_IOCSTATUS_MASK;
9437 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9438 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9439 				__FILE__, __LINE__, __func__);
9440 			return;
9441 		}
9442 
9443 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9444 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9445 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9446 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9447 			    mpt3sas_get_port_by_id(ioc,
9448 			    sas_device_pg0.PhysicalPort, 0));
9449 
9450 		_scsih_add_device(ioc, handle, 0, 1);
9451 
9452 		break;
9453 
9454 	case MPI2_RAID_PD_STATE_OFFLINE:
9455 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9456 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9457 	default:
9458 		break;
9459 	}
9460 }
9461 
9462 /**
9463  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9464  * @ioc: per adapter object
9465  * @event_data: event data payload
9466  * Context: user.
9467  */
9468 static void
9469 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9470 	Mpi2EventDataIrOperationStatus_t *event_data)
9471 {
9472 	char *reason_str = NULL;
9473 
9474 	switch (event_data->RAIDOperation) {
9475 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
9476 		reason_str = "resync";
9477 		break;
9478 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9479 		reason_str = "online capacity expansion";
9480 		break;
9481 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9482 		reason_str = "consistency check";
9483 		break;
9484 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9485 		reason_str = "background init";
9486 		break;
9487 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9488 		reason_str = "make data consistent";
9489 		break;
9490 	}
9491 
9492 	if (!reason_str)
9493 		return;
9494 
9495 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9496 		 reason_str,
9497 		 le16_to_cpu(event_data->VolDevHandle),
9498 		 event_data->PercentComplete);
9499 }
9500 
9501 /**
9502  * _scsih_sas_ir_operation_status_event - handle RAID operation events
9503  * @ioc: per adapter object
9504  * @fw_event: The fw_event_work object
9505  * Context: user.
9506  */
9507 static void
9508 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9509 	struct fw_event_work *fw_event)
9510 {
9511 	Mpi2EventDataIrOperationStatus_t *event_data =
9512 		(Mpi2EventDataIrOperationStatus_t *)
9513 		fw_event->event_data;
9514 	static struct _raid_device *raid_device;
9515 	unsigned long flags;
9516 	u16 handle;
9517 
9518 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9519 	    (!ioc->hide_ir_msg))
9520 		_scsih_sas_ir_operation_status_event_debug(ioc,
9521 		     event_data);
9522 
9523 	/* code added for raid transport support */
9524 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9525 
9526 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9527 		handle = le16_to_cpu(event_data->VolDevHandle);
9528 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9529 		if (raid_device)
9530 			raid_device->percent_complete =
9531 			    event_data->PercentComplete;
9532 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9533 	}
9534 }
9535 
9536 /**
9537  * _scsih_prep_device_scan - initialize parameters prior to device scan
9538  * @ioc: per adapter object
9539  *
9540  * Set the deleted flag prior to device scan.  If the device is found during
9541  * the scan, then we clear the deleted flag.
9542  */
9543 static void
9544 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9545 {
9546 	struct MPT3SAS_DEVICE *sas_device_priv_data;
9547 	struct scsi_device *sdev;
9548 
9549 	shost_for_each_device(sdev, ioc->shost) {
9550 		sas_device_priv_data = sdev->hostdata;
9551 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
9552 			sas_device_priv_data->sas_target->deleted = 1;
9553 	}
9554 }
9555 
9556 /**
9557  * _scsih_mark_responding_sas_device - mark a sas_device as responding
9558  * @ioc: per adapter object
9559  * @sas_device_pg0: SAS Device page 0
9560  *
9561  * After host reset, find out whether devices are still responding.
9562  * Used in _scsih_remove_unresponding_devices.
9563  */
9564 static void
9565 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9566 Mpi2SasDevicePage0_t *sas_device_pg0)
9567 {
9568 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9569 	struct scsi_target *starget;
9570 	struct _sas_device *sas_device = NULL;
9571 	struct _enclosure_node *enclosure_dev = NULL;
9572 	unsigned long flags;
9573 	struct hba_port *port = mpt3sas_get_port_by_id(
9574 	    ioc, sas_device_pg0->PhysicalPort, 0);
9575 
9576 	if (sas_device_pg0->EnclosureHandle) {
9577 		enclosure_dev =
9578 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
9579 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
9580 		if (enclosure_dev == NULL)
9581 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9582 				 sas_device_pg0->EnclosureHandle);
9583 	}
9584 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9585 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
9586 		if (sas_device->sas_address != le64_to_cpu(
9587 		    sas_device_pg0->SASAddress))
9588 			continue;
9589 		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9590 			continue;
9591 		if (sas_device->port != port)
9592 			continue;
9593 		sas_device->responding = 1;
9594 		starget = sas_device->starget;
9595 		if (starget && starget->hostdata) {
9596 			sas_target_priv_data = starget->hostdata;
9597 			sas_target_priv_data->tm_busy = 0;
9598 			sas_target_priv_data->deleted = 0;
9599 		} else
9600 			sas_target_priv_data = NULL;
9601 		if (starget) {
9602 			starget_printk(KERN_INFO, starget,
9603 			    "handle(0x%04x), sas_addr(0x%016llx)\n",
9604 			    le16_to_cpu(sas_device_pg0->DevHandle),
9605 			    (unsigned long long)
9606 			    sas_device->sas_address);
9607 
9608 			if (sas_device->enclosure_handle != 0)
9609 				starget_printk(KERN_INFO, starget,
9610 				 "enclosure logical id(0x%016llx), slot(%d)\n",
9611 				 (unsigned long long)
9612 				 sas_device->enclosure_logical_id,
9613 				 sas_device->slot);
9614 		}
9615 		if (le16_to_cpu(sas_device_pg0->Flags) &
9616 		      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9617 			sas_device->enclosure_level =
9618 			   sas_device_pg0->EnclosureLevel;
9619 			memcpy(&sas_device->connector_name[0],
9620 				&sas_device_pg0->ConnectorName[0], 4);
9621 		} else {
9622 			sas_device->enclosure_level = 0;
9623 			sas_device->connector_name[0] = '\0';
9624 		}
9625 
9626 		sas_device->enclosure_handle =
9627 			le16_to_cpu(sas_device_pg0->EnclosureHandle);
9628 		sas_device->is_chassis_slot_valid = 0;
9629 		if (enclosure_dev) {
9630 			sas_device->enclosure_logical_id = le64_to_cpu(
9631 				enclosure_dev->pg0.EnclosureLogicalID);
9632 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9633 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9634 				sas_device->is_chassis_slot_valid = 1;
9635 				sas_device->chassis_slot =
9636 					enclosure_dev->pg0.ChassisSlot;
9637 			}
9638 		}
9639 
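		/*
		 * If the firmware assigned a new handle across the reset,
		 * record it in the driver object and the target private data.
		 */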
9640 		if (sas_device->handle == le16_to_cpu(
9641 		    sas_device_pg0->DevHandle))
9642 			goto out;
9643 		pr_info("\thandle changed from(0x%04x)!!!\n",
9644 		    sas_device->handle);
9645 		sas_device->handle = le16_to_cpu(
9646 		    sas_device_pg0->DevHandle);
9647 		if (sas_target_priv_data)
9648 			sas_target_priv_data->handle =
9649 			    le16_to_cpu(sas_device_pg0->DevHandle);
9650 		goto out;
9651 	}
9652  out:
9653 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9654 }
9655 
9656 /**
9657  * _scsih_create_enclosure_list_after_reset - free the existing enclosure
9658  *	list and re-create it by scanning all Enclosure Page(0)s
9659  * @ioc: per adapter object
9660  */
9661 static void
9662 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9663 {
9664 	struct _enclosure_node *enclosure_dev;
9665 	Mpi2ConfigReply_t mpi_reply;
9666 	u16 enclosure_handle;
9667 	int rc;
9668 
9669 	/* Free existing enclosure list */
9670 	mpt3sas_free_enclosure_list(ioc);
9671 
9672 	/* Reconstruct the enclosure list after reset */
9673 	enclosure_handle = 0xFFFF;
9674 	do {
9675 		enclosure_dev =
9676 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9677 		if (!enclosure_dev) {
9678 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9679 				__FILE__, __LINE__, __func__);
9680 			return;
9681 		}
9682 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9683 				&enclosure_dev->pg0,
9684 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9685 				enclosure_handle);
9686 
9687 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9688 						MPI2_IOCSTATUS_MASK)) {
9689 			kfree(enclosure_dev);
9690 			return;
9691 		}
9692 		list_add_tail(&enclosure_dev->list,
9693 						&ioc->enclosure_list);
9694 		enclosure_handle =
9695 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9696 	} while (1);
9697 }
9698 
9699 /**
9700  * _scsih_search_responding_sas_devices - search for responding SAS end-devices
9701  * @ioc: per adapter object
9702  *
9703  * After host reset, find out whether devices are still responding.
9704  * If not remove.
9705  */
9706 static void
9707 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9708 {
9709 	Mpi2SasDevicePage0_t sas_device_pg0;
9710 	Mpi2ConfigReply_t mpi_reply;
9711 	u16 ioc_status;
9712 	u16 handle;
9713 	u32 device_info;
9714 
9715 	ioc_info(ioc, "search for end-devices: start\n");
9716 
9717 	if (list_empty(&ioc->sas_device_list))
9718 		goto out;
9719 
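	/*
	 * Walk SAS Device Page 0 for each attached device using the
	 * GET_NEXT_HANDLE form (starting from 0xFFFF) and mark every end
	 * device that is still present as responding.
	 */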
9720 	handle = 0xFFFF;
9721 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9722 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9723 	    handle))) {
9724 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9725 		    MPI2_IOCSTATUS_MASK;
9726 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9727 			break;
9728 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9729 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9730 		if (!(_scsih_is_end_device(device_info)))
9731 			continue;
9732 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9733 	}
9734 
9735  out:
9736 	ioc_info(ioc, "search for end-devices: complete\n");
9737 }
9738 
9739 /**
9740  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9741  * @ioc: per adapter object
9742  * @pcie_device_pg0: PCIe Device page 0
9743  *
9744  * After host reset, find out whether devices are still responding.
9745  * Used in _scsih_remove_unresponding_devices.
9746  */
9747 static void
9748 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9749 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9750 {
9751 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9752 	struct scsi_target *starget;
9753 	struct _pcie_device *pcie_device;
9754 	unsigned long flags;
9755 
9756 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9757 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
9758 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9759 		    && (pcie_device->slot == le16_to_cpu(
9760 		    pcie_device_pg0->Slot))) {
9761 			pcie_device->access_status =
9762 					pcie_device_pg0->AccessStatus;
9763 			pcie_device->responding = 1;
9764 			starget = pcie_device->starget;
9765 			if (starget && starget->hostdata) {
9766 				sas_target_priv_data = starget->hostdata;
9767 				sas_target_priv_data->tm_busy = 0;
9768 				sas_target_priv_data->deleted = 0;
9769 			} else
9770 				sas_target_priv_data = NULL;
9771 			if (starget) {
9772 				starget_printk(KERN_INFO, starget,
9773 				    "handle(0x%04x), wwid(0x%016llx) ",
9774 				    pcie_device->handle,
9775 				    (unsigned long long)pcie_device->wwid);
9776 				if (pcie_device->enclosure_handle != 0)
9777 					starget_printk(KERN_INFO, starget,
9778 					    "enclosure logical id(0x%016llx), "
9779 					    "slot(%d)\n",
9780 					    (unsigned long long)
9781 					    pcie_device->enclosure_logical_id,
9782 					    pcie_device->slot);
9783 			}
9784 
9785 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9786 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9787 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9788 				pcie_device->enclosure_level =
9789 				    pcie_device_pg0->EnclosureLevel;
9790 				memcpy(&pcie_device->connector_name[0],
9791 				    &pcie_device_pg0->ConnectorName[0], 4);
9792 			} else {
9793 				pcie_device->enclosure_level = 0;
9794 				pcie_device->connector_name[0] = '\0';
9795 			}
9796 
9797 			if (pcie_device->handle == le16_to_cpu(
9798 			    pcie_device_pg0->DevHandle))
9799 				goto out;
9800 			pr_info("\thandle changed from(0x%04x)!!!\n",
9801 			    pcie_device->handle);
9802 			pcie_device->handle = le16_to_cpu(
9803 			    pcie_device_pg0->DevHandle);
9804 			if (sas_target_priv_data)
9805 				sas_target_priv_data->handle =
9806 				    le16_to_cpu(pcie_device_pg0->DevHandle);
9807 			goto out;
9808 		}
9809 	}
9810 
9811  out:
9812 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9813 }
9814 
9815 /**
9816  * _scsih_search_responding_pcie_devices - search for responding PCIe end-devices
9817  * @ioc: per adapter object
9818  *
9819  * After host reset, find out whether devices are still responding.
9820  * If not remove.
9821  */
9822 static void
9823 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9824 {
9825 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9826 	Mpi2ConfigReply_t mpi_reply;
9827 	u16 ioc_status;
9828 	u16 handle;
9829 	u32 device_info;
9830 
9831 	ioc_info(ioc, "search for PCIe end-devices: start\n");
9832 
9833 	if (list_empty(&ioc->pcie_device_list))
9834 		goto out;
9835 
9836 	handle = 0xFFFF;
9837 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9838 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9839 		handle))) {
9840 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9841 		    MPI2_IOCSTATUS_MASK;
9842 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9843 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9844 				 __func__, ioc_status,
9845 				 le32_to_cpu(mpi_reply.IOCLogInfo));
9846 			break;
9847 		}
9848 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9849 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9850 		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9851 			continue;
9852 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9853 	}
9854 out:
9855 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
9856 }
9857 
9858 /**
9859  * _scsih_mark_responding_raid_device - mark a raid_device as responding
9860  * @ioc: per adapter object
9861  * @wwid: world wide identifier for raid volume
9862  * @handle: device handle
9863  *
9864  * After host reset, find out whether devices are still responding.
9865  * Used in _scsih_remove_unresponding_devices.
9866  */
9867 static void
9868 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
9869 	u16 handle)
9870 {
9871 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9872 	struct scsi_target *starget;
9873 	struct _raid_device *raid_device;
9874 	unsigned long flags;
9875 
9876 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
9877 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
9878 		if (raid_device->wwid == wwid && raid_device->starget) {
9879 			starget = raid_device->starget;
9880 			if (starget && starget->hostdata) {
9881 				sas_target_priv_data = starget->hostdata;
9882 				sas_target_priv_data->deleted = 0;
9883 			} else
9884 				sas_target_priv_data = NULL;
9885 			raid_device->responding = 1;
9886 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9887 			starget_printk(KERN_INFO, raid_device->starget,
9888 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
9889 			    (unsigned long long)raid_device->wwid);
9890 
9891 			/*
9892 			 * WARPDRIVE: The handles of the PDs might have changed
9893 			 * across the host reset so re-initialize the
9894 			 * required data for Direct IO
9895 			 */
9896 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
9897 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
9898 			if (raid_device->handle == handle) {
9899 				spin_unlock_irqrestore(&ioc->raid_device_lock,
9900 				    flags);
9901 				return;
9902 			}
9903 			pr_info("\thandle changed from(0x%04x)!!!\n",
9904 			    raid_device->handle);
9905 			raid_device->handle = handle;
9906 			if (sas_target_priv_data)
9907 				sas_target_priv_data->handle = handle;
9908 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9909 			return;
9910 		}
9911 	}
9912 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9913 }
9914 
9915 /**
9916  * _scsih_search_responding_raid_devices - search for responding RAID volumes
9917  * @ioc: per adapter object
9918  *
9919  * After host reset, find out whether devices are still responding.
9920  * If not remove.
9921  */
9922 static void
9923 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9924 {
9925 	Mpi2RaidVolPage1_t volume_pg1;
9926 	Mpi2RaidVolPage0_t volume_pg0;
9927 	Mpi2RaidPhysDiskPage0_t pd_pg0;
9928 	Mpi2ConfigReply_t mpi_reply;
9929 	u16 ioc_status;
9930 	u16 handle;
9931 	u8 phys_disk_num;
9932 
9933 	if (!ioc->ir_firmware)
9934 		return;
9935 
9936 	ioc_info(ioc, "search for raid volumes: start\n");
9937 
9938 	if (list_empty(&ioc->raid_device_list))
9939 		goto out;
9940 
9941 	handle = 0xFFFF;
9942 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9943 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9944 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9945 		    MPI2_IOCSTATUS_MASK;
9946 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9947 			break;
9948 		handle = le16_to_cpu(volume_pg1.DevHandle);
9949 
9950 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9951 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9952 		     sizeof(Mpi2RaidVolPage0_t)))
9953 			continue;
9954 
9955 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9956 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9957 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9958 			_scsih_mark_responding_raid_device(ioc,
9959 			    le64_to_cpu(volume_pg1.WWID), handle);
9960 	}
9961 
9962 	/* refresh the pd_handles */
9963 	if (!ioc->is_warpdrive) {
9964 		phys_disk_num = 0xFF;
9965 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9966 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9967 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9968 		    phys_disk_num))) {
9969 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9970 			    MPI2_IOCSTATUS_MASK;
9971 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9972 				break;
9973 			phys_disk_num = pd_pg0.PhysDiskNum;
9974 			handle = le16_to_cpu(pd_pg0.DevHandle);
9975 			set_bit(handle, ioc->pd_handles);
9976 		}
9977 	}
9978  out:
9979 	ioc_info(ioc, "search for responding raid volumes: complete\n");
9980 }
9981 
9982 /**
9983  * _scsih_mark_responding_expander - mark an expander as responding
9984  * @ioc: per adapter object
9985  * @expander_pg0: SAS Expander Config Page0
9986  *
9987  * After host reset, find out whether devices are still responding.
9988  * Used in _scsih_remove_unresponding_devices.
9989  */
9990 static void
9991 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
9992 	Mpi2ExpanderPage0_t *expander_pg0)
9993 {
9994 	struct _sas_node *sas_expander = NULL;
9995 	unsigned long flags;
9996 	int i;
9997 	struct _enclosure_node *enclosure_dev = NULL;
9998 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
9999 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10000 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10001 	struct hba_port *port = mpt3sas_get_port_by_id(
10002 	    ioc, expander_pg0->PhysicalPort, 0);
10003 
10004 	if (enclosure_handle)
10005 		enclosure_dev =
10006 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
10007 							enclosure_handle);
10008 
10009 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
10010 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10011 		if (sas_expander->sas_address != sas_address)
10012 			continue;
10013 		if (sas_expander->port != port)
10014 			continue;
10015 		sas_expander->responding = 1;
10016 
10017 		if (enclosure_dev) {
10018 			sas_expander->enclosure_logical_id =
10019 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10020 			sas_expander->enclosure_handle =
10021 			    le16_to_cpu(expander_pg0->EnclosureHandle);
10022 		}
10023 
10024 		if (sas_expander->handle == handle)
10025 			goto out;
10026 		pr_info("\texpander(0x%016llx): handle changed" \
10027 		    " from(0x%04x) to (0x%04x)!!!\n",
10028 		    (unsigned long long)sas_expander->sas_address,
10029 		    sas_expander->handle, handle);
10030 		sas_expander->handle = handle;
10031 		for (i = 0 ; i < sas_expander->num_phys ; i++)
10032 			sas_expander->phy[i].handle = handle;
10033 		goto out;
10034 	}
10035  out:
10036 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10037 }
10038 
10039 /**
 * _scsih_search_responding_expanders - search for responding expanders
 * @ioc: per adapter object
 *
 * After host reset, find out whether expanders are still responding.
 * Those that are not are removed later by _scsih_remove_unresponding_devices.
10045  */
10046 static void
10047 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10048 {
10049 	Mpi2ExpanderPage0_t expander_pg0;
10050 	Mpi2ConfigReply_t mpi_reply;
10051 	u16 ioc_status;
10052 	u64 sas_address;
10053 	u16 handle;
10054 	u8 port;
10055 
10056 	ioc_info(ioc, "search for expanders: start\n");
10057 
10058 	if (list_empty(&ioc->sas_expander_list))
10059 		goto out;
10060 
10061 	handle = 0xFFFF;
10062 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10063 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10064 
10065 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10066 		    MPI2_IOCSTATUS_MASK;
10067 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10068 			break;
10069 
10070 		handle = le16_to_cpu(expander_pg0.DevHandle);
10071 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
10072 		port = expander_pg0.PhysicalPort;
10073 		pr_info(
10074 		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10075 		    handle, (unsigned long long)sas_address,
10076 		    (ioc->multipath_on_hba ?
10077 		    port : MULTIPATH_DISABLED_PORT_ID));
10078 		_scsih_mark_responding_expander(ioc, &expander_pg0);
10079 	}
10080 
10081  out:
10082 	ioc_info(ioc, "search for expanders: complete\n");
10083 }
10084 
10085 /**
10086  * _scsih_remove_unresponding_devices - removing unresponding devices
10087  * @ioc: per adapter object
10088  */
10089 static void
10090 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10091 {
10092 	struct _sas_device *sas_device, *sas_device_next;
10093 	struct _sas_node *sas_expander, *sas_expander_next;
10094 	struct _raid_device *raid_device, *raid_device_next;
10095 	struct _pcie_device *pcie_device, *pcie_device_next;
10096 	struct list_head tmp_list;
10097 	unsigned long flags;
10098 	LIST_HEAD(head);
10099 
10100 	ioc_info(ioc, "removing unresponding devices: start\n");
10101 
10102 	/* removing unresponding end devices */
10103 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
10104 	/*
10105 	 * Iterate, pulling off devices marked as non-responding. We become the
10106 	 * owner for the reference the list had on any object we prune.
10107 	 */
10108 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
10109 	list_for_each_entry_safe(sas_device, sas_device_next,
10110 	    &ioc->sas_device_list, list) {
10111 		if (!sas_device->responding)
10112 			list_move_tail(&sas_device->list, &head);
10113 		else
10114 			sas_device->responding = 0;
10115 	}
10116 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10117 
10118 	/*
10119 	 * Now, uninitialize and remove the unresponding devices we pruned.
10120 	 */
10121 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10122 		_scsih_remove_device(ioc, sas_device);
10123 		list_del_init(&sas_device->list);
10124 		sas_device_put(sas_device);
10125 	}
10126 
10127 	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10128 	INIT_LIST_HEAD(&head);
10129 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10130 	list_for_each_entry_safe(pcie_device, pcie_device_next,
10131 	    &ioc->pcie_device_list, list) {
10132 		if (!pcie_device->responding)
10133 			list_move_tail(&pcie_device->list, &head);
10134 		else
10135 			pcie_device->responding = 0;
10136 	}
10137 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10138 
10139 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10140 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10141 		list_del_init(&pcie_device->list);
10142 		pcie_device_put(pcie_device);
10143 	}
10144 
10145 	/* removing unresponding volumes */
10146 	if (ioc->ir_firmware) {
10147 		ioc_info(ioc, "removing unresponding devices: volumes\n");
10148 		list_for_each_entry_safe(raid_device, raid_device_next,
10149 		    &ioc->raid_device_list, list) {
10150 			if (!raid_device->responding)
10151 				_scsih_sas_volume_delete(ioc,
10152 				    raid_device->handle);
10153 			else
10154 				raid_device->responding = 0;
10155 		}
10156 	}
10157 
10158 	/* removing unresponding expanders */
10159 	ioc_info(ioc, "removing unresponding devices: expanders\n");
10160 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
10161 	INIT_LIST_HEAD(&tmp_list);
10162 	list_for_each_entry_safe(sas_expander, sas_expander_next,
10163 	    &ioc->sas_expander_list, list) {
10164 		if (!sas_expander->responding)
10165 			list_move_tail(&sas_expander->list, &tmp_list);
10166 		else
10167 			sas_expander->responding = 0;
10168 	}
10169 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10170 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10171 	    list) {
10172 		_scsih_expander_node_remove(ioc, sas_expander);
10173 	}
10174 
10175 	ioc_info(ioc, "removing unresponding devices: complete\n");
10176 
10177 	/* unblock devices */
10178 	_scsih_ublock_io_all_device(ioc);
10179 }
10180 
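/**
 * _scsih_refresh_expander_links - refresh the phy links of an expander
 * @ioc: per adapter object
 * @sas_expander: the expander's sas_node object
 * @handle: expander device handle
 *
 * Re-read SAS Expander Page 1 for each phy and report the negotiated link
 * rates to the transport layer.
 */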
10181 static void
10182 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10183 	struct _sas_node *sas_expander, u16 handle)
10184 {
10185 	Mpi2ExpanderPage1_t expander_pg1;
10186 	Mpi2ConfigReply_t mpi_reply;
10187 	int i;
10188 
10189 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
10190 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10191 		    &expander_pg1, i, handle))) {
10192 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
10193 				__FILE__, __LINE__, __func__);
10194 			return;
10195 		}
10196 
10197 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10198 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10199 		    expander_pg1.NegotiatedLinkRate >> 4,
10200 		    sas_expander->port);
10201 	}
10202 }
10203 
10204 /**
10205  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10206  * @ioc: per adapter object
10207  */
10208 static void
10209 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10210 {
10211 	Mpi2ExpanderPage0_t expander_pg0;
10212 	Mpi2SasDevicePage0_t sas_device_pg0;
10213 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
10214 	Mpi2RaidVolPage1_t volume_pg1;
10215 	Mpi2RaidVolPage0_t volume_pg0;
10216 	Mpi2RaidPhysDiskPage0_t pd_pg0;
10217 	Mpi2EventIrConfigElement_t element;
10218 	Mpi2ConfigReply_t mpi_reply;
10219 	u8 phys_disk_num, port_id;
10220 	u16 ioc_status;
10221 	u16 handle, parent_handle;
10222 	u64 sas_address;
10223 	struct _sas_device *sas_device;
10224 	struct _pcie_device *pcie_device;
10225 	struct _sas_node *expander_device;
10226 	static struct _raid_device *raid_device;
10227 	u8 retry_count;
10228 	unsigned long flags;
10229 
10230 	ioc_info(ioc, "scan devices: start\n");
10231 
10232 	_scsih_sas_host_refresh(ioc);
10233 
10234 	ioc_info(ioc, "\tscan devices: expanders start\n");
10235 
10236 	/* expanders */
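	/*
	 * Passing handle 0xFFFF with the GET_NEXT_HNDL form returns the
	 * first expander; each returned handle is fed back in to walk the
	 * entire list.
	 */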
10237 	handle = 0xFFFF;
10238 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10239 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10240 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10241 		    MPI2_IOCSTATUS_MASK;
10242 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10243 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10244 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10245 			break;
10246 		}
10247 		handle = le16_to_cpu(expander_pg0.DevHandle);
10248 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
10249 		port_id = expander_pg0.PhysicalPort;
10250 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10251 		    ioc, le64_to_cpu(expander_pg0.SASAddress),
10252 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
10253 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10254 		if (expander_device)
10255 			_scsih_refresh_expander_links(ioc, expander_device,
10256 			    handle);
10257 		else {
10258 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10259 				 handle,
10260 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
10261 			_scsih_expander_add(ioc, handle);
10262 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10263 				 handle,
10264 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
10265 		}
10266 	}
10267 
10268 	ioc_info(ioc, "\tscan devices: expanders complete\n");
10269 
10270 	if (!ioc->ir_firmware)
10271 		goto skip_to_sas;
10272 
10273 	ioc_info(ioc, "\tscan devices: phys disk start\n");
10274 
10275 	/* phys disk */
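	/*
	 * Walk the firmware's physical disk list and add any RAID member
	 * disks the driver does not already track.
	 */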
10276 	phys_disk_num = 0xFF;
10277 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10278 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10279 	    phys_disk_num))) {
10280 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10281 		    MPI2_IOCSTATUS_MASK;
10282 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10283 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10284 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10285 			break;
10286 		}
10287 		phys_disk_num = pd_pg0.PhysDiskNum;
10288 		handle = le16_to_cpu(pd_pg0.DevHandle);
10289 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10290 		if (sas_device) {
10291 			sas_device_put(sas_device);
10292 			continue;
10293 		}
10294 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10295 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10296 		    handle) != 0)
10297 			continue;
10298 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10299 		    MPI2_IOCSTATUS_MASK;
10300 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10301 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10302 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10303 			break;
10304 		}
10305 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10306 		if (!_scsih_get_sas_address(ioc, parent_handle,
10307 		    &sas_address)) {
10308 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10309 				 handle,
10310 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10311 			port_id = sas_device_pg0.PhysicalPort;
10312 			mpt3sas_transport_update_links(ioc, sas_address,
10313 			    handle, sas_device_pg0.PhyNum,
10314 			    MPI2_SAS_NEG_LINK_RATE_1_5,
10315 			    mpt3sas_get_port_by_id(ioc, port_id, 0));
10316 			set_bit(handle, ioc->pd_handles);
10317 			retry_count = 0;
10318 			/* This will retry adding the end device.
10319 			 * _scsih_add_device() will decide on retries and
10320 			 * return "1" when it should be retried
10321 			 */
10322 			while (_scsih_add_device(ioc, handle, retry_count++,
10323 			    1)) {
10324 				ssleep(1);
10325 			}
10326 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10327 				 handle,
10328 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10329 		}
10330 	}
10331 
10332 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
10333 
10334 	ioc_info(ioc, "\tscan devices: volumes start\n");
10335 
10336 	/* volumes */
10337 	handle = 0xFFFF;
10338 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10339 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10340 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10341 		    MPI2_IOCSTATUS_MASK;
10342 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10343 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10344 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10345 			break;
10346 		}
10347 		handle = le16_to_cpu(volume_pg1.DevHandle);
10348 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
10349 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
10350 		    le64_to_cpu(volume_pg1.WWID));
10351 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10352 		if (raid_device)
10353 			continue;
10354 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10355 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10356 		     sizeof(Mpi2RaidVolPage0_t)))
10357 			continue;
10358 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10359 		    MPI2_IOCSTATUS_MASK;
10360 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10361 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10362 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10363 			break;
10364 		}
10365 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10366 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10367 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10368 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10369 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10370 			element.VolDevHandle = volume_pg1.DevHandle;
10371 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10372 				 volume_pg1.DevHandle);
10373 			_scsih_sas_volume_add(ioc, &element);
10374 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10375 				 volume_pg1.DevHandle);
10376 		}
10377 	}
10378 
10379 	ioc_info(ioc, "\tscan devices: volumes complete\n");
10380 
10381  skip_to_sas:
10382 
10383 	ioc_info(ioc, "\tscan devices: end devices start\n");
10384 
10385 	/* sas devices */
10386 	handle = 0xFFFF;
10387 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10388 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10389 	    handle))) {
10390 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10391 		    MPI2_IOCSTATUS_MASK;
10392 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10393 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10394 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10395 			break;
10396 		}
10397 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
10398 		if (!(_scsih_is_end_device(
10399 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
10400 			continue;
10401 		port_id = sas_device_pg0.PhysicalPort;
10402 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
10403 		    le64_to_cpu(sas_device_pg0.SASAddress),
10404 		    mpt3sas_get_port_by_id(ioc, port_id, 0));
10405 		if (sas_device) {
10406 			sas_device_put(sas_device);
10407 			continue;
10408 		}
10409 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10410 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10411 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10412 				 handle,
10413 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10414 			mpt3sas_transport_update_links(ioc, sas_address, handle,
10415 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10416 			    mpt3sas_get_port_by_id(ioc, port_id, 0));
10417 			retry_count = 0;
10418 			/* This will retry adding the end device.
10419 			 * _scsih_add_device() will decide on retries and
10420 			 * return "1" when it should be retried
10421 			 */
10422 			while (_scsih_add_device(ioc, handle, retry_count++,
10423 			    0)) {
10424 				ssleep(1);
10425 			}
10426 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10427 				 handle,
10428 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10429 		}
10430 	}
10431 	ioc_info(ioc, "\tscan devices: end devices complete\n");
10432 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10433 
10434 	/* pcie devices */
10435 	handle = 0xFFFF;
10436 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10437 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10438 		handle))) {
10439 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10440 				& MPI2_IOCSTATUS_MASK;
10441 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10442 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10443 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10444 			break;
10445 		}
10446 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10447 		if (!(_scsih_is_nvme_pciescsi_device(
10448 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10449 			continue;
10450 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10451 				le64_to_cpu(pcie_device_pg0.WWID));
10452 		if (pcie_device) {
10453 			pcie_device_put(pcie_device);
10454 			continue;
10455 		}
10456 		retry_count = 0;
10457 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10458 		_scsih_pcie_add_device(ioc, handle);
10459 
10460 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10461 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10462 	}
10463 	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10464 	ioc_info(ioc, "scan devices: complete\n");
10465 }
10466 
10467 /**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10469  * @ioc: per adapter object
10470  *
10471  * The handler for doing any required cleanup or initialization.
10472  */
10473 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10474 {
10475 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10476 }
10477 
10478 /**
10479  * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10480  *							scsi & tm cmds.
10481  * @ioc: per adapter object
10482  *
10483  * The handler for doing any required cleanup or initialization.
10484  */
10485 void
10486 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
10487 {
10488 	dtmprintk(ioc,
10489 	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
10490 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10491 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10492 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10493 		complete(&ioc->scsih_cmds.done);
10494 	}
10495 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10496 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
10497 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10498 		complete(&ioc->tm_cmds.done);
10499 	}
10500 
10501 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10502 	memset(ioc->device_remove_in_progress, 0,
10503 	       ioc->device_remove_in_progress_sz);
10504 	_scsih_fw_event_cleanup_queue(ioc);
10505 	_scsih_flush_running_cmds(ioc);
10506 }
10507 
10508 /**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10510  * @ioc: per adapter object
10511  *
10512  * The handler for doing any required cleanup or initialization.
10513  */
10514 void
10515 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10516 {
10517 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
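	/*
	 * Re-sync device state with the firmware unless the driver is still
	 * loading, or discovery is disabled and the SAS host was never
	 * enumerated.
	 */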
10518 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
10519 					   !ioc->sas_hba.num_phys)) {
10520 		if (ioc->multipath_on_hba) {
10521 			_scsih_sas_port_refresh(ioc);
10522 			_scsih_update_vphys_after_reset(ioc);
10523 		}
10524 		_scsih_prep_device_scan(ioc);
10525 		_scsih_create_enclosure_list_after_reset(ioc);
10526 		_scsih_search_responding_sas_devices(ioc);
10527 		_scsih_search_responding_pcie_devices(ioc);
10528 		_scsih_search_responding_raid_devices(ioc);
10529 		_scsih_search_responding_expanders(ioc);
10530 		_scsih_error_recovery_delete_devices(ioc);
10531 	}
10532 }
10533 
10534 /**
10535  * _mpt3sas_fw_work - delayed task for processing firmware events
10536  * @ioc: per adapter object
10537  * @fw_event: The fw_event_work object
10538  * Context: user.
10539  */
10540 static void
10541 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10542 {
10543 	ioc->current_event = fw_event;
10544 	_scsih_fw_event_del_from_list(ioc, fw_event);
10545 
10546 	/* the queue is being flushed so ignore this event */
10547 	if (ioc->remove_host || ioc->pci_error_recovery) {
10548 		fw_event_work_put(fw_event);
10549 		ioc->current_event = NULL;
10550 		return;
10551 	}
10552 
10553 	switch (fw_event->event) {
10554 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
10555 		mpt3sas_process_trigger_data(ioc,
10556 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10557 			fw_event->event_data);
10558 		break;
10559 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
10560 		while (scsi_host_in_recovery(ioc->shost) ||
10561 					 ioc->shost_recovery) {
10562 			/*
10563 			 * If we're unloading or cancelling the work, bail.
10564 			 * Otherwise, this can become an infinite loop.
10565 			 */
10566 			if (ioc->remove_host || ioc->fw_events_cleanup)
10567 				goto out;
10568 			ssleep(1);
10569 		}
10570 		_scsih_remove_unresponding_devices(ioc);
10571 		_scsih_del_dirty_vphy(ioc);
10572 		_scsih_del_dirty_port_entries(ioc);
10573 		_scsih_scan_for_devices_after_reset(ioc);
10574 		_scsih_set_nvme_max_shutdown_latency(ioc);
10575 		break;
10576 	case MPT3SAS_PORT_ENABLE_COMPLETE:
10577 		ioc->start_scan = 0;
10578 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
10579 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10580 			    missing_delay[1]);
10581 		dewtprintk(ioc,
10582 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
10583 		break;
10584 	case MPT3SAS_TURN_ON_PFA_LED:
10585 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10586 		break;
10587 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10588 		_scsih_sas_topology_change_event(ioc, fw_event);
10589 		break;
10590 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10591 		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10592 			_scsih_sas_device_status_change_event_debug(ioc,
10593 			    (Mpi2EventDataSasDeviceStatusChange_t *)
10594 			    fw_event->event_data);
10595 		break;
10596 	case MPI2_EVENT_SAS_DISCOVERY:
10597 		_scsih_sas_discovery_event(ioc, fw_event);
10598 		break;
10599 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10600 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
10601 		break;
10602 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10603 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
10604 		break;
10605 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10606 		_scsih_sas_enclosure_dev_status_change_event(ioc,
10607 		    fw_event);
10608 		break;
10609 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10610 		_scsih_sas_ir_config_change_event(ioc, fw_event);
10611 		break;
10612 	case MPI2_EVENT_IR_VOLUME:
10613 		_scsih_sas_ir_volume_event(ioc, fw_event);
10614 		break;
10615 	case MPI2_EVENT_IR_PHYSICAL_DISK:
10616 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
10617 		break;
10618 	case MPI2_EVENT_IR_OPERATION_STATUS:
10619 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
10620 		break;
10621 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10622 		_scsih_pcie_device_status_change_event(ioc, fw_event);
10623 		break;
10624 	case MPI2_EVENT_PCIE_ENUMERATION:
10625 		_scsih_pcie_enumeration_event(ioc, fw_event);
10626 		break;
10627 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10628 		_scsih_pcie_topology_change_event(ioc, fw_event);
		ioc->current_event = NULL;
		return;
10632 	}
10633 out:
10634 	fw_event_work_put(fw_event);
10635 	ioc->current_event = NULL;
10636 }
10637 
10638 /**
 * _firmware_event_work - work routine for processing firmware events
10640  * @work: The fw_event_work object
10641  * Context: user.
10642  *
 * wrapper for the work thread handling firmware events
10644  */
10645 
10646 static void
10647 _firmware_event_work(struct work_struct *work)
10648 {
10649 	struct fw_event_work *fw_event = container_of(work,
10650 	    struct fw_event_work, work);
10651 
10652 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
10653 }
10654 
10655 /**
10656  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10657  * @ioc: per adapter object
10658  * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit addr)
10660  * Context: interrupt.
10661  *
10662  * This function merely adds a new work task into ioc->firmware_event_thread.
10663  * The tasks are worked from _firmware_event_work in user context.
10664  *
10665  * Return: 1 meaning mf should be freed from _base_interrupt
10666  *         0 means the mf is freed from this function.
10667  */
10668 u8
10669 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10670 	u32 reply)
10671 {
10672 	struct fw_event_work *fw_event;
10673 	Mpi2EventNotificationReply_t *mpi_reply;
10674 	u16 event;
10675 	u16 sz;
10676 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10677 
	/* events turned off due to PCI error recovery */
10679 	if (ioc->pci_error_recovery)
10680 		return 1;
10681 
10682 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10683 
10684 	if (unlikely(!mpi_reply)) {
10685 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10686 			__FILE__, __LINE__, __func__);
10687 		return 1;
10688 	}
10689 
10690 	event = le16_to_cpu(mpi_reply->Event);
10691 
10692 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10693 		mpt3sas_trigger_event(ioc, event, 0);
10694 
10695 	switch (event) {
10696 	/* handle these */
10697 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10698 	{
10699 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10700 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
10701 		    mpi_reply->EventData;
10702 
10703 		if (baen_data->Primitive !=
10704 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10705 			return 1;
10706 
10707 		if (ioc->broadcast_aen_busy) {
10708 			ioc->broadcast_aen_pending++;
10709 			return 1;
10710 		} else
10711 			ioc->broadcast_aen_busy = 1;
10712 		break;
10713 	}
10714 
10715 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10716 		_scsih_check_topo_delete_events(ioc,
10717 		    (Mpi2EventDataSasTopologyChangeList_t *)
10718 		    mpi_reply->EventData);
10719 		break;
10720 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
10722 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
10723 		    mpi_reply->EventData);
10724 		break;
10725 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10726 		_scsih_check_ir_config_unhide_events(ioc,
10727 		    (Mpi2EventDataIrConfigChangeList_t *)
10728 		    mpi_reply->EventData);
10729 		break;
10730 	case MPI2_EVENT_IR_VOLUME:
10731 		_scsih_check_volume_delete_events(ioc,
10732 		    (Mpi2EventDataIrVolume_t *)
10733 		    mpi_reply->EventData);
10734 		break;
10735 	case MPI2_EVENT_LOG_ENTRY_ADDED:
10736 	{
10737 		Mpi2EventDataLogEntryAdded_t *log_entry;
10738 		u32 *log_code;
10739 
10740 		if (!ioc->is_warpdrive)
10741 			break;
10742 
10743 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
10744 		    mpi_reply->EventData;
10745 		log_code = (u32 *)log_entry->LogData;
10746 
10747 		if (le16_to_cpu(log_entry->LogEntryQualifier)
10748 		    != MPT2_WARPDRIVE_LOGENTRY)
10749 			break;
10750 
10751 		switch (le32_to_cpu(*log_code)) {
10752 		case MPT2_WARPDRIVE_LC_SSDT:
10753 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10754 			break;
10755 		case MPT2_WARPDRIVE_LC_SSDLW:
10756 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10757 			break;
10758 		case MPT2_WARPDRIVE_LC_SSDLF:
10759 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10760 			break;
10761 		case MPT2_WARPDRIVE_LC_BRMF:
10762 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10763 			break;
10764 		}
10765 
10766 		break;
10767 	}
10768 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10769 		_scsih_sas_device_status_change_event(ioc,
10770 		    (Mpi2EventDataSasDeviceStatusChange_t *)
10771 		    mpi_reply->EventData);
10772 		break;
10773 	case MPI2_EVENT_IR_OPERATION_STATUS:
10774 	case MPI2_EVENT_SAS_DISCOVERY:
10775 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10776 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10777 	case MPI2_EVENT_IR_PHYSICAL_DISK:
10778 	case MPI2_EVENT_PCIE_ENUMERATION:
10779 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10780 		break;
10781 
10782 	case MPI2_EVENT_TEMP_THRESHOLD:
10783 		_scsih_temp_threshold_events(ioc,
10784 			(Mpi2EventDataTemperature_t *)
10785 			mpi_reply->EventData);
10786 		break;
10787 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10788 		ActiveCableEventData =
10789 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10790 		switch (ActiveCableEventData->ReasonCode) {
10791 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10792 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10793 				   ActiveCableEventData->ReceptacleID);
10794 			pr_notice("cannot be powered and devices connected\n");
10795 			pr_notice("to this active cable will not be seen\n");
10796 			pr_notice("This active cable requires %d mW of power\n",
10797 			     ActiveCableEventData->ActiveCablePowerRequirement);
10798 			break;
10799 
10800 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10801 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10802 				   ActiveCableEventData->ReceptacleID);
10803 			pr_notice(
10804 			    "is not running at optimal speed(12 Gb/s rate)\n");
10805 			break;
10806 		}
10807 
10808 		break;
10809 
10810 	default: /* ignore the rest */
10811 		return 1;
10812 	}
10813 
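	/* EventDataLength is in 32-bit words; convert to bytes for the copy */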
10814 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
10815 	fw_event = alloc_fw_event_work(sz);
10816 	if (!fw_event) {
10817 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10818 			__FILE__, __LINE__, __func__);
10819 		return 1;
10820 	}
10821 
10822 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
10823 	fw_event->ioc = ioc;
10824 	fw_event->VF_ID = mpi_reply->VF_ID;
10825 	fw_event->VP_ID = mpi_reply->VP_ID;
10826 	fw_event->event = event;
10827 	_scsih_fw_event_add(ioc, fw_event);
10828 	fw_event_work_put(fw_event);
10829 	return 1;
10830 }
10831 
10832 /**
10833  * _scsih_expander_node_remove - removing expander device from list.
10834  * @ioc: per adapter object
10835  * @sas_expander: the sas_device object
10836  *
10837  * Removing object and freeing associated memory from the
10838  * ioc->sas_expander_list.
10839  */
10840 static void
10841 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
10842 	struct _sas_node *sas_expander)
10843 {
10844 	struct _sas_port *mpt3sas_port, *next;
10845 	unsigned long flags;
10846 
10847 	/* remove sibling ports attached to this expander */
10848 	list_for_each_entry_safe(mpt3sas_port, next,
10849 	   &sas_expander->sas_port_list, port_list) {
10850 		if (ioc->shost_recovery)
10851 			return;
10852 		if (mpt3sas_port->remote_identify.device_type ==
10853 		    SAS_END_DEVICE)
10854 			mpt3sas_device_remove_by_sas_address(ioc,
10855 			    mpt3sas_port->remote_identify.sas_address,
10856 			    mpt3sas_port->hba_port);
10857 		else if (mpt3sas_port->remote_identify.device_type ==
10858 		    SAS_EDGE_EXPANDER_DEVICE ||
10859 		    mpt3sas_port->remote_identify.device_type ==
10860 		    SAS_FANOUT_EXPANDER_DEVICE)
10861 			mpt3sas_expander_remove(ioc,
10862 			    mpt3sas_port->remote_identify.sas_address,
10863 			    mpt3sas_port->hba_port);
10864 	}
10865 
10866 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
10867 	    sas_expander->sas_address_parent, sas_expander->port);
10868 
10869 	ioc_info(ioc,
10870 	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10871 	    sas_expander->handle, (unsigned long long)
10872 	    sas_expander->sas_address,
10873 	    sas_expander->port->port_id);
10874 
10875 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
10876 	list_del(&sas_expander->list);
10877 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10878 
10879 	kfree(sas_expander->phy);
10880 	kfree(sas_expander);
10881 }
10882 
10883 /**
10884  * _scsih_nvme_shutdown - NVMe shutdown notification
10885  * @ioc: per adapter object
10886  *
 * Send an IO Unit Control request with the shutdown operation code to alert
 * the IOC that the host system is shutting down, so that the IOC can issue an
 * NVMe shutdown to the NVMe drives attached to it.
10890  */
10891 static void
10892 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
10893 {
10894 	Mpi26IoUnitControlRequest_t *mpi_request;
10895 	Mpi26IoUnitControlReply_t *mpi_reply;
10896 	u16 smid;
10897 
10898 	/* are there any NVMe devices ? */
10899 	if (list_empty(&ioc->pcie_device_list))
10900 		return;
10901 
10902 	mutex_lock(&ioc->scsih_cmds.mutex);
10903 
10904 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10905 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10906 		goto out;
10907 	}
10908 
10909 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10910 
10911 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10912 	if (!smid) {
10913 		ioc_err(ioc,
10914 		    "%s: failed obtaining a smid\n", __func__);
10915 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10916 		goto out;
10917 	}
10918 
10919 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10920 	ioc->scsih_cmds.smid = smid;
10921 	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
10922 	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
10923 	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
10924 
10925 	init_completion(&ioc->scsih_cmds.done);
10926 	ioc->put_smid_default(ioc, smid);
10927 	/* Wait for max_shutdown_latency seconds */
10928 	ioc_info(ioc,
10929 		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
10930 		ioc->max_shutdown_latency);
10931 	wait_for_completion_timeout(&ioc->scsih_cmds.done,
10932 			ioc->max_shutdown_latency*HZ);
10933 
10934 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10935 		ioc_err(ioc, "%s: timeout\n", __func__);
10936 		goto out;
10937 	}
10938 
10939 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10940 		mpi_reply = ioc->scsih_cmds.reply;
10941 		ioc_info(ioc, "Io Unit Control shutdown (complete):"
10942 			"ioc_status(0x%04x), loginfo(0x%08x)\n",
10943 			le16_to_cpu(mpi_reply->IOCStatus),
10944 			le32_to_cpu(mpi_reply->IOCLogInfo));
10945 	}
10946  out:
10947 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10948 	mutex_unlock(&ioc->scsih_cmds.mutex);
10949 }
10950 
10951 
10952 /**
10953  * _scsih_ir_shutdown - IR shutdown notification
10954  * @ioc: per adapter object
10955  *
 * Send a RAID Action request to alert the Integrated RAID subsystem of the
 * IOC that the host system is shutting down.
10958  */
10959 static void
10960 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
10961 {
10962 	Mpi2RaidActionRequest_t *mpi_request;
10963 	Mpi2RaidActionReply_t *mpi_reply;
10964 	u16 smid;
10965 
10966 	/* is IR firmware build loaded ? */
10967 	if (!ioc->ir_firmware)
10968 		return;
10969 
10970 	/* are there any volumes ? */
10971 	if (list_empty(&ioc->raid_device_list))
10972 		return;
10973 
10974 	mutex_lock(&ioc->scsih_cmds.mutex);
10975 
10976 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10977 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10978 		goto out;
10979 	}
10980 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10981 
10982 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10983 	if (!smid) {
10984 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
10985 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10986 		goto out;
10987 	}
10988 
10989 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10990 	ioc->scsih_cmds.smid = smid;
10991 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
10992 
10993 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
10994 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
10995 
10996 	if (!ioc->hide_ir_msg)
10997 		ioc_info(ioc, "IR shutdown (sending)\n");
10998 	init_completion(&ioc->scsih_cmds.done);
10999 	ioc->put_smid_default(ioc, smid);
11000 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11001 
11002 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11003 		ioc_err(ioc, "%s: timeout\n", __func__);
11004 		goto out;
11005 	}
11006 
11007 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11008 		mpi_reply = ioc->scsih_cmds.reply;
11009 		if (!ioc->hide_ir_msg)
11010 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11011 				 le16_to_cpu(mpi_reply->IOCStatus),
11012 				 le32_to_cpu(mpi_reply->IOCLogInfo));
11013 	}
11014 
11015  out:
11016 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11017 	mutex_unlock(&ioc->scsih_cmds.mutex);
11018 }
11019 
11020 /**
11021  * _scsih_get_shost_and_ioc - get shost and ioc
11022  *			and verify whether they are NULL or not
11023  * @pdev: PCI device struct
11024  * @shost: address of scsi host pointer
11025  * @ioc: address of HBA adapter pointer
11026  *
 * Return: zero if *shost and *ioc are not NULL; otherwise an error number.
11028  */
11029 static int
11030 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11031 	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11032 {
11033 	*shost = pci_get_drvdata(pdev);
11034 	if (*shost == NULL) {
11035 		dev_err(&pdev->dev, "pdev's driver data is null\n");
11036 		return -ENXIO;
11037 	}
11038 
11039 	*ioc = shost_priv(*shost);
11040 	if (*ioc == NULL) {
11041 		dev_err(&pdev->dev, "shost's private data is null\n");
11042 		return -ENXIO;
11043 	}
11044 
11045 	return 0;
11046 }
11047 
11048 /**
 * scsih_remove - detach and remove the scsi host
11050  * @pdev: PCI device struct
11051  *
11052  * Routine called when unloading the driver.
11053  */
11054 static void scsih_remove(struct pci_dev *pdev)
11055 {
11056 	struct Scsi_Host *shost;
11057 	struct MPT3SAS_ADAPTER *ioc;
11058 	struct _sas_port *mpt3sas_port, *next_port;
11059 	struct _raid_device *raid_device, *next;
11060 	struct MPT3SAS_TARGET *sas_target_priv_data;
11061 	struct _pcie_device *pcie_device, *pcienext;
11062 	struct workqueue_struct	*wq;
11063 	unsigned long flags;
11064 	Mpi2ConfigReply_t mpi_reply;
11065 	struct hba_port *port, *port_next;
11066 
11067 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11068 		return;
11069 
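	/*
	 * Flag the host as being removed so that event handling and I/O
	 * paths bail out early.
	 */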
11070 	ioc->remove_host = 1;
11071 
11072 	if (!pci_device_is_present(pdev))
11073 		_scsih_flush_running_cmds(ioc);
11074 
11075 	_scsih_fw_event_cleanup_queue(ioc);
11076 
11077 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
11078 	wq = ioc->firmware_event_thread;
11079 	ioc->firmware_event_thread = NULL;
11080 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11081 	if (wq)
11082 		destroy_workqueue(wq);
11083 	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
11085 	 * current modified changes on ioc page1 won't take effect.
11086 	 */
11087 	if (ioc->is_aero_ioc)
11088 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11089 				&ioc->ioc_pg1_copy);
11090 	/* release all the volumes */
11091 	_scsih_ir_shutdown(ioc);
11092 	mpt3sas_destroy_debugfs(ioc);
11093 	sas_remove_host(shost);
11094 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11095 	    list) {
11096 		if (raid_device->starget) {
11097 			sas_target_priv_data =
11098 			    raid_device->starget->hostdata;
11099 			sas_target_priv_data->deleted = 1;
11100 			scsi_remove_target(&raid_device->starget->dev);
11101 		}
11102 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11103 			 raid_device->handle, (u64)raid_device->wwid);
11104 		_scsih_raid_device_remove(ioc, raid_device);
11105 	}
11106 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11107 		list) {
11108 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11109 		list_del_init(&pcie_device->list);
11110 		pcie_device_put(pcie_device);
11111 	}
11112 
11113 	/* free ports attached to the sas_host */
11114 	list_for_each_entry_safe(mpt3sas_port, next_port,
11115 	   &ioc->sas_hba.sas_port_list, port_list) {
11116 		if (mpt3sas_port->remote_identify.device_type ==
11117 		    SAS_END_DEVICE)
11118 			mpt3sas_device_remove_by_sas_address(ioc,
11119 			    mpt3sas_port->remote_identify.sas_address,
11120 			    mpt3sas_port->hba_port);
11121 		else if (mpt3sas_port->remote_identify.device_type ==
11122 		    SAS_EDGE_EXPANDER_DEVICE ||
11123 		    mpt3sas_port->remote_identify.device_type ==
11124 		    SAS_FANOUT_EXPANDER_DEVICE)
11125 			mpt3sas_expander_remove(ioc,
11126 			    mpt3sas_port->remote_identify.sas_address,
11127 			    mpt3sas_port->hba_port);
11128 	}
11129 
11130 	list_for_each_entry_safe(port, port_next,
11131 	    &ioc->port_table_list, list) {
11132 		list_del(&port->list);
11133 		kfree(port);
11134 	}
11135 
11136 	/* free phys attached to the sas_host */
11137 	if (ioc->sas_hba.num_phys) {
11138 		kfree(ioc->sas_hba.phy);
11139 		ioc->sas_hba.phy = NULL;
11140 		ioc->sas_hba.num_phys = 0;
11141 	}
11142 
11143 	mpt3sas_base_detach(ioc);
11144 	spin_lock(&gioc_lock);
11145 	list_del(&ioc->list);
11146 	spin_unlock(&gioc_lock);
11147 	scsi_host_put(shost);
11148 }
11149 
11150 /**
 * scsih_shutdown - routine called during system shutdown
11152  * @pdev: PCI device struct
11153  */
11154 static void
11155 scsih_shutdown(struct pci_dev *pdev)
11156 {
11157 	struct Scsi_Host *shost;
11158 	struct MPT3SAS_ADAPTER *ioc;
11159 	struct workqueue_struct	*wq;
11160 	unsigned long flags;
11161 	Mpi2ConfigReply_t mpi_reply;
11162 
11163 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11164 		return;
11165 
11166 	ioc->remove_host = 1;
11167 
11168 	if (!pci_device_is_present(pdev))
11169 		_scsih_flush_running_cmds(ioc);
11170 
11171 	_scsih_fw_event_cleanup_queue(ioc);
11172 
11173 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
11174 	wq = ioc->firmware_event_thread;
11175 	ioc->firmware_event_thread = NULL;
11176 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11177 	if (wq)
11178 		destroy_workqueue(wq);
11179 	/*
11180 	 * Copy back the unmodified ioc page1 so that on next driver load,
11181 	 * current modified changes on ioc page1 won't take effect.
11182 	 */
11183 	if (ioc->is_aero_ioc)
11184 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11185 				&ioc->ioc_pg1_copy);
11186 
11187 	_scsih_ir_shutdown(ioc);
11188 	_scsih_nvme_shutdown(ioc);
11189 	mpt3sas_base_detach(ioc);
11190 }
11191 
11192 
11193 /**
11194  * _scsih_probe_boot_devices - reports 1st device
11195  * @ioc: per adapter object
11196  *
 * If specified in bios page 2, this routine reports the 1st
 * device to scsi-ml or the sas transport for persistent boot
 * device purposes.  Please refer to _scsih_determine_boot_device().
11200  */
11201 static void
11202 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11203 {
11204 	u32 channel;
11205 	void *device;
11206 	struct _sas_device *sas_device;
11207 	struct _raid_device *raid_device;
11208 	struct _pcie_device *pcie_device;
11209 	u16 handle;
11210 	u64 sas_address_parent;
11211 	u64 sas_address;
11212 	unsigned long flags;
11213 	int rc;
11214 	int tid;
11215 	struct hba_port *port;
11216 
11217 	 /* no Bios, return immediately */
11218 	if (!ioc->bios_pg3.BiosVersion)
11219 		return;
11220 
11221 	device = NULL;
11222 	if (ioc->req_boot_device.device) {
11223 		device =  ioc->req_boot_device.device;
11224 		channel = ioc->req_boot_device.channel;
11225 	} else if (ioc->req_alt_boot_device.device) {
11226 		device =  ioc->req_alt_boot_device.device;
11227 		channel = ioc->req_alt_boot_device.channel;
11228 	} else if (ioc->current_boot_device.device) {
11229 		device =  ioc->current_boot_device.device;
11230 		channel = ioc->current_boot_device.channel;
11231 	}
11232 
11233 	if (!device)
11234 		return;
11235 
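	/* report the boot device on its channel: RAID, PCIe/NVMe or SAS */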
11236 	if (channel == RAID_CHANNEL) {
11237 		raid_device = device;
11238 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11239 		    raid_device->id, 0);
11240 		if (rc)
11241 			_scsih_raid_device_remove(ioc, raid_device);
11242 	} else if (channel == PCIE_CHANNEL) {
11243 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11244 		pcie_device = device;
11245 		tid = pcie_device->id;
11246 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11247 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11248 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11249 		if (rc)
11250 			_scsih_pcie_device_remove(ioc, pcie_device);
11251 	} else {
11252 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
11253 		sas_device = device;
11254 		handle = sas_device->handle;
11255 		sas_address_parent = sas_device->sas_address_parent;
11256 		sas_address = sas_device->sas_address;
11257 		port = sas_device->port;
11258 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
11259 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11260 
11261 		if (ioc->hide_drives)
11262 			return;
11263 
11264 		if (!port)
11265 			return;
11266 
11267 		if (!mpt3sas_transport_port_add(ioc, handle,
11268 		    sas_address_parent, port)) {
11269 			_scsih_sas_device_remove(ioc, sas_device);
11270 		} else if (!sas_device->starget) {
11271 			if (!ioc->is_driver_loading) {
11272 				mpt3sas_transport_port_remove(ioc,
11273 				    sas_address,
11274 				    sas_address_parent, port);
11275 				_scsih_sas_device_remove(ioc, sas_device);
11276 			}
11277 		}
11278 	}
11279 }
11280 
11281 /**
11282  * _scsih_probe_raid - reporting raid volumes to scsi-ml
11283  * @ioc: per adapter object
11284  *
11285  * Called during initial loading of the driver.
11286  */
11287 static void
11288 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11289 {
11290 	struct _raid_device *raid_device, *raid_next;
11291 	int rc;
11292 
11293 	list_for_each_entry_safe(raid_device, raid_next,
11294 	    &ioc->raid_device_list, list) {
11295 		if (raid_device->starget)
11296 			continue;
11297 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11298 		    raid_device->id, 0);
11299 		if (rc)
11300 			_scsih_raid_device_remove(ioc, raid_device);
11301 	}
11302 }
11303 
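/**
 * get_next_sas_device - Get the next sas device
 * @ioc: per adapter object
 *
 * Get the next sas device from sas_device_init_list list.
 *
 * Return: sas device structure if sas_device_init_list list is not empty
 * otherwise returns NULL
 */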
11304 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11305 {
11306 	struct _sas_device *sas_device = NULL;
11307 	unsigned long flags;
11308 
11309 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
11310 	if (!list_empty(&ioc->sas_device_init_list)) {
11311 		sas_device = list_first_entry(&ioc->sas_device_init_list,
11312 				struct _sas_device, list);
11313 		sas_device_get(sas_device);
11314 	}
11315 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11316 
11317 	return sas_device;
11318 }
11319 
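/**
 * sas_device_make_active - Add sas device to sas_device_list list
 * @ioc: per adapter object
 * @sas_device: sas device object
 *
 * Add the sas device, which has been registered with the SAS transport
 * layer, to the sas_device_list list.
 */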
11320 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11321 		struct _sas_device *sas_device)
11322 {
11323 	unsigned long flags;
11324 
11325 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
11326 
11327 	/*
11328 	 * Since we dropped the lock during the call to port_add(), we need to
11329 	 * be careful here that somebody else didn't move or delete this item
11330 	 * while we were busy with other things.
11331 	 *
11332 	 * If it was on the list, we need a put() for the reference the list
11333 	 * had. Either way, we need a get() for the destination list.
11334 	 */
11335 	if (!list_empty(&sas_device->list)) {
11336 		list_del_init(&sas_device->list);
11337 		sas_device_put(sas_device);
11338 	}
11339 
11340 	sas_device_get(sas_device);
11341 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
11342 
11343 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11344 }
11345 
11346 /**
11347  * _scsih_probe_sas - reporting sas devices to sas transport
11348  * @ioc: per adapter object
11349  *
11350  * Called during initial loading of the driver.
11351  */
11352 static void
11353 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11354 {
11355 	struct _sas_device *sas_device;
11356 
11357 	if (ioc->hide_drives)
11358 		return;
11359 
11360 	while ((sas_device = get_next_sas_device(ioc))) {
11361 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11362 		    sas_device->sas_address_parent, sas_device->port)) {
11363 			_scsih_sas_device_remove(ioc, sas_device);
11364 			sas_device_put(sas_device);
11365 			continue;
11366 		} else if (!sas_device->starget) {
11367 			/*
			 * When async scanning is enabled, it's not possible to
11369 			 * remove devices while scanning is turned on due to an
11370 			 * oops in scsi_sysfs_add_sdev()->add_device()->
11371 			 * sysfs_addrm_start()
11372 			 */
11373 			if (!ioc->is_driver_loading) {
11374 				mpt3sas_transport_port_remove(ioc,
11375 				    sas_device->sas_address,
11376 				    sas_device->sas_address_parent,
11377 				    sas_device->port);
11378 				_scsih_sas_device_remove(ioc, sas_device);
11379 				sas_device_put(sas_device);
11380 				continue;
11381 			}
11382 		}
11383 		sas_device_make_active(ioc, sas_device);
11384 		sas_device_put(sas_device);
11385 	}
11386 }
11387 
11388 /**
11389  * get_next_pcie_device - Get the next pcie device
11390  * @ioc: per adapter object
11391  *
11392  * Get the next pcie device from pcie_device_init_list list.
11393  *
11394  * Return: pcie device structure if pcie_device_init_list list is not empty
11395  * otherwise returns NULL
11396  */
11397 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11398 {
11399 	struct _pcie_device *pcie_device = NULL;
11400 	unsigned long flags;
11401 
11402 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11403 	if (!list_empty(&ioc->pcie_device_init_list)) {
11404 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11405 				struct _pcie_device, list);
11406 		pcie_device_get(pcie_device);
11407 	}
11408 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11409 
11410 	return pcie_device;
11411 }
11412 
11413 /**
11414  * pcie_device_make_active - Add pcie device to pcie_device_list list
11415  * @ioc: per adapter object
11416  * @pcie_device: pcie device object
11417  *
 * Add the pcie device, which has been registered with the SCSI Transport
 * Layer, to the pcie_device_list list.
11420  */
11421 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11422 		struct _pcie_device *pcie_device)
11423 {
11424 	unsigned long flags;
11425 
11426 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11427 
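	/*
	 * If the device is still on a list, drop that list's reference
	 * before taking a new one for pcie_device_list.
	 */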
11428 	if (!list_empty(&pcie_device->list)) {
11429 		list_del_init(&pcie_device->list);
11430 		pcie_device_put(pcie_device);
11431 	}
11432 	pcie_device_get(pcie_device);
11433 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11434 
11435 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11436 }
11437 
11438 /**
11439  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11440  * @ioc: per adapter object
11441  *
11442  * Called during initial loading of the driver.
11443  */
11444 static void
11445 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11446 {
11447 	struct _pcie_device *pcie_device;
11448 	int rc;
11449 
11450 	/* PCIe Device List */
11451 	while ((pcie_device = get_next_pcie_device(ioc))) {
11452 		if (pcie_device->starget) {
11453 			pcie_device_put(pcie_device);
11454 			continue;
11455 		}
11456 		if (pcie_device->access_status ==
11457 		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11458 			pcie_device_make_active(ioc, pcie_device);
11459 			pcie_device_put(pcie_device);
11460 			continue;
11461 		}
11462 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11463 			pcie_device->id, 0);
11464 		if (rc) {
11465 			_scsih_pcie_device_remove(ioc, pcie_device);
11466 			pcie_device_put(pcie_device);
11467 			continue;
11468 		} else if (!pcie_device->starget) {
11469 			/*
			 * When async scanning is enabled, it's not possible to
11471 			 * remove devices while scanning is turned on due to an
11472 			 * oops in scsi_sysfs_add_sdev()->add_device()->
11473 			 * sysfs_addrm_start()
11474 			 */
11475 			if (!ioc->is_driver_loading) {
11476 			/* TODO-- Need to find out whether this condition will
11477 			 * occur or not
11478 			 */
11479 				_scsih_pcie_device_remove(ioc, pcie_device);
11480 				pcie_device_put(pcie_device);
11481 				continue;
11482 			}
11483 		}
11484 		pcie_device_make_active(ioc, pcie_device);
11485 		pcie_device_put(pcie_device);
11486 	}
11487 }
11488 
11489 /**
11490  * _scsih_probe_devices - probing for devices
11491  * @ioc: per adapter object
11492  *
11493  * Called during initial loading of the driver.
11494  */
11495 static void
11496 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11497 {
11498 	u16 volume_mapping_flags;
11499 
11500 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11501 		return;  /* return when IOC doesn't support initiator mode */
11502 
11503 	_scsih_probe_boot_devices(ioc);
11504 
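	/*
	 * With IR firmware, the volume mapping mode from IOC Page 8 decides
	 * whether volumes or bare SAS devices are reported first.
	 */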
11505 	if (ioc->ir_firmware) {
11506 		volume_mapping_flags =
11507 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11508 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11509 		if (volume_mapping_flags ==
11510 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11511 			_scsih_probe_raid(ioc);
11512 			_scsih_probe_sas(ioc);
11513 		} else {
11514 			_scsih_probe_sas(ioc);
11515 			_scsih_probe_raid(ioc);
11516 		}
11517 	} else {
11518 		_scsih_probe_sas(ioc);
11519 		_scsih_probe_pcie(ioc);
11520 	}
11521 }
11522 
11523 /**
11524  * scsih_scan_start - scsi lld callback for .scan_start
11525  * @shost: SCSI host pointer
11526  *
11527  * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implementation, we will kick off
11529  * firmware discovery.
11530  */
11531 static void
11532 scsih_scan_start(struct Scsi_Host *shost)
11533 {
11534 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11535 	int rc;
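
	/*
	 * Honor the diag_buffer_enable module parameter if set; otherwise
	 * enable the trace buffer when Manufacturing Page 11 advertises a
	 * non-zero host trace buffer size.
	 */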
11536 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11537 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11538 	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11539 		mpt3sas_enable_diag_buffer(ioc, 1);
11540 
11541 	if (disable_discovery > 0)
11542 		return;
11543 
11544 	ioc->start_scan = 1;
11545 	rc = mpt3sas_port_enable(ioc);
11546 
11547 	if (rc != 0)
11548 		ioc_info(ioc, "port enable: FAILED\n");
11549 }
11550 
11551 /**
11552  * scsih_scan_finished - scsi lld callback for .scan_finished
11553  * @shost: SCSI host pointer
11554  * @time: elapsed time of the scan in jiffies
11555  *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11558  * we wait for firmware discovery to complete, then return 1.
11559  */
11560 static int
11561 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11562 {
11563 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11564 
11565 	if (disable_discovery > 0) {
11566 		ioc->is_driver_loading = 0;
11567 		ioc->wait_for_discovery_to_complete = 0;
11568 		return 1;
11569 	}
11570 
11571 	if (time >= (300 * HZ)) {
11572 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11573 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11574 		ioc->is_driver_loading = 0;
11575 		return 1;
11576 	}
11577 
11578 	if (ioc->start_scan)
11579 		return 0;
11580 
11581 	if (ioc->start_scan_failed) {
11582 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11583 			 ioc->start_scan_failed);
11584 		ioc->is_driver_loading = 0;
11585 		ioc->wait_for_discovery_to_complete = 0;
11586 		ioc->remove_host = 1;
11587 		return 1;
11588 	}
11589 
11590 	ioc_info(ioc, "port enable: SUCCESS\n");
11591 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11592 
11593 	if (ioc->wait_for_discovery_to_complete) {
11594 		ioc->wait_for_discovery_to_complete = 0;
11595 		_scsih_probe_devices(ioc);
11596 	}
11597 	mpt3sas_base_start_watchdog(ioc);
11598 	ioc->is_driver_loading = 0;
11599 	return 1;
11600 }
11601 
11602 /* shost template for SAS 2.0 HBA devices */
11603 static struct scsi_host_template mpt2sas_driver_template = {
11604 	.module				= THIS_MODULE,
11605 	.name				= "Fusion MPT SAS Host",
11606 	.proc_name			= MPT2SAS_DRIVER_NAME,
11607 	.queuecommand			= scsih_qcmd,
11608 	.target_alloc			= scsih_target_alloc,
11609 	.slave_alloc			= scsih_slave_alloc,
11610 	.slave_configure		= scsih_slave_configure,
11611 	.target_destroy			= scsih_target_destroy,
11612 	.slave_destroy			= scsih_slave_destroy,
11613 	.scan_finished			= scsih_scan_finished,
11614 	.scan_start			= scsih_scan_start,
11615 	.change_queue_depth		= scsih_change_queue_depth,
11616 	.eh_abort_handler		= scsih_abort,
11617 	.eh_device_reset_handler	= scsih_dev_reset,
11618 	.eh_target_reset_handler	= scsih_target_reset,
11619 	.eh_host_reset_handler		= scsih_host_reset,
11620 	.bios_param			= scsih_bios_param,
11621 	.can_queue			= 1,
11622 	.this_id			= -1,
11623 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
11624 	.max_sectors			= 32767,
11625 	.cmd_per_lun			= 7,
11626 	.shost_attrs			= mpt3sas_host_attrs,
11627 	.sdev_attrs			= mpt3sas_dev_attrs,
11628 	.track_queue_depth		= 1,
11629 	.cmd_size			= sizeof(struct scsiio_tracker),
11630 };
11631 
11632 /* raid transport support for SAS 2.0 HBA devices */
11633 static struct raid_function_template mpt2sas_raid_functions = {
11634 	.cookie		= &mpt2sas_driver_template,
11635 	.is_raid	= scsih_is_raid,
11636 	.get_resync	= scsih_get_resync,
11637 	.get_state	= scsih_get_state,
11638 };
11639 
11640 /* shost template for SAS 3.0 HBA devices */
11641 static struct scsi_host_template mpt3sas_driver_template = {
11642 	.module				= THIS_MODULE,
11643 	.name				= "Fusion MPT SAS Host",
11644 	.proc_name			= MPT3SAS_DRIVER_NAME,
11645 	.queuecommand			= scsih_qcmd,
11646 	.target_alloc			= scsih_target_alloc,
11647 	.slave_alloc			= scsih_slave_alloc,
11648 	.slave_configure		= scsih_slave_configure,
11649 	.target_destroy			= scsih_target_destroy,
11650 	.slave_destroy			= scsih_slave_destroy,
11651 	.scan_finished			= scsih_scan_finished,
11652 	.scan_start			= scsih_scan_start,
11653 	.change_queue_depth		= scsih_change_queue_depth,
11654 	.eh_abort_handler		= scsih_abort,
11655 	.eh_device_reset_handler	= scsih_dev_reset,
11656 	.eh_target_reset_handler	= scsih_target_reset,
11657 	.eh_host_reset_handler		= scsih_host_reset,
11658 	.bios_param			= scsih_bios_param,
11659 	.can_queue			= 1,
11660 	.this_id			= -1,
11661 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
11662 	.max_sectors			= 32767,
11663 	.max_segment_size		= 0xffffffff,
11664 	.cmd_per_lun			= 7,
11665 	.shost_attrs			= mpt3sas_host_attrs,
11666 	.sdev_attrs			= mpt3sas_dev_attrs,
11667 	.track_queue_depth		= 1,
11668 	.cmd_size			= sizeof(struct scsiio_tracker),
11669 };
11670 
11671 /* raid transport support for SAS 3.0 HBA devices */
11672 static struct raid_function_template mpt3sas_raid_functions = {
11673 	.cookie		= &mpt3sas_driver_template,
11674 	.is_raid	= scsih_is_raid,
11675 	.get_resync	= scsih_get_resync,
11676 	.get_state	= scsih_get_state,
11677 };
11678 
11679 /**
11680  * _scsih_determine_hba_mpi_version - determine which MPI version class
11681  *					this device belongs to.
11682  * @pdev: PCI device struct
11683  *
11684  * Return: MPI2_VERSION for SAS 2.0 HBA devices,
11685  *	MPI25_VERSION for SAS 3.0 HBA devices, and
11686  *	MPI26_VERSION for Cutlass, Intruder, and later SAS 3.0/3.5 HBA devices
11687  */
11688 static u16
11689 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
11690 {
11692 	switch (pdev->device) {
11693 	case MPI2_MFGPAGE_DEVID_SSS6200:
11694 	case MPI2_MFGPAGE_DEVID_SAS2004:
11695 	case MPI2_MFGPAGE_DEVID_SAS2008:
11696 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
11697 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
11698 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
11699 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
11700 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
11701 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
11702 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
11703 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
11704 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
11705 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
11706 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
11707 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
11708 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
11709 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
11710 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11711 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11712 		return MPI2_VERSION;
11713 	case MPI25_MFGPAGE_DEVID_SAS3004:
11714 	case MPI25_MFGPAGE_DEVID_SAS3008:
11715 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
11716 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
11717 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
11718 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
11719 		return MPI25_VERSION;
11720 	case MPI26_MFGPAGE_DEVID_SAS3216:
11721 	case MPI26_MFGPAGE_DEVID_SAS3224:
11722 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
11723 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
11724 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
11725 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
11726 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
11727 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
11728 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
11729 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
11730 	case MPI26_MFGPAGE_DEVID_SAS3508:
11731 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
11732 	case MPI26_MFGPAGE_DEVID_SAS3408:
11733 	case MPI26_MFGPAGE_DEVID_SAS3516:
11734 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
11735 	case MPI26_MFGPAGE_DEVID_SAS3416:
11736 	case MPI26_MFGPAGE_DEVID_SAS3616:
11737 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11738 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11739 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11740 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11741 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11742 	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11743 	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11744 	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11745 	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11746 		return MPI26_VERSION;
11747 	}
11748 	return 0;
11749 }
11750 
11751 /**
11752  * _scsih_probe - attach and add scsi host
11753  * @pdev: PCI device struct
11754  * @id: pci device id
11755  *
11756  * Return: 0 success, anything else error.
11757  */
11758 static int
11759 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
11760 {
11761 	struct MPT3SAS_ADAPTER *ioc;
11762 	struct Scsi_Host *shost = NULL;
11763 	int rv;
11764 	u16 hba_mpi_version;
11765 
11766 	/* Determine in which MPI version class this pci device belongs */
11767 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
11768 	if (hba_mpi_version == 0)
11769 		return -ENODEV;
11770 
11771 	/* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
11772 	 * for other generation HBAs return -ENODEV.
11773 	 */
11774 	if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
11775 		return -ENODEV;
11776 
11777 	/* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
11778 	 * for other generation HBAs return -ENODEV.
11779 	 */
11780 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
11781 		|| hba_mpi_version == MPI26_VERSION)))
11782 		return -ENODEV;
11783 
11784 	switch (hba_mpi_version) {
11785 	case MPI2_VERSION:
11786 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
11787 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
11788 		/* Use mpt2sas driver host template for SAS 2.0 HBAs */
11789 		shost = scsi_host_alloc(&mpt2sas_driver_template,
11790 		  sizeof(struct MPT3SAS_ADAPTER));
11791 		if (!shost)
11792 			return -ENODEV;
11793 		ioc = shost_priv(shost);
11794 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11795 		ioc->hba_mpi_version_belonged = hba_mpi_version;
11796 		ioc->id = mpt2_ids++;
11797 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
11798 		switch (pdev->device) {
11799 		case MPI2_MFGPAGE_DEVID_SSS6200:
11800 			ioc->is_warpdrive = 1;
11801 			ioc->hide_ir_msg = 1;
11802 			break;
11803 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
11804 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
11805 			ioc->is_mcpu_endpoint = 1;
11806 			break;
11807 		default:
11808 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
11809 			break;
11810 		}
11811 
11812 		if (multipath_on_hba == -1 || multipath_on_hba == 0)
11813 			ioc->multipath_on_hba = 0;
11814 		else
11815 			ioc->multipath_on_hba = 1;
11816 
11817 		break;
11818 	case MPI25_VERSION:
11819 	case MPI26_VERSION:
11820 		/* Use mpt3sas driver host template for SAS 3.0 HBAs */
11821 		shost = scsi_host_alloc(&mpt3sas_driver_template,
11822 		  sizeof(struct MPT3SAS_ADAPTER));
11823 		if (!shost)
11824 			return -ENODEV;
11825 		ioc = shost_priv(shost);
11826 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
11827 		ioc->hba_mpi_version_belonged = hba_mpi_version;
11828 		ioc->id = mpt3_ids++;
11829 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
11830 		switch (pdev->device) {
11831 		case MPI26_MFGPAGE_DEVID_SAS3508:
11832 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
11833 		case MPI26_MFGPAGE_DEVID_SAS3408:
11834 		case MPI26_MFGPAGE_DEVID_SAS3516:
11835 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
11836 		case MPI26_MFGPAGE_DEVID_SAS3416:
11837 		case MPI26_MFGPAGE_DEVID_SAS3616:
11838 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
11839 			ioc->is_gen35_ioc = 1;
11840 			break;
11841 		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
11842 		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
11843 			dev_err(&pdev->dev,
11844 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
11845 			    pdev->device, pdev->subsystem_vendor,
11846 			    pdev->subsystem_device);
11847 			return 1;
11848 		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
11849 		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
11850 			dev_err(&pdev->dev,
11851 			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
11852 			    pdev->device, pdev->subsystem_vendor,
11853 			    pdev->subsystem_device);
11854 			return 1;
11855 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
11856 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
11857 			dev_info(&pdev->dev,
11858 			    "HBA is in Configurable Secure mode\n");
11859 			fallthrough;
11860 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
11861 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
11862 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
11863 			break;
11864 		default:
11865 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
11866 		}
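		/*
		 * SAS 3.0 C0 (and later) and SAS 3.5 controllers expose
		 * multiple ReplyPostHostIndex registers, so reply queues can
		 * be grouped ("combined") across them.
		 */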
11867 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
11868 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
11869 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
11870 			ioc->combined_reply_queue = 1;
11871 			if (ioc->is_gen35_ioc)
11872 				ioc->combined_reply_index_count =
11873 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
11874 			else
11875 				ioc->combined_reply_index_count =
11876 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
11877 		}
11878 
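		/*
		 * Multipath on HBA defaults to enabled on Gen 3.5 IOCs and
		 * disabled on older IOCs; the multipath_on_hba module
		 * parameter overrides the default.
		 */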
11879 		switch (ioc->is_gen35_ioc) {
11880 		case 0:
11881 			if (multipath_on_hba == -1 || multipath_on_hba == 0)
11882 				ioc->multipath_on_hba = 0;
11883 			else
11884 				ioc->multipath_on_hba = 1;
11885 			break;
11886 		case 1:
11887 			if (multipath_on_hba == -1 || multipath_on_hba > 0)
11888 				ioc->multipath_on_hba = 1;
11889 			else
11890 				ioc->multipath_on_hba = 0;
11891 		default:
11892 			break;
11893 		}
11894 
11895 		break;
11896 	default:
11897 		return -ENODEV;
11898 	}
11899 
11900 	INIT_LIST_HEAD(&ioc->list);
11901 	spin_lock(&gioc_lock);
11902 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
11903 	spin_unlock(&gioc_lock);
11904 	ioc->shost = shost;
11905 	ioc->pdev = pdev;
11906 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
11907 	ioc->tm_cb_idx = tm_cb_idx;
11908 	ioc->ctl_cb_idx = ctl_cb_idx;
11909 	ioc->base_cb_idx = base_cb_idx;
11910 	ioc->port_enable_cb_idx = port_enable_cb_idx;
11911 	ioc->transport_cb_idx = transport_cb_idx;
11912 	ioc->scsih_cb_idx = scsih_cb_idx;
11913 	ioc->config_cb_idx = config_cb_idx;
11914 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
11915 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
11916 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
11917 	ioc->logging_level = logging_level;
11918 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
11919 	/* Host waits for a minimum of six seconds */
11920 	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
11921 	/*
11922 	 * Enable MEMORY MOVE support flag.
11923 	 */
11924 	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
11925 
11926 	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
11927 
11928 	/* misc semaphores and spin locks */
11929 	mutex_init(&ioc->reset_in_progress_mutex);
11930 	/* initializing pci_access_mutex lock */
11931 	mutex_init(&ioc->pci_access_mutex);
11932 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
11933 	spin_lock_init(&ioc->scsi_lookup_lock);
11934 	spin_lock_init(&ioc->sas_device_lock);
11935 	spin_lock_init(&ioc->sas_node_lock);
11936 	spin_lock_init(&ioc->fw_event_lock);
11937 	spin_lock_init(&ioc->raid_device_lock);
11938 	spin_lock_init(&ioc->pcie_device_lock);
11939 	spin_lock_init(&ioc->diag_trigger_lock);
11940 
11941 	INIT_LIST_HEAD(&ioc->sas_device_list);
11942 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
11943 	INIT_LIST_HEAD(&ioc->sas_expander_list);
11944 	INIT_LIST_HEAD(&ioc->enclosure_list);
11945 	INIT_LIST_HEAD(&ioc->pcie_device_list);
11946 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
11947 	INIT_LIST_HEAD(&ioc->fw_event_list);
11948 	INIT_LIST_HEAD(&ioc->raid_device_list);
11949 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
11950 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
11951 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
11952 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
11953 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
11954 	INIT_LIST_HEAD(&ioc->reply_queue_list);
11955 	INIT_LIST_HEAD(&ioc->port_table_list);
11956 
11957 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
11958 
11959 	/* init shost parameters */
11960 	shost->max_cmd_len = 32;
11961 	shost->max_lun = max_lun;
11962 	shost->transportt = mpt3sas_transport_template;
11963 	shost->unique_id = ioc->id;
11964 
11965 	if (ioc->is_mcpu_endpoint) {
11966 		/* mCPU MPI supports a 64KB max I/O (128 sectors * 512 bytes) */
11967 		shost->max_sectors = 128;
11968 		ioc_info(ioc, "The max_sectors value is set to %d\n",
11969 			 shost->max_sectors);
11970 	} else {
11971 		if (max_sectors != 0xFFFF) {
11972 			if (max_sectors < 64) {
11973 				shost->max_sectors = 64;
11974 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
11975 					 max_sectors);
11976 			} else if (max_sectors > 32767) {
11977 				shost->max_sectors = 32767;
11978 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
11979 					 max_sectors);
11980 			} else {
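				/* Round down to an even sector count. */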
11981 				shost->max_sectors = max_sectors & 0xFFFE;
11982 				ioc_info(ioc, "The max_sectors value is set to %d\n",
11983 					 shost->max_sectors);
11984 			}
11985 		}
11986 	}
11987 	/* register EEDP capabilities with SCSI layer */
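	/* prot_mask bits 0-2 select SHOST_DIF_TYPE{1,2,3}_PROTECTION. */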
11988 	if (prot_mask >= 0)
11989 		scsi_host_set_prot(shost, (prot_mask & 0x07));
11990 	else
11991 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
11992 				   | SHOST_DIF_TYPE2_PROTECTION
11993 				   | SHOST_DIF_TYPE3_PROTECTION);
11994 
11995 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
11996 
11997 	/* event thread */
11998 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
11999 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
12000 	ioc->firmware_event_thread = alloc_ordered_workqueue(
12001 	    ioc->firmware_event_name, 0);
12002 	if (!ioc->firmware_event_thread) {
12003 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12004 			__FILE__, __LINE__, __func__);
12005 		rv = -ENODEV;
12006 		goto out_thread_fail;
12007 	}
12008 
12009 	ioc->is_driver_loading = 1;
12010 	if ((mpt3sas_base_attach(ioc))) {
12011 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12012 			__FILE__, __LINE__, __func__);
12013 		rv = -ENODEV;
12014 		goto out_attach_fail;
12015 	}
12016 
12017 	if (ioc->is_warpdrive) {
12018 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
12019 			ioc->hide_drives = 0;
12020 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
12021 			ioc->hide_drives = 1;
12022 		else {
12023 			if (mpt3sas_get_num_volumes(ioc))
12024 				ioc->hide_drives = 1;
12025 			else
12026 				ioc->hide_drives = 0;
12027 		}
12028 	} else
12029 		ioc->hide_drives = 0;
12030 
12031 	rv = scsi_add_host(shost, &pdev->dev);
12032 	if (rv) {
12033 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12034 			__FILE__, __LINE__, __func__);
12035 		goto out_add_shost_fail;
12036 	}
12037 
12038 	scsi_scan_host(shost);
12039 	mpt3sas_setup_debugfs(ioc);
12040 	return 0;
12041 out_add_shost_fail:
12042 	mpt3sas_base_detach(ioc);
12043  out_attach_fail:
12044 	destroy_workqueue(ioc->firmware_event_thread);
12045  out_thread_fail:
12046 	spin_lock(&gioc_lock);
12047 	list_del(&ioc->list);
12048 	spin_unlock(&gioc_lock);
12049 	scsi_host_put(shost);
12050 	return rv;
12051 }
12052 
12053 /**
12054  * scsih_suspend - power management suspend main entry point
12055  * @dev: Device struct
12056  *
12057  * Return: 0 success, anything else error.
12058  */
12059 static int __maybe_unused
12060 scsih_suspend(struct device *dev)
12061 {
12062 	struct pci_dev *pdev = to_pci_dev(dev);
12063 	struct Scsi_Host *shost;
12064 	struct MPT3SAS_ADAPTER *ioc;
12065 	int rc;
12066 
12067 	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12068 	if (rc)
12069 		return rc;
12070 
12071 	mpt3sas_base_stop_watchdog(ioc);
12072 	flush_scheduled_work();
12073 	scsi_block_requests(shost);
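	/*
	 * Ask attached NVMe devices to perform an orderly shutdown before
	 * the controller's resources are released.
	 */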
12074 	_scsih_nvme_shutdown(ioc);
12075 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12076 		 pdev, pci_name(pdev));
12077 
12078 	mpt3sas_base_free_resources(ioc);
12079 	return 0;
12080 }
12081 
12082 /**
12083  * scsih_resume - power management resume main entry point
12084  * @dev: Device struct
12085  *
12086  * Return: 0 success, anything else error.
12087  */
12088 static int __maybe_unused
12089 scsih_resume(struct device *dev)
12090 {
12091 	struct pci_dev *pdev = to_pci_dev(dev);
12092 	struct Scsi_Host *shost;
12093 	struct MPT3SAS_ADAPTER *ioc;
12094 	pci_power_t device_state = pdev->current_state;
12095 	int r;
12096 
12097 	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12098 	if (r)
12099 		return r;
12100 
12101 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12102 		 pdev, pci_name(pdev), device_state);
12103 
12104 	ioc->pdev = pdev;
12105 	r = mpt3sas_base_map_resources(ioc);
12106 	if (r)
12107 		return r;
12108 	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12109 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12110 	scsi_unblock_requests(shost);
12111 	mpt3sas_base_start_watchdog(ioc);
12112 	return 0;
12113 }
12114 
12115 /**
12116  * scsih_pci_error_detected - Called when a PCI error is detected.
12117  * @pdev: PCI device struct
12118  * @state: PCI channel state
12119  *
12120  * Description: Called when a PCI error is detected.
12121  *
12122  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12123  */
12124 static pci_ers_result_t
12125 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12126 {
12127 	struct Scsi_Host *shost;
12128 	struct MPT3SAS_ADAPTER *ioc;
12129 
12130 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12131 		return PCI_ERS_RESULT_DISCONNECT;
12132 
12133 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12134 
12135 	switch (state) {
12136 	case pci_channel_io_normal:
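		/*
		 * Non-fatal error: the PCI error recovery core will invoke
		 * our mmio_enabled callback next.
		 */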
12137 		return PCI_ERS_RESULT_CAN_RECOVER;
12138 	case pci_channel_io_frozen:
12139 		/* Fatal error, prepare for slot reset */
12140 		ioc->pci_error_recovery = 1;
12141 		scsi_block_requests(ioc->shost);
12142 		mpt3sas_base_stop_watchdog(ioc);
12143 		mpt3sas_base_free_resources(ioc);
12144 		return PCI_ERS_RESULT_NEED_RESET;
12145 	case pci_channel_io_perm_failure:
12146 		/* Permanent error, prepare for device removal */
12147 		ioc->pci_error_recovery = 1;
12148 		mpt3sas_base_stop_watchdog(ioc);
12149 		_scsih_flush_running_cmds(ioc);
12150 		return PCI_ERS_RESULT_DISCONNECT;
12151 	}
12152 	return PCI_ERS_RESULT_NEED_RESET;
12153 }
12154 
12155 /**
12156  * scsih_pci_slot_reset - Called when PCI slot has been reset.
12157  * @pdev: PCI device struct
12158  *
12159  * Description: This routine is called by the pci error recovery
12160  * code after the PCI slot has been reset, just before we
12161  * should resume normal operations.
12162  */
12163 static pci_ers_result_t
12164 scsih_pci_slot_reset(struct pci_dev *pdev)
12165 {
12166 	struct Scsi_Host *shost;
12167 	struct MPT3SAS_ADAPTER *ioc;
12168 	int rc;
12169 
12170 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12171 		return PCI_ERS_RESULT_DISCONNECT;
12172 
12173 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
12174 
12175 	ioc->pci_error_recovery = 0;
12176 	ioc->pdev = pdev;
12177 	pci_restore_state(pdev);
12178 	rc = mpt3sas_base_map_resources(ioc);
12179 	if (rc)
12180 		return PCI_ERS_RESULT_DISCONNECT;
12181 
12182 	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12183 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12184 
12185 	ioc_warn(ioc, "hard reset: %s\n",
12186 		 (rc == 0) ? "success" : "failed");
12187 
12188 	if (!rc)
12189 		return PCI_ERS_RESULT_RECOVERED;
12190 	else
12191 		return PCI_ERS_RESULT_DISCONNECT;
12192 }
12193 
12194 /**
12195  * scsih_pci_resume() - resume normal ops after PCI reset
12196  * @pdev: pointer to PCI device
12197  *
12198  * Called when the error recovery driver tells us that it is
12199  * OK to resume normal operation. Use completion to allow
12200  * halted scsi ops to resume.
12201  */
12202 static void
12203 scsih_pci_resume(struct pci_dev *pdev)
12204 {
12205 	struct Scsi_Host *shost;
12206 	struct MPT3SAS_ADAPTER *ioc;
12207 
12208 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12209 		return;
12210 
12211 	ioc_info(ioc, "PCI error: resume callback!!\n");
12212 
12213 	mpt3sas_base_start_watchdog(ioc);
12214 	scsi_unblock_requests(ioc->shost);
12215 }
12216 
12217 /**
12218  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12219  * @pdev: pointer to PCI device
12220  */
12221 static pci_ers_result_t
12222 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12223 {
12224 	struct Scsi_Host *shost;
12225 	struct MPT3SAS_ADAPTER *ioc;
12226 
12227 	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12228 		return PCI_ERS_RESULT_DISCONNECT;
12229 
12230 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12231 
12232 	/* TODO - dump whatever for debugging purposes */
12233 
12234 	/* This is called only if scsih_pci_error_detected returns
12235 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12236 	 * works, no need to reset slot.
12237 	 */
12238 	return PCI_ERS_RESULT_RECOVERED;
12239 }
12240 
12241 /**
12242  * scsih_ncq_prio_supp - Check for NCQ command priority support
12243  * @sdev: scsi device struct
12244  *
12245  * This is called when a user indicates they would like to enable
12246  * ncq command priorities. This works only on SATA devices.
12247  */
12248 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12249 {
12250 	unsigned char *buf;
12251 	bool ncq_prio_supp = false;
12252 
12253 	if (!scsi_device_supports_vpd(sdev))
12254 		return ncq_prio_supp;
12255 
12256 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12257 	if (!buf)
12258 		return ncq_prio_supp;
12259 
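	/*
	 * VPD page 0x89 (ATA Information) embeds the IDENTIFY DEVICE data at
	 * byte offset 60, so buf[213] is the high byte of IDENTIFY word 76
	 * (Serial ATA capabilities); bit 12 of that word reports NCQ priority
	 * support.
	 */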
12260 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12261 		ncq_prio_supp = (buf[213] >> 4) & 1;
12262 
12263 	kfree(buf);
12264 	return ncq_prio_supp;
12265 }
12266 /*
12267  * The pci device ids are defined in mpi/mpi2_cnfg.h.
12268  */
12269 static const struct pci_device_id mpt3sas_pci_table[] = {
12270 	/* Spitfire ~ 2004 */
12271 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12272 		PCI_ANY_ID, PCI_ANY_ID },
12273 	/* Falcon ~ 2008 */
12274 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12275 		PCI_ANY_ID, PCI_ANY_ID },
12276 	/* Liberator ~ 2108 */
12277 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12278 		PCI_ANY_ID, PCI_ANY_ID },
12279 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12280 		PCI_ANY_ID, PCI_ANY_ID },
12281 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12282 		PCI_ANY_ID, PCI_ANY_ID },
12283 	/* Meteor ~ 2116 */
12284 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12285 		PCI_ANY_ID, PCI_ANY_ID },
12286 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12287 		PCI_ANY_ID, PCI_ANY_ID },
12288 	/* Thunderbolt ~ 2208 */
12289 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12290 		PCI_ANY_ID, PCI_ANY_ID },
12291 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12292 		PCI_ANY_ID, PCI_ANY_ID },
12293 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12294 		PCI_ANY_ID, PCI_ANY_ID },
12295 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12296 		PCI_ANY_ID, PCI_ANY_ID },
12297 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12298 		PCI_ANY_ID, PCI_ANY_ID },
12299 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12300 		PCI_ANY_ID, PCI_ANY_ID },
12301 	/* Mustang ~ 2308 */
12302 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12303 		PCI_ANY_ID, PCI_ANY_ID },
12304 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12305 		PCI_ANY_ID, PCI_ANY_ID },
12306 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12307 		PCI_ANY_ID, PCI_ANY_ID },
12308 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12309 		PCI_ANY_ID, PCI_ANY_ID },
12310 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12311 		PCI_ANY_ID, PCI_ANY_ID },
12312 	/* SSS6200 */
12313 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12314 		PCI_ANY_ID, PCI_ANY_ID },
12315 	/* Fury ~ 3004 and 3008 */
12316 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12317 		PCI_ANY_ID, PCI_ANY_ID },
12318 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12319 		PCI_ANY_ID, PCI_ANY_ID },
12320 	/* Invader ~ 3108 */
12321 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12322 		PCI_ANY_ID, PCI_ANY_ID },
12323 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12324 		PCI_ANY_ID, PCI_ANY_ID },
12325 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12326 		PCI_ANY_ID, PCI_ANY_ID },
12327 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12328 		PCI_ANY_ID, PCI_ANY_ID },
12329 	/* Cutlass ~ 3216 and 3224 */
12330 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12331 		PCI_ANY_ID, PCI_ANY_ID },
12332 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12333 		PCI_ANY_ID, PCI_ANY_ID },
12334 	/* Intruder ~ 3316 and 3324 */
12335 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12336 		PCI_ANY_ID, PCI_ANY_ID },
12337 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12338 		PCI_ANY_ID, PCI_ANY_ID },
12339 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12340 		PCI_ANY_ID, PCI_ANY_ID },
12341 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12342 		PCI_ANY_ID, PCI_ANY_ID },
12343 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12344 		PCI_ANY_ID, PCI_ANY_ID },
12345 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12346 		PCI_ANY_ID, PCI_ANY_ID },
12347 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12348 		PCI_ANY_ID, PCI_ANY_ID },
12349 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12350 		PCI_ANY_ID, PCI_ANY_ID },
12351 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12352 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12353 		PCI_ANY_ID, PCI_ANY_ID },
12354 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12355 		PCI_ANY_ID, PCI_ANY_ID },
12356 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12357 		PCI_ANY_ID, PCI_ANY_ID },
12358 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12359 		PCI_ANY_ID, PCI_ANY_ID },
12360 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12361 		PCI_ANY_ID, PCI_ANY_ID },
12362 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12363 		PCI_ANY_ID, PCI_ANY_ID },
12364 	/* Mercator ~ 3616*/
12365 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12366 		PCI_ANY_ID, PCI_ANY_ID },
12367 
12368 	/* Aero SI 0x00E1 Configurable Secure
12369 	 * 0x00E2 Hard Secure
12370 	 */
12371 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12372 		PCI_ANY_ID, PCI_ANY_ID },
12373 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12374 		PCI_ANY_ID, PCI_ANY_ID },
12375 
12376 	/*
12377 	 *  Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
12378 	 */
12379 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12380 		PCI_ANY_ID, PCI_ANY_ID },
12381 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12382 		PCI_ANY_ID, PCI_ANY_ID },
12383 
12384 	/* Atlas PCIe Switch Management Port */
12385 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12386 		PCI_ANY_ID, PCI_ANY_ID },
12387 
12388 	/* Sea SI 0x00E5 Configurable Secure
12389 	 * 0x00E6 Hard Secure
12390 	 */
12391 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12392 		PCI_ANY_ID, PCI_ANY_ID },
12393 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12394 		PCI_ANY_ID, PCI_ANY_ID },
12395 
12396 	/*
12397 	 *  Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
12398 	 */
12399 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12400 		PCI_ANY_ID, PCI_ANY_ID },
12401 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12402 		PCI_ANY_ID, PCI_ANY_ID },
12403 
12404 	{0}     /* Terminating entry */
12405 };
12406 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12407 
12408 static struct pci_error_handlers _mpt3sas_err_handler = {
12409 	.error_detected	= scsih_pci_error_detected,
12410 	.mmio_enabled	= scsih_pci_mmio_enabled,
12411 	.slot_reset	= scsih_pci_slot_reset,
12412 	.resume		= scsih_pci_resume,
12413 };
12414 
12415 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12416 
12417 static struct pci_driver mpt3sas_driver = {
12418 	.name		= MPT3SAS_DRIVER_NAME,
12419 	.id_table	= mpt3sas_pci_table,
12420 	.probe		= _scsih_probe,
12421 	.remove		= scsih_remove,
12422 	.shutdown	= scsih_shutdown,
12423 	.err_handler	= &_mpt3sas_err_handler,
12424 	.driver.pm	= &scsih_pm_ops,
12425 };
12426 
12427 /**
12428  * scsih_init - main entry point for this driver.
12429  *
12430  * Return: 0 success, anything else error.
12431  */
12432 static int
12433 scsih_init(void)
12434 {
12435 	mpt2_ids = 0;
12436 	mpt3_ids = 0;
12437 
12438 	mpt3sas_base_initialize_callback_handler();
12439 
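	/*
	 * Each handler registered below is assigned a callback index that the
	 * driver records against each outstanding request, letting the base
	 * layer route completions to the matching handler.
	 */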
12440 	/* queuecommand callback handler */
12441 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12442 
12443 	/* task management callback handler */
12444 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12445 
12446 	/* base internal commands callback handler */
12447 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12448 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12449 	    mpt3sas_port_enable_done);
12450 
12451 	/* transport internal commands callback handler */
12452 	transport_cb_idx = mpt3sas_base_register_callback_handler(
12453 	    mpt3sas_transport_done);
12454 
12455 	/* scsih internal commands callback handler */
12456 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12457 
12458 	/* configuration page API internal commands callback handler */
12459 	config_cb_idx = mpt3sas_base_register_callback_handler(
12460 	    mpt3sas_config_done);
12461 
12462 	/* ctl module callback handler */
12463 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12464 
12465 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12466 	    _scsih_tm_tr_complete);
12467 
12468 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12469 	    _scsih_tm_volume_tr_complete);
12470 
12471 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12472 	    _scsih_sas_control_complete);
12473 
12474 	mpt3sas_init_debugfs();
12475 	return 0;
12476 }
12477 
12478 /**
12479  * scsih_exit - exit point for this driver (when it is a module).
12482  */
12483 static void
12484 scsih_exit(void)
12485 {
12487 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12488 	mpt3sas_base_release_callback_handler(tm_cb_idx);
12489 	mpt3sas_base_release_callback_handler(base_cb_idx);
12490 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12491 	mpt3sas_base_release_callback_handler(transport_cb_idx);
12492 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
12493 	mpt3sas_base_release_callback_handler(config_cb_idx);
12494 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
12495 
12496 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12497 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12498 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12499 
12500 /* raid transport support */
12501 	if (hbas_to_enumerate != 1)
12502 		raid_class_release(mpt3sas_raid_template);
12503 	if (hbas_to_enumerate != 2)
12504 		raid_class_release(mpt2sas_raid_template);
12505 	sas_release_transport(mpt3sas_transport_template);
12506 	mpt3sas_exit_debugfs();
12507 }
12508 
12509 /**
12510  * _mpt3sas_init - main entry point for this driver.
12511  *
12512  * Return: 0 success, anything else error.
12513  */
12514 static int __init
12515 _mpt3sas_init(void)
12516 {
12517 	int error;
12518 
12519 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12520 					MPT3SAS_DRIVER_VERSION);
12521 
12522 	mpt3sas_transport_template =
12523 	    sas_attach_transport(&mpt3sas_transport_functions);
12524 	if (!mpt3sas_transport_template)
12525 		return -ENODEV;
12526 
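	/*
	 * hbas_to_enumerate selects which controller generations this driver
	 * binds to: 0 (default) enumerates both SAS 2.0 and SAS 3.0/3.5 HBAs,
	 * 1 restricts enumeration to SAS 2.0 HBAs, and 2 restricts it to
	 * SAS 3.0/3.5 HBAs.  The raid class templates attached below follow
	 * the same selection.
	 */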
12527 	/* No need to attach the mpt3sas raid functions template
12528 	 * if hbas_to_enumerate is one.
12529 	 */
12530 	if (hbas_to_enumerate != 1) {
12531 		mpt3sas_raid_template =
12532 				raid_class_attach(&mpt3sas_raid_functions);
12533 		if (!mpt3sas_raid_template) {
12534 			sas_release_transport(mpt3sas_transport_template);
12535 			return -ENODEV;
12536 		}
12537 	}
12538 
12539 	/* No need to attach the mpt2sas raid functions template
12540 	 * if hbas_to_enumerate is two.
12541 	 */
12542 	if (hbas_to_enumerate != 2) {
12543 		mpt2sas_raid_template =
12544 				raid_class_attach(&mpt2sas_raid_functions);
12545 		if (!mpt2sas_raid_template) {
12546 			sas_release_transport(mpt3sas_transport_template);
12547 			return -ENODEV;
12548 		}
12549 	}
12550 
12551 	error = scsih_init();
12552 	if (error) {
12553 		scsih_exit();
12554 		return error;
12555 	}
12556 
12557 	mpt3sas_ctl_init(hbas_to_enumerate);
12558 
12559 	error = pci_register_driver(&mpt3sas_driver);
12560 	if (error)
12561 		scsih_exit();
12562 
12563 	return error;
12564 }
12565 
12566 /**
12567  * _mpt3sas_exit - exit point for this driver (when it is a module).
12569  */
12570 static void __exit
12571 _mpt3sas_exit(void)
12572 {
12573 	pr_info("mpt3sas version %s unloading\n",
12574 				MPT3SAS_DRIVER_VERSION);
12575 
12576 	mpt3sas_ctl_exit(hbas_to_enumerate);
12577 
12578 	pci_unregister_driver(&mpt3sas_driver);
12579 
12580 	scsih_exit();
12581 }
12582 
12583 module_init(_mpt3sas_init);
12584 module_exit(_mpt3sas_exit);
12585