1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29  *
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38  *
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/pci-aspm.h>
55 #include <linux/interrupt.h>
56 #include <linux/aer.h>
57 #include <linux/raid_class.h>
58 #include <asm/unaligned.h>
59 
60 #include "mpt3sas_base.h"
61 
62 #define RAID_CHANNEL 1
63 
64 #define PCIE_CHANNEL 2
65 
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 	struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70 
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 	struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 	u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 	struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
86 
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
92 
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
104 
105 static u8 tm_tr_cb_idx = -1;
106 static u8 tm_tr_volume_cb_idx = -1;
107 static u8 tm_sas_control_cb_idx = -1;
108 
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 	" bits for enabling additional logging info (default=0)");
113 
114 
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
118 
119 
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
123 
124 /* scsi-mid layer global parameter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129 
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 		  1 - enumerates only SAS 2.0 generation HBAs\n \
135 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136 
137 /* diag_buffer_enable is bitwise
138  * bit 0 set = TRACE
139  * bit 1 set = SNAPSHOT
140  * bit 2 set = EXTENDED
141  *
142  * Any combination of these bits can be set.
143  */
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151 
152 
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 
158 
159 /* raid transport support */
160 static struct raid_template *mpt3sas_raid_template;
161 static struct raid_template *mpt2sas_raid_template;
162 
163 
164 /**
165  * struct sense_info - common structure for obtaining sense keys
166  * @skey: sense key
167  * @asc: additional sense code
168  * @ascq: additional sense code qualifier
169  */
170 struct sense_info {
171 	u8 skey;
172 	u8 asc;
173 	u8 ascq;
174 };
175 
176 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
177 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
178 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
179 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
180 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
181 /**
182  * struct fw_event_work - firmware event struct
183  * @list: link list framework
184  * @work: work object (queued on ioc->firmware_event_thread)
185  * @ioc: per adapter object
186  * @device_handle: device handle
187  * @VF_ID: virtual function id
188  * @VP_ID: virtual port id
189  * @ignore: flag meaning this event has been marked to ignore
190  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
191  * @refcount: kref for this event
192  * @event_data: reply event data payload follows
193  *
194  * This object is stored on ioc->fw_event_list.
195  */
196 struct fw_event_work {
197 	struct list_head	list;
198 	struct work_struct	work;
199 
200 	struct MPT3SAS_ADAPTER *ioc;
201 	u16			device_handle;
202 	u8			VF_ID;
203 	u8			VP_ID;
204 	u8			ignore;
205 	u16			event;
206 	struct kref		refcount;
207 	char			event_data[0] __aligned(4);
208 };
209 
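/**
 * fw_event_work_free - kref release callback for a firmware event
 * @r: kref embedded in the fw_event_work object
 *
 * Frees the fw_event_work object once the last reference is dropped.
 */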
210 static void fw_event_work_free(struct kref *r)
211 {
212 	kfree(container_of(r, struct fw_event_work, refcount));
213 }
214 
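/**
 * fw_event_work_get - take a reference on a firmware event
 * @fw_work: firmware event work object
 */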
215 static void fw_event_work_get(struct fw_event_work *fw_work)
216 {
217 	kref_get(&fw_work->refcount);
218 }
219 
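/**
 * fw_event_work_put - drop a reference on a firmware event
 * @fw_work: firmware event work object
 *
 * The object is freed via fw_event_work_free() when the last reference
 * is dropped.
 */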
220 static void fw_event_work_put(struct fw_event_work *fw_work)
221 {
222 	kref_put(&fw_work->refcount, fw_event_work_free);
223 }
224 
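/**
 * alloc_fw_event_work - allocate a firmware event with event data payload
 * @len: length of the event data payload in bytes
 *
 * Return: a new fw_event_work object with its refcount initialized to one,
 * or NULL on allocation failure.
 */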
225 static struct fw_event_work *alloc_fw_event_work(int len)
226 {
227 	struct fw_event_work *fw_event;
228 
229 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
230 	if (!fw_event)
231 		return NULL;
232 
233 	kref_init(&fw_event->refcount);
234 	return fw_event;
235 }
236 
237 /**
238  * struct _scsi_io_transfer - scsi io transfer
239  * @handle: sas device handle (assigned by firmware)
240  * @is_raid: flag set for hidden raid components
241  * @dir: data transfer direction (DMA_TO_DEVICE, DMA_FROM_DEVICE, ...)
242  * @data_length: data transfer length
243  * @data_dma: dma pointer to data
244  * @sense: sense data
245  * @lun: lun number
246  * @cdb_length: cdb length
247  * @cdb: cdb contents
248  * @timeout: timeout for this command
249  * @VF_ID: virtual function id
250  * @VP_ID: virtual port id
251  * @valid_reply: flag set for reply message
252  * @sense_length: sense length
253  * @ioc_status: ioc status
254  * @scsi_state: scsi state
255  * @scsi_status: scsi status
256  * @log_info: log information
257  * @transfer_length: data length transferred when there is a reply message
258  *
259  * Used for sending internal scsi commands to devices within this module.
260  * Refer to _scsi_send_scsi_io().
261  */
262 struct _scsi_io_transfer {
263 	u16	handle;
264 	u8	is_raid;
265 	enum dma_data_direction dir;
266 	u32	data_length;
267 	dma_addr_t data_dma;
268 	u8	sense[SCSI_SENSE_BUFFERSIZE];
269 	u32	lun;
270 	u8	cdb_length;
271 	u8	cdb[32];
272 	u8	timeout;
273 	u8	VF_ID;
274 	u8	VP_ID;
275 	u8	valid_reply;
276   /* the following fields are only valid when 'valid_reply = 1' */
277 	u32	sense_length;
278 	u16	ioc_status;
279 	u8	scsi_state;
280 	u8	scsi_status;
281 	u32	log_info;
282 	u32	transfer_length;
283 };
284 
285 /**
286  * _scsih_set_debug_level - global setting of ioc->logging_level.
287  * @val: string value of logging_level supplied by the user
288  * @kp: kernel parameter descriptor for logging_level
289  *
290  * Note: The logging levels are defined in mpt3sas_debug.h.
291  */
292 static int
293 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
294 {
295 	int ret = param_set_int(val, kp);
296 	struct MPT3SAS_ADAPTER *ioc;
297 
298 	if (ret)
299 		return ret;
300 
301 	pr_info("setting logging_level(0x%08x)\n", logging_level);
302 	spin_lock(&gioc_lock);
303 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
304 		ioc->logging_level = logging_level;
305 	spin_unlock(&gioc_lock);
306 	return 0;
307 }
308 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
309 	&logging_level, 0644);
310 
311 /**
312  * _scsih_srch_boot_sas_address - search based on sas_address
313  * @sas_address: sas address
314  * @boot_device: boot device object from bios page 2
315  *
316  * Return: 1 when there's a match, 0 means no match.
317  */
318 static inline int
319 _scsih_srch_boot_sas_address(u64 sas_address,
320 	Mpi2BootDeviceSasWwid_t *boot_device)
321 {
322 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
323 }
324 
325 /**
326  * _scsih_srch_boot_device_name - search based on device name
327  * @device_name: device name specified in IDENTIFY frame
328  * @boot_device: boot device object from bios page 2
329  *
330  * Return: 1 when there's a match, 0 means no match.
331  */
332 static inline int
333 _scsih_srch_boot_device_name(u64 device_name,
334 	Mpi2BootDeviceDeviceName_t *boot_device)
335 {
336 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
337 }
338 
339 /**
340  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
341  * @enclosure_logical_id: enclosure logical id
342  * @slot_number: slot number
343  * @boot_device: boot device object from bios page 2
344  *
345  * Return: 1 when there's a match, 0 means no match.
346  */
347 static inline int
348 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
349 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
350 {
351 	return (enclosure_logical_id == le64_to_cpu(boot_device->
352 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
353 	    SlotNumber)) ? 1 : 0;
354 }
355 
356 /**
357  * _scsih_is_boot_device - search for matching boot device.
358  * @sas_address: sas address
359  * @device_name: device name specified in IDENTIFY frame
360  * @enclosure_logical_id: enclosure logical id
361  * @slot: slot number
362  * @form: specifies boot device form
363  * @boot_device: boot device object from bios page 2
364  *
365  * Return: 1 when there's a match, 0 means no match.
366  */
367 static int
368 _scsih_is_boot_device(u64 sas_address, u64 device_name,
369 	u64 enclosure_logical_id, u16 slot, u8 form,
370 	Mpi2BiosPage2BootDevice_t *boot_device)
371 {
372 	int rc = 0;
373 
374 	switch (form) {
375 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
376 		if (!sas_address)
377 			break;
378 		rc = _scsih_srch_boot_sas_address(
379 		    sas_address, &boot_device->SasWwid);
380 		break;
381 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
382 		if (!enclosure_logical_id)
383 			break;
384 		rc = _scsih_srch_boot_encl_slot(
385 		    enclosure_logical_id,
386 		    slot, &boot_device->EnclosureSlot);
387 		break;
388 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
389 		if (!device_name)
390 			break;
391 		rc = _scsih_srch_boot_device_name(
392 		    device_name, &boot_device->DeviceName);
393 		break;
394 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
395 		break;
396 	}
397 
398 	return rc;
399 }
400 
401 /**
402  * _scsih_get_sas_address - set the sas_address for given device handle
403  * @ioc: per adapter object
404  * @handle: device handle
405  * @sas_address: sas address
406  *
407  * Return: 0 success, non-zero when failure
408  */
409 static int
410 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
411 	u64 *sas_address)
412 {
413 	Mpi2SasDevicePage0_t sas_device_pg0;
414 	Mpi2ConfigReply_t mpi_reply;
415 	u32 ioc_status;
416 
417 	*sas_address = 0;
418 
419 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
420 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
421 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
422 			__FILE__, __LINE__, __func__);
423 		return -ENXIO;
424 	}
425 
426 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
427 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
428 		/* For HBA, vSES doesn't return HBA SAS address. Instead return
429 		 * vSES's sas address.
430 		 */
431 		if ((handle <= ioc->sas_hba.num_phys) &&
432 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
433 		   MPI2_SAS_DEVICE_INFO_SEP)))
434 			*sas_address = ioc->sas_hba.sas_address;
435 		else
436 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
437 		return 0;
438 	}
439 
440 	/* we hit this because the given parent handle doesn't exist */
441 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
442 		return -ENXIO;
443 
444 	/* else error case */
445 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
446 		handle, ioc_status, __FILE__, __LINE__, __func__);
447 	return -EIO;
448 }
449 
450 /**
451  * _scsih_determine_boot_device - determine boot device.
452  * @ioc: per adapter object
453  * @device: sas_device or pcie_device object
454  * @channel: SAS or PCIe channel
455  *
456  * Determines whether this device should be the first device reported to
457  * scsi-ml or the sas transport; the purpose is to support persistent boot
458  * devices. There are primary, alternate, and current entries in bios page 2.
459  * The order of priority is primary, alternate, then current. This routine
460  * saves the corresponding device object.
461  * The saved data is used later in _scsih_probe_boot_devices().
462  */
463 static void
464 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
465 	u32 channel)
466 {
467 	struct _sas_device *sas_device;
468 	struct _pcie_device *pcie_device;
469 	struct _raid_device *raid_device;
470 	u64 sas_address;
471 	u64 device_name;
472 	u64 enclosure_logical_id;
473 	u16 slot;
474 
475 	 /* only process this function when driver loads */
476 	if (!ioc->is_driver_loading)
477 		return;
478 
479 	 /* no Bios, return immediately */
480 	if (!ioc->bios_pg3.BiosVersion)
481 		return;
482 
483 	if (channel == RAID_CHANNEL) {
484 		raid_device = device;
485 		sas_address = raid_device->wwid;
486 		device_name = 0;
487 		enclosure_logical_id = 0;
488 		slot = 0;
489 	} else if (channel == PCIE_CHANNEL) {
490 		pcie_device = device;
491 		sas_address = pcie_device->wwid;
492 		device_name = 0;
493 		enclosure_logical_id = 0;
494 		slot = 0;
495 	} else {
496 		sas_device = device;
497 		sas_address = sas_device->sas_address;
498 		device_name = sas_device->device_name;
499 		enclosure_logical_id = sas_device->enclosure_logical_id;
500 		slot = sas_device->slot;
501 	}
502 
503 	if (!ioc->req_boot_device.device) {
504 		if (_scsih_is_boot_device(sas_address, device_name,
505 		    enclosure_logical_id, slot,
506 		    (ioc->bios_pg2.ReqBootDeviceForm &
507 		    MPI2_BIOSPAGE2_FORM_MASK),
508 		    &ioc->bios_pg2.RequestedBootDevice)) {
509 			dinitprintk(ioc,
510 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
511 					     __func__, (u64)sas_address));
512 			ioc->req_boot_device.device = device;
513 			ioc->req_boot_device.channel = channel;
514 		}
515 	}
516 
517 	if (!ioc->req_alt_boot_device.device) {
518 		if (_scsih_is_boot_device(sas_address, device_name,
519 		    enclosure_logical_id, slot,
520 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
521 		    MPI2_BIOSPAGE2_FORM_MASK),
522 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
523 			dinitprintk(ioc,
524 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
525 					     __func__, (u64)sas_address));
526 			ioc->req_alt_boot_device.device = device;
527 			ioc->req_alt_boot_device.channel = channel;
528 		}
529 	}
530 
531 	if (!ioc->current_boot_device.device) {
532 		if (_scsih_is_boot_device(sas_address, device_name,
533 		    enclosure_logical_id, slot,
534 		    (ioc->bios_pg2.CurrentBootDeviceForm &
535 		    MPI2_BIOSPAGE2_FORM_MASK),
536 		    &ioc->bios_pg2.CurrentBootDevice)) {
537 			dinitprintk(ioc,
538 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
539 					     __func__, (u64)sas_address));
540 			ioc->current_boot_device.device = device;
541 			ioc->current_boot_device.channel = channel;
542 		}
543 	}
544 }
545 
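/**
 * __mpt3sas_get_sdev_from_target - sas device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * Returns the sas_device attached to the target with an additional
 * reference taken, or NULL if none is attached.
 */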
546 static struct _sas_device *
547 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
548 		struct MPT3SAS_TARGET *tgt_priv)
549 {
550 	struct _sas_device *ret;
551 
552 	assert_spin_locked(&ioc->sas_device_lock);
553 
554 	ret = tgt_priv->sas_dev;
555 	if (ret)
556 		sas_device_get(ret);
557 
558 	return ret;
559 }
560 
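/**
 * mpt3sas_get_sdev_from_target - sas device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function acquires ioc->sas_device_lock and releases it
 * before returning the sas_device object.
 */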
561 static struct _sas_device *
562 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
563 		struct MPT3SAS_TARGET *tgt_priv)
564 {
565 	struct _sas_device *ret;
566 	unsigned long flags;
567 
568 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
569 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
570 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
571 
572 	return ret;
573 }
574 
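/**
 * __mpt3sas_get_pdev_from_target - pcie device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Returns the pcie_device attached to the target with an additional
 * reference taken, or NULL if none is attached.
 */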
575 static struct _pcie_device *
576 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
577 	struct MPT3SAS_TARGET *tgt_priv)
578 {
579 	struct _pcie_device *ret;
580 
581 	assert_spin_locked(&ioc->pcie_device_lock);
582 
583 	ret = tgt_priv->pcie_dev;
584 	if (ret)
585 		pcie_device_get(ret);
586 
587 	return ret;
588 }
589 
590 /**
591  * mpt3sas_get_pdev_from_target - pcie device search
592  * @ioc: per adapter object
593  * @tgt_priv: starget private object
594  *
595  * Context: This function acquires ioc->pcie_device_lock and releases it
596  * before returning the pcie_device object.
597  *
598  * This searches for the pcie_device attached to the target and returns it.
599  */
600 static struct _pcie_device *
601 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
602 	struct MPT3SAS_TARGET *tgt_priv)
603 {
604 	struct _pcie_device *ret;
605 	unsigned long flags;
606 
607 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
608 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
609 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
610 
611 	return ret;
612 }
613 
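/**
 * __mpt3sas_get_sdev_by_addr - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * Searches ioc->sas_device_list and ioc->sas_device_init_list for a matching
 * sas address and returns the sas_device with an additional reference taken,
 * or NULL when there is no match.
 */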
614 struct _sas_device *
615 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
616 					u64 sas_address)
617 {
618 	struct _sas_device *sas_device;
619 
620 	assert_spin_locked(&ioc->sas_device_lock);
621 
622 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
623 		if (sas_device->sas_address == sas_address)
624 			goto found_device;
625 
626 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
627 		if (sas_device->sas_address == sas_address)
628 			goto found_device;
629 
630 	return NULL;
631 
632 found_device:
633 	sas_device_get(sas_device);
634 	return sas_device;
635 }
636 
637 /**
638  * mpt3sas_get_sdev_by_addr - sas device search
639  * @ioc: per adapter object
640  * @sas_address: sas address
641  * Context: This function acquires ioc->sas_device_lock and releases it
642  * before returning the sas_device object.
643  *
644  * This searches for sas_device based on sas_address and returns it.
645  */
646 struct _sas_device *
647 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
648 	u64 sas_address)
649 {
650 	struct _sas_device *sas_device;
651 	unsigned long flags;
652 
653 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
654 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
655 			sas_address);
656 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
657 
658 	return sas_device;
659 }
660 
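/**
 * __mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * Searches ioc->sas_device_list and ioc->sas_device_init_list for a matching
 * handle and returns the sas_device with an additional reference taken, or
 * NULL when there is no match.
 */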
661 static struct _sas_device *
662 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
663 {
664 	struct _sas_device *sas_device;
665 
666 	assert_spin_locked(&ioc->sas_device_lock);
667 
668 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
669 		if (sas_device->handle == handle)
670 			goto found_device;
671 
672 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
673 		if (sas_device->handle == handle)
674 			goto found_device;
675 
676 	return NULL;
677 
678 found_device:
679 	sas_device_get(sas_device);
680 	return sas_device;
681 }
682 
683 /**
684  * mpt3sas_get_sdev_by_handle - sas device search
685  * @ioc: per adapter object
686  * @handle: sas device handle (assigned by firmware)
687  * Context: This function acquires ioc->sas_device_lock and releases it
688  * before returning the sas_device object.
689  *
690  * This searches for sas_device based on the handle and returns it.
691  */
692 struct _sas_device *
693 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
694 {
695 	struct _sas_device *sas_device;
696 	unsigned long flags;
697 
698 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
699 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
700 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
701 
702 	return sas_device;
703 }
704 
705 /**
706  * _scsih_display_enclosure_chassis_info - display device location info
707  * @ioc: per adapter object
708  * @sas_device: per sas device object
709  * @sdev: scsi device struct
710  * @starget: scsi target struct
711  */
712 static void
713 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
714 	struct _sas_device *sas_device, struct scsi_device *sdev,
715 	struct scsi_target *starget)
716 {
717 	if (sdev) {
718 		if (sas_device->enclosure_handle != 0)
719 			sdev_printk(KERN_INFO, sdev,
720 			    "enclosure logical id (0x%016llx), slot(%d) \n",
721 			    (unsigned long long)
722 			    sas_device->enclosure_logical_id,
723 			    sas_device->slot);
724 		if (sas_device->connector_name[0] != '\0')
725 			sdev_printk(KERN_INFO, sdev,
726 			    "enclosure level(0x%04x), connector name( %s)\n",
727 			    sas_device->enclosure_level,
728 			    sas_device->connector_name);
729 		if (sas_device->is_chassis_slot_valid)
730 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
731 			    sas_device->chassis_slot);
732 	} else if (starget) {
733 		if (sas_device->enclosure_handle != 0)
734 			starget_printk(KERN_INFO, starget,
735 			    "enclosure logical id(0x%016llx), slot(%d) \n",
736 			    (unsigned long long)
737 			    sas_device->enclosure_logical_id,
738 			    sas_device->slot);
739 		if (sas_device->connector_name[0] != '\0')
740 			starget_printk(KERN_INFO, starget,
741 			    "enclosure level(0x%04x), connector name( %s)\n",
742 			    sas_device->enclosure_level,
743 			    sas_device->connector_name);
744 		if (sas_device->is_chassis_slot_valid)
745 			starget_printk(KERN_INFO, starget,
746 			    "chassis slot(0x%04x)\n",
747 			    sas_device->chassis_slot);
748 	} else {
749 		if (sas_device->enclosure_handle != 0)
750 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
751 				 (u64)sas_device->enclosure_logical_id,
752 				 sas_device->slot);
753 		if (sas_device->connector_name[0] != '\0')
754 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
755 				 sas_device->enclosure_level,
756 				 sas_device->connector_name);
757 		if (sas_device->is_chassis_slot_valid)
758 			ioc_info(ioc, "chassis slot(0x%04x)\n",
759 				 sas_device->chassis_slot);
760 	}
761 }
762 
763 /**
764  * _scsih_sas_device_remove - remove sas_device from list.
765  * @ioc: per adapter object
766  * @sas_device: the sas_device object
767  * Context: This function will acquire ioc->sas_device_lock.
768  *
769  * If sas_device is on the list, remove it and decrement its reference count.
770  */
771 static void
772 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
773 	struct _sas_device *sas_device)
774 {
775 	unsigned long flags;
776 
777 	if (!sas_device)
778 		return;
779 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
780 		 sas_device->handle, (u64)sas_device->sas_address);
781 
782 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
783 
784 	/*
785 	 * The lock serializes access to the list, but we still need to verify
786 	 * that nobody removed the entry while we were waiting on the lock.
787 	 */
788 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
789 	if (!list_empty(&sas_device->list)) {
790 		list_del_init(&sas_device->list);
791 		sas_device_put(sas_device);
792 	}
793 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
794 }
795 
796 /**
797  * _scsih_device_remove_by_handle - removing device object by handle
798  * @ioc: per adapter object
799  * @handle: device handle
800  */
801 static void
802 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
803 {
804 	struct _sas_device *sas_device;
805 	unsigned long flags;
806 
807 	if (ioc->shost_recovery)
808 		return;
809 
810 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
812 	if (sas_device) {
813 		list_del_init(&sas_device->list);
814 		sas_device_put(sas_device);
815 	}
816 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
817 	if (sas_device) {
818 		_scsih_remove_device(ioc, sas_device);
819 		sas_device_put(sas_device);
820 	}
821 }
822 
823 /**
824  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
825  * @ioc: per adapter object
826  * @sas_address: device sas_address
827  */
828 void
829 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
830 	u64 sas_address)
831 {
832 	struct _sas_device *sas_device;
833 	unsigned long flags;
834 
835 	if (ioc->shost_recovery)
836 		return;
837 
838 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
839 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
840 	if (sas_device) {
841 		list_del_init(&sas_device->list);
842 		sas_device_put(sas_device);
843 	}
844 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
845 	if (sas_device) {
846 		_scsih_remove_device(ioc, sas_device);
847 		sas_device_put(sas_device);
848 	}
849 }
850 
851 /**
852  * _scsih_sas_device_add - insert sas_device to the list.
853  * @ioc: per adapter object
854  * @sas_device: the sas_device object
855  * Context: This function will acquire ioc->sas_device_lock.
856  *
857  * Adding new object to the ioc->sas_device_list.
858  */
859 static void
860 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
861 	struct _sas_device *sas_device)
862 {
863 	unsigned long flags;
864 
865 	dewtprintk(ioc,
866 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
867 			    __func__, sas_device->handle,
868 			    (u64)sas_device->sas_address));
869 
870 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
871 	    NULL, NULL));
872 
873 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
874 	sas_device_get(sas_device);
875 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
876 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
877 
878 	if (ioc->hide_drives) {
879 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
880 		return;
881 	}
882 
883 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
884 	     sas_device->sas_address_parent)) {
885 		_scsih_sas_device_remove(ioc, sas_device);
886 	} else if (!sas_device->starget) {
887 		/*
888 		 * When async scanning is enabled, it's not possible to remove
889 		 * devices while scanning is turned on due to an oops in
890 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
891 		 */
892 		if (!ioc->is_driver_loading) {
893 			mpt3sas_transport_port_remove(ioc,
894 			    sas_device->sas_address,
895 			    sas_device->sas_address_parent);
896 			_scsih_sas_device_remove(ioc, sas_device);
897 		}
898 	} else
899 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
900 }
901 
902 /**
903  * _scsih_sas_device_init_add - insert sas_device to the list.
904  * @ioc: per adapter object
905  * @sas_device: the sas_device object
906  * Context: This function will acquire ioc->sas_device_lock.
907  *
908  * Adding new object at driver load time to the ioc->sas_device_init_list.
909  */
910 static void
911 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
912 	struct _sas_device *sas_device)
913 {
914 	unsigned long flags;
915 
916 	dewtprintk(ioc,
917 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
918 			    __func__, sas_device->handle,
919 			    (u64)sas_device->sas_address));
920 
921 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
922 	    NULL, NULL));
923 
924 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
925 	sas_device_get(sas_device);
926 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
927 	_scsih_determine_boot_device(ioc, sas_device, 0);
928 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
929 }
930 
931 
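/**
 * __mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Searches ioc->pcie_device_list and ioc->pcie_device_init_list for a
 * matching wwid and returns the pcie_device with an additional reference
 * taken, or NULL when there is no match.
 */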
932 static struct _pcie_device *
933 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
934 {
935 	struct _pcie_device *pcie_device;
936 
937 	assert_spin_locked(&ioc->pcie_device_lock);
938 
939 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
940 		if (pcie_device->wwid == wwid)
941 			goto found_device;
942 
943 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
944 		if (pcie_device->wwid == wwid)
945 			goto found_device;
946 
947 	return NULL;
948 
949 found_device:
950 	pcie_device_get(pcie_device);
951 	return pcie_device;
952 }
953 
954 
955 /**
956  * mpt3sas_get_pdev_by_wwid - pcie device search
957  * @ioc: per adapter object
958  * @wwid: wwid
959  *
960  * Context: This function acquires ioc->pcie_device_lock and releases it
961  * before returning the pcie_device object.
962  *
963  * This searches for pcie_device based on wwid and returns it.
964  */
965 static struct _pcie_device *
966 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
967 {
968 	struct _pcie_device *pcie_device;
969 	unsigned long flags;
970 
971 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
972 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
973 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
974 
975 	return pcie_device;
976 }
977 
978 
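/**
 * __mpt3sas_get_pdev_by_idchannel - pcie device search
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel number
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Searches ioc->pcie_device_list and ioc->pcie_device_init_list for a
 * matching id/channel pair and returns the pcie_device with an additional
 * reference taken, or NULL when there is no match.
 */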
979 static struct _pcie_device *
980 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
981 	int channel)
982 {
983 	struct _pcie_device *pcie_device;
984 
985 	assert_spin_locked(&ioc->pcie_device_lock);
986 
987 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
988 		if (pcie_device->id == id && pcie_device->channel == channel)
989 			goto found_device;
990 
991 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
992 		if (pcie_device->id == id && pcie_device->channel == channel)
993 			goto found_device;
994 
995 	return NULL;
996 
997 found_device:
998 	pcie_device_get(pcie_device);
999 	return pcie_device;
1000 }
1001 
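/**
 * __mpt3sas_get_pdev_by_handle - pcie device search
 * @ioc: per adapter object
 * @handle: Firmware device handle
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Searches ioc->pcie_device_list and ioc->pcie_device_init_list for a
 * matching handle and returns the pcie_device with an additional reference
 * taken, or NULL when there is no match.
 */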
1002 static struct _pcie_device *
1003 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1004 {
1005 	struct _pcie_device *pcie_device;
1006 
1007 	assert_spin_locked(&ioc->pcie_device_lock);
1008 
1009 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1010 		if (pcie_device->handle == handle)
1011 			goto found_device;
1012 
1013 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1014 		if (pcie_device->handle == handle)
1015 			goto found_device;
1016 
1017 	return NULL;
1018 
1019 found_device:
1020 	pcie_device_get(pcie_device);
1021 	return pcie_device;
1022 }
1023 
1024 
1025 /**
1026  * mpt3sas_get_pdev_by_handle - pcie device search
1027  * @ioc: per adapter object
1028  * @handle: Firmware device handle
1029  *
1030  * Context: This function acquires ioc->pcie_device_lock and releases it
1031  * before returning the pcie_device object.
1032  *
1033  * This searches for pcie_device based on handle and returns the pcie_device
1034  * object.
1035  */
1036 struct _pcie_device *
1037 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1038 {
1039 	struct _pcie_device *pcie_device;
1040 	unsigned long flags;
1041 
1042 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1043 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1044 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1045 
1046 	return pcie_device;
1047 }
1048 
1049 /**
1050  * _scsih_pcie_device_remove - remove pcie_device from list.
1051  * @ioc: per adapter object
1052  * @pcie_device: the pcie_device object
1053  * Context: This function will acquire ioc->pcie_device_lock.
1054  *
1055  * If pcie_device is on the list, remove it and decrement its reference count.
1056  */
1057 static void
1058 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1059 	struct _pcie_device *pcie_device)
1060 {
1061 	unsigned long flags;
1062 	int was_on_pcie_device_list = 0;
1063 
1064 	if (!pcie_device)
1065 		return;
1066 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1067 		 pcie_device->handle, (u64)pcie_device->wwid);
1068 	if (pcie_device->enclosure_handle != 0)
1069 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1070 			 (u64)pcie_device->enclosure_logical_id,
1071 			 pcie_device->slot);
1072 	if (pcie_device->connector_name[0] != '\0')
1073 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1074 			 pcie_device->enclosure_level,
1075 			 pcie_device->connector_name);
1076 
1077 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1078 	if (!list_empty(&pcie_device->list)) {
1079 		list_del_init(&pcie_device->list);
1080 		was_on_pcie_device_list = 1;
1081 	}
1082 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1083 	if (was_on_pcie_device_list) {
1084 		kfree(pcie_device->serial_number);
1085 		pcie_device_put(pcie_device);
1086 	}
1087 }
1088 
1089 
1090 /**
1091  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1092  * @ioc: per adapter object
1093  * @handle: device handle
1094  */
1095 static void
1096 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1097 {
1098 	struct _pcie_device *pcie_device;
1099 	unsigned long flags;
1100 	int was_on_pcie_device_list = 0;
1101 
1102 	if (ioc->shost_recovery)
1103 		return;
1104 
1105 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1106 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1107 	if (pcie_device) {
1108 		if (!list_empty(&pcie_device->list)) {
1109 			list_del_init(&pcie_device->list);
1110 			was_on_pcie_device_list = 1;
1111 			pcie_device_put(pcie_device);
1112 		}
1113 	}
1114 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1115 	if (was_on_pcie_device_list) {
1116 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1117 		pcie_device_put(pcie_device);
1118 	}
1119 }
1120 
1121 /**
1122  * _scsih_pcie_device_add - add pcie_device object
1123  * @ioc: per adapter object
1124  * @pcie_device: pcie_device object
1125  *
1126  * This is added to the pcie_device_list linked list.
1127  */
1128 static void
1129 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1130 	struct _pcie_device *pcie_device)
1131 {
1132 	unsigned long flags;
1133 
1134 	dewtprintk(ioc,
1135 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1136 			    __func__,
1137 			    pcie_device->handle, (u64)pcie_device->wwid));
1138 	if (pcie_device->enclosure_handle != 0)
1139 		dewtprintk(ioc,
1140 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1141 				    __func__,
1142 				    (u64)pcie_device->enclosure_logical_id,
1143 				    pcie_device->slot));
1144 	if (pcie_device->connector_name[0] != '\0')
1145 		dewtprintk(ioc,
1146 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1147 				    __func__, pcie_device->enclosure_level,
1148 				    pcie_device->connector_name));
1149 
1150 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1151 	pcie_device_get(pcie_device);
1152 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1153 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1154 
1155 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1156 		_scsih_pcie_device_remove(ioc, pcie_device);
1157 	} else if (!pcie_device->starget) {
1158 		if (!ioc->is_driver_loading) {
1159 /*TODO-- Need to find out whether this condition will occur or not*/
1160 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1161 		}
1162 	} else
1163 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1164 }
1165 
1166 /**
1167  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1168  * @ioc: per adapter object
1169  * @pcie_device: the pcie_device object
1170  * Context: This function will acquire ioc->pcie_device_lock.
1171  *
1172  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1173  */
1174 static void
1175 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1176 				struct _pcie_device *pcie_device)
1177 {
1178 	unsigned long flags;
1179 
1180 	dewtprintk(ioc,
1181 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1182 			    __func__,
1183 			    pcie_device->handle, (u64)pcie_device->wwid));
1184 	if (pcie_device->enclosure_handle != 0)
1185 		dewtprintk(ioc,
1186 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1187 				    __func__,
1188 				    (u64)pcie_device->enclosure_logical_id,
1189 				    pcie_device->slot));
1190 	if (pcie_device->connector_name[0] != '\0')
1191 		dewtprintk(ioc,
1192 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1193 				    __func__, pcie_device->enclosure_level,
1194 				    pcie_device->connector_name));
1195 
1196 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1197 	pcie_device_get(pcie_device);
1198 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1199 	_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1200 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1201 }
1202 /**
1203  * _scsih_raid_device_find_by_id - raid device search
1204  * @ioc: per adapter object
1205  * @id: sas device target id
1206  * @channel: sas device channel
1207  * Context: Calling function should acquire ioc->raid_device_lock
1208  *
1209  * This searches for raid_device based on target id, then return raid_device
1210  * object.
1211  */
1212 static struct _raid_device *
1213 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1214 {
1215 	struct _raid_device *raid_device, *r;
1216 
1217 	r = NULL;
1218 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1219 		if (raid_device->id == id && raid_device->channel == channel) {
1220 			r = raid_device;
1221 			goto out;
1222 		}
1223 	}
1224 
1225  out:
1226 	return r;
1227 }
1228 
1229 /**
1230  * mpt3sas_raid_device_find_by_handle - raid device search
1231  * @ioc: per adapter object
1232  * @handle: sas device handle (assigned by firmware)
1233  * Context: Calling function should acquire ioc->raid_device_lock
1234  *
1235  * This searches for raid_device based on handle, then return raid_device
1236  * object.
1237  */
1238 struct _raid_device *
1239 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1240 {
1241 	struct _raid_device *raid_device, *r;
1242 
1243 	r = NULL;
1244 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1245 		if (raid_device->handle != handle)
1246 			continue;
1247 		r = raid_device;
1248 		goto out;
1249 	}
1250 
1251  out:
1252 	return r;
1253 }
1254 
1255 /**
1256  * _scsih_raid_device_find_by_wwid - raid device search
1257  * @ioc: per adapter object
1258  * @wwid: world wide identifier of the raid volume
1259  * Context: Calling function should acquire ioc->raid_device_lock
1260  *
1261  * This searches for raid_device based on wwid, then return raid_device
1262  * object.
1263  */
1264 static struct _raid_device *
1265 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1266 {
1267 	struct _raid_device *raid_device, *r;
1268 
1269 	r = NULL;
1270 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1271 		if (raid_device->wwid != wwid)
1272 			continue;
1273 		r = raid_device;
1274 		goto out;
1275 	}
1276 
1277  out:
1278 	return r;
1279 }
1280 
1281 /**
1282  * _scsih_raid_device_add - add raid_device object
1283  * @ioc: per adapter object
1284  * @raid_device: raid_device object
1285  *
1286  * This is added to the raid_device_list linked list.
1287  */
1288 static void
1289 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1290 	struct _raid_device *raid_device)
1291 {
1292 	unsigned long flags;
1293 
1294 	dewtprintk(ioc,
1295 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1296 			    __func__,
1297 			    raid_device->handle, (u64)raid_device->wwid));
1298 
1299 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1300 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1301 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1302 }
1303 
1304 /**
1305  * _scsih_raid_device_remove - delete raid_device object
1306  * @ioc: per adapter object
1307  * @raid_device: raid_device object
1308  *
1309  */
1310 static void
1311 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1312 	struct _raid_device *raid_device)
1313 {
1314 	unsigned long flags;
1315 
1316 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1317 	list_del(&raid_device->list);
1318 	kfree(raid_device);
1319 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1320 }
1321 
1322 /**
1323  * mpt3sas_scsih_expander_find_by_handle - expander device search
1324  * @ioc: per adapter object
1325  * @handle: expander handle (assigned by firmware)
1326  * Context: Calling function should acquire ioc->sas_node_lock
1327  *
1328  * This searches for expander device based on handle, then returns the
1329  * sas_node object.
1330  */
1331 struct _sas_node *
1332 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1333 {
1334 	struct _sas_node *sas_expander, *r;
1335 
1336 	r = NULL;
1337 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1338 		if (sas_expander->handle != handle)
1339 			continue;
1340 		r = sas_expander;
1341 		goto out;
1342 	}
1343  out:
1344 	return r;
1345 }
1346 
1347 /**
1348  * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1349  * @ioc: per adapter object
1350  * @handle: enclosure handle (assigned by firmware)
1351  * Context: Calling function should acquire ioc->sas_device_lock
1352  *
1353  * This searches for enclosure device based on handle, then returns the
1354  * enclosure object.
1355  */
1356 static struct _enclosure_node *
1357 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1358 {
1359 	struct _enclosure_node *enclosure_dev, *r;
1360 
1361 	r = NULL;
1362 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1363 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1364 			continue;
1365 		r = enclosure_dev;
1366 		goto out;
1367 	}
1368 out:
1369 	return r;
1370 }
1371 /**
1372  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1373  * @ioc: per adapter object
1374  * @sas_address: sas address
1375  * Context: Calling function should acquire ioc->sas_node_lock.
1376  *
1377  * This searches for expander device based on sas_address, then returns the
1378  * sas_node object.
1379  */
1380 struct _sas_node *
1381 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1382 	u64 sas_address)
1383 {
1384 	struct _sas_node *sas_expander, *r;
1385 
1386 	r = NULL;
1387 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1388 		if (sas_expander->sas_address != sas_address)
1389 			continue;
1390 		r = sas_expander;
1391 		goto out;
1392 	}
1393  out:
1394 	return r;
1395 }
1396 
1397 /**
1398  * _scsih_expander_node_add - insert expander device to the list.
1399  * @ioc: per adapter object
1400  * @sas_expander: the sas_device object
1401  * Context: This function will acquire ioc->sas_node_lock.
1402  *
1403  * Adding new object to the ioc->sas_expander_list.
1404  */
1405 static void
1406 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1407 	struct _sas_node *sas_expander)
1408 {
1409 	unsigned long flags;
1410 
1411 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1412 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1413 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1414 }
1415 
1416 /**
1417  * _scsih_is_end_device - determines if device is an end device
1418  * @device_info: bitfield providing information about the device.
1419  * Context: none
1420  *
1421  * Return: 1 if end device.
1422  */
1423 static int
1424 _scsih_is_end_device(u32 device_info)
1425 {
1426 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1427 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1428 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1429 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1430 		return 1;
1431 	else
1432 		return 0;
1433 }
1434 
1435 /**
1436  * _scsih_is_nvme_device - determines if device is an nvme device
1437  * @device_info: bitfield providing information about the device.
1438  * Context: none
1439  *
1440  * Return: 1 if nvme device.
1441  */
1442 static int
1443 _scsih_is_nvme_device(u32 device_info)
1444 {
1445 	if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1446 					== MPI26_PCIE_DEVINFO_NVME)
1447 		return 1;
1448 	else
1449 		return 0;
1450 }
1451 
1452 /**
1453  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1454  * @ioc: per adapter object
1455  * @smid: system request message index
1456  *
1457  * Return: the scmd pointer stored for this smid, or NULL if the smid is not
1458  * associated with an outstanding SCSI IO request at the driver level.
1459  */
1460 struct scsi_cmnd *
1461 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1462 {
1463 	struct scsi_cmnd *scmd = NULL;
1464 	struct scsiio_tracker *st;
1465 	Mpi25SCSIIORequest_t *mpi_request;
1466 
1467 	if (smid > 0  &&
1468 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1469 		u32 unique_tag = smid - 1;
1470 
1471 		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1472 
1473 		/*
1474 		 * If SCSI IO request is outstanding at driver level then
1475 		 * DevHandle field must be non-zero. If DevHandle is zero
1476 		 * then it means that this smid is free at driver level,
1477 		 * so return NULL.
1478 		 */
1479 		if (!mpi_request->DevHandle)
1480 			return scmd;
1481 
1482 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1483 		if (scmd) {
1484 			st = scsi_cmd_priv(scmd);
1485 			if (st->cb_idx == 0xFF || st->smid == 0)
1486 				scmd = NULL;
1487 		}
1488 	}
1489 	return scmd;
1490 }
1491 
1492 /**
1493  * scsih_change_queue_depth - setting device queue depth
1494  * @sdev: scsi device struct
1495  * @qdepth: requested queue depth
1496  *
1497  * Return: queue depth.
1498  */
1499 static int
1500 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1501 {
1502 	struct Scsi_Host *shost = sdev->host;
1503 	int max_depth;
1504 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1505 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1506 	struct MPT3SAS_TARGET *sas_target_priv_data;
1507 	struct _sas_device *sas_device;
1508 	unsigned long flags;
1509 
1510 	max_depth = shost->can_queue;
1511 
1512 	/* limit max device queue for SATA to 32 */
1513 	sas_device_priv_data = sdev->hostdata;
1514 	if (!sas_device_priv_data)
1515 		goto not_sata;
1516 	sas_target_priv_data = sas_device_priv_data->sas_target;
1517 	if (!sas_target_priv_data)
1518 		goto not_sata;
1519 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1520 		goto not_sata;
1521 
1522 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1523 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1524 	if (sas_device) {
1525 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1526 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1527 
1528 		sas_device_put(sas_device);
1529 	}
1530 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1531 
1532  not_sata:
1533 
1534 	if (!sdev->tagged_supported)
1535 		max_depth = 1;
1536 	if (qdepth > max_depth)
1537 		qdepth = max_depth;
1538 	return scsi_change_queue_depth(sdev, qdepth);
1539 }
1540 
1541 /**
1542  * scsih_target_alloc - target add routine
1543  * @starget: scsi target struct
1544  *
1545  * Return: 0 if ok. Any other return is assumed to be an error and
1546  * the device is ignored.
1547  */
1548 static int
1549 scsih_target_alloc(struct scsi_target *starget)
1550 {
1551 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1552 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1553 	struct MPT3SAS_TARGET *sas_target_priv_data;
1554 	struct _sas_device *sas_device;
1555 	struct _raid_device *raid_device;
1556 	struct _pcie_device *pcie_device;
1557 	unsigned long flags;
1558 	struct sas_rphy *rphy;
1559 
1560 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1561 				       GFP_KERNEL);
1562 	if (!sas_target_priv_data)
1563 		return -ENOMEM;
1564 
1565 	starget->hostdata = sas_target_priv_data;
1566 	sas_target_priv_data->starget = starget;
1567 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1568 
1569 	/* RAID volumes */
1570 	if (starget->channel == RAID_CHANNEL) {
1571 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1572 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1573 		    starget->channel);
1574 		if (raid_device) {
1575 			sas_target_priv_data->handle = raid_device->handle;
1576 			sas_target_priv_data->sas_address = raid_device->wwid;
1577 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1578 			if (ioc->is_warpdrive)
1579 				sas_target_priv_data->raid_device = raid_device;
1580 			raid_device->starget = starget;
1581 		}
1582 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1583 		return 0;
1584 	}
1585 
1586 	/* PCIe devices */
1587 	if (starget->channel == PCIE_CHANNEL) {
1588 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1589 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1590 			starget->channel);
1591 		if (pcie_device) {
1592 			sas_target_priv_data->handle = pcie_device->handle;
1593 			sas_target_priv_data->sas_address = pcie_device->wwid;
1594 			sas_target_priv_data->pcie_dev = pcie_device;
1595 			pcie_device->starget = starget;
1596 			pcie_device->id = starget->id;
1597 			pcie_device->channel = starget->channel;
1598 			sas_target_priv_data->flags |=
1599 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1600 			if (pcie_device->fast_path)
1601 				sas_target_priv_data->flags |=
1602 					MPT_TARGET_FASTPATH_IO;
1603 		}
1604 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1605 		return 0;
1606 	}
1607 
1608 	/* sas/sata devices */
1609 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1610 	rphy = dev_to_rphy(starget->dev.parent);
1611 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1612 	   rphy->identify.sas_address);
1613 
1614 	if (sas_device) {
1615 		sas_target_priv_data->handle = sas_device->handle;
1616 		sas_target_priv_data->sas_address = sas_device->sas_address;
1617 		sas_target_priv_data->sas_dev = sas_device;
1618 		sas_device->starget = starget;
1619 		sas_device->id = starget->id;
1620 		sas_device->channel = starget->channel;
1621 		if (test_bit(sas_device->handle, ioc->pd_handles))
1622 			sas_target_priv_data->flags |=
1623 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1624 		if (sas_device->fast_path)
1625 			sas_target_priv_data->flags |=
1626 					MPT_TARGET_FASTPATH_IO;
1627 	}
1628 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1629 
1630 	return 0;
1631 }
1632 
1633 /**
1634  * scsih_target_destroy - target destroy routine
1635  * @starget: scsi target struct
1636  */
1637 static void
1638 scsih_target_destroy(struct scsi_target *starget)
1639 {
1640 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1641 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1642 	struct MPT3SAS_TARGET *sas_target_priv_data;
1643 	struct _sas_device *sas_device;
1644 	struct _raid_device *raid_device;
1645 	struct _pcie_device *pcie_device;
1646 	unsigned long flags;
1647 
1648 	sas_target_priv_data = starget->hostdata;
1649 	if (!sas_target_priv_data)
1650 		return;
1651 
1652 	if (starget->channel == RAID_CHANNEL) {
1653 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1654 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1655 		    starget->channel);
1656 		if (raid_device) {
1657 			raid_device->starget = NULL;
1658 			raid_device->sdev = NULL;
1659 		}
1660 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1661 		goto out;
1662 	}
1663 
1664 	if (starget->channel == PCIE_CHANNEL) {
1665 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1666 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1667 							sas_target_priv_data);
1668 		if (pcie_device && (pcie_device->starget == starget) &&
1669 			(pcie_device->id == starget->id) &&
1670 			(pcie_device->channel == starget->channel))
1671 			pcie_device->starget = NULL;
1672 
1673 		if (pcie_device) {
1674 			/*
1675 			 * Corresponding get() is in _scsih_target_alloc()
1676 			 */
1677 			sas_target_priv_data->pcie_dev = NULL;
1678 			pcie_device_put(pcie_device);
1679 			pcie_device_put(pcie_device);
1680 		}
1681 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1682 		goto out;
1683 	}
1684 
1685 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1686 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1687 	if (sas_device && (sas_device->starget == starget) &&
1688 	    (sas_device->id == starget->id) &&
1689 	    (sas_device->channel == starget->channel))
1690 		sas_device->starget = NULL;
1691 
1692 	if (sas_device) {
1693 		/*
1694 		 * Corresponding get() is in _scsih_target_alloc()
1695 		 */
1696 		sas_target_priv_data->sas_dev = NULL;
1697 		sas_device_put(sas_device);
1698 
1699 		sas_device_put(sas_device);
1700 	}
1701 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1702 
1703  out:
1704 	kfree(sas_target_priv_data);
1705 	starget->hostdata = NULL;
1706 }
1707 
1708 /**
1709  * scsih_slave_alloc - device add routine
1710  * @sdev: scsi device struct
1711  *
1712  * Return: 0 if ok. Any other return is assumed to be an error and
1713  * the device is ignored.
1714  */
1715 static int
1716 scsih_slave_alloc(struct scsi_device *sdev)
1717 {
1718 	struct Scsi_Host *shost;
1719 	struct MPT3SAS_ADAPTER *ioc;
1720 	struct MPT3SAS_TARGET *sas_target_priv_data;
1721 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1722 	struct scsi_target *starget;
1723 	struct _raid_device *raid_device;
1724 	struct _sas_device *sas_device;
1725 	struct _pcie_device *pcie_device;
1726 	unsigned long flags;
1727 
1728 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1729 				       GFP_KERNEL);
1730 	if (!sas_device_priv_data)
1731 		return -ENOMEM;
1732 
1733 	sas_device_priv_data->lun = sdev->lun;
1734 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1735 
1736 	starget = scsi_target(sdev);
1737 	sas_target_priv_data = starget->hostdata;
1738 	sas_target_priv_data->num_luns++;
1739 	sas_device_priv_data->sas_target = sas_target_priv_data;
1740 	sdev->hostdata = sas_device_priv_data;
1741 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1742 		sdev->no_uld_attach = 1;
1743 
1744 	shost = dev_to_shost(&starget->dev);
1745 	ioc = shost_priv(shost);
1746 	if (starget->channel == RAID_CHANNEL) {
1747 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1748 		raid_device = _scsih_raid_device_find_by_id(ioc,
1749 		    starget->id, starget->channel);
1750 		if (raid_device)
1751 			raid_device->sdev = sdev; /* raid is single lun */
1752 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1753 	}
1754 	if (starget->channel == PCIE_CHANNEL) {
1755 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1756 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1757 				sas_target_priv_data->sas_address);
1758 		if (pcie_device && (pcie_device->starget == NULL)) {
1759 			sdev_printk(KERN_INFO, sdev,
1760 			    "%s : pcie_device->starget set to starget @ %d\n",
1761 			    __func__, __LINE__);
1762 			pcie_device->starget = starget;
1763 		}
1764 
1765 		if (pcie_device)
1766 			pcie_device_put(pcie_device);
1767 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1768 
1769 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1770 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1771 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1772 					sas_target_priv_data->sas_address);
1773 		if (sas_device && (sas_device->starget == NULL)) {
1774 			sdev_printk(KERN_INFO, sdev,
1775 			"%s : sas_device->starget set to starget @ %d\n",
1776 			     __func__, __LINE__);
1777 			sas_device->starget = starget;
1778 		}
1779 
1780 		if (sas_device)
1781 			sas_device_put(sas_device);
1782 
1783 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1784 	}
1785 
1786 	return 0;
1787 }
1788 
1789 /**
1790  * scsih_slave_destroy - device destroy routine
1791  * @sdev: scsi device struct
1792  */
1793 static void
1794 scsih_slave_destroy(struct scsi_device *sdev)
1795 {
1796 	struct MPT3SAS_TARGET *sas_target_priv_data;
1797 	struct scsi_target *starget;
1798 	struct Scsi_Host *shost;
1799 	struct MPT3SAS_ADAPTER *ioc;
1800 	struct _sas_device *sas_device;
1801 	struct _pcie_device *pcie_device;
1802 	unsigned long flags;
1803 
1804 	if (!sdev->hostdata)
1805 		return;
1806 
1807 	starget = scsi_target(sdev);
1808 	sas_target_priv_data = starget->hostdata;
1809 	sas_target_priv_data->num_luns--;
1810 
1811 	shost = dev_to_shost(&starget->dev);
1812 	ioc = shost_priv(shost);
1813 
1814 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1815 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1816 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1817 				sas_target_priv_data);
1818 		if (pcie_device && !sas_target_priv_data->num_luns)
1819 			pcie_device->starget = NULL;
1820 
1821 		if (pcie_device)
1822 			pcie_device_put(pcie_device);
1823 
1824 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1825 
1826 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1827 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1828 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1829 				sas_target_priv_data);
1830 		if (sas_device && !sas_target_priv_data->num_luns)
1831 			sas_device->starget = NULL;
1832 
1833 		if (sas_device)
1834 			sas_device_put(sas_device);
1835 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1836 	}
1837 
1838 	kfree(sdev->hostdata);
1839 	sdev->hostdata = NULL;
1840 }
1841 
1842 /**
1843  * _scsih_display_sata_capabilities - sata capabilities
1844  * @ioc: per adapter object
1845  * @handle: device handle
1846  * @sdev: scsi device struct
1847  */
1848 static void
1849 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1850 	u16 handle, struct scsi_device *sdev)
1851 {
1852 	Mpi2ConfigReply_t mpi_reply;
1853 	Mpi2SasDevicePage0_t sas_device_pg0;
1854 	u32 ioc_status;
1855 	u16 flags;
1856 	u32 device_info;
1857 
1858 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1859 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1860 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1861 			__FILE__, __LINE__, __func__);
1862 		return;
1863 	}
1864 
1865 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1866 	    MPI2_IOCSTATUS_MASK;
1867 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1868 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1869 			__FILE__, __LINE__, __func__);
1870 		return;
1871 	}
1872 
1873 	flags = le16_to_cpu(sas_device_pg0.Flags);
1874 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1875 
1876 	sdev_printk(KERN_INFO, sdev,
1877 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1878 	    "sw_preserve(%s)\n",
1879 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1880 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1881 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1882 	    "n",
1883 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1884 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1885 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1886 }
1887 
1888 /*
1889  * raid transport support -
1890  * Enabled for SLES11 and newer. In older kernels the driver will panic when
1891  * it is unloaded and then loaded again - the subroutine
1892  * raid_class_release() does not appear to clean up properly.
1893  */
1894 
1895 /**
1896  * scsih_is_raid - return boolean indicating device is raid volume
1897  * @dev: the device struct object
1898  */
1899 static int
1900 scsih_is_raid(struct device *dev)
1901 {
1902 	struct scsi_device *sdev = to_scsi_device(dev);
1903 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1904 
1905 	if (ioc->is_warpdrive)
1906 		return 0;
1907 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1908 }
1909 
1910 static int
1911 scsih_is_nvme(struct device *dev)
1912 {
1913 	struct scsi_device *sdev = to_scsi_device(dev);
1914 
1915 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1916 }
1917 
1918 /**
1919  * scsih_get_resync - get raid volume resync percent complete
1920  * @dev: the device struct object
1921  */
1922 static void
1923 scsih_get_resync(struct device *dev)
1924 {
1925 	struct scsi_device *sdev = to_scsi_device(dev);
1926 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1927 	static struct _raid_device *raid_device;
1928 	unsigned long flags;
1929 	Mpi2RaidVolPage0_t vol_pg0;
1930 	Mpi2ConfigReply_t mpi_reply;
1931 	u32 volume_status_flags;
1932 	u8 percent_complete;
1933 	u16 handle;
1934 
1935 	percent_complete = 0;
1936 	handle = 0;
1937 	if (ioc->is_warpdrive)
1938 		goto out;
1939 
1940 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1941 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1942 	    sdev->channel);
1943 	if (raid_device) {
1944 		handle = raid_device->handle;
1945 		percent_complete = raid_device->percent_complete;
1946 	}
1947 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1948 
1949 	if (!handle)
1950 		goto out;
1951 
1952 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1953 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1954 	     sizeof(Mpi2RaidVolPage0_t))) {
1955 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1956 			__FILE__, __LINE__, __func__);
1957 		percent_complete = 0;
1958 		goto out;
1959 	}
1960 
1961 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
1962 	if (!(volume_status_flags &
1963 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
1964 		percent_complete = 0;
1965 
1966  out:
1967 
1968 	switch (ioc->hba_mpi_version_belonged) {
1969 	case MPI2_VERSION:
1970 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
1971 		break;
1972 	case MPI25_VERSION:
1973 	case MPI26_VERSION:
1974 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
1975 		break;
1976 	}
1977 }
1978 
1979 /**
1980  * scsih_get_state - get raid volume state
1981  * @dev: the device struct object
1982  */
1983 static void
1984 scsih_get_state(struct device *dev)
1985 {
1986 	struct scsi_device *sdev = to_scsi_device(dev);
1987 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1988 	static struct _raid_device *raid_device;
1989 	unsigned long flags;
1990 	Mpi2RaidVolPage0_t vol_pg0;
1991 	Mpi2ConfigReply_t mpi_reply;
1992 	u32 volstate;
1993 	enum raid_state state = RAID_STATE_UNKNOWN;
1994 	u16 handle = 0;
1995 
1996 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1997 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1998 	    sdev->channel);
1999 	if (raid_device)
2000 		handle = raid_device->handle;
2001 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2002 
2003 	if (!raid_device)
2004 		goto out;
2005 
2006 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2007 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2008 	     sizeof(Mpi2RaidVolPage0_t))) {
2009 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2010 			__FILE__, __LINE__, __func__);
2011 		goto out;
2012 	}
2013 
2014 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2015 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2016 		state = RAID_STATE_RESYNCING;
2017 		goto out;
2018 	}
2019 
2020 	switch (vol_pg0.VolumeState) {
2021 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2022 	case MPI2_RAID_VOL_STATE_ONLINE:
2023 		state = RAID_STATE_ACTIVE;
2024 		break;
2025 	case  MPI2_RAID_VOL_STATE_DEGRADED:
2026 		state = RAID_STATE_DEGRADED;
2027 		break;
2028 	case MPI2_RAID_VOL_STATE_FAILED:
2029 	case MPI2_RAID_VOL_STATE_MISSING:
2030 		state = RAID_STATE_OFFLINE;
2031 		break;
2032 	}
2033  out:
2034 	switch (ioc->hba_mpi_version_belonged) {
2035 	case MPI2_VERSION:
2036 		raid_set_state(mpt2sas_raid_template, dev, state);
2037 		break;
2038 	case MPI25_VERSION:
2039 	case MPI26_VERSION:
2040 		raid_set_state(mpt3sas_raid_template, dev, state);
2041 		break;
2042 	}
2043 }
2044 
2045 /**
2046  * _scsih_set_level - set raid level
2047  * @ioc: per adapter object
2048  * @sdev: scsi device struct
2049  * @volume_type: volume type
2050  */
2051 static void
2052 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2053 	struct scsi_device *sdev, u8 volume_type)
2054 {
2055 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2056 
2057 	switch (volume_type) {
2058 	case MPI2_RAID_VOL_TYPE_RAID0:
2059 		level = RAID_LEVEL_0;
2060 		break;
2061 	case MPI2_RAID_VOL_TYPE_RAID10:
2062 		level = RAID_LEVEL_10;
2063 		break;
2064 	case MPI2_RAID_VOL_TYPE_RAID1E:
2065 		level = RAID_LEVEL_1E;
2066 		break;
2067 	case MPI2_RAID_VOL_TYPE_RAID1:
2068 		level = RAID_LEVEL_1;
2069 		break;
2070 	}
2071 
2072 	switch (ioc->hba_mpi_version_belonged) {
2073 	case MPI2_VERSION:
2074 		raid_set_level(mpt2sas_raid_template,
2075 			&sdev->sdev_gendev, level);
2076 		break;
2077 	case MPI25_VERSION:
2078 	case MPI26_VERSION:
2079 		raid_set_level(mpt3sas_raid_template,
2080 			&sdev->sdev_gendev, level);
2081 		break;
2082 	}
2083 }
2084 
2085 
2086 /**
2087  * _scsih_get_volume_capabilities - volume capabilities
2088  * @ioc: per adapter object
2089  * @raid_device: the raid_device object
2090  *
2091  * Return: 0 for success, else 1
2092  */
2093 static int
2094 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2095 	struct _raid_device *raid_device)
2096 {
2097 	Mpi2RaidVolPage0_t *vol_pg0;
2098 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2099 	Mpi2SasDevicePage0_t sas_device_pg0;
2100 	Mpi2ConfigReply_t mpi_reply;
2101 	u16 sz;
2102 	u8 num_pds;
2103 
2104 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2105 	    &num_pds)) || !num_pds) {
2106 		dfailprintk(ioc,
2107 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2108 				     __FILE__, __LINE__, __func__));
2109 		return 1;
2110 	}
2111 
2112 	raid_device->num_pds = num_pds;
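	/* Page 0 size: fixed header plus one PhysDisk entry per member drive. */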
2113 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2114 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2115 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2116 	if (!vol_pg0) {
2117 		dfailprintk(ioc,
2118 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2119 				     __FILE__, __LINE__, __func__));
2120 		return 1;
2121 	}
2122 
2123 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2124 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2125 		dfailprintk(ioc,
2126 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2127 				     __FILE__, __LINE__, __func__));
2128 		kfree(vol_pg0);
2129 		return 1;
2130 	}
2131 
2132 	raid_device->volume_type = vol_pg0->VolumeType;
2133 
2134 	/* figure out what the underlying devices are by
2135 	 * obtaining the device_info bits for the 1st device
2136 	 */
2137 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2138 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2139 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2140 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2141 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2142 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2143 			raid_device->device_info =
2144 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2145 		}
2146 	}
2147 
2148 	kfree(vol_pg0);
2149 	return 0;
2150 }
2151 
2152 /**
2153  * _scsih_enable_tlr - setting TLR flags
2154  * @ioc: per adapter object
2155  * @sdev: scsi device struct
2156  *
2157  * Enable Transaction Layer Retries for tape devices when
2158  * VPD page 0x90 is present.
2159  *
2160  */
2161 static void
2162 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2163 {
2164 
2165 	/* only for TAPE */
2166 	if (sdev->type != TYPE_TAPE)
2167 		return;
2168 
2169 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2170 		return;
2171 
2172 	sas_enable_tlr(sdev);
2173 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2174 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2175 	return;
2176 
2177 }
2178 
2179 /**
2180  * scsih_slave_configure - device configure routine.
2181  * @sdev: scsi device struct
2182  *
2183  * Return: 0 if ok. Any other return is assumed to be an error and
2184  * the device is ignored.
2185  */
2186 static int
2187 scsih_slave_configure(struct scsi_device *sdev)
2188 {
2189 	struct Scsi_Host *shost = sdev->host;
2190 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2191 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2192 	struct MPT3SAS_TARGET *sas_target_priv_data;
2193 	struct _sas_device *sas_device;
2194 	struct _pcie_device *pcie_device;
2195 	struct _raid_device *raid_device;
2196 	unsigned long flags;
2197 	int qdepth;
2198 	u8 ssp_target = 0;
2199 	char *ds = "";
2200 	char *r_level = "";
2201 	u16 handle, volume_handle = 0;
2202 	u64 volume_wwid = 0;
2203 
2204 	qdepth = 1;
2205 	sas_device_priv_data = sdev->hostdata;
2206 	sas_device_priv_data->configured_lun = 1;
2207 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2208 	sas_target_priv_data = sas_device_priv_data->sas_target;
2209 	handle = sas_target_priv_data->handle;
2210 
2211 	/* raid volume handling */
2212 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2213 
2214 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2215 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2216 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2217 		if (!raid_device) {
2218 			dfailprintk(ioc,
2219 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2220 					     __FILE__, __LINE__, __func__));
2221 			return 1;
2222 		}
2223 
2224 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2225 			dfailprintk(ioc,
2226 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2227 					     __FILE__, __LINE__, __func__));
2228 			return 1;
2229 		}
2230 
2231 		/*
2232 		 * WARPDRIVE: Initialize the required data for Direct IO
2233 		 */
2234 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2235 
2236 		/* RAID Queue Depth Support
2237 		 * IS volume = underlying qdepth of drive type, either
2238 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2239 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2240 		 */
2241 		if (raid_device->device_info &
2242 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2243 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2244 			ds = "SSP";
2245 		} else {
2246 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2247 			if (raid_device->device_info &
2248 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2249 				ds = "SATA";
2250 			else
2251 				ds = "STP";
2252 		}
2253 
2254 		switch (raid_device->volume_type) {
2255 		case MPI2_RAID_VOL_TYPE_RAID0:
2256 			r_level = "RAID0";
2257 			break;
2258 		case MPI2_RAID_VOL_TYPE_RAID1E:
2259 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2260 			if (ioc->manu_pg10.OEMIdentifier &&
2261 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2262 			    MFG10_GF0_R10_DISPLAY) &&
2263 			    !(raid_device->num_pds % 2))
2264 				r_level = "RAID10";
2265 			else
2266 				r_level = "RAID1E";
2267 			break;
2268 		case MPI2_RAID_VOL_TYPE_RAID1:
2269 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2270 			r_level = "RAID1";
2271 			break;
2272 		case MPI2_RAID_VOL_TYPE_RAID10:
2273 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2274 			r_level = "RAID10";
2275 			break;
2276 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2277 		default:
2278 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2279 			r_level = "RAIDX";
2280 			break;
2281 		}
2282 
2283 		if (!ioc->hide_ir_msg)
2284 			sdev_printk(KERN_INFO, sdev,
2285 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2286 			    " pd_count(%d), type(%s)\n",
2287 			    r_level, raid_device->handle,
2288 			    (unsigned long long)raid_device->wwid,
2289 			    raid_device->num_pds, ds);
2290 
2291 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2292 			blk_queue_max_hw_sectors(sdev->request_queue,
2293 						MPT3SAS_RAID_MAX_SECTORS);
2294 			sdev_printk(KERN_INFO, sdev,
2295 					"Set queue's max_sector to: %u\n",
2296 						MPT3SAS_RAID_MAX_SECTORS);
2297 		}
2298 
2299 		scsih_change_queue_depth(sdev, qdepth);
2300 
2301 		/* raid transport support */
2302 		if (!ioc->is_warpdrive)
2303 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2304 		return 0;
2305 	}
2306 
2307 	/* non-raid handling */
2308 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2309 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2310 		    &volume_handle)) {
2311 			dfailprintk(ioc,
2312 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2313 					     __FILE__, __LINE__, __func__));
2314 			return 1;
2315 		}
2316 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2317 		    volume_handle, &volume_wwid)) {
2318 			dfailprintk(ioc,
2319 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2320 					     __FILE__, __LINE__, __func__));
2321 			return 1;
2322 		}
2323 	}
2324 
2325 	/* PCIe handling */
2326 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2327 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2328 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2329 				sas_device_priv_data->sas_target->sas_address);
2330 		if (!pcie_device) {
2331 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2332 			dfailprintk(ioc,
2333 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2334 					     __FILE__, __LINE__, __func__));
2335 			return 1;
2336 		}
2337 
2338 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2339 		ds = "NVMe";
2340 		sdev_printk(KERN_INFO, sdev,
2341 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2342 			ds, handle, (unsigned long long)pcie_device->wwid,
2343 			pcie_device->port_num);
2344 		if (pcie_device->enclosure_handle != 0)
2345 			sdev_printk(KERN_INFO, sdev,
2346 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2347 			ds,
2348 			(unsigned long long)pcie_device->enclosure_logical_id,
2349 			pcie_device->slot);
2350 		if (pcie_device->connector_name[0] != '\0')
2351 			sdev_printk(KERN_INFO, sdev,
2352 				"%s: enclosure level(0x%04x), "
2353 				"connector name( %s)\n", ds,
2354 				pcie_device->enclosure_level,
2355 				pcie_device->connector_name);
2356 
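		/*
		 * nvme_mdts is the maximum data transfer size in bytes;
		 * convert it to 512-byte sectors for the block layer.
		 */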
2357 		if (pcie_device->nvme_mdts)
2358 			blk_queue_max_hw_sectors(sdev->request_queue,
2359 					pcie_device->nvme_mdts/512);
2360 
2361 		pcie_device_put(pcie_device);
2362 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2363 		scsih_change_queue_depth(sdev, qdepth);
2364 		/*
2365 		 * Enable the QUEUE_FLAG_NOMERGES flag so that IOs are not
2366 		 * merged; this avoids the holes that merging would create.
2367 		 */
2368 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2369 				sdev->request_queue);
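		/*
		 * Set the virtual boundary to the controller page size so
		 * that SG entries line up with NVMe PRP requirements.
		 */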
2370 		blk_queue_virt_boundary(sdev->request_queue,
2371 				ioc->page_size - 1);
2372 		return 0;
2373 	}
2374 
2375 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2376 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2377 	   sas_device_priv_data->sas_target->sas_address);
2378 	if (!sas_device) {
2379 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2380 		dfailprintk(ioc,
2381 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2382 				     __FILE__, __LINE__, __func__));
2383 		return 1;
2384 	}
2385 
2386 	sas_device->volume_handle = volume_handle;
2387 	sas_device->volume_wwid = volume_wwid;
2388 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2389 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2390 		ssp_target = 1;
2391 		if (sas_device->device_info &
2392 				MPI2_SAS_DEVICE_INFO_SEP) {
2393 			sdev_printk(KERN_WARNING, sdev,
2394 			"set ignore_delay_remove for handle(0x%04x)\n",
2395 			sas_device_priv_data->sas_target->handle);
2396 			sas_device_priv_data->ignore_delay_remove = 1;
2397 			ds = "SES";
2398 		} else
2399 			ds = "SSP";
2400 	} else {
2401 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2402 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2403 			ds = "STP";
2404 		else if (sas_device->device_info &
2405 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2406 			ds = "SATA";
2407 	}
2408 
2409 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2410 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2411 	    ds, handle, (unsigned long long)sas_device->sas_address,
2412 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2413 
2414 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2415 
2416 	sas_device_put(sas_device);
2417 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2418 
2419 	if (!ssp_target)
2420 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2421 
2422 
2423 	scsih_change_queue_depth(sdev, qdepth);
2424 
2425 	if (ssp_target) {
2426 		sas_read_port_mode_page(sdev);
2427 		_scsih_enable_tlr(ioc, sdev);
2428 	}
2429 
2430 	return 0;
2431 }
2432 
2433 /**
2434  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2435  * @sdev: scsi device struct
2436  * @bdev: pointer to block device context
2437  * @capacity: device size (in 512 byte sectors)
2438  * @params: three element array to place output:
2439  *              params[0] number of heads (max 255)
2440  *              params[1] number of sectors (max 63)
2441  *              params[2] number of cylinders
2442  */
2443 static int
2444 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2445 	sector_t capacity, int params[])
2446 {
2447 	int		heads;
2448 	int		sectors;
2449 	sector_t	cylinders;
2450 	ulong		dummy;
2451 
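	/*
	 * Default translation: 64 heads x 32 sectors/track,
	 * i.e. cylinders = capacity / 2048.
	 */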
2452 	heads = 64;
2453 	sectors = 32;
2454 
2455 	dummy = heads * sectors;
2456 	cylinders = capacity;
2457 	sector_div(cylinders, dummy);
2458 
2459 	/*
2460 	 * Handle extended translation size for logical drives
2461 	 * larger than 1 GB (0x200000 512-byte sectors)
2462 	 */
2463 	if ((ulong)capacity >= 0x200000) {
2464 		heads = 255;
2465 		sectors = 63;
2466 		dummy = heads * sectors;
2467 		cylinders = capacity;
2468 		sector_div(cylinders, dummy);
2469 	}
2470 
2471 	/* return result */
2472 	params[0] = heads;
2473 	params[1] = sectors;
2474 	params[2] = cylinders;
2475 
2476 	return 0;
2477 }
2478 
2479 /**
2480  * _scsih_response_code - translation of device response code
2481  * @ioc: per adapter object
2482  * @response_code: response code returned by the device
2483  */
2484 static void
2485 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2486 {
2487 	char *desc;
2488 
2489 	switch (response_code) {
2490 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2491 		desc = "task management request completed";
2492 		break;
2493 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2494 		desc = "invalid frame";
2495 		break;
2496 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2497 		desc = "task management request not supported";
2498 		break;
2499 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2500 		desc = "task management request failed";
2501 		break;
2502 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2503 		desc = "task management request succeeded";
2504 		break;
2505 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2506 		desc = "invalid lun";
2507 		break;
2508 	case 0xA:
2509 		desc = "overlapped tag attempted";
2510 		break;
2511 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2512 		desc = "task queued, however not sent to target";
2513 		break;
2514 	default:
2515 		desc = "unknown";
2516 		break;
2517 	}
2518 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2519 }
2520 
2521 /**
2522  * _scsih_tm_done - tm completion routine
2523  * @ioc: per adapter object
2524  * @smid: system request message index
2525  * @msix_index: MSIX table index supplied by the OS
2526  * @reply: reply message frame(lower 32bit addr)
2527  * Context: none.
2528  *
2529  * The callback handler when using scsih_issue_tm.
2530  *
2531  * Return: 1 meaning mf should be freed from _base_interrupt
2532  *         0 means the mf is freed from this function.
2533  */
2534 static u8
2535 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2536 {
2537 	MPI2DefaultReply_t *mpi_reply;
2538 
2539 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2540 		return 1;
2541 	if (ioc->tm_cmds.smid != smid)
2542 		return 1;
2543 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2544 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2545 	if (mpi_reply) {
2546 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2547 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2548 	}
2549 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2550 	complete(&ioc->tm_cmds.done);
2551 	return 1;
2552 }
2553 
2554 /**
2555  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2556  * @ioc: per adapter object
2557  * @handle: device handle
2558  *
2559  * During a task management request, we need to freeze the device queue.
2560  */
2561 void
2562 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2563 {
2564 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2565 	struct scsi_device *sdev;
2566 	u8 skip = 0;
2567 
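	/*
	 * Once the matching handle is found, 'skip' makes the remaining
	 * iterations no-ops so that shost_for_each_device() still runs to
	 * completion and releases its per-device references.
	 */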
2568 	shost_for_each_device(sdev, ioc->shost) {
2569 		if (skip)
2570 			continue;
2571 		sas_device_priv_data = sdev->hostdata;
2572 		if (!sas_device_priv_data)
2573 			continue;
2574 		if (sas_device_priv_data->sas_target->handle == handle) {
2575 			sas_device_priv_data->sas_target->tm_busy = 1;
2576 			skip = 1;
2577 			ioc->ignore_loginfos = 1;
2578 		}
2579 	}
2580 }
2581 
2582 /**
2583  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2584  * @ioc: per adapter object
2585  * @handle: device handle
2586  *
2587  * During a task management request, we need to freeze the device queue.
2588  */
2589 void
2590 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2591 {
2592 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2593 	struct scsi_device *sdev;
2594 	u8 skip = 0;
2595 
2596 	shost_for_each_device(sdev, ioc->shost) {
2597 		if (skip)
2598 			continue;
2599 		sas_device_priv_data = sdev->hostdata;
2600 		if (!sas_device_priv_data)
2601 			continue;
2602 		if (sas_device_priv_data->sas_target->handle == handle) {
2603 			sas_device_priv_data->sas_target->tm_busy = 0;
2604 			skip = 1;
2605 			ioc->ignore_loginfos = 0;
2606 		}
2607 	}
2608 }
2609 
2610 /**
2611  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2612  * @ioc: per adapter struct
2613  * @handle: device handle
2614  * @lun: lun number
2615  * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in mpi2_init.h)
2616  * @smid_task: smid assigned to the task
2617  * @msix_task: MSIX table index supplied by the OS
2618  * @timeout: timeout in seconds
2619  * @tr_method: Target Reset Method
2620  * Context: user
2621  *
2622  * A generic API for sending task management requests to firmware.
2623  *
2624  * The callback index is set inside ioc->tm_cb_idx.
2625  * The caller is responsible for checking for outstanding commands.
2626  *
2627  * Return: SUCCESS or FAILED.
2628  */
2629 int
2630 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2631 	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2632 {
2633 	Mpi2SCSITaskManagementRequest_t *mpi_request;
2634 	Mpi2SCSITaskManagementReply_t *mpi_reply;
2635 	u16 smid = 0;
2636 	u32 ioc_state;
2637 	int rc;
2638 
2639 	lockdep_assert_held(&ioc->tm_cmds.mutex);
2640 
2641 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2642 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2643 		return FAILED;
2644 	}
2645 
2646 	if (ioc->shost_recovery || ioc->remove_host ||
2647 	    ioc->pci_error_recovery) {
2648 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2649 		return FAILED;
2650 	}
2651 
2652 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2653 	if (ioc_state & MPI2_DOORBELL_USED) {
2654 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2655 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2656 		return (!rc) ? SUCCESS : FAILED;
2657 	}
2658 
2659 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2660 		mpt3sas_base_fault_info(ioc, ioc_state &
2661 		    MPI2_DOORBELL_DATA_MASK);
2662 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2663 		return (!rc) ? SUCCESS : FAILED;
2664 	}
2665 
2666 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2667 	if (!smid) {
2668 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2669 		return FAILED;
2670 	}
2671 
2672 	dtmprintk(ioc,
2673 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2674 			   handle, type, smid_task, timeout, tr_method));
2675 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
2676 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2677 	ioc->tm_cmds.smid = smid;
2678 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2679 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2680 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2681 	mpi_request->DevHandle = cpu_to_le16(handle);
2682 	mpi_request->TaskType = type;
2683 	mpi_request->MsgFlags = tr_method;
2684 	mpi_request->TaskMID = cpu_to_le16(smid_task);
2685 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2686 	mpt3sas_scsih_set_tm_flag(ioc, handle);
2687 	init_completion(&ioc->tm_cmds.done);
2688 	ioc->put_smid_hi_priority(ioc, smid, msix_task);
2689 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
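	/*
	 * If the TM request did not complete within the timeout, check the
	 * command status and, when required, escalate to a hard reset.
	 */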
2690 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2691 		if (mpt3sas_base_check_cmd_timeout(ioc,
2692 			ioc->tm_cmds.status, mpi_request,
2693 			sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2694 			rc = mpt3sas_base_hard_reset_handler(ioc,
2695 					FORCE_BIG_HAMMER);
2696 			rc = (!rc) ? SUCCESS : FAILED;
2697 			goto out;
2698 		}
2699 	}
2700 
2701 	/* sync IRQs in case those were busy during flush. */
2702 	mpt3sas_base_sync_reply_irqs(ioc);
2703 
2704 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2705 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2706 		mpi_reply = ioc->tm_cmds.reply;
2707 		dtmprintk(ioc,
2708 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2709 				   le16_to_cpu(mpi_reply->IOCStatus),
2710 				   le32_to_cpu(mpi_reply->IOCLogInfo),
2711 				   le32_to_cpu(mpi_reply->TerminationCount)));
2712 		if (ioc->logging_level & MPT_DEBUG_TM) {
2713 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
2714 			if (mpi_reply->IOCStatus)
2715 				_debug_dump_mf(mpi_request,
2716 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2717 		}
2718 	}
2719 	rc = SUCCESS;
2720 
2721 out:
2722 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
2723 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2724 	return rc;
2725 }
2726 
2727 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2728 		u64 lun, u8 type, u16 smid_task, u16 msix_task,
2729 		u8 timeout, u8 tr_method)
2730 {
2731 	int ret;
2732 
2733 	mutex_lock(&ioc->tm_cmds.mutex);
2734 	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2735 			msix_task, timeout, tr_method);
2736 	mutex_unlock(&ioc->tm_cmds.mutex);
2737 
2738 	return ret;
2739 }
2740 
2741 /**
2742  * _scsih_tm_display_info - displays info about the device
2743  * @ioc: per adapter struct
2744  * @scmd: pointer to scsi command object
2745  *
2746  * Called by task management callback handlers.
2747  */
2748 static void
2749 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2750 {
2751 	struct scsi_target *starget = scmd->device->sdev_target;
2752 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2753 	struct _sas_device *sas_device = NULL;
2754 	struct _pcie_device *pcie_device = NULL;
2755 	unsigned long flags;
2756 	char *device_str = NULL;
2757 
2758 	if (!priv_target)
2759 		return;
2760 	if (ioc->hide_ir_msg)
2761 		device_str = "WarpDrive";
2762 	else
2763 		device_str = "volume";
2764 
2765 	scsi_print_command(scmd);
2766 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2767 		starget_printk(KERN_INFO, starget,
2768 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
2769 			device_str, priv_target->handle,
2770 		    device_str, (unsigned long long)priv_target->sas_address);
2771 
2772 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2773 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2774 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2775 		if (pcie_device) {
2776 			starget_printk(KERN_INFO, starget,
2777 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2778 				pcie_device->handle,
2779 				(unsigned long long)pcie_device->wwid,
2780 				pcie_device->port_num);
2781 			if (pcie_device->enclosure_handle != 0)
2782 				starget_printk(KERN_INFO, starget,
2783 					"enclosure logical id(0x%016llx), slot(%d)\n",
2784 					(unsigned long long)
2785 					pcie_device->enclosure_logical_id,
2786 					pcie_device->slot);
2787 			if (pcie_device->connector_name[0] != '\0')
2788 				starget_printk(KERN_INFO, starget,
2789 					"enclosure level(0x%04x), connector name( %s)\n",
2790 					pcie_device->enclosure_level,
2791 					pcie_device->connector_name);
2792 			pcie_device_put(pcie_device);
2793 		}
2794 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2795 
2796 	} else {
2797 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2798 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2799 		if (sas_device) {
2800 			if (priv_target->flags &
2801 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2802 				starget_printk(KERN_INFO, starget,
2803 				    "volume handle(0x%04x), "
2804 				    "volume wwid(0x%016llx)\n",
2805 				    sas_device->volume_handle,
2806 				   (unsigned long long)sas_device->volume_wwid);
2807 			}
2808 			starget_printk(KERN_INFO, starget,
2809 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2810 			    sas_device->handle,
2811 			    (unsigned long long)sas_device->sas_address,
2812 			    sas_device->phy);
2813 
2814 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
2815 			    NULL, starget);
2816 
2817 			sas_device_put(sas_device);
2818 		}
2819 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2820 	}
2821 }
2822 
2823 /**
2824  * scsih_abort - eh threads main abort routine
2825  * @scmd: pointer to scsi command object
2826  *
2827  * Return: SUCCESS if command aborted else FAILED
2828  */
2829 static int
2830 scsih_abort(struct scsi_cmnd *scmd)
2831 {
2832 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2833 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2834 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2835 	u16 handle;
2836 	int r;
2837 
2838 	u8 timeout = 30;
2839 	struct _pcie_device *pcie_device = NULL;
2840 	sdev_printk(KERN_INFO, scmd->device,
2841 		"attempting task abort! scmd(%p)\n", scmd);
2842 	_scsih_tm_display_info(ioc, scmd);
2843 
2844 	sas_device_priv_data = scmd->device->hostdata;
2845 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2846 	    ioc->remove_host) {
2847 		sdev_printk(KERN_INFO, scmd->device,
2848 			"device has been deleted! scmd(%p)\n", scmd);
2849 		scmd->result = DID_NO_CONNECT << 16;
2850 		scmd->scsi_done(scmd);
2851 		r = SUCCESS;
2852 		goto out;
2853 	}
2854 
2855 	/* check for completed command */
2856 	if (st == NULL || st->cb_idx == 0xFF) {
2857 		scmd->result = DID_RESET << 16;
2858 		r = SUCCESS;
2859 		goto out;
2860 	}
2861 
2862 	/* for hidden raid components and volumes this is not supported */
2863 	if (sas_device_priv_data->sas_target->flags &
2864 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
2865 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2866 		scmd->result = DID_RESET << 16;
2867 		r = FAILED;
2868 		goto out;
2869 	}
2870 
2871 	mpt3sas_halt_firmware(ioc);
2872 
2873 	handle = sas_device_priv_data->sas_target->handle;
2874 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2875 	if (pcie_device && (!ioc->tm_custom_handling))
2876 		timeout = ioc->nvme_abort_timeout;
2877 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2878 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2879 		st->smid, st->msix_io, timeout, 0);
2880 	/* Command must be cleared after abort */
2881 	if (r == SUCCESS && st->cb_idx != 0xFF)
2882 		r = FAILED;
2883  out:
2884 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2885 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2886 	if (pcie_device)
2887 		pcie_device_put(pcie_device);
2888 	return r;
2889 }
2890 
2891 /**
2892  * scsih_dev_reset - eh threads main device reset routine
2893  * @scmd: pointer to scsi command object
2894  *
2895  * Return: SUCCESS if the device was reset else FAILED
2896  */
2897 static int
2898 scsih_dev_reset(struct scsi_cmnd *scmd)
2899 {
2900 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2901 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2902 	struct _sas_device *sas_device = NULL;
2903 	struct _pcie_device *pcie_device = NULL;
2904 	u16	handle;
2905 	u8	tr_method = 0;
2906 	u8	tr_timeout = 30;
2907 	int r;
2908 
2909 	struct scsi_target *starget = scmd->device->sdev_target;
2910 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2911 
2912 	sdev_printk(KERN_INFO, scmd->device,
2913 		"attempting device reset! scmd(%p)\n", scmd);
2914 	_scsih_tm_display_info(ioc, scmd);
2915 
2916 	sas_device_priv_data = scmd->device->hostdata;
2917 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2918 	    ioc->remove_host) {
2919 		sdev_printk(KERN_INFO, scmd->device,
2920 			"device has been deleted! scmd(%p)\n", scmd);
2921 		scmd->result = DID_NO_CONNECT << 16;
2922 		scmd->scsi_done(scmd);
2923 		r = SUCCESS;
2924 		goto out;
2925 	}
2926 
2927 	/* for hidden raid components obtain the volume_handle */
2928 	handle = 0;
2929 	if (sas_device_priv_data->sas_target->flags &
2930 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2931 		sas_device = mpt3sas_get_sdev_from_target(ioc,
2932 				target_priv_data);
2933 		if (sas_device)
2934 			handle = sas_device->volume_handle;
2935 	} else
2936 		handle = sas_device_priv_data->sas_target->handle;
2937 
2938 	if (!handle) {
2939 		scmd->result = DID_RESET << 16;
2940 		r = FAILED;
2941 		goto out;
2942 	}
2943 
2944 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2945 
2946 	if (pcie_device && (!ioc->tm_custom_handling)) {
2947 		tr_timeout = pcie_device->reset_timeout;
2948 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2949 	} else
2950 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2951 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2952 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2953 		tr_timeout, tr_method);
2954 	/* Check for busy commands after reset */
2955 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2956 		r = FAILED;
2957  out:
2958 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
2959 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2960 
2961 	if (sas_device)
2962 		sas_device_put(sas_device);
2963 	if (pcie_device)
2964 		pcie_device_put(pcie_device);
2965 
2966 	return r;
2967 }
2968 
2969 /**
2970  * scsih_target_reset - eh threads main target reset routine
2971  * @scmd: pointer to scsi command object
2972  *
2973  * Return: SUCCESS if the target was reset else FAILED
2974  */
2975 static int
2976 scsih_target_reset(struct scsi_cmnd *scmd)
2977 {
2978 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2979 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2980 	struct _sas_device *sas_device = NULL;
2981 	struct _pcie_device *pcie_device = NULL;
2982 	u16	handle;
2983 	u8	tr_method = 0;
2984 	u8	tr_timeout = 30;
2985 	int r;
2986 	struct scsi_target *starget = scmd->device->sdev_target;
2987 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2988 
2989 	starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
2990 		scmd);
2991 	_scsih_tm_display_info(ioc, scmd);
2992 
2993 	sas_device_priv_data = scmd->device->hostdata;
2994 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2995 	    ioc->remove_host) {
2996 		starget_printk(KERN_INFO, starget, "target has been deleted! scmd(%p)\n",
2997 			scmd);
2998 		scmd->result = DID_NO_CONNECT << 16;
2999 		scmd->scsi_done(scmd);
3000 		r = SUCCESS;
3001 		goto out;
3002 	}
3003 
3004 	/* for hidden raid components obtain the volume_handle */
3005 	handle = 0;
3006 	if (sas_device_priv_data->sas_target->flags &
3007 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
3008 		sas_device = mpt3sas_get_sdev_from_target(ioc,
3009 				target_priv_data);
3010 		if (sas_device)
3011 			handle = sas_device->volume_handle;
3012 	} else
3013 		handle = sas_device_priv_data->sas_target->handle;
3014 
3015 	if (!handle) {
3016 		scmd->result = DID_RESET << 16;
3017 		r = FAILED;
3018 		goto out;
3019 	}
3020 
3021 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3022 
3023 	if (pcie_device && (!ioc->tm_custom_handling)) {
3024 		tr_timeout = pcie_device->reset_timeout;
3025 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3026 	} else
3027 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3028 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3029 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3030 	    tr_timeout, tr_method);
3031 	/* Check for busy commands after reset */
3032 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3033 		r = FAILED;
3034  out:
3035 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3036 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3037 
3038 	if (sas_device)
3039 		sas_device_put(sas_device);
3040 	if (pcie_device)
3041 		pcie_device_put(pcie_device);
3042 	return r;
3043 }
3044 
3045 
3046 /**
3047  * scsih_host_reset - eh threads main host reset routine
3048  * @scmd: pointer to scsi command object
3049  *
3050  * Return: SUCCESS if the host was reset else FAILED
3051  */
3052 static int
3053 scsih_host_reset(struct scsi_cmnd *scmd)
3054 {
3055 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3056 	int r, retval;
3057 
3058 	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3059 	scsi_print_command(scmd);
3060 
3061 	if (ioc->is_driver_loading || ioc->remove_host) {
3062 		ioc_info(ioc, "Blocking the host reset\n");
3063 		r = FAILED;
3064 		goto out;
3065 	}
3066 
3067 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3068 	r = (retval < 0) ? FAILED : SUCCESS;
3069 out:
3070 	ioc_info(ioc, "host reset: %s scmd(%p)\n",
3071 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3072 
3073 	return r;
3074 }
3075 
3076 /**
3077  * _scsih_fw_event_add - insert and queue up fw_event
3078  * @ioc: per adapter object
3079  * @fw_event: object describing the event
3080  * Context: This function will acquire ioc->fw_event_lock.
3081  *
3082  * This adds the firmware event object to the linked list, then queues it up to
3083  * be processed from user context.
3084  */
3085 static void
3086 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3087 {
3088 	unsigned long flags;
3089 
3090 	if (ioc->firmware_event_thread == NULL)
3091 		return;
3092 
3093 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
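	/*
	 * Two references are taken here: one for the entry on
	 * ioc->fw_event_list and one for the queued work, which is
	 * released from _firmware_event_work().
	 */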
3094 	fw_event_work_get(fw_event);
3095 	INIT_LIST_HEAD(&fw_event->list);
3096 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3097 	INIT_WORK(&fw_event->work, _firmware_event_work);
3098 	fw_event_work_get(fw_event);
3099 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3100 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3101 }
3102 
3103 /**
3104  * _scsih_fw_event_del_from_list - delete fw_event from the list
3105  * @ioc: per adapter object
3106  * @fw_event: object describing the event
3107  * Context: This function will acquire ioc->fw_event_lock.
3108  *
3109  * If the fw_event is on the fw_event_list, remove it and do a put.
3110  */
3111 static void
3112 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3113 	*fw_event)
3114 {
3115 	unsigned long flags;
3116 
3117 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3118 	if (!list_empty(&fw_event->list)) {
3119 		list_del_init(&fw_event->list);
3120 		fw_event_work_put(fw_event);
3121 	}
3122 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3123 }
3124 
3125 
3126 /**
3127  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3128  * @ioc: per adapter object
3129  * @event_data: trigger event data
3130  */
3131 void
3132 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3133 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3134 {
3135 	struct fw_event_work *fw_event;
3136 	u16 sz;
3137 
3138 	if (ioc->is_driver_loading)
3139 		return;
3140 	sz = sizeof(*event_data);
3141 	fw_event = alloc_fw_event_work(sz);
3142 	if (!fw_event)
3143 		return;
3144 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3145 	fw_event->ioc = ioc;
3146 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3147 	_scsih_fw_event_add(ioc, fw_event);
3148 	fw_event_work_put(fw_event);
3149 }
3150 
3151 /**
3152  * _scsih_error_recovery_delete_devices - remove devices not responding
3153  * @ioc: per adapter object
3154  */
3155 static void
3156 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3157 {
3158 	struct fw_event_work *fw_event;
3159 
3160 	if (ioc->is_driver_loading)
3161 		return;
3162 	fw_event = alloc_fw_event_work(0);
3163 	if (!fw_event)
3164 		return;
3165 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3166 	fw_event->ioc = ioc;
3167 	_scsih_fw_event_add(ioc, fw_event);
3168 	fw_event_work_put(fw_event);
3169 }
3170 
3171 /**
3172  * mpt3sas_port_enable_complete - port enable completed (fake event)
3173  * @ioc: per adapter object
3174  */
3175 void
3176 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3177 {
3178 	struct fw_event_work *fw_event;
3179 
3180 	fw_event = alloc_fw_event_work(0);
3181 	if (!fw_event)
3182 		return;
3183 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3184 	fw_event->ioc = ioc;
3185 	_scsih_fw_event_add(ioc, fw_event);
3186 	fw_event_work_put(fw_event);
3187 }
3188 
3189 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3190 {
3191 	unsigned long flags;
3192 	struct fw_event_work *fw_event = NULL;
3193 
3194 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3195 	if (!list_empty(&ioc->fw_event_list)) {
3196 		fw_event = list_first_entry(&ioc->fw_event_list,
3197 				struct fw_event_work, list);
3198 		list_del_init(&fw_event->list);
3199 	}
3200 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3201 
3202 	return fw_event;
3203 }
3204 
3205 /**
3206  * _scsih_fw_event_cleanup_queue - cleanup event queue
3207  * @ioc: per adapter object
3208  *
3209  * Walk the firmware event queue, either killing timers, or waiting
3210  * for outstanding events to complete
3211  */
3212 static void
3213 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3214 {
3215 	struct fw_event_work *fw_event;
3216 
3217 	if (list_empty(&ioc->fw_event_list) ||
3218 	     !ioc->firmware_event_thread || in_interrupt())
3219 		return;
3220 
3221 	while ((fw_event = dequeue_next_fw_event(ioc))) {
3222 		/*
3223 		 * Wait on the fw_event to complete. If this returns 1, then
3224 		 * the event was never executed, and we need a put for the
3225 		 * reference the work had on the fw_event.
3226 		 *
3227 		 * If it did execute, we wait for it to finish, and the put will
3228 		 * happen from _firmware_event_work()
3229 		 */
3230 		if (cancel_work_sync(&fw_event->work))
3231 			fw_event_work_put(fw_event);
3232 
3233 		fw_event_work_put(fw_event);
3234 	}
3235 }
3236 
3237 /**
3238  * _scsih_internal_device_block - block the sdev device
3239  * @sdev: per device object
3240  * @sas_device_priv_data: per device driver private data
3241  *
3242  * Make sure the device is blocked without error; if not,
3243  * print an error.
3244  */
3245 static void
3246 _scsih_internal_device_block(struct scsi_device *sdev,
3247 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3248 {
3249 	int r = 0;
3250 
3251 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3252 	    sas_device_priv_data->sas_target->handle);
3253 	sas_device_priv_data->block = 1;
3254 
3255 	r = scsi_internal_device_block_nowait(sdev);
3256 	if (r == -EINVAL)
3257 		sdev_printk(KERN_WARNING, sdev,
3258 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3259 		    r, sas_device_priv_data->sas_target->handle);
3260 }
3261 
3262 /**
3263  * _scsih_internal_device_unblock - unblock the sdev device
3264  * @sdev: per device object
3265  * @sas_device_priv_data: per device driver private data
3266  * Make sure the device is unblocked without error; if not, retry
3267  * by blocking and then unblocking.
3268  */
3269 
3270 static void
3271 _scsih_internal_device_unblock(struct scsi_device *sdev,
3272 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3273 {
3274 	int r = 0;
3275 
3276 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3277 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3278 	sas_device_priv_data->block = 0;
3279 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3280 	if (r == -EINVAL) {
3281 		/* The device has been set to SDEV_RUNNING by SD layer during
3282 		 * device addition but the request queue is still stopped by
3283 		 * our earlier block call. We need to perform a block again
3284 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3285 
3286 		sdev_printk(KERN_WARNING, sdev,
3287 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3288 		    "performing a block followed by an unblock\n",
3289 		    r, sas_device_priv_data->sas_target->handle);
3290 		sas_device_priv_data->block = 1;
3291 		r = scsi_internal_device_block_nowait(sdev);
3292 		if (r)
3293 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3294 			    "failed with return(%d) for handle(0x%04x)\n",
3295 			    r, sas_device_priv_data->sas_target->handle);
3296 
3297 		sas_device_priv_data->block = 0;
3298 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3299 		if (r)
3300 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3301 			    " failed with return(%d) for handle(0x%04x)\n",
3302 			    r, sas_device_priv_data->sas_target->handle);
3303 	}
3304 }
3305 
3306 /**
3307  * _scsih_ublock_io_all_device - unblock every device
3308  * @ioc: per adapter object
3309  *
3310  * Change the device state from SDEV_BLOCK to SDEV_RUNNING.
3311  */
3312 static void
3313 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3314 {
3315 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3316 	struct scsi_device *sdev;
3317 
3318 	shost_for_each_device(sdev, ioc->shost) {
3319 		sas_device_priv_data = sdev->hostdata;
3320 		if (!sas_device_priv_data)
3321 			continue;
3322 		if (!sas_device_priv_data->block)
3323 			continue;
3324 
3325 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3326 			"device_running, handle(0x%04x)\n",
3327 		    sas_device_priv_data->sas_target->handle));
3328 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3329 	}
3330 }
3331 
3332 
3333 /**
3334  * _scsih_ublock_io_device - prepare device to be deleted
3335  * @ioc: per adapter object
3336  * @sas_address: sas address
3337  *
3338  * unblock then put device in offline state
3339  */
3340 static void
3341 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3342 {
3343 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3344 	struct scsi_device *sdev;
3345 
3346 	shost_for_each_device(sdev, ioc->shost) {
3347 		sas_device_priv_data = sdev->hostdata;
3348 		if (!sas_device_priv_data)
3349 			continue;
3350 		if (sas_device_priv_data->sas_target->sas_address
3351 		    != sas_address)
3352 			continue;
3353 		if (sas_device_priv_data->block)
3354 			_scsih_internal_device_unblock(sdev,
3355 				sas_device_priv_data);
3356 	}
3357 }
3358 
3359 /**
3360  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3361  * @ioc: per adapter object
3362  *
3363  * During device pull we need to appropriately set the sdev state.
3364  */
3365 static void
3366 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3367 {
3368 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3369 	struct scsi_device *sdev;
3370 
3371 	shost_for_each_device(sdev, ioc->shost) {
3372 		sas_device_priv_data = sdev->hostdata;
3373 		if (!sas_device_priv_data)
3374 			continue;
3375 		if (sas_device_priv_data->block)
3376 			continue;
3377 		if (sas_device_priv_data->ignore_delay_remove) {
3378 			sdev_printk(KERN_INFO, sdev,
3379 			"%s skip device_block for SES handle(0x%04x)\n",
3380 			__func__, sas_device_priv_data->sas_target->handle);
3381 			continue;
3382 		}
3383 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3384 	}
3385 }
3386 
3387 /**
3388  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3389  * @ioc: per adapter object
3390  * @handle: device handle
3391  *
3392  * During device pull we need to appropriately set the sdev state.
3393  */
3394 static void
3395 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3396 {
3397 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3398 	struct scsi_device *sdev;
3399 	struct _sas_device *sas_device;
3400 
3401 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3402 
3403 	shost_for_each_device(sdev, ioc->shost) {
3404 		sas_device_priv_data = sdev->hostdata;
3405 		if (!sas_device_priv_data)
3406 			continue;
3407 		if (sas_device_priv_data->sas_target->handle != handle)
3408 			continue;
3409 		if (sas_device_priv_data->block)
3410 			continue;
3411 		if (sas_device && sas_device->pend_sas_rphy_add)
3412 			continue;
3413 		if (sas_device_priv_data->ignore_delay_remove) {
3414 			sdev_printk(KERN_INFO, sdev,
3415 			"%s skip device_block for SES handle(0x%04x)\n",
3416 			__func__, sas_device_priv_data->sas_target->handle);
3417 			continue;
3418 		}
3419 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3420 	}
3421 
3422 	if (sas_device)
3423 		sas_device_put(sas_device);
3424 }
3425 
3426 /**
3427  * _scsih_block_io_to_children_attached_to_ex - block devices behind an expander
3428  * @ioc: per adapter object
3429  * @sas_expander: the sas_device object
3430  *
3431  * This routine sets the sdev state to SDEV_BLOCK for all devices
3432  * attached to this expander. This function is called when the expander
3433  * is pulled.
3434  */
3435 static void
3436 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3437 	struct _sas_node *sas_expander)
3438 {
3439 	struct _sas_port *mpt3sas_port;
3440 	struct _sas_device *sas_device;
3441 	struct _sas_node *expander_sibling;
3442 	unsigned long flags;
3443 
3444 	if (!sas_expander)
3445 		return;
3446 
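	/* First pass: mark attached end devices in ioc->blocking_handles. */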
3447 	list_for_each_entry(mpt3sas_port,
3448 	   &sas_expander->sas_port_list, port_list) {
3449 		if (mpt3sas_port->remote_identify.device_type ==
3450 		    SAS_END_DEVICE) {
3451 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3452 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3453 			    mpt3sas_port->remote_identify.sas_address);
3454 			if (sas_device) {
3455 				set_bit(sas_device->handle,
3456 						ioc->blocking_handles);
3457 				sas_device_put(sas_device);
3458 			}
3459 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3460 		}
3461 	}
3462 
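	/*
	 * Second pass: recurse into any child expanders so that end devices
	 * attached to downstream expanders are marked in the
	 * blocking_handles bitmap as well.
	 */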
3463 	list_for_each_entry(mpt3sas_port,
3464 	   &sas_expander->sas_port_list, port_list) {
3465 
3466 		if (mpt3sas_port->remote_identify.device_type ==
3467 		    SAS_EDGE_EXPANDER_DEVICE ||
3468 		    mpt3sas_port->remote_identify.device_type ==
3469 		    SAS_FANOUT_EXPANDER_DEVICE) {
3470 			expander_sibling =
3471 			    mpt3sas_scsih_expander_find_by_sas_address(
3472 			    ioc, mpt3sas_port->remote_identify.sas_address);
3473 			_scsih_block_io_to_children_attached_to_ex(ioc,
3474 			    expander_sibling);
3475 		}
3476 	}
3477 }
3478 
3479 /**
3480  * _scsih_block_io_to_children_attached_directly - block directly attached devices
3481  * @ioc: per adapter object
3482  * @event_data: topology change event data
3483  *
3484  * This routine sets the sdev state to SDEV_BLOCK for all devices
3485  * directly attached during device pull.
3486  */
3487 static void
3488 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3489 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3490 {
3491 	int i;
3492 	u16 handle;
3493 	u16 reason_code;
3494 
3495 	for (i = 0; i < event_data->NumEntries; i++) {
3496 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3497 		if (!handle)
3498 			continue;
3499 		reason_code = event_data->PHY[i].PhyStatus &
3500 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3501 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3502 			_scsih_block_io_device(ioc, handle);
3503 	}
3504 }
3505 
3506 /**
3507  * _scsih_block_io_to_pcie_children_attached_directly - block directly attached PCIe devices
3508  * @ioc: per adapter object
3509  * @event_data: topology change event data
3510  *
3511  * This routine sets the sdev state to SDEV_BLOCK for all devices
3512  * directly attached during device pull/reconnect.
3513  */
3514 static void
3515 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3516 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3517 {
3518 	int i;
3519 	u16 handle;
3520 	u16 reason_code;
3521 
3522 	for (i = 0; i < event_data->NumEntries; i++) {
3523 		handle =
3524 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3525 		if (!handle)
3526 			continue;
3527 		reason_code = event_data->PortEntry[i].PortStatus;
3528 		if (reason_code ==
3529 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3530 			_scsih_block_io_device(ioc, handle);
3531 	}
3532 }

3533 /**
3534  * _scsih_tm_tr_send - send task management request
3535  * @ioc: per adapter object
3536  * @handle: device handle
3537  * Context: interrupt time.
3538  *
3539  * This code is to initiate the device removal handshake protocol
3540  * with controller firmware.  This function will issue a target reset
3541  * using the high priority request queue.  It will then send a sas iounit
3542  * control request (MPI2_SAS_OP_REMOVE_DEVICE) from the target reset completion.
3543  *
3544  * This is designed to send multiple task management requests at the
3545  * same time to the fifo. If the fifo is full, we will append the request,
3546  * and process it in a future completion.
3547  */
3548 static void
3549 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3550 {
3551 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3552 	u16 smid;
3553 	struct _sas_device *sas_device = NULL;
3554 	struct _pcie_device *pcie_device = NULL;
3555 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3556 	u64 sas_address = 0;
3557 	unsigned long flags;
3558 	struct _tr_list *delayed_tr;
3559 	u32 ioc_state;
3560 	u8 tr_method = 0;
3561 
3562 	if (ioc->pci_error_recovery) {
3563 		dewtprintk(ioc,
3564 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3565 				    __func__, handle));
3566 		return;
3567 	}
3568 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3569 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3570 		dewtprintk(ioc,
3571 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3572 				    __func__, handle));
3573 		return;
3574 	}
3575 
3576 	/* if PD, then return */
3577 	if (test_bit(handle, ioc->pd_handles))
3578 		return;
3579 
3580 	clear_bit(handle, ioc->pend_os_device_add);
3581 
3582 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
3583 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3584 	if (sas_device && sas_device->starget &&
3585 	    sas_device->starget->hostdata) {
3586 		sas_target_priv_data = sas_device->starget->hostdata;
3587 		sas_target_priv_data->deleted = 1;
3588 		sas_address = sas_device->sas_address;
3589 	}
3590 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3591 	if (!sas_device) {
3592 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3593 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3594 		if (pcie_device && pcie_device->starget &&
3595 			pcie_device->starget->hostdata) {
3596 			sas_target_priv_data = pcie_device->starget->hostdata;
3597 			sas_target_priv_data->deleted = 1;
3598 			sas_address = pcie_device->wwid;
3599 		}
3600 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
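		/*
		 * For NVMe devices that do not use the custom task management
		 * handling, request a protocol level reset; otherwise fall
		 * back to a link reset.
		 */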
3601 		if (pcie_device && (!ioc->tm_custom_handling))
3602 			tr_method =
3603 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3604 		else
3605 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3606 	}
3607 	if (sas_target_priv_data) {
3608 		dewtprintk(ioc,
3609 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3610 				    handle, (u64)sas_address));
3611 		if (sas_device) {
3612 			if (sas_device->enclosure_handle != 0)
3613 				dewtprintk(ioc,
3614 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3615 						    (u64)sas_device->enclosure_logical_id,
3616 						    sas_device->slot));
3617 			if (sas_device->connector_name[0] != '\0')
3618 				dewtprintk(ioc,
3619 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3620 						    sas_device->enclosure_level,
3621 						    sas_device->connector_name));
3622 		} else if (pcie_device) {
3623 			if (pcie_device->enclosure_handle != 0)
3624 				dewtprintk(ioc,
3625 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3626 						    (u64)pcie_device->enclosure_logical_id,
3627 						    pcie_device->slot));
3628 			if (pcie_device->connector_name[0] != '\0')
3629 				dewtprintk(ioc,
3630 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3631 						    pcie_device->enclosure_level,
3632 						    pcie_device->connector_name));
3633 		}
3634 		_scsih_ublock_io_device(ioc, sas_address);
3635 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3636 	}
3637 
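	/*
	 * Grab a high priority smid for the target reset.  If the pool is
	 * exhausted, queue the handle on delayed_tr_list so that
	 * _scsih_check_for_pending_tm() resends it from a later completion.
	 */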
3638 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3639 	if (!smid) {
3640 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3641 		if (!delayed_tr)
3642 			goto out;
3643 		INIT_LIST_HEAD(&delayed_tr->list);
3644 		delayed_tr->handle = handle;
3645 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3646 		dewtprintk(ioc,
3647 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3648 				    handle));
3649 		goto out;
3650 	}
3651 
3652 	dewtprintk(ioc,
3653 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3654 			    handle, smid, ioc->tm_tr_cb_idx));
3655 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3656 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3657 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3658 	mpi_request->DevHandle = cpu_to_le16(handle);
3659 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3660 	mpi_request->MsgFlags = tr_method;
3661 	set_bit(handle, ioc->device_remove_in_progress);
3662 	ioc->put_smid_hi_priority(ioc, smid, 0);
3663 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3664 
3665 out:
3666 	if (sas_device)
3667 		sas_device_put(sas_device);
3668 	if (pcie_device)
3669 		pcie_device_put(pcie_device);
3670 }
3671 
3672 /**
3673  * _scsih_tm_tr_complete - target reset completion routine
3674  * @ioc: per adapter object
3675  * @smid: system request message index
3676  * @msix_index: MSIX table index supplied by the OS
3677  * @reply: reply message frame(lower 32bit addr)
3678  * Context: interrupt time.
3679  *
3680  * This is the target reset completion routine.
3681  * This code is part of the code to initiate the device removal
3682  * handshake protocol with controller firmware.
3683  * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3684  *
3685  * Return: 1 meaning mf should be freed from _base_interrupt
3686  *         0 means the mf is freed from this function.
3687  */
3688 static u8
3689 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3690 	u32 reply)
3691 {
3692 	u16 handle;
3693 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3694 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3695 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3696 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3697 	u16 smid_sas_ctrl;
3698 	u32 ioc_state;
3699 	struct _sc_list *delayed_sc;
3700 
3701 	if (ioc->pci_error_recovery) {
3702 		dewtprintk(ioc,
3703 			   ioc_info(ioc, "%s: host in pci error recovery\n",
3704 				    __func__));
3705 		return 1;
3706 	}
3707 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3708 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3709 		dewtprintk(ioc,
3710 			   ioc_info(ioc, "%s: host is not operational\n",
3711 				    __func__));
3712 		return 1;
3713 	}
3714 	if (unlikely(!mpi_reply)) {
3715 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3716 			__FILE__, __LINE__, __func__);
3717 		return 1;
3718 	}
3719 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3720 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3721 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3722 		dewtprintk(ioc,
3723 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3724 				   handle,
3725 				   le16_to_cpu(mpi_reply->DevHandle), smid));
3726 		return 0;
3727 	}
3728 
3729 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3730 	dewtprintk(ioc,
3731 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3732 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3733 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3734 			    le32_to_cpu(mpi_reply->TerminationCount)));
3735 
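	/*
	 * Follow the target reset with a SAS IO unit control REMOVE_DEVICE
	 * request.  If no free smid is available, park the handle on
	 * delayed_sc_list and let a later internal command completion
	 * issue it.
	 */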
3736 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3737 	if (!smid_sas_ctrl) {
3738 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3739 		if (!delayed_sc)
3740 			return _scsih_check_for_pending_tm(ioc, smid);
3741 		INIT_LIST_HEAD(&delayed_sc->list);
3742 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3743 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3744 		dewtprintk(ioc,
3745 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3746 				    handle));
3747 		return _scsih_check_for_pending_tm(ioc, smid);
3748 	}
3749 
3750 	dewtprintk(ioc,
3751 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3752 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3753 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3754 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3755 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3756 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3757 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
3758 	ioc->put_smid_default(ioc, smid_sas_ctrl);
3759 
3760 	return _scsih_check_for_pending_tm(ioc, smid);
3761 }
3762 
3763 /**
3764  * _scsih_allow_scmd_to_device - check whether scmd can be issued to the IOC
3765  * @ioc: per adapter object
3766  * @scmd: pointer to scsi command object
3767  *
3768  * Return: true if scmd can be issued to the IOC, otherwise false.
3769  */
3770 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3771 	struct scsi_cmnd *scmd)
3772 {
3773 
3774 	if (ioc->pci_error_recovery)
3775 		return false;
3776 
3777 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3778 		if (ioc->remove_host)
3779 			return false;
3780 
3781 		return true;
3782 	}
3783 
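	/*
	 * While the host is being removed, only cache flush and
	 * start/stop commands are let through.
	 */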
3784 	if (ioc->remove_host) {
3785 
3786 		switch (scmd->cmnd[0]) {
3787 		case SYNCHRONIZE_CACHE:
3788 		case START_STOP:
3789 			return true;
3790 		default:
3791 			return false;
3792 		}
3793 	}
3794 
3795 	return true;
3796 }
3797 
3798 /**
3799  * _scsih_sas_control_complete - completion routine
3800  * @ioc: per adapter object
3801  * @smid: system request message index
3802  * @msix_index: MSIX table index supplied by the OS
3803  * @reply: reply message frame(lower 32bit addr)
3804  * Context: interrupt time.
3805  *
3806  * This is the sas iounit control completion routine.
3807  * This code is part of the code to initiate the device removal
3808  * handshake protocol with controller firmware.
3809  *
3810  * Return: 1 meaning mf should be freed from _base_interrupt
3811  *         0 means the mf is freed from this function.
3812  */
3813 static u8
3814 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3815 	u8 msix_index, u32 reply)
3816 {
3817 	Mpi2SasIoUnitControlReply_t *mpi_reply =
3818 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3819 
3820 	if (likely(mpi_reply)) {
3821 		dewtprintk(ioc,
3822 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3823 				    le16_to_cpu(mpi_reply->DevHandle), smid,
3824 				    le16_to_cpu(mpi_reply->IOCStatus),
3825 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
3826 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
3827 		     MPI2_IOCSTATUS_SUCCESS) {
3828 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3829 			    ioc->device_remove_in_progress);
3830 		}
3831 	} else {
3832 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3833 			__FILE__, __LINE__, __func__);
3834 	}
3835 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3836 }
3837 
3838 /**
3839  * _scsih_tm_tr_volume_send - send target reset request for volumes
3840  * @ioc: per adapter object
3841  * @handle: device handle
3842  * Context: interrupt time.
3843  *
3844  * This is designed to send multiple task management requests at the
3845  * same time to the fifo. If the fifo is full, we will append the request,
3846  * and process it in a future completion.
3847  */
3848 static void
3849 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3850 {
3851 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3852 	u16 smid;
3853 	struct _tr_list *delayed_tr;
3854 
3855 	if (ioc->pci_error_recovery) {
3856 		dewtprintk(ioc,
3857 			   ioc_info(ioc, "%s: host reset in progress!\n",
3858 				    __func__));
3859 		return;
3860 	}
3861 
3862 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3863 	if (!smid) {
3864 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3865 		if (!delayed_tr)
3866 			return;
3867 		INIT_LIST_HEAD(&delayed_tr->list);
3868 		delayed_tr->handle = handle;
3869 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3870 		dewtprintk(ioc,
3871 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3872 				    handle));
3873 		return;
3874 	}
3875 
3876 	dewtprintk(ioc,
3877 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3878 			    handle, smid, ioc->tm_tr_volume_cb_idx));
3879 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3880 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3881 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3882 	mpi_request->DevHandle = cpu_to_le16(handle);
3883 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3884 	ioc->put_smid_hi_priority(ioc, smid, 0);
3885 }
3886 
3887 /**
3888  * _scsih_tm_volume_tr_complete - target reset completion
3889  * @ioc: per adapter object
3890  * @smid: system request message index
3891  * @msix_index: MSIX table index supplied by the OS
3892  * @reply: reply message frame(lower 32bit addr)
3893  * Context: interrupt time.
3894  *
3895  * Return: 1 meaning mf should be freed from _base_interrupt
3896  *         0 means the mf is freed from this function.
3897  */
3898 static u8
3899 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3900 	u8 msix_index, u32 reply)
3901 {
3902 	u16 handle;
3903 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3904 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3905 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3906 
3907 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
3908 		dewtprintk(ioc,
3909 			   ioc_info(ioc, "%s: host reset in progress!\n",
3910 				    __func__));
3911 		return 1;
3912 	}
3913 	if (unlikely(!mpi_reply)) {
3914 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3915 			__FILE__, __LINE__, __func__);
3916 		return 1;
3917 	}
3918 
3919 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3920 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3921 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3922 		dewtprintk(ioc,
3923 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3924 				   handle, le16_to_cpu(mpi_reply->DevHandle),
3925 				   smid));
3926 		return 0;
3927 	}
3928 
3929 	dewtprintk(ioc,
3930 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3931 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3932 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3933 			    le32_to_cpu(mpi_reply->TerminationCount)));
3934 
3935 	return _scsih_check_for_pending_tm(ioc, smid);
3936 }
3937 
3938 /**
3939  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3940  * @ioc: per adapter object
3941  * @smid: system request message index
3942  * @event: Event ID
3943  * @event_context: used to track events uniquely
3944  *
3945  * Context - processed in interrupt context.
3946  */
3947 static void
3948 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3949 				U32 event_context)
3950 {
3951 	Mpi2EventAckRequest_t *ack_request;
3952 	int i = smid - ioc->internal_smid;
3953 	unsigned long flags;
3954 
3955 	/* Without releasing the smid just update the
3956 	 * call back index and reuse the same smid for
3957 	 * processing this delayed request
3958 	 */
3959 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3960 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
3961 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3962 
3963 	dewtprintk(ioc,
3964 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
3965 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
3966 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
3967 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
3968 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
3969 	ack_request->Event = event;
3970 	ack_request->EventContext = event_context;
3971 	ack_request->VF_ID = 0;  /* TODO */
3972 	ack_request->VP_ID = 0;
3973 	ioc->put_smid_default(ioc, smid);
3974 }
3975 
3976 /**
3977  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
3978  *				sas_io_unit_ctrl messages
3979  * @ioc: per adapter object
3980  * @smid: system request message index
3981  * @handle: device handle
3982  *
3983  * Context - processed in interrupt context.
3984  */
3985 static void
3986 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3987 					u16 smid, u16 handle)
3988 {
3989 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3990 	u32 ioc_state;
3991 	int i = smid - ioc->internal_smid;
3992 	unsigned long flags;
3993 
3994 	if (ioc->remove_host) {
3995 		dewtprintk(ioc,
3996 			   ioc_info(ioc, "%s: host has been removed\n",
3997 				    __func__));
3998 		return;
3999 	} else if (ioc->pci_error_recovery) {
4000 		dewtprintk(ioc,
4001 			   ioc_info(ioc, "%s: host in pci error recovery\n",
4002 				    __func__));
4003 		return;
4004 	}
4005 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4006 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4007 		dewtprintk(ioc,
4008 			   ioc_info(ioc, "%s: host is not operational\n",
4009 				    __func__));
4010 		return;
4011 	}
4012 
4013 	/* Without releasing the smid just update the
4014 	 * call back index and reuse the same smid for
4015 	 * processing this delayed request
4016 	 */
4017 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4018 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4019 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4020 
4021 	dewtprintk(ioc,
4022 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4023 			    handle, smid, ioc->tm_sas_control_cb_idx));
4024 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4025 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4026 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4027 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4028 	mpi_request->DevHandle = cpu_to_le16(handle);
4029 	ioc->put_smid_default(ioc, smid);
4030 }
4031 
4032 /**
4033  * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4034  * @ioc: per adapter object
4035  * @smid: system request message index
4036  *
4037  * Context: Executed in interrupt context
4038  *
4039  * This will check the delayed internal messages list, and process the
4040  * next request.
4041  *
4042  * Return: 1 meaning mf should be freed from _base_interrupt
4043  *         0 means the mf is freed from this function.
4044  */
4045 u8
4046 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4047 {
4048 	struct _sc_list *delayed_sc;
4049 	struct _event_ack_list *delayed_event_ack;
4050 
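	/*
	 * Reuse this smid to service a delayed event ACK first; otherwise
	 * service a delayed sas iounit control request.
	 */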
4051 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4052 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4053 						struct _event_ack_list, list);
4054 		_scsih_issue_delayed_event_ack(ioc, smid,
4055 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4056 		list_del(&delayed_event_ack->list);
4057 		kfree(delayed_event_ack);
4058 		return 0;
4059 	}
4060 
4061 	if (!list_empty(&ioc->delayed_sc_list)) {
4062 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4063 						struct _sc_list, list);
4064 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4065 						 delayed_sc->handle);
4066 		list_del(&delayed_sc->list);
4067 		kfree(delayed_sc);
4068 		return 0;
4069 	}
4070 	return 1;
4071 }
4072 
4073 /**
4074  * _scsih_check_for_pending_tm - check for pending task management
4075  * @ioc: per adapter object
4076  * @smid: system request message index
4077  *
4078  * This will check the delayed target reset list, and feed the
4079  * next request.
4080  *
4081  * Return: 1 meaning mf should be freed from _base_interrupt
4082  *         0 means the mf is freed from this function.
4083  */
4084 static u8
4085 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4086 {
4087 	struct _tr_list *delayed_tr;
4088 
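	/*
	 * Delayed volume target resets are serviced before the per-device
	 * target resets queued on delayed_tr_list.
	 */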
4089 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4090 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4091 		    struct _tr_list, list);
4092 		mpt3sas_base_free_smid(ioc, smid);
4093 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4094 		list_del(&delayed_tr->list);
4095 		kfree(delayed_tr);
4096 		return 0;
4097 	}
4098 
4099 	if (!list_empty(&ioc->delayed_tr_list)) {
4100 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4101 		    struct _tr_list, list);
4102 		mpt3sas_base_free_smid(ioc, smid);
4103 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4104 		list_del(&delayed_tr->list);
4105 		kfree(delayed_tr);
4106 		return 0;
4107 	}
4108 
4109 	return 1;
4110 }
4111 
4112 /**
4113  * _scsih_check_topo_delete_events - sanity check on topo events
4114  * @ioc: per adapter object
4115  * @event_data: the event data payload
4116  *
4117  * This routine was added to better handle cable breakage.
4118  *
4119  * This handles the case where the driver receives multiple expander
4120  * add and delete events in a single shot.  When there is a delete event,
4121  * the routine will void any pending add events waiting in the event queue.
4122  */
4123 static void
4124 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4125 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4126 {
4127 	struct fw_event_work *fw_event;
4128 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4129 	u16 expander_handle;
4130 	struct _sas_node *sas_expander;
4131 	unsigned long flags;
4132 	int i, reason_code;
4133 	u16 handle;
4134 
4135 	for (i = 0 ; i < event_data->NumEntries; i++) {
4136 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4137 		if (!handle)
4138 			continue;
4139 		reason_code = event_data->PHY[i].PhyStatus &
4140 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4141 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4142 			_scsih_tm_tr_send(ioc, handle);
4143 	}
4144 
4145 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4146 	if (expander_handle < ioc->sas_hba.num_phys) {
4147 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4148 		return;
4149 	}
4150 	if (event_data->ExpStatus ==
4151 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4152 		/* put expander attached devices into blocking state */
4153 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4154 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4155 		    expander_handle);
4156 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4157 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
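		/*
		 * Walk the blocking_handles bitmap populated above and block
		 * each device handle, clearing the bit as we go.
		 */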
4158 		do {
4159 			handle = find_first_bit(ioc->blocking_handles,
4160 			    ioc->facts.MaxDevHandle);
4161 			if (handle < ioc->facts.MaxDevHandle)
4162 				_scsih_block_io_device(ioc, handle);
4163 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4164 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4165 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4166 
4167 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4168 		return;
4169 
4170 	/* mark ignore flag for pending events */
4171 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4172 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4173 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4174 		    fw_event->ignore)
4175 			continue;
4176 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4177 				   fw_event->event_data;
4178 		if (local_event_data->ExpStatus ==
4179 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4180 		    local_event_data->ExpStatus ==
4181 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4182 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4183 			    expander_handle) {
4184 				dewtprintk(ioc,
4185 					   ioc_info(ioc, "setting ignoring flag\n"));
4186 				fw_event->ignore = 1;
4187 			}
4188 		}
4189 	}
4190 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4191 }
4192 
4193 /**
4194  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4195  * events
4196  * @ioc: per adapter object
4197  * @event_data: the event data payload
4198  *
4199  * This handles the case where the driver receives multiple switch
4200  * or device add and delete events in a single shot.  When there
4201  * is a delete event, the routine will void any pending add
4202  * events waiting in the event queue.
4203  */
4204 static void
4205 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4206 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4207 {
4208 	struct fw_event_work *fw_event;
4209 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4210 	unsigned long flags;
4211 	int i, reason_code;
4212 	u16 handle, switch_handle;
4213 
4214 	for (i = 0; i < event_data->NumEntries; i++) {
4215 		handle =
4216 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4217 		if (!handle)
4218 			continue;
4219 		reason_code = event_data->PortEntry[i].PortStatus;
4220 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4221 			_scsih_tm_tr_send(ioc, handle);
4222 	}
4223 
4224 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4225 	if (!switch_handle) {
4226 		_scsih_block_io_to_pcie_children_attached_directly(
4227 							ioc, event_data);
4228 		return;
4229 	}
4230 	/* TODO We are not supporting cascaded PCIe Switch removal yet */
4231 	if ((event_data->SwitchStatus
4232 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4233 		(event_data->SwitchStatus ==
4234 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4235 		_scsih_block_io_to_pcie_children_attached_directly(
4236 							ioc, event_data);
4237 
4238 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4239 		return;
4240 
4241 	/* mark ignore flag for pending events */
4242 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4243 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4244 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4245 			fw_event->ignore)
4246 			continue;
4247 		local_event_data =
4248 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4249 			fw_event->event_data;
4250 		if (local_event_data->SwitchStatus ==
4251 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4252 		    local_event_data->SwitchStatus ==
4253 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4254 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4255 				switch_handle) {
4256 				dewtprintk(ioc,
4257 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4258 				fw_event->ignore = 1;
4259 			}
4260 		}
4261 	}
4262 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4263 }
4264 
4265 /**
4266  * _scsih_set_volume_delete_flag - setting volume delete flag
4267  * @ioc: per adapter object
4268  * @handle: device handle
4269  *
4270  * This returns nothing.
4271  */
4272 static void
4273 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4274 {
4275 	struct _raid_device *raid_device;
4276 	struct MPT3SAS_TARGET *sas_target_priv_data;
4277 	unsigned long flags;
4278 
4279 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4280 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4281 	if (raid_device && raid_device->starget &&
4282 	    raid_device->starget->hostdata) {
4283 		sas_target_priv_data =
4284 		    raid_device->starget->hostdata;
4285 		sas_target_priv_data->deleted = 1;
4286 		dewtprintk(ioc,
4287 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4288 				    handle, (u64)raid_device->wwid));
4289 	}
4290 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4291 }
4292 
4293 /**
4294  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4295  * @handle: input handle
4296  * @a: handle for volume a
4297  * @b: handle for volume b
4298  *
4299  * IR firmware only supports two raid volumes.  The purpose of this
4300  * routine is to set the volume handle in either a or b, when the given
4301  * input handle is non-zero and has not already been set in a or b.
4302  */
4303 static void
4304 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4305 {
4306 	if (!handle || handle == *a || handle == *b)
4307 		return;
4308 	if (!*a)
4309 		*a = handle;
4310 	else if (!*b)
4311 		*b = handle;
4312 }
4313 
4314 /**
4315  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4316  * @ioc: per adapter object
4317  * @event_data: the event data payload
4318  * Context: interrupt time.
4319  *
4320  * This routine will send a target reset to the volume, followed by target
4321  * resets to the PDs. This is called when a PD has been removed, or a
4322  * volume has been deleted or removed. When the target reset is sent
4323  * to the volume, the PD target resets need to be queued to start upon
4324  * completion of the volume target reset.
4325  */
4326 static void
4327 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4328 	Mpi2EventDataIrConfigChangeList_t *event_data)
4329 {
4330 	Mpi2EventIrConfigElement_t *element;
4331 	int i;
4332 	u16 handle, volume_handle, a, b;
4333 	struct _tr_list *delayed_tr;
4334 
4335 	a = 0;
4336 	b = 0;
4337 
4338 	if (ioc->is_warpdrive)
4339 		return;
4340 
4341 	/* Volume Resets for Deleted or Removed */
4342 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4343 	for (i = 0; i < event_data->NumElements; i++, element++) {
4344 		if (le32_to_cpu(event_data->Flags) &
4345 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4346 			continue;
4347 		if (element->ReasonCode ==
4348 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4349 		    element->ReasonCode ==
4350 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4351 			volume_handle = le16_to_cpu(element->VolDevHandle);
4352 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4353 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4354 		}
4355 	}
4356 
4357 	/* Volume Resets for UNHIDE events */
4358 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4359 	for (i = 0; i < event_data->NumElements; i++, element++) {
4360 		if (le32_to_cpu(event_data->Flags) &
4361 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4362 			continue;
4363 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4364 			volume_handle = le16_to_cpu(element->VolDevHandle);
4365 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4366 		}
4367 	}
4368 
4369 	if (a)
4370 		_scsih_tm_tr_volume_send(ioc, a);
4371 	if (b)
4372 		_scsih_tm_tr_volume_send(ioc, b);
4373 
4374 	/* PD target resets */
4375 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4376 	for (i = 0; i < event_data->NumElements; i++, element++) {
4377 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4378 			continue;
4379 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4380 		volume_handle = le16_to_cpu(element->VolDevHandle);
4381 		clear_bit(handle, ioc->pd_handles);
4382 		if (!volume_handle)
4383 			_scsih_tm_tr_send(ioc, handle);
4384 		else if (volume_handle == a || volume_handle == b) {
4385 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4386 			BUG_ON(!delayed_tr);
4387 			INIT_LIST_HEAD(&delayed_tr->list);
4388 			delayed_tr->handle = handle;
4389 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4390 			dewtprintk(ioc,
4391 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4392 					    handle));
4393 		} else
4394 			_scsih_tm_tr_send(ioc, handle);
4395 	}
4396 }
4397 
4398 
4399 /**
4400  * _scsih_check_volume_delete_events - set delete flag for volumes
4401  * @ioc: per adapter object
4402  * @event_data: the event data payload
4403  * Context: interrupt time.
4404  *
4405  * This handles the case when the cable connected to an entire volume is
4406  * pulled. We take care of setting the deleted flag so normal IO will
4407  * not be sent.
4408  */
4409 static void
4410 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4411 	Mpi2EventDataIrVolume_t *event_data)
4412 {
4413 	u32 state;
4414 
4415 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4416 		return;
4417 	state = le32_to_cpu(event_data->NewValue);
4418 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4419 	    MPI2_RAID_VOL_STATE_FAILED)
4420 		_scsih_set_volume_delete_flag(ioc,
4421 		    le16_to_cpu(event_data->VolDevHandle));
4422 }
4423 
4424 /**
4425  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4426  * @ioc: per adapter object
4427  * @event_data: the temp threshold event data
4428  * Context: interrupt time.
4429  */
4430 static void
4431 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4432 	Mpi2EventDataTemperature_t *event_data)
4433 {
4434 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4435 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4436 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4437 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4438 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4439 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4440 			event_data->SensorNum);
4441 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4442 			event_data->CurrentTemperature);
4443 	}
4444 }
4445 
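/*
 * _scsih_set_satl_pending - serialize ATA pass-through commands per device
 * @scmd: pointer to scsi command object
 * @pending: true to mark an ATA_12/ATA_16 command pending, false to clear
 *
 * Used as a workaround for firmware SATL handling (see the comment in
 * scsih_qcmd()); only one ATA pass-through command is allowed outstanding
 * per device.  Returns non-zero when a command was already marked pending.
 */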
4446 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4447 {
4448 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4449 
4450 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4451 		return 0;
4452 
4453 	if (pending)
4454 		return test_and_set_bit(0, &priv->ata_command_pending);
4455 
4456 	clear_bit(0, &priv->ata_command_pending);
4457 	return 0;
4458 }
4459 
4460 /**
4461  * _scsih_flush_running_cmds - complete outstanding commands
4462  * @ioc: per adapter object
4463  *
4464  * Flush out all pending scmd commands following a host reset,
4465  * where all outstanding IO is dropped to the floor.
4466  */
4467 static void
4468 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4469 {
4470 	struct scsi_cmnd *scmd;
4471 	struct scsiio_tracker *st;
4472 	u16 smid;
4473 	int count = 0;
4474 
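	/*
	 * Walk every SCSI IO smid; any smid still associated with a scmd is
	 * an outstanding command that must be completed back to the midlayer.
	 */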
4475 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4476 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4477 		if (!scmd)
4478 			continue;
4479 		count++;
4480 		_scsih_set_satl_pending(scmd, false);
4481 		st = scsi_cmd_priv(scmd);
4482 		mpt3sas_base_clear_st(ioc, st);
4483 		scsi_dma_unmap(scmd);
4484 		if (ioc->pci_error_recovery || ioc->remove_host)
4485 			scmd->result = DID_NO_CONNECT << 16;
4486 		else
4487 			scmd->result = DID_RESET << 16;
4488 		scmd->scsi_done(scmd);
4489 	}
4490 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4491 }
4492 
4493 /**
4494  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4495  * @ioc: per adapter object
4496  * @scmd: pointer to scsi command object
4497  * @mpi_request: pointer to the SCSI_IO request message frame
4498  *
4499  * Supports protection type 1, 2 and 3.
4500  */
4501 static void
4502 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4503 	Mpi25SCSIIORequest_t *mpi_request)
4504 {
4505 	u16 eedp_flags;
4506 	unsigned char prot_op = scsi_get_prot_op(scmd);
4507 	unsigned char prot_type = scsi_get_prot_type(scmd);
4508 	Mpi25SCSIIORequest_t *mpi_request_3v =
4509 	   (Mpi25SCSIIORequest_t *)mpi_request;
4510 
4511 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4512 		return;
4513 
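	/*
	 * Only READ_STRIP and WRITE_INSERT operations are offloaded to the
	 * controller; for anything else no EEDP flags are set up.
	 */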
4514 	if (prot_op ==  SCSI_PROT_READ_STRIP)
4515 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4516 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
4517 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4518 	else
4519 		return;
4520 
4521 	switch (prot_type) {
4522 	case SCSI_PROT_DIF_TYPE1:
4523 	case SCSI_PROT_DIF_TYPE2:
4524 
4525 		/*
4526 		* enable ref/guard checking
4527 		* auto increment ref tag
4528 		*/
4529 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4530 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4531 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4532 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4533 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4534 		break;
4535 
4536 	case SCSI_PROT_DIF_TYPE3:
4537 
4538 		/*
4539 		* enable guard checking
4540 		*/
4541 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4542 
4543 		break;
4544 	}
4545 
4546 	mpi_request_3v->EEDPBlockSize =
4547 	    cpu_to_le16(scmd->device->sector_size);
4548 
4549 	if (ioc->is_gen35_ioc)
4550 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4551 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4552 }
4553 
4554 /**
4555  * _scsih_eedp_error_handling - return sense code for EEDP errors
4556  * @scmd: pointer to scsi command object
4557  * @ioc_status: ioc status
4558  */
4559 static void
4560 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4561 {
4562 	u8 ascq;
4563 
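	/*
	 * Map the EEDP IOC status to ILLEGAL REQUEST with ASC 0x10 and the
	 * ASCQ for a guard (0x01), application tag (0x02) or reference tag
	 * (0x03) check failure.
	 */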
4564 	switch (ioc_status) {
4565 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4566 		ascq = 0x01;
4567 		break;
4568 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4569 		ascq = 0x02;
4570 		break;
4571 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4572 		ascq = 0x03;
4573 		break;
4574 	default:
4575 		ascq = 0x00;
4576 		break;
4577 	}
4578 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4579 	    ascq);
4580 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4581 	    SAM_STAT_CHECK_CONDITION;
4582 }
4583 
4584 /**
4585  * scsih_qcmd - main scsi request entry point
4586  * @shost: SCSI host pointer
4587  * @scmd: pointer to scsi command object
4588  *
4589  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4590  *
4591  * Return: 0 on success.  If there's a failure, return either:
4592  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4593  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4594  */
4595 static int
4596 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4597 {
4598 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4599 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4600 	struct MPT3SAS_TARGET *sas_target_priv_data;
4601 	struct _raid_device *raid_device;
4602 	struct request *rq = scmd->request;
4603 	int class;
4604 	Mpi25SCSIIORequest_t *mpi_request;
4605 	struct _pcie_device *pcie_device = NULL;
4606 	u32 mpi_control;
4607 	u16 smid;
4608 	u16 handle;
4609 
4610 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4611 		scsi_print_command(scmd);
4612 
4613 	sas_device_priv_data = scmd->device->hostdata;
4614 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4615 		scmd->result = DID_NO_CONNECT << 16;
4616 		scmd->scsi_done(scmd);
4617 		return 0;
4618 	}
4619 
4620 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4621 		scmd->result = DID_NO_CONNECT << 16;
4622 		scmd->scsi_done(scmd);
4623 		return 0;
4624 	}
4625 
4626 	sas_target_priv_data = sas_device_priv_data->sas_target;
4627 
4628 	/* invalid device handle */
4629 	handle = sas_target_priv_data->handle;
4630 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4631 		scmd->result = DID_NO_CONNECT << 16;
4632 		scmd->scsi_done(scmd);
4633 		return 0;
4634 	}
4635 
4636 
4637 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4638 		/* host recovery or link resets sent via IOCTLs */
4639 		return SCSI_MLQUEUE_HOST_BUSY;
4640 	} else if (sas_target_priv_data->deleted) {
4641 		/* device has been deleted */
4642 		scmd->result = DID_NO_CONNECT << 16;
4643 		scmd->scsi_done(scmd);
4644 		return 0;
4645 	} else if (sas_target_priv_data->tm_busy ||
4646 		   sas_device_priv_data->block) {
4647 		/* device busy with task management */
4648 		return SCSI_MLQUEUE_DEVICE_BUSY;
4649 	}
4650 
4651 	/*
4652 	 * Bug workaround for firmware SATL handling.  The loop
4653 	 * is based on atomic operations and ensures consistency
4654 	 * since we're lockless at this point.
4655 	 */
4656 	do {
4657 		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4658 			scmd->result = SAM_STAT_BUSY;
4659 			scmd->scsi_done(scmd);
4660 			return 0;
4661 		}
4662 	} while (_scsih_set_satl_pending(scmd, true));
4663 
4664 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4665 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
4666 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4667 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4668 	else
4669 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4670 
4671 	/* set tags */
4672 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4673 	/* NCQ Prio supported, make sure control indicated high priority */
4674 	if (sas_device_priv_data->ncq_prio_enable) {
4675 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4676 		if (class == IOPRIO_CLASS_RT)
4677 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4678 	}
4679 	/* Make sure Device is not raid volume.
4680 	 * We do not expose raid functionality to upper layer for warpdrive.
4681 	 */
4682 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4683 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
4684 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4685 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4686 
4687 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4688 	if (!smid) {
4689 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4690 		_scsih_set_satl_pending(scmd, false);
4691 		goto out;
4692 	}
4693 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4694 	memset(mpi_request, 0, ioc->request_sz);
4695 	_scsih_setup_eedp(ioc, scmd, mpi_request);
4696 
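	/*
	 * For 32 byte CDBs the extra 16 CDB bytes (4 dwords) are encoded in
	 * the Control field's additional CDB length.
	 */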
4697 	if (scmd->cmd_len == 32)
4698 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4699 	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4700 	if (sas_device_priv_data->sas_target->flags &
4701 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
4702 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4703 	else
4704 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4705 	mpi_request->DevHandle = cpu_to_le16(handle);
4706 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4707 	mpi_request->Control = cpu_to_le32(mpi_control);
4708 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4709 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4710 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4711 	mpi_request->SenseBufferLowAddress =
4712 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4713 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4714 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4715 	    mpi_request->LUN);
4716 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4717 
4718 	if (mpi_request->DataLength) {
4719 		pcie_device = sas_target_priv_data->pcie_dev;
4720 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4721 			mpt3sas_base_free_smid(ioc, smid);
4722 			_scsih_set_satl_pending(scmd, false);
4723 			goto out;
4724 		}
4725 	} else
4726 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4727 
4728 	raid_device = sas_target_priv_data->raid_device;
4729 	if (raid_device && raid_device->direct_io_enabled)
4730 		mpt3sas_setup_direct_io(ioc, scmd,
4731 			raid_device, mpi_request);
4732 
4733 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4734 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4735 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4736 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4737 			ioc->put_smid_fast_path(ioc, smid, handle);
4738 		} else
4739 			ioc->put_smid_scsi_io(ioc, smid,
4740 			    le16_to_cpu(mpi_request->DevHandle));
4741 	} else
4742 		ioc->put_smid_default(ioc, smid);
4743 	return 0;
4744 
4745  out:
4746 	return SCSI_MLQUEUE_HOST_BUSY;
4747 }
4748 
4749 /**
4750  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4751  * @sense_buffer: sense data returned by target
4752  * @data: normalized skey/asc/ascq
4753  */
4754 static void
4755 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4756 {
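	/* Response codes 0x72/0x73 indicate descriptor format sense data. */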
4757 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
4758 		/* descriptor format */
4759 		data->skey = sense_buffer[1] & 0x0F;
4760 		data->asc = sense_buffer[2];
4761 		data->ascq = sense_buffer[3];
4762 	} else {
4763 		/* fixed format */
4764 		data->skey = sense_buffer[2] & 0x0F;
4765 		data->asc = sense_buffer[12];
4766 		data->ascq = sense_buffer[13];
4767 	}
4768 }
4769 
4770 /**
4771  * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
4772  * @ioc: per adapter object
4773  * @scmd: pointer to scsi command object
4774  * @mpi_reply: reply mf payload returned from firmware
4775  * @smid: system request message index
4776  *
4777  * scsi_status - SCSI Status code returned from target device
4778  * scsi_state - state info associated with SCSI_IO determined by ioc
4779  * ioc_status - ioc supplied status info
4780  */
4781 static void
4782 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4783 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4784 {
4785 	u32 response_info;
4786 	u8 *response_bytes;
4787 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4788 	    MPI2_IOCSTATUS_MASK;
4789 	u8 scsi_state = mpi_reply->SCSIState;
4790 	u8 scsi_status = mpi_reply->SCSIStatus;
4791 	char *desc_ioc_state = NULL;
4792 	char *desc_scsi_status = NULL;
4793 	char *desc_scsi_state = ioc->tmp_string;
4794 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4795 	struct _sas_device *sas_device = NULL;
4796 	struct _pcie_device *pcie_device = NULL;
4797 	struct scsi_target *starget = scmd->device->sdev_target;
4798 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4799 	char *device_str = NULL;
4800 
4801 	if (!priv_target)
4802 		return;
4803 	if (ioc->hide_ir_msg)
4804 		device_str = "WarpDrive";
4805 	else
4806 		device_str = "volume";
4807 
4808 	if (log_info == 0x31170000)
4809 		return;
4810 
4811 	switch (ioc_status) {
4812 	case MPI2_IOCSTATUS_SUCCESS:
4813 		desc_ioc_state = "success";
4814 		break;
4815 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
4816 		desc_ioc_state = "invalid function";
4817 		break;
4818 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4819 		desc_ioc_state = "scsi recovered error";
4820 		break;
4821 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4822 		desc_ioc_state = "scsi invalid dev handle";
4823 		break;
4824 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4825 		desc_ioc_state = "scsi device not there";
4826 		break;
4827 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4828 		desc_ioc_state = "scsi data overrun";
4829 		break;
4830 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4831 		desc_ioc_state = "scsi data underrun";
4832 		break;
4833 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4834 		desc_ioc_state = "scsi io data error";
4835 		break;
4836 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4837 		desc_ioc_state = "scsi protocol error";
4838 		break;
4839 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4840 		desc_ioc_state = "scsi task terminated";
4841 		break;
4842 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4843 		desc_ioc_state = "scsi residual mismatch";
4844 		break;
4845 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4846 		desc_ioc_state = "scsi task mgmt failed";
4847 		break;
4848 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4849 		desc_ioc_state = "scsi ioc terminated";
4850 		break;
4851 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4852 		desc_ioc_state = "scsi ext terminated";
4853 		break;
4854 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4855 		desc_ioc_state = "eedp guard error";
4856 		break;
4857 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4858 		desc_ioc_state = "eedp ref tag error";
4859 		break;
4860 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4861 		desc_ioc_state = "eedp app tag error";
4862 		break;
4863 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4864 		desc_ioc_state = "insufficient power";
4865 		break;
4866 	default:
4867 		desc_ioc_state = "unknown";
4868 		break;
4869 	}
4870 
4871 	switch (scsi_status) {
4872 	case MPI2_SCSI_STATUS_GOOD:
4873 		desc_scsi_status = "good";
4874 		break;
4875 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
4876 		desc_scsi_status = "check condition";
4877 		break;
4878 	case MPI2_SCSI_STATUS_CONDITION_MET:
4879 		desc_scsi_status = "condition met";
4880 		break;
4881 	case MPI2_SCSI_STATUS_BUSY:
4882 		desc_scsi_status = "busy";
4883 		break;
4884 	case MPI2_SCSI_STATUS_INTERMEDIATE:
4885 		desc_scsi_status = "intermediate";
4886 		break;
4887 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4888 		desc_scsi_status = "intermediate condmet";
4889 		break;
4890 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4891 		desc_scsi_status = "reservation conflict";
4892 		break;
4893 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4894 		desc_scsi_status = "command terminated";
4895 		break;
4896 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
4897 		desc_scsi_status = "task set full";
4898 		break;
4899 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
4900 		desc_scsi_status = "aca active";
4901 		break;
4902 	case MPI2_SCSI_STATUS_TASK_ABORTED:
4903 		desc_scsi_status = "task aborted";
4904 		break;
4905 	default:
4906 		desc_scsi_status = "unknown";
4907 		break;
4908 	}
4909 
4910 	desc_scsi_state[0] = '\0';
4911 	if (!scsi_state)
4912 		desc_scsi_state = " ";
4913 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4914 		strcat(desc_scsi_state, "response info ");
4915 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4916 		strcat(desc_scsi_state, "state terminated ");
4917 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4918 		strcat(desc_scsi_state, "no status ");
4919 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4920 		strcat(desc_scsi_state, "autosense failed ");
4921 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4922 		strcat(desc_scsi_state, "autosense valid ");
4923 
4924 	scsi_print_command(scmd);
4925 
4926 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4927 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4928 			 device_str, (u64)priv_target->sas_address);
4929 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4930 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4931 		if (pcie_device) {
4932 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4933 				 (u64)pcie_device->wwid, pcie_device->port_num);
4934 			if (pcie_device->enclosure_handle != 0)
4935 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4936 					 (u64)pcie_device->enclosure_logical_id,
4937 					 pcie_device->slot);
4938 			if (pcie_device->connector_name[0])
4939 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4940 					 pcie_device->enclosure_level,
4941 					 pcie_device->connector_name);
4942 			pcie_device_put(pcie_device);
4943 		}
4944 	} else {
4945 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4946 		if (sas_device) {
4947 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4948 				 (u64)sas_device->sas_address, sas_device->phy);
4949 
4950 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
4951 			    NULL, NULL);
4952 
4953 			sas_device_put(sas_device);
4954 		}
4955 	}
4956 
4957 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4958 		 le16_to_cpu(mpi_reply->DevHandle),
4959 		 desc_ioc_state, ioc_status, smid);
4960 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
4961 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
4962 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
4963 		 le16_to_cpu(mpi_reply->TaskTag),
4964 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
4965 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
4966 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
4967 
4968 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
4969 		struct sense_info data;
4970 		_scsih_normalize_sense(scmd->sense_buffer, &data);
4971 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
4972 			 data.skey, data.asc, data.ascq,
4973 			 le32_to_cpu(mpi_reply->SenseCount));
4974 	}
4975 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4976 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
4977 		response_bytes = (u8 *)&response_info;
4978 		_scsih_response_code(ioc, response_bytes[0]);
4979 	}
4980 }
4981 
4982 /**
4983  * _scsih_turn_on_pfa_led - illuminate PFA LED
4984  * @ioc: per adapter object
4985  * @handle: device handle
4986  * Context: process
4987  */
4988 static void
4989 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4990 {
4991 	Mpi2SepReply_t mpi_reply;
4992 	Mpi2SepRequest_t mpi_request;
4993 	struct _sas_device *sas_device;
4994 
4995 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
4996 	if (!sas_device)
4997 		return;
4998 
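	/*
	 * Ask the enclosure processor (SEP) to set the predicted fault
	 * status for this device's slot, addressed by device handle.
	 */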
4999 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5000 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5001 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5002 	mpi_request.SlotStatus =
5003 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5004 	mpi_request.DevHandle = cpu_to_le16(handle);
5005 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5006 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5007 	    &mpi_request)) != 0) {
5008 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5009 			__FILE__, __LINE__, __func__);
5010 		goto out;
5011 	}
5012 	sas_device->pfa_led_on = 1;
5013 
5014 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5015 		dewtprintk(ioc,
5016 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5017 				    le16_to_cpu(mpi_reply.IOCStatus),
5018 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5019 		goto out;
5020 	}
5021 out:
5022 	sas_device_put(sas_device);
5023 }
5024 
5025 /**
5026  * _scsih_turn_off_pfa_led - turn off Fault LED
5027  * @ioc: per adapter object
5028  * @sas_device: sas device whose PFA LED is to be turned off
5029  * Context: process
5030  */
5031 static void
5032 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5033 	struct _sas_device *sas_device)
5034 {
5035 	Mpi2SepReply_t mpi_reply;
5036 	Mpi2SepRequest_t mpi_request;
5037 
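	/*
	 * Clear the slot status with a SEP WRITE_STATUS request. The slot is
	 * addressed by enclosure handle and slot number rather than by device
	 * handle, as this is typically called while the device is being
	 * removed and its handle may no longer be valid.
	 */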
5038 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5039 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5040 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5041 	mpi_request.SlotStatus = 0;
5042 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5043 	mpi_request.DevHandle = 0;
5044 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5045 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5046 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5047 		&mpi_request)) != 0) {
5048 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5049 			__FILE__, __LINE__, __func__);
5050 		return;
5051 	}
5052 
5053 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5054 		dewtprintk(ioc,
5055 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5056 				    le16_to_cpu(mpi_reply.IOCStatus),
5057 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5058 		return;
5059 	}
5060 }
5061 
5062 /**
5063  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5064  * @ioc: per adapter object
5065  * @handle: device handle
5066  * Context: interrupt.
5067  */
5068 static void
5069 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5070 {
5071 	struct fw_event_work *fw_event;
5072 
5073 	fw_event = alloc_fw_event_work(0);
5074 	if (!fw_event)
5075 		return;
5076 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5077 	fw_event->device_handle = handle;
5078 	fw_event->ioc = ioc;
5079 	_scsih_fw_event_add(ioc, fw_event);
5080 	fw_event_work_put(fw_event);
5081 }
5082 
5083 /**
 * _scsih_smart_predicted_fault - process SMART errors
5085  * @ioc: per adapter object
5086  * @handle: device handle
5087  * Context: interrupt.
5088  */
5089 static void
5090 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5091 {
5092 	struct scsi_target *starget;
5093 	struct MPT3SAS_TARGET *sas_target_priv_data;
5094 	Mpi2EventNotificationReply_t *event_reply;
5095 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5096 	struct _sas_device *sas_device;
5097 	ssize_t sz;
5098 	unsigned long flags;
5099 
5100 	/* only handle non-raid devices */
5101 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5102 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5103 	if (!sas_device)
5104 		goto out_unlock;
5105 
5106 	starget = sas_device->starget;
5107 	sas_target_priv_data = starget->hostdata;
5108 
5109 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5110 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5111 		goto out_unlock;
5112 
5113 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5114 
5115 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5116 
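	/*
	 * Only IBM-branded adapters drive the predicted-fault LED via the
	 * enclosure processor; the SEP request blocks, so queue a firmware
	 * event to issue it later from process context.
	 */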
5117 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5118 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5119 
5120 	/* insert into event log */
5121 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5122 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5123 	event_reply = kzalloc(sz, GFP_KERNEL);
5124 	if (!event_reply) {
5125 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5126 			__FILE__, __LINE__, __func__);
5127 		goto out;
5128 	}
5129 
5130 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5131 	event_reply->Event =
5132 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5133 	event_reply->MsgLength = sz/4;
5134 	event_reply->EventDataLength =
5135 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5136 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5137 	    event_reply->EventData;
5138 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5139 	event_data->ASC = 0x5D;
5140 	event_data->DevHandle = cpu_to_le16(handle);
5141 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5142 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5143 	kfree(event_reply);
5144 out:
5145 	if (sas_device)
5146 		sas_device_put(sas_device);
5147 	return;
5148 
5149 out_unlock:
5150 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5151 	goto out;
5152 }
5153 
5154 /**
5155  * _scsih_io_done - scsi request callback
5156  * @ioc: per adapter object
5157  * @smid: system request message index
5158  * @msix_index: MSIX table index supplied by the OS
5159  * @reply: reply message frame(lower 32bit addr)
5160  *
5161  * Callback handler when using _scsih_qcmd.
5162  *
5163  * Return: 1 meaning mf should be freed from _base_interrupt
5164  *         0 means the mf is freed from this function.
5165  */
5166 static u8
5167 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5168 {
5169 	Mpi25SCSIIORequest_t *mpi_request;
5170 	Mpi2SCSIIOReply_t *mpi_reply;
5171 	struct scsi_cmnd *scmd;
5172 	struct scsiio_tracker *st;
5173 	u16 ioc_status;
5174 	u32 xfer_cnt;
5175 	u8 scsi_state;
5176 	u8 scsi_status;
5177 	u32 log_info;
5178 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5179 	u32 response_code = 0;
5180 
5181 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5182 
5183 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5184 	if (scmd == NULL)
5185 		return 1;
5186 
5187 	_scsih_set_satl_pending(scmd, false);
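	/*
	 * Clear the SATL pending flag that qcmd sets for ATA pass-through
	 * (ATA_12/ATA_16) commands, allowing the next translated command to
	 * be issued to this device.
	 */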
5188 
5189 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5190 
5191 	if (mpi_reply == NULL) {
5192 		scmd->result = DID_OK << 16;
5193 		goto out;
5194 	}
5195 
5196 	sas_device_priv_data = scmd->device->hostdata;
5197 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5198 	     sas_device_priv_data->sas_target->deleted) {
5199 		scmd->result = DID_NO_CONNECT << 16;
5200 		goto out;
5201 	}
5202 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5203 
	/*
	 * WARPDRIVE: if direct_io is set then this was a direct I/O;
	 * a failed direct I/O is re-issued (redirected) to the volume.
	 */
5208 	st = scsi_cmd_priv(scmd);
5209 	if (st->direct_io &&
5210 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5211 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5212 		st->direct_io = 0;
5213 		st->scmd = scmd;
5214 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5215 		mpi_request->DevHandle =
5216 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5217 		ioc->put_smid_scsi_io(ioc, smid,
5218 		    sas_device_priv_data->sas_target->handle);
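		/*
		 * Returning 0: the message frame was re-posted to the volume,
		 * so it must not be freed by _base_interrupt.
		 */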
5219 		return 0;
5220 	}
5221 	/* turning off TLR */
5222 	scsi_state = mpi_reply->SCSIState;
5223 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5224 		response_code =
5225 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5226 	if (!sas_device_priv_data->tlr_snoop_check) {
5227 		sas_device_priv_data->tlr_snoop_check++;
5228 		if ((!ioc->is_warpdrive &&
5229 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5230 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5231 		    && sas_is_tlr_enabled(scmd->device) &&
5232 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5233 			sas_disable_tlr(scmd->device);
5234 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5235 		}
5236 	}
5237 
5238 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5239 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5240 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5241 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5242 	else
5243 		log_info = 0;
5244 	ioc_status &= MPI2_IOCSTATUS_MASK;
5245 	scsi_status = mpi_reply->SCSIStatus;
5246 
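	/*
	 * A data underrun with nothing transferred while the device returned
	 * BUSY, RESERVATION CONFLICT or TASK SET FULL is treated as a plain
	 * SCSI status: force the IOC status to SUCCESS so only the SCSI
	 * status below is surfaced to the midlayer.
	 */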
5247 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5248 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5249 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5250 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5251 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5252 	}
5253 
5254 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5255 		struct sense_info data;
5256 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5257 		    smid);
5258 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5259 		    le32_to_cpu(mpi_reply->SenseCount));
5260 		memcpy(scmd->sense_buffer, sense_data, sz);
5261 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5262 		/* failure prediction threshold exceeded */
5263 		if (data.asc == 0x5D)
5264 			_scsih_smart_predicted_fault(ioc,
5265 			    le16_to_cpu(mpi_reply->DevHandle));
5266 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5267 
5268 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5269 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5270 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5271 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5272 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5273 	}
5274 	switch (ioc_status) {
5275 	case MPI2_IOCSTATUS_BUSY:
5276 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5277 		scmd->result = SAM_STAT_BUSY;
5278 		break;
5279 
5280 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5281 		scmd->result = DID_NO_CONNECT << 16;
5282 		break;
5283 
5284 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5285 		if (sas_device_priv_data->block) {
5286 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5287 			goto out;
5288 		}
5289 		if (log_info == 0x31110630) {
5290 			if (scmd->retries > 2) {
5291 				scmd->result = DID_NO_CONNECT << 16;
5292 				scsi_device_set_state(scmd->device,
5293 				    SDEV_OFFLINE);
5294 			} else {
5295 				scmd->result = DID_SOFT_ERROR << 16;
5296 				scmd->device->expecting_cc_ua = 1;
5297 			}
5298 			break;
5299 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5300 			scmd->result = DID_RESET << 16;
5301 			break;
5302 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5303 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5304 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5305 			scmd->result = DID_RESET << 16;
5306 			break;
5307 		}
5308 		scmd->result = DID_SOFT_ERROR << 16;
5309 		break;
5310 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5311 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5312 		scmd->result = DID_RESET << 16;
5313 		break;
5314 
5315 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5316 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5317 			scmd->result = DID_SOFT_ERROR << 16;
5318 		else
5319 			scmd->result = (DID_OK << 16) | scsi_status;
5320 		break;
5321 
5322 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5323 		scmd->result = (DID_OK << 16) | scsi_status;
5324 
5325 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5326 			break;
5327 
5328 		if (xfer_cnt < scmd->underflow) {
5329 			if (scsi_status == SAM_STAT_BUSY)
5330 				scmd->result = SAM_STAT_BUSY;
5331 			else
5332 				scmd->result = DID_SOFT_ERROR << 16;
5333 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5334 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5335 			scmd->result = DID_SOFT_ERROR << 16;
5336 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5337 			scmd->result = DID_RESET << 16;
5338 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
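			/*
			 * A zero-length REPORT LUNS response is converted into
			 * a CHECK CONDITION with fixed-format sense (0x70),
			 * ILLEGAL REQUEST, ASC 0x20 (invalid command operation
			 * code), so the midlayer falls back to sequential LUN
			 * scanning.
			 */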
5339 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5340 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5341 			scmd->result = (DRIVER_SENSE << 24) |
5342 			    SAM_STAT_CHECK_CONDITION;
5343 			scmd->sense_buffer[0] = 0x70;
5344 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5345 			scmd->sense_buffer[12] = 0x20;
5346 			scmd->sense_buffer[13] = 0;
5347 		}
5348 		break;
5349 
5350 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5351 		scsi_set_resid(scmd, 0);
5352 		/* fall through */
5353 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5354 	case MPI2_IOCSTATUS_SUCCESS:
5355 		scmd->result = (DID_OK << 16) | scsi_status;
5356 		if (response_code ==
5357 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5358 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5359 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5360 			scmd->result = DID_SOFT_ERROR << 16;
5361 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5362 			scmd->result = DID_RESET << 16;
5363 		break;
5364 
5365 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5366 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5367 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5368 		_scsih_eedp_error_handling(scmd, ioc_status);
5369 		break;
5370 
5371 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5372 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5373 	case MPI2_IOCSTATUS_INVALID_SGL:
5374 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5375 	case MPI2_IOCSTATUS_INVALID_FIELD:
5376 	case MPI2_IOCSTATUS_INVALID_STATE:
5377 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5378 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5379 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5380 	default:
5381 		scmd->result = DID_SOFT_ERROR << 16;
5382 		break;
5383 
5384 	}
5385 
5386 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5387 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5388 
5389  out:
5390 
5391 	scsi_dma_unmap(scmd);
5392 	mpt3sas_base_free_smid(ioc, smid);
5393 	scmd->scsi_done(scmd);
5394 	return 0;
5395 }
5396 
5397 /**
5398  * _scsih_sas_host_refresh - refreshing sas host object contents
5399  * @ioc: per adapter object
5400  * Context: user
5401  *
 * During port enable, fw will send topology events for every device. It's
 * possible that the handles may have changed from the previous setting, so
 * this code keeps the handles updated when they change.
5405  */
5406 static void
5407 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5408 {
5409 	u16 sz;
5410 	u16 ioc_status;
5411 	int i;
5412 	Mpi2ConfigReply_t mpi_reply;
5413 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5414 	u16 attached_handle;
5415 	u8 link_rate;
5416 
5417 	dtmprintk(ioc,
5418 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5419 			   (u64)ioc->sas_hba.sas_address));
5420 
5421 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5422 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5423 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5424 	if (!sas_iounit_pg0) {
5425 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5426 			__FILE__, __LINE__, __func__);
5427 		return;
5428 	}
5429 
5430 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5431 	    sas_iounit_pg0, sz)) != 0)
5432 		goto out;
5433 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5434 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5435 		goto out;
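	/*
	 * Walk every HBA phy: refresh the controller device handle and report
	 * the (possibly changed) attached device handle and negotiated link
	 * rate (upper nibble of NegotiatedLinkRate) to the SAS transport
	 * layer. A phy with an attached handle is reported with at least a
	 * 1.5 Gbps rate so the transport layer does not consider the link
	 * down.
	 */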
5436 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5437 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5438 		if (i == 0)
5439 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5440 			    PhyData[0].ControllerDevHandle);
5441 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5442 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5443 		    AttachedDevHandle);
5444 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5445 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5446 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5447 		    attached_handle, i, link_rate);
5448 	}
5449  out:
5450 	kfree(sas_iounit_pg0);
5451 }
5452 
5453 /**
5454  * _scsih_sas_host_add - create sas host object
5455  * @ioc: per adapter object
5456  *
5457  * Creating host side data object, stored in ioc->sas_hba
5458  */
5459 static void
5460 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5461 {
5462 	int i;
5463 	Mpi2ConfigReply_t mpi_reply;
5464 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5465 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5466 	Mpi2SasPhyPage0_t phy_pg0;
5467 	Mpi2SasDevicePage0_t sas_device_pg0;
5468 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5469 	u16 ioc_status;
5470 	u16 sz;
5471 	u8 device_missing_delay;
5472 	u8 num_phys;
5473 
5474 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5475 	if (!num_phys) {
5476 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5477 			__FILE__, __LINE__, __func__);
5478 		return;
5479 	}
5480 	ioc->sas_hba.phy = kcalloc(num_phys,
5481 	    sizeof(struct _sas_phy), GFP_KERNEL);
5482 	if (!ioc->sas_hba.phy) {
5483 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5484 			__FILE__, __LINE__, __func__);
5485 		goto out;
5486 	}
5487 	ioc->sas_hba.num_phys = num_phys;
5488 
5489 	/* sas_iounit page 0 */
5490 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5491 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5492 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5493 	if (!sas_iounit_pg0) {
5494 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5495 			__FILE__, __LINE__, __func__);
5496 		return;
5497 	}
5498 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5499 	    sas_iounit_pg0, sz))) {
5500 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5501 			__FILE__, __LINE__, __func__);
5502 		goto out;
5503 	}
5504 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5505 	    MPI2_IOCSTATUS_MASK;
5506 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5507 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5508 			__FILE__, __LINE__, __func__);
5509 		goto out;
5510 	}
5511 
5512 	/* sas_iounit page 1 */
5513 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5514 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5515 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5516 	if (!sas_iounit_pg1) {
5517 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5518 			__FILE__, __LINE__, __func__);
5519 		goto out;
5520 	}
5521 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5522 	    sas_iounit_pg1, sz))) {
5523 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5524 			__FILE__, __LINE__, __func__);
5525 		goto out;
5526 	}
5527 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5528 	    MPI2_IOCSTATUS_MASK;
5529 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5530 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5531 			__FILE__, __LINE__, __func__);
5532 		goto out;
5533 	}
5534 
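	/*
	 * ReportDeviceMissingDelay: if the UNIT_16 bit is set the timeout
	 * field is in units of 16 seconds, otherwise it is in seconds.
	 */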
5535 	ioc->io_missing_delay =
5536 	    sas_iounit_pg1->IODeviceMissingDelay;
5537 	device_missing_delay =
5538 	    sas_iounit_pg1->ReportDeviceMissingDelay;
5539 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5540 		ioc->device_missing_delay = (device_missing_delay &
5541 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5542 	else
5543 		ioc->device_missing_delay = device_missing_delay &
5544 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5545 
5546 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5547 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5548 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5549 		    i))) {
5550 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5551 				__FILE__, __LINE__, __func__);
5552 			goto out;
5553 		}
5554 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5555 		    MPI2_IOCSTATUS_MASK;
5556 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5557 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5558 				__FILE__, __LINE__, __func__);
5559 			goto out;
5560 		}
5561 
5562 		if (i == 0)
5563 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5564 			    PhyData[0].ControllerDevHandle);
5565 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5566 		ioc->sas_hba.phy[i].phy_id = i;
5567 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5568 		    phy_pg0, ioc->sas_hba.parent_dev);
5569 	}
5570 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5571 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5572 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5573 			__FILE__, __LINE__, __func__);
5574 		goto out;
5575 	}
5576 	ioc->sas_hba.enclosure_handle =
5577 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5578 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5579 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5580 		 ioc->sas_hba.handle,
5581 		 (u64)ioc->sas_hba.sas_address,
5582 		 ioc->sas_hba.num_phys);
5583 
5584 	if (ioc->sas_hba.enclosure_handle) {
5585 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5586 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5587 		   ioc->sas_hba.enclosure_handle)))
5588 			ioc->sas_hba.enclosure_logical_id =
5589 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5590 	}
5591 
5592  out:
5593 	kfree(sas_iounit_pg1);
5594 	kfree(sas_iounit_pg0);
5595 }
5596 
5597 /**
5598  * _scsih_expander_add -  creating expander object
5599  * @ioc: per adapter object
5600  * @handle: expander handle
5601  *
5602  * Creating expander object, stored in ioc->sas_expander_list.
5603  *
5604  * Return: 0 for success, else error.
5605  */
5606 static int
5607 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5608 {
5609 	struct _sas_node *sas_expander;
5610 	struct _enclosure_node *enclosure_dev;
5611 	Mpi2ConfigReply_t mpi_reply;
5612 	Mpi2ExpanderPage0_t expander_pg0;
5613 	Mpi2ExpanderPage1_t expander_pg1;
5614 	u32 ioc_status;
5615 	u16 parent_handle;
5616 	u64 sas_address, sas_address_parent = 0;
5617 	int i;
5618 	unsigned long flags;
5619 	struct _sas_port *mpt3sas_port = NULL;
5620 
5621 	int rc = 0;
5622 
5623 	if (!handle)
5624 		return -1;
5625 
5626 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5627 		return -1;
5628 
5629 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5630 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5631 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5632 			__FILE__, __LINE__, __func__);
5633 		return -1;
5634 	}
5635 
5636 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5637 	    MPI2_IOCSTATUS_MASK;
5638 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5639 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5640 			__FILE__, __LINE__, __func__);
5641 		return -1;
5642 	}
5643 
5644 	/* handle out of order topology events */
5645 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5646 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5647 	    != 0) {
5648 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5649 			__FILE__, __LINE__, __func__);
5650 		return -1;
5651 	}
5652 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5653 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5654 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5655 		    sas_address_parent);
5656 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5657 		if (!sas_expander) {
5658 			rc = _scsih_expander_add(ioc, parent_handle);
5659 			if (rc != 0)
5660 				return rc;
5661 		}
5662 	}
5663 
5664 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5665 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
5666 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5667 	    sas_address);
5668 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5669 
5670 	if (sas_expander)
5671 		return 0;
5672 
5673 	sas_expander = kzalloc(sizeof(struct _sas_node),
5674 	    GFP_KERNEL);
5675 	if (!sas_expander) {
5676 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5677 			__FILE__, __LINE__, __func__);
5678 		return -1;
5679 	}
5680 
5681 	sas_expander->handle = handle;
5682 	sas_expander->num_phys = expander_pg0.NumPhys;
5683 	sas_expander->sas_address_parent = sas_address_parent;
5684 	sas_expander->sas_address = sas_address;
5685 
5686 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5687 		 handle, parent_handle,
5688 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
5689 
5690 	if (!sas_expander->num_phys)
5691 		goto out_fail;
5692 	sas_expander->phy = kcalloc(sas_expander->num_phys,
5693 	    sizeof(struct _sas_phy), GFP_KERNEL);
5694 	if (!sas_expander->phy) {
5695 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5696 			__FILE__, __LINE__, __func__);
5697 		rc = -1;
5698 		goto out_fail;
5699 	}
5700 
5701 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
5702 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5703 	    sas_address_parent);
5704 	if (!mpt3sas_port) {
5705 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5706 			__FILE__, __LINE__, __func__);
5707 		rc = -1;
5708 		goto out_fail;
5709 	}
5710 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5711 
5712 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
5713 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5714 		    &expander_pg1, i, handle))) {
5715 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5716 				__FILE__, __LINE__, __func__);
5717 			rc = -1;
5718 			goto out_fail;
5719 		}
5720 		sas_expander->phy[i].handle = handle;
5721 		sas_expander->phy[i].phy_id = i;
5722 
5723 		if ((mpt3sas_transport_add_expander_phy(ioc,
5724 		    &sas_expander->phy[i], expander_pg1,
5725 		    sas_expander->parent_dev))) {
5726 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5727 				__FILE__, __LINE__, __func__);
5728 			rc = -1;
5729 			goto out_fail;
5730 		}
5731 	}
5732 
5733 	if (sas_expander->enclosure_handle) {
5734 		enclosure_dev =
5735 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
5736 						sas_expander->enclosure_handle);
5737 		if (enclosure_dev)
5738 			sas_expander->enclosure_logical_id =
5739 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5740 	}
5741 
5742 	_scsih_expander_node_add(ioc, sas_expander);
5743 	return 0;
5744 
5745  out_fail:
5746 
5747 	if (mpt3sas_port)
5748 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5749 		    sas_address_parent);
5750 	kfree(sas_expander);
5751 	return rc;
5752 }
5753 
5754 /**
5755  * mpt3sas_expander_remove - removing expander object
5756  * @ioc: per adapter object
5757  * @sas_address: expander sas_address
5758  */
5759 void
5760 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5761 {
5762 	struct _sas_node *sas_expander;
5763 	unsigned long flags;
5764 
5765 	if (ioc->shost_recovery)
5766 		return;
5767 
5768 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5769 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5770 	    sas_address);
5771 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5772 	if (sas_expander)
5773 		_scsih_expander_node_remove(ioc, sas_expander);
5774 }
5775 
5776 /**
5777  * _scsih_done -  internal SCSI_IO callback handler.
5778  * @ioc: per adapter object
5779  * @smid: system request message index
5780  * @msix_index: MSIX table index supplied by the OS
5781  * @reply: reply message frame(lower 32bit addr)
5782  *
5783  * Callback handler when sending internal generated SCSI_IO.
 * The callback index passed is ioc->scsih_cb_idx.
5785  *
5786  * Return: 1 meaning mf should be freed from _base_interrupt
5787  *         0 means the mf is freed from this function.
5788  */
5789 static u8
5790 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5791 {
5792 	MPI2DefaultReply_t *mpi_reply;
5793 
5794 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
5795 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5796 		return 1;
5797 	if (ioc->scsih_cmds.smid != smid)
5798 		return 1;
5799 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5800 	if (mpi_reply) {
5801 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
5802 		    mpi_reply->MsgLength*4);
5803 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5804 	}
5805 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5806 	complete(&ioc->scsih_cmds.done);
5807 	return 1;
5808 }
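/*
 * A minimal sketch (not copied from any one caller) of how an internal
 * SCSI_IO issuer pairs with _scsih_done: the issuing thread, holding
 * ioc->scsih_cmds.mutex, marks the command pending, posts the frame and
 * waits; _scsih_done copies the reply, flags it complete and wakes the
 * waiter.
 *
 *	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
 *	ioc->scsih_cmds.smid = smid;
 *	init_completion(&ioc->scsih_cmds.done);
 *	ioc->put_smid_default(ioc, smid);
 *	wait_for_completion_timeout(&ioc->scsih_cmds.done,
 *	    msecs_to_jiffies(10000));
 */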
5809 
5810 
5811 
5812 
5813 #define MPT3_MAX_LUNS (255)
5814 
5815 
5816 /**
5817  * _scsih_check_access_status - check access flags
5818  * @ioc: per adapter object
5819  * @sas_address: sas address
5820  * @handle: sas device handle
5821  * @access_status: errors returned during discovery of the device
5822  *
5823  * Return: 0 for success, else failure
5824  */
5825 static u8
5826 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5827 	u16 handle, u8 access_status)
5828 {
5829 	u8 rc = 1;
5830 	char *desc = NULL;
5831 
5832 	switch (access_status) {
5833 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5834 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5835 		rc = 0;
5836 		break;
5837 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5838 		desc = "sata capability failed";
5839 		break;
5840 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5841 		desc = "sata affiliation conflict";
5842 		break;
5843 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5844 		desc = "route not addressable";
5845 		break;
5846 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5847 		desc = "smp error not addressable";
5848 		break;
5849 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5850 		desc = "device blocked";
5851 		break;
5852 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5853 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5854 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5855 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5856 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5857 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5858 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5859 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5860 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5861 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5862 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5863 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5864 		desc = "sata initialization failed";
5865 		break;
5866 	default:
5867 		desc = "unknown";
5868 		break;
5869 	}
5870 
5871 	if (!rc)
5872 		return 0;
5873 
5874 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5875 		desc, (u64)sas_address, handle);
5876 	return rc;
5877 }
5878 
5879 /**
5880  * _scsih_check_device - checking device responsiveness
5881  * @ioc: per adapter object
5882  * @parent_sas_address: sas address of parent expander or sas host
5883  * @handle: attached device handle
5884  * @phy_number: phy number
5885  * @link_rate: new link rate
5886  */
5887 static void
5888 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5889 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5890 {
5891 	Mpi2ConfigReply_t mpi_reply;
5892 	Mpi2SasDevicePage0_t sas_device_pg0;
5893 	struct _sas_device *sas_device;
5894 	struct _enclosure_node *enclosure_dev = NULL;
5895 	u32 ioc_status;
5896 	unsigned long flags;
5897 	u64 sas_address;
5898 	struct scsi_target *starget;
5899 	struct MPT3SAS_TARGET *sas_target_priv_data;
5900 	u32 device_info;
5901 
5902 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5903 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5904 		return;
5905 
5906 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5907 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5908 		return;
5909 
	/* wide port handling: we only need to handle the device once, for the
	 * phy that matches the one reported in sas device page zero
	 */
5913 	if (phy_number != sas_device_pg0.PhyNum)
5914 		return;
5915 
5916 	/* check if this is end device */
5917 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5918 	if (!(_scsih_is_end_device(device_info)))
5919 		return;
5920 
5921 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5922 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5923 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5924 	    sas_address);
5925 
5926 	if (!sas_device)
5927 		goto out_unlock;
5928 
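	/*
	 * After a controller reset or port enable the firmware may assign a
	 * new device handle to the same SAS address; refresh the cached
	 * handle along with the enclosure/connector data tied to it.
	 */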
5929 	if (unlikely(sas_device->handle != handle)) {
5930 		starget = sas_device->starget;
5931 		sas_target_priv_data = starget->hostdata;
5932 		starget_printk(KERN_INFO, starget,
5933 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
5934 			sas_device->handle, handle);
5935 		sas_target_priv_data->handle = handle;
5936 		sas_device->handle = handle;
5937 		if (le16_to_cpu(sas_device_pg0.Flags) &
5938 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5939 			sas_device->enclosure_level =
5940 				sas_device_pg0.EnclosureLevel;
5941 			memcpy(sas_device->connector_name,
5942 				sas_device_pg0.ConnectorName, 4);
5943 			sas_device->connector_name[4] = '\0';
5944 		} else {
5945 			sas_device->enclosure_level = 0;
5946 			sas_device->connector_name[0] = '\0';
5947 		}
5948 
5949 		sas_device->enclosure_handle =
5950 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
5951 		sas_device->is_chassis_slot_valid = 0;
5952 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5953 						sas_device->enclosure_handle);
5954 		if (enclosure_dev) {
5955 			sas_device->enclosure_logical_id =
5956 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5957 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
5958 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
5959 				sas_device->is_chassis_slot_valid = 1;
5960 				sas_device->chassis_slot =
5961 					enclosure_dev->pg0.ChassisSlot;
5962 			}
5963 		}
5964 	}
5965 
5966 	/* check if device is present */
5967 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
5968 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5969 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
5970 			handle);
5971 		goto out_unlock;
5972 	}
5973 
5974 	/* check if there were any issues with discovery */
5975 	if (_scsih_check_access_status(ioc, sas_address, handle,
5976 	    sas_device_pg0.AccessStatus))
5977 		goto out_unlock;
5978 
5979 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5980 	_scsih_ublock_io_device(ioc, sas_address);
5981 
5982 	if (sas_device)
5983 		sas_device_put(sas_device);
5984 	return;
5985 
5986 out_unlock:
5987 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5988 	if (sas_device)
5989 		sas_device_put(sas_device);
5990 }
5991 
5992 /**
5993  * _scsih_add_device -  creating sas device object
5994  * @ioc: per adapter object
5995  * @handle: sas device handle
5996  * @phy_num: phy number end device attached to
5997  * @is_pd: is this hidden raid component
5998  *
5999  * Creating end device object, stored in ioc->sas_device_list.
6000  *
6001  * Return: 0 for success, non-zero for failure.
6002  */
6003 static int
6004 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6005 	u8 is_pd)
6006 {
6007 	Mpi2ConfigReply_t mpi_reply;
6008 	Mpi2SasDevicePage0_t sas_device_pg0;
6009 	struct _sas_device *sas_device;
6010 	struct _enclosure_node *enclosure_dev = NULL;
6011 	u32 ioc_status;
6012 	u64 sas_address;
6013 	u32 device_info;
6014 
6015 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6016 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6017 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6018 			__FILE__, __LINE__, __func__);
6019 		return -1;
6020 	}
6021 
6022 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6023 	    MPI2_IOCSTATUS_MASK;
6024 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6025 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6026 			__FILE__, __LINE__, __func__);
6027 		return -1;
6028 	}
6029 
6030 	/* check if this is end device */
6031 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6032 	if (!(_scsih_is_end_device(device_info)))
6033 		return -1;
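	/*
	 * Mark the handle in pend_os_device_add; the bit remains set until
	 * the device is actually exposed to the SCSI midlayer, letting a
	 * later topology event fall through to a target-add retry.
	 */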
6034 	set_bit(handle, ioc->pend_os_device_add);
6035 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6036 
6037 	/* check if device is present */
6038 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6039 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6041 			handle);
6042 		return -1;
6043 	}
6044 
6045 	/* check if there were any issues with discovery */
6046 	if (_scsih_check_access_status(ioc, sas_address, handle,
6047 	    sas_device_pg0.AccessStatus))
6048 		return -1;
6049 
6050 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6051 					sas_address);
6052 	if (sas_device) {
6053 		clear_bit(handle, ioc->pend_os_device_add);
6054 		sas_device_put(sas_device);
6055 		return -1;
6056 	}
6057 
6058 	if (sas_device_pg0.EnclosureHandle) {
6059 		enclosure_dev =
6060 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6061 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6062 		if (enclosure_dev == NULL)
6063 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6064 				 sas_device_pg0.EnclosureHandle);
6065 	}
6066 
6067 	sas_device = kzalloc(sizeof(struct _sas_device),
6068 	    GFP_KERNEL);
6069 	if (!sas_device) {
6070 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6071 			__FILE__, __LINE__, __func__);
6072 		return 0;
6073 	}
6074 
6075 	kref_init(&sas_device->refcount);
6076 	sas_device->handle = handle;
6077 	if (_scsih_get_sas_address(ioc,
6078 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6079 	    &sas_device->sas_address_parent) != 0)
6080 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6081 			__FILE__, __LINE__, __func__);
6082 	sas_device->enclosure_handle =
6083 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6084 	if (sas_device->enclosure_handle != 0)
6085 		sas_device->slot =
6086 		    le16_to_cpu(sas_device_pg0.Slot);
6087 	sas_device->device_info = device_info;
6088 	sas_device->sas_address = sas_address;
6089 	sas_device->phy = sas_device_pg0.PhyNum;
6090 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6091 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6092 
6093 	if (le16_to_cpu(sas_device_pg0.Flags)
6094 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6095 		sas_device->enclosure_level =
6096 			sas_device_pg0.EnclosureLevel;
6097 		memcpy(sas_device->connector_name,
6098 			sas_device_pg0.ConnectorName, 4);
6099 		sas_device->connector_name[4] = '\0';
6100 	} else {
6101 		sas_device->enclosure_level = 0;
6102 		sas_device->connector_name[0] = '\0';
6103 	}
6104 	/* get enclosure_logical_id & chassis_slot*/
6105 	sas_device->is_chassis_slot_valid = 0;
6106 	if (enclosure_dev) {
6107 		sas_device->enclosure_logical_id =
6108 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6109 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6110 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6111 			sas_device->is_chassis_slot_valid = 1;
6112 			sas_device->chassis_slot =
6113 					enclosure_dev->pg0.ChassisSlot;
6114 		}
6115 	}
6116 
6117 	/* get device name */
6118 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6119 
6120 	if (ioc->wait_for_discovery_to_complete)
6121 		_scsih_sas_device_init_add(ioc, sas_device);
6122 	else
6123 		_scsih_sas_device_add(ioc, sas_device);
6124 
6125 	sas_device_put(sas_device);
6126 	return 0;
6127 }
6128 
6129 /**
6130  * _scsih_remove_device -  removing sas device object
6131  * @ioc: per adapter object
6132  * @sas_device: the sas_device object
6133  */
6134 static void
6135 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6136 	struct _sas_device *sas_device)
6137 {
6138 	struct MPT3SAS_TARGET *sas_target_priv_data;
6139 
6140 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6141 	     (sas_device->pfa_led_on)) {
6142 		_scsih_turn_off_pfa_led(ioc, sas_device);
6143 		sas_device->pfa_led_on = 0;
6144 	}
6145 
6146 	dewtprintk(ioc,
6147 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6148 			    __func__,
6149 			    sas_device->handle, (u64)sas_device->sas_address));
6150 
6151 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6152 	    NULL, NULL));
6153 
6154 	if (sas_device->starget && sas_device->starget->hostdata) {
6155 		sas_target_priv_data = sas_device->starget->hostdata;
6156 		sas_target_priv_data->deleted = 1;
6157 		_scsih_ublock_io_device(ioc, sas_device->sas_address);
6158 		sas_target_priv_data->handle =
6159 		     MPT3SAS_INVALID_DEVICE_HANDLE;
6160 	}
6161 
6162 	if (!ioc->hide_drives)
6163 		mpt3sas_transport_port_remove(ioc,
6164 		    sas_device->sas_address,
6165 		    sas_device->sas_address_parent);
6166 
6167 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6168 		 sas_device->handle, (u64)sas_device->sas_address);
6169 
6170 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6171 
6172 	dewtprintk(ioc,
6173 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6174 			    __func__,
6175 			    sas_device->handle, (u64)sas_device->sas_address));
6176 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6177 	    NULL, NULL));
6178 }
6179 
6180 /**
6181  * _scsih_sas_topology_change_event_debug - debug for topology event
6182  * @ioc: per adapter object
6183  * @event_data: event data payload
6184  * Context: user.
6185  */
6186 static void
6187 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6188 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6189 {
6190 	int i;
6191 	u16 handle;
6192 	u16 reason_code;
6193 	u8 phy_number;
6194 	char *status_str = NULL;
6195 	u8 link_rate, prev_link_rate;
6196 
6197 	switch (event_data->ExpStatus) {
6198 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6199 		status_str = "add";
6200 		break;
6201 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6202 		status_str = "remove";
6203 		break;
6204 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6205 	case 0:
6206 		status_str =  "responding";
6207 		break;
6208 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6209 		status_str = "remove delay";
6210 		break;
6211 	default:
6212 		status_str = "unknown status";
6213 		break;
6214 	}
6215 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6216 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6217 	    "start_phy(%02d), count(%d)\n",
6218 	    le16_to_cpu(event_data->ExpanderDevHandle),
6219 	    le16_to_cpu(event_data->EnclosureHandle),
6220 	    event_data->StartPhyNum, event_data->NumEntries);
6221 	for (i = 0; i < event_data->NumEntries; i++) {
6222 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6223 		if (!handle)
6224 			continue;
6225 		phy_number = event_data->StartPhyNum + i;
6226 		reason_code = event_data->PHY[i].PhyStatus &
6227 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6228 		switch (reason_code) {
6229 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6230 			status_str = "target add";
6231 			break;
6232 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6233 			status_str = "target remove";
6234 			break;
6235 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6236 			status_str = "delay target remove";
6237 			break;
6238 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6239 			status_str = "link rate change";
6240 			break;
6241 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6242 			status_str = "target responding";
6243 			break;
6244 		default:
6245 			status_str = "unknown";
6246 			break;
6247 		}
6248 		link_rate = event_data->PHY[i].LinkRate >> 4;
6249 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6250 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6251 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6252 		    handle, status_str, link_rate, prev_link_rate);
6253 
6254 	}
6255 }
6256 
6257 /**
6258  * _scsih_sas_topology_change_event - handle topology changes
6259  * @ioc: per adapter object
6260  * @fw_event: The fw_event_work object
6261  * Context: user.
6262  *
6263  */
6264 static int
6265 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6266 	struct fw_event_work *fw_event)
6267 {
6268 	int i;
6269 	u16 parent_handle, handle;
6270 	u16 reason_code;
6271 	u8 phy_number, max_phys;
6272 	struct _sas_node *sas_expander;
6273 	u64 sas_address;
6274 	unsigned long flags;
6275 	u8 link_rate, prev_link_rate;
6276 	Mpi2EventDataSasTopologyChangeList_t *event_data =
6277 		(Mpi2EventDataSasTopologyChangeList_t *)
6278 		fw_event->event_data;
6279 
6280 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6281 		_scsih_sas_topology_change_event_debug(ioc, event_data);
6282 
6283 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6284 		return 0;
6285 
6286 	if (!ioc->sas_hba.num_phys)
6287 		_scsih_sas_host_add(ioc);
6288 	else
6289 		_scsih_sas_host_refresh(ioc);
6290 
6291 	if (fw_event->ignore) {
6292 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6293 		return 0;
6294 	}
6295 
6296 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6297 
6298 	/* handle expander add */
6299 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6300 		if (_scsih_expander_add(ioc, parent_handle) != 0)
6301 			return 0;
6302 
6303 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6304 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6305 	    parent_handle);
6306 	if (sas_expander) {
6307 		sas_address = sas_expander->sas_address;
6308 		max_phys = sas_expander->num_phys;
6309 	} else if (parent_handle < ioc->sas_hba.num_phys) {
6310 		sas_address = ioc->sas_hba.sas_address;
6311 		max_phys = ioc->sas_hba.num_phys;
6312 	} else {
6313 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6314 		return 0;
6315 	}
6316 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6317 
6318 	/* handle siblings events */
6319 	for (i = 0; i < event_data->NumEntries; i++) {
6320 		if (fw_event->ignore) {
6321 			dewtprintk(ioc,
6322 				   ioc_info(ioc, "ignoring expander event\n"));
6323 			return 0;
6324 		}
6325 		if (ioc->remove_host || ioc->pci_error_recovery)
6326 			return 0;
6327 		phy_number = event_data->StartPhyNum + i;
6328 		if (phy_number >= max_phys)
6329 			continue;
6330 		reason_code = event_data->PHY[i].PhyStatus &
6331 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6332 		if ((event_data->PHY[i].PhyStatus &
6333 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6334 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6335 				continue;
6336 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6337 		if (!handle)
6338 			continue;
6339 		link_rate = event_data->PHY[i].LinkRate >> 4;
6340 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6341 		switch (reason_code) {
6342 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6343 
6344 			if (ioc->shost_recovery)
6345 				break;
6346 
6347 			if (link_rate == prev_link_rate)
6348 				break;
6349 
6350 			mpt3sas_transport_update_links(ioc, sas_address,
6351 			    handle, phy_number, link_rate);
6352 
6353 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6354 				break;
6355 
6356 			_scsih_check_device(ioc, sas_address, handle,
6357 			    phy_number, link_rate);
6358 
6359 			if (!test_bit(handle, ioc->pend_os_device_add))
6360 				break;
6361 
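			/*
			 * The device is still pending OS addition, so fall
			 * through and treat this link-rate change as a
			 * target add.
			 */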
6362 			/* fall through */
6363 
6364 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6365 
6366 			if (ioc->shost_recovery)
6367 				break;
6368 
6369 			mpt3sas_transport_update_links(ioc, sas_address,
6370 			    handle, phy_number, link_rate);
6371 
6372 			_scsih_add_device(ioc, handle, phy_number, 0);
6373 
6374 			break;
6375 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6376 
6377 			_scsih_device_remove_by_handle(ioc, handle);
6378 			break;
6379 		}
6380 	}
6381 
6382 	/* handle expander removal */
6383 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6384 	    sas_expander)
6385 		mpt3sas_expander_remove(ioc, sas_address);
6386 
6387 	return 0;
6388 }
6389 
6390 /**
6391  * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
6393  * @event_data: event data payload
6394  * Context: user.
6395  */
6396 static void
6397 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6398 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6399 {
6400 	char *reason_str = NULL;
6401 
6402 	switch (event_data->ReasonCode) {
6403 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6404 		reason_str = "smart data";
6405 		break;
6406 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6407 		reason_str = "unsupported device discovered";
6408 		break;
6409 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6410 		reason_str = "internal device reset";
6411 		break;
6412 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6413 		reason_str = "internal task abort";
6414 		break;
6415 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6416 		reason_str = "internal task abort set";
6417 		break;
6418 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6419 		reason_str = "internal clear task set";
6420 		break;
6421 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6422 		reason_str = "internal query task";
6423 		break;
6424 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6425 		reason_str = "sata init failure";
6426 		break;
6427 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6428 		reason_str = "internal device reset complete";
6429 		break;
6430 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6431 		reason_str = "internal task abort complete";
6432 		break;
6433 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6434 		reason_str = "internal async notification";
6435 		break;
6436 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6437 		reason_str = "expander reduced functionality";
6438 		break;
6439 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6440 		reason_str = "expander reduced functionality complete";
6441 		break;
6442 	default:
6443 		reason_str = "unknown reason";
6444 		break;
6445 	}
6446 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6447 		 reason_str, le16_to_cpu(event_data->DevHandle),
6448 		 (u64)le64_to_cpu(event_data->SASAddress),
6449 		 le16_to_cpu(event_data->TaskTag));
6450 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6451 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
6452 			event_data->ASC, event_data->ASCQ);
6453 	pr_cont("\n");
6454 }
6455 
6456 /**
6457  * _scsih_sas_device_status_change_event - handle device status change
6458  * @ioc: per adapter object
6459  * @fw_event: The fw_event_work object
6460  * Context: user.
6461  */
6462 static void
6463 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6464 	struct fw_event_work *fw_event)
6465 {
6466 	struct MPT3SAS_TARGET *target_priv_data;
6467 	struct _sas_device *sas_device;
6468 	u64 sas_address;
6469 	unsigned long flags;
6470 	Mpi2EventDataSasDeviceStatusChange_t *event_data =
6471 		(Mpi2EventDataSasDeviceStatusChange_t *)
6472 		fw_event->event_data;
6473 
6474 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6475 		_scsih_sas_device_status_change_event_debug(ioc,
6476 		     event_data);
6477 
	/* In MPI Revision K (0xC), the internal device reset complete event
	 * was implemented, so avoid setting the tm_busy flag for older
	 * firmware.
	 */
6481 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6482 		return;
6483 
6484 	if (event_data->ReasonCode !=
6485 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6486 	   event_data->ReasonCode !=
6487 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6488 		return;
6489 
6490 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6491 	sas_address = le64_to_cpu(event_data->SASAddress);
6492 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6493 	    sas_address);
6494 
6495 	if (!sas_device || !sas_device->starget)
6496 		goto out;
6497 
6498 	target_priv_data = sas_device->starget->hostdata;
6499 	if (!target_priv_data)
6500 		goto out;
6501 
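	/*
	 * While firmware performs an internal device reset, flag the target
	 * tm_busy so new commands are returned to the midlayer as device
	 * busy rather than sent to the resetting device; clear the flag when
	 * the reset-complete event arrives.
	 */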
6502 	if (event_data->ReasonCode ==
6503 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6504 		target_priv_data->tm_busy = 1;
6505 	else
6506 		target_priv_data->tm_busy = 0;
6507 
6508 out:
6509 	if (sas_device)
6510 		sas_device_put(sas_device);
6511 
6512 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6513 }
6514 
6515 
6516 /**
6517  * _scsih_check_pcie_access_status - check access flags
6518  * @ioc: per adapter object
6519  * @wwid: wwid
6520  * @handle: sas device handle
6521  * @access_status: errors returned during discovery of the device
6522  *
6523  * Return: 0 for success, else failure
6524  */
6525 static u8
6526 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6527 	u16 handle, u8 access_status)
6528 {
6529 	u8 rc = 1;
6530 	char *desc = NULL;
6531 
6532 	switch (access_status) {
6533 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6534 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6535 		rc = 0;
6536 		break;
6537 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6538 		desc = "PCIe device capability failed";
6539 		break;
6540 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6541 		desc = "PCIe device blocked";
6542 		break;
6543 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6544 		desc = "PCIe device mem space access failed";
6545 		break;
6546 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6547 		desc = "PCIe device unsupported";
6548 		break;
6549 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6550 		desc = "PCIe device MSIx Required";
6551 		break;
6552 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6553 		desc = "PCIe device init fail max";
6554 		break;
6555 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6556 		desc = "PCIe device status unknown";
6557 		break;
6558 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6559 		desc = "nvme ready timeout";
6560 		break;
6561 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6562 		desc = "nvme device configuration unsupported";
6563 		break;
6564 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6565 		desc = "nvme identify failed";
6566 		break;
6567 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6568 		desc = "nvme qconfig failed";
6569 		break;
6570 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6571 		desc = "nvme qcreation failed";
6572 		break;
6573 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6574 		desc = "nvme eventcfg failed";
6575 		break;
6576 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6577 		desc = "nvme get feature stat failed";
6578 		break;
6579 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6580 		desc = "nvme idle timeout";
6581 		break;
6582 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6583 		desc = "nvme failure status";
6584 		break;
6585 	default:
6586 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6587 			access_status, (u64)wwid, handle);
6588 		return rc;
6589 	}
6590 
6591 	if (!rc)
6592 		return rc;
6593 
6594 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6595 		 desc, (u64)wwid, handle);
6596 	return rc;
6597 }
6598 
6599 /**
 * _scsih_pcie_device_remove_from_sml -  removing pcie device
 * from SML and freeing up associated memory
6602  * @ioc: per adapter object
6603  * @pcie_device: the pcie_device object
6604  */
6605 static void
6606 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6607 	struct _pcie_device *pcie_device)
6608 {
6609 	struct MPT3SAS_TARGET *sas_target_priv_data;
6610 
6611 	dewtprintk(ioc,
6612 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6613 			    __func__,
6614 			    pcie_device->handle, (u64)pcie_device->wwid));
6615 	if (pcie_device->enclosure_handle != 0)
6616 		dewtprintk(ioc,
6617 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6618 				    __func__,
6619 				    (u64)pcie_device->enclosure_logical_id,
6620 				    pcie_device->slot));
6621 	if (pcie_device->connector_name[0] != '\0')
6622 		dewtprintk(ioc,
6623 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6624 				    __func__,
6625 				    pcie_device->enclosure_level,
6626 				    pcie_device->connector_name));
6627 
6628 	if (pcie_device->starget && pcie_device->starget->hostdata) {
6629 		sas_target_priv_data = pcie_device->starget->hostdata;
6630 		sas_target_priv_data->deleted = 1;
6631 		_scsih_ublock_io_device(ioc, pcie_device->wwid);
6632 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6633 	}
6634 
6635 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6636 		 pcie_device->handle, (u64)pcie_device->wwid);
6637 	if (pcie_device->enclosure_handle != 0)
6638 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6639 			 (u64)pcie_device->enclosure_logical_id,
6640 			 pcie_device->slot);
6641 	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name(%s)\n",
6643 			 pcie_device->enclosure_level,
6644 			 pcie_device->connector_name);
6645 
6646 	if (pcie_device->starget)
6647 		scsi_remove_target(&pcie_device->starget->dev);
6648 	dewtprintk(ioc,
6649 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6650 			    __func__,
6651 			    pcie_device->handle, (u64)pcie_device->wwid));
6652 	if (pcie_device->enclosure_handle != 0)
6653 		dewtprintk(ioc,
6654 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6655 				    __func__,
6656 				    (u64)pcie_device->enclosure_logical_id,
6657 				    pcie_device->slot));
6658 	if (pcie_device->connector_name[0] != '\0')
6659 		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
6661 				    __func__,
6662 				    pcie_device->enclosure_level,
6663 				    pcie_device->connector_name));
6664 
6665 	kfree(pcie_device->serial_number);
6666 }
6667 
6668 
6669 /**
6670  * _scsih_pcie_check_device - checking device responsiveness
6671  * @ioc: per adapter object
6672  * @handle: attached device handle
6673  */
6674 static void
6675 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6676 {
6677 	Mpi2ConfigReply_t mpi_reply;
6678 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6679 	u32 ioc_status;
6680 	struct _pcie_device *pcie_device;
6681 	u64 wwid;
6682 	unsigned long flags;
6683 	struct scsi_target *starget;
6684 	struct MPT3SAS_TARGET *sas_target_priv_data;
6685 	u32 device_info;
6686 
6687 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6688 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6689 		return;
6690 
6691 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6692 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6693 		return;
6694 
6695 	/* check if this is end device */
6696 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6697 	if (!(_scsih_is_nvme_device(device_info)))
6698 		return;
6699 
6700 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6701 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6702 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6703 
6704 	if (!pcie_device) {
6705 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6706 		return;
6707 	}
6708 
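	/*
	 * The firmware can assign a new handle to the same WWID (for
	 * example after the device was briefly removed and re-added);
	 * refresh the cached handle and enclosure information.
	 */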
6709 	if (unlikely(pcie_device->handle != handle)) {
6710 		starget = pcie_device->starget;
6711 		sas_target_priv_data = starget->hostdata;
6712 		starget_printk(KERN_INFO, starget,
6713 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
6714 		    pcie_device->handle, handle);
6715 		sas_target_priv_data->handle = handle;
6716 		pcie_device->handle = handle;
6717 
6718 		if (le32_to_cpu(pcie_device_pg0.Flags) &
6719 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6720 			pcie_device->enclosure_level =
6721 			    pcie_device_pg0.EnclosureLevel;
6722 			memcpy(&pcie_device->connector_name[0],
6723 			    &pcie_device_pg0.ConnectorName[0], 4);
6724 		} else {
6725 			pcie_device->enclosure_level = 0;
6726 			pcie_device->connector_name[0] = '\0';
6727 		}
6728 	}
6729 
6730 	/* check if device is present */
6731 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6732 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6733 		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
6734 			 handle);
6735 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6736 		pcie_device_put(pcie_device);
6737 		return;
6738 	}
6739 
6740 	/* check if there were any issues with discovery */
6741 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6742 	    pcie_device_pg0.AccessStatus)) {
6743 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6744 		pcie_device_put(pcie_device);
6745 		return;
6746 	}
6747 
6748 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6749 	pcie_device_put(pcie_device);
6750 
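	/* device is present and accessible; resume any blocked I/O to it */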
6751 	_scsih_ublock_io_device(ioc, wwid);
6752 
6753 	return;
6754 }
6755 
6756 /**
6757  * _scsih_pcie_add_device -  creating pcie device object
6758  * @ioc: per adapter object
6759  * @handle: pcie device handle
6760  *
6761  * Creating end device object, stored in ioc->pcie_device_list.
6762  *
6763  * Return: 1 means queue the event later, 0 means complete the event
6764  */
6765 static int
6766 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6767 {
6768 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6769 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
6770 	Mpi2ConfigReply_t mpi_reply;
6771 	struct _pcie_device *pcie_device;
6772 	struct _enclosure_node *enclosure_dev;
6773 	u32 ioc_status;
6774 	u64 wwid;
6775 
6776 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6777 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6778 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6779 			__FILE__, __LINE__, __func__);
6780 		return 0;
6781 	}
6782 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6783 	    MPI2_IOCSTATUS_MASK;
6784 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6785 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6786 			__FILE__, __LINE__, __func__);
6787 		return 0;
6788 	}
6789 
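	/*
	 * Note that an OS-level device add is pending for this handle; the
	 * PCIe topology event handler uses this bit to convert a later
	 * "port changed" event into a device add if the add did not finish.
	 */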
6790 	set_bit(handle, ioc->pend_os_device_add);
6791 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6792 
6793 	/* check if device is present */
6794 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6795 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6796 		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
6797 			handle);
6798 		return 0;
6799 	}
6800 
6801 	/* check if there were any issues with discovery */
6802 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6803 	    pcie_device_pg0.AccessStatus))
6804 		return 0;
6805 
6806 	if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
6807 		return 0;
6808 
6809 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6810 	if (pcie_device) {
6811 		clear_bit(handle, ioc->pend_os_device_add);
6812 		pcie_device_put(pcie_device);
6813 		return 0;
6814 	}
6815 
6816 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6817 	if (!pcie_device) {
6818 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6819 			__FILE__, __LINE__, __func__);
6820 		return 0;
6821 	}
6822 
6823 	kref_init(&pcie_device->refcount);
6824 	pcie_device->id = ioc->pcie_target_id++;
6825 	pcie_device->channel = PCIE_CHANNEL;
6826 	pcie_device->handle = handle;
6827 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6828 	pcie_device->wwid = wwid;
6829 	pcie_device->port_num = pcie_device_pg0.PortNum;
6830 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6831 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6832 
6833 	pcie_device->enclosure_handle =
6834 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6835 	if (pcie_device->enclosure_handle != 0)
6836 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6837 
6838 	if (le32_to_cpu(pcie_device_pg0.Flags) &
6839 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6840 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6841 		memcpy(&pcie_device->connector_name[0],
6842 		    &pcie_device_pg0.ConnectorName[0], 4);
6843 	} else {
6844 		pcie_device->enclosure_level = 0;
6845 		pcie_device->connector_name[0] = '\0';
6846 	}
6847 
6848 	/* get enclosure_logical_id */
6849 	if (pcie_device->enclosure_handle) {
6850 		enclosure_dev =
6851 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6852 						pcie_device->enclosure_handle);
6853 		if (enclosure_dev)
6854 			pcie_device->enclosure_logical_id =
6855 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6856 	}
6857 	/* TODO -- Add device name once FW supports it */
6858 	if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6859 		&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
6860 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6861 			__FILE__, __LINE__, __func__);
6862 		kfree(pcie_device);
6863 		return 0;
6864 	}
6865 
6866 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6867 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6868 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6869 			__FILE__, __LINE__, __func__);
6870 		kfree(pcie_device);
6871 		return 0;
6872 	}
6873 	pcie_device->nvme_mdts =
6874 		le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6875 	if (pcie_device_pg2.ControllerResetTO)
6876 		pcie_device->reset_timeout =
6877 			pcie_device_pg2.ControllerResetTO;
6878 	else
6879 		pcie_device->reset_timeout = 30;
6880 
6881 	if (ioc->wait_for_discovery_to_complete)
6882 		_scsih_pcie_device_init_add(ioc, pcie_device);
6883 	else
6884 		_scsih_pcie_device_add(ioc, pcie_device);
6885 
6886 	pcie_device_put(pcie_device);
6887 	return 0;
6888 }
6889 
6890 /**
6891  * _scsih_pcie_topology_change_event_debug - debug for topology
6892  * event
6893  * @ioc: per adapter object
6894  * @event_data: event data payload
6895  * Context: user.
6896  */
6897 static void
6898 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6899 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6900 {
6901 	int i;
6902 	u16 handle;
6903 	u16 reason_code;
6904 	u8 port_number;
6905 	char *status_str = NULL;
6906 	u8 link_rate, prev_link_rate;
6907 
6908 	switch (event_data->SwitchStatus) {
6909 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6910 		status_str = "add";
6911 		break;
6912 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6913 		status_str = "remove";
6914 		break;
6915 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6916 	case 0:
6917 		status_str =  "responding";
6918 		break;
6919 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6920 		status_str = "remove delay";
6921 		break;
6922 	default:
6923 		status_str = "unknown status";
6924 		break;
6925 	}
6926 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
6927 	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
6928 		"start_port(%02d), count(%d)\n",
6929 		le16_to_cpu(event_data->SwitchDevHandle),
6930 		le16_to_cpu(event_data->EnclosureHandle),
6931 		event_data->StartPortNum, event_data->NumEntries);
6932 	for (i = 0; i < event_data->NumEntries; i++) {
6933 		handle =
6934 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
6935 		if (!handle)
6936 			continue;
6937 		port_number = event_data->StartPortNum + i;
6938 		reason_code = event_data->PortEntry[i].PortStatus;
6939 		switch (reason_code) {
6940 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
6941 			status_str = "target add";
6942 			break;
6943 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
6944 			status_str = "target remove";
6945 			break;
6946 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
6947 			status_str = "delay target remove";
6948 			break;
6949 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
6950 			status_str = "link rate change";
6951 			break;
6952 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
6953 			status_str = "target responding";
6954 			break;
6955 		default:
6956 			status_str = "unknown";
6957 			break;
6958 		}
6959 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
6960 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6961 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
6962 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6963 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
6964 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
6965 			handle, status_str, link_rate, prev_link_rate);
6966 	}
6967 }
6968 
6969 /**
6970  * _scsih_pcie_topology_change_event - handle PCIe topology
6971  *  changes
6972  * @ioc: per adapter object
6973  * @fw_event: The fw_event_work object
6974  * Context: user.
6975  *
6976  */
6977 static void
6978 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6979 	struct fw_event_work *fw_event)
6980 {
6981 	int i;
6982 	u16 handle;
6983 	u16 reason_code;
6984 	u8 link_rate, prev_link_rate;
6985 	unsigned long flags;
6986 	int rc;
6987 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
6988 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
6989 	struct _pcie_device *pcie_device;
6990 
6991 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6992 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
6993 
6994 	if (ioc->shost_recovery || ioc->remove_host ||
6995 		ioc->pci_error_recovery)
6996 		return;
6997 
6998 	if (fw_event->ignore) {
6999 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7000 		return;
7001 	}
7002 
7003 	/* handle siblings events */
7004 	for (i = 0; i < event_data->NumEntries; i++) {
7005 		if (fw_event->ignore) {
7006 			dewtprintk(ioc,
7007 				   ioc_info(ioc, "ignoring switch event\n"));
7008 			return;
7009 		}
7010 		if (ioc->remove_host || ioc->pci_error_recovery)
7011 			return;
7012 		reason_code = event_data->PortEntry[i].PortStatus;
7013 		handle =
7014 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7015 		if (!handle)
7016 			continue;
7017 
7018 		link_rate = event_data->PortEntry[i].CurrentPortInfo
7019 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7020 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7021 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7022 
7023 		switch (reason_code) {
7024 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7025 			if (ioc->shost_recovery)
7026 				break;
7027 			if (link_rate == prev_link_rate)
7028 				break;
7029 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7030 				break;
7031 
7032 			_scsih_pcie_check_device(ioc, handle);
7033 
			/* The code after this point handles the case where a
			 * device has been added but keeps returning BUSY for
			 * some time.  Then, before the Device Missing Delay
			 * expires and the device becomes READY, the device is
			 * removed and added back.
			 */
7040 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7041 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7042 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7043 
7044 			if (pcie_device) {
7045 				pcie_device_put(pcie_device);
7046 				break;
7047 			}
7048 
7049 			if (!test_bit(handle, ioc->pend_os_device_add))
7050 				break;
7051 
7052 			dewtprintk(ioc,
7053 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7054 					    handle));
7055 			event_data->PortEntry[i].PortStatus &= 0xF0;
7056 			event_data->PortEntry[i].PortStatus |=
7057 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7058 			/* fall through */
7059 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7060 			if (ioc->shost_recovery)
7061 				break;
7062 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7063 				break;
7064 
7065 			rc = _scsih_pcie_add_device(ioc, handle);
7066 			if (!rc) {
7067 				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed:
				 * there is no reason code that marks an
				 * event entry as void/vacant.
				 */
7072 				event_data->PortEntry[i].PortStatus |=
7073 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7074 			}
7075 			break;
7076 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7077 			_scsih_pcie_device_remove_by_handle(ioc, handle);
7078 			break;
7079 		}
7080 	}
7081 }
7082 
7083 /**
7084  * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
7086  * @event_data: event data payload
7087  * Context: user.
7088  */
7089 static void
7090 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7091 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7092 {
7093 	char *reason_str = NULL;
7094 
7095 	switch (event_data->ReasonCode) {
7096 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7097 		reason_str = "smart data";
7098 		break;
7099 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7100 		reason_str = "unsupported device discovered";
7101 		break;
7102 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7103 		reason_str = "internal device reset";
7104 		break;
7105 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7106 		reason_str = "internal task abort";
7107 		break;
7108 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7109 		reason_str = "internal task abort set";
7110 		break;
7111 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7112 		reason_str = "internal clear task set";
7113 		break;
7114 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7115 		reason_str = "internal query task";
7116 		break;
7117 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7118 		reason_str = "device init failure";
7119 		break;
7120 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7121 		reason_str = "internal device reset complete";
7122 		break;
7123 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7124 		reason_str = "internal task abort complete";
7125 		break;
7126 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7127 		reason_str = "internal async notification";
7128 		break;
7129 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7130 		reason_str = "pcie hot reset failed";
7131 		break;
7132 	default:
7133 		reason_str = "unknown reason";
7134 		break;
7135 	}
7136 
7137 	ioc_info(ioc, "PCIE device status change: (%s)\n"
7138 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7139 		 reason_str, le16_to_cpu(event_data->DevHandle),
7140 		 (u64)le64_to_cpu(event_data->WWID),
7141 		 le16_to_cpu(event_data->TaskTag));
7142 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7143 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7144 			event_data->ASC, event_data->ASCQ);
7145 	pr_cont("\n");
7146 }
7147 
7148 /**
7149  * _scsih_pcie_device_status_change_event - handle device status
7150  * change
7151  * @ioc: per adapter object
7152  * @fw_event: The fw_event_work object
7153  * Context: user.
7154  */
7155 static void
7156 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7157 	struct fw_event_work *fw_event)
7158 {
7159 	struct MPT3SAS_TARGET *target_priv_data;
7160 	struct _pcie_device *pcie_device;
7161 	u64 wwid;
7162 	unsigned long flags;
7163 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7164 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7165 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7166 		_scsih_pcie_device_status_change_event_debug(ioc,
7167 			event_data);
7168 
7169 	if (event_data->ReasonCode !=
7170 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7171 		event_data->ReasonCode !=
7172 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7173 		return;
7174 
7175 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7176 	wwid = le64_to_cpu(event_data->WWID);
7177 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7178 
7179 	if (!pcie_device || !pcie_device->starget)
7180 		goto out;
7181 
7182 	target_priv_data = pcie_device->starget->hostdata;
7183 	if (!target_priv_data)
7184 		goto out;
7185 
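	/*
	 * While the firmware has an internal device reset outstanding, mark
	 * the target tm_busy so the driver holds off new requests to it;
	 * the flag is cleared when the reset-complete event arrives.
	 */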
7186 	if (event_data->ReasonCode ==
7187 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7188 		target_priv_data->tm_busy = 1;
7189 	else
7190 		target_priv_data->tm_busy = 0;
7191 out:
7192 	if (pcie_device)
7193 		pcie_device_put(pcie_device);
7194 
7195 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7196 }
7197 
7198 /**
7199  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7200  * event
7201  * @ioc: per adapter object
7202  * @event_data: event data payload
7203  * Context: user.
7204  */
7205 static void
7206 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7207 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7208 {
7209 	char *reason_str = NULL;
7210 
7211 	switch (event_data->ReasonCode) {
7212 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7213 		reason_str = "enclosure add";
7214 		break;
7215 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7216 		reason_str = "enclosure remove";
7217 		break;
7218 	default:
7219 		reason_str = "unknown reason";
7220 		break;
7221 	}
7222 
7223 	ioc_info(ioc, "enclosure status change: (%s)\n"
7224 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7225 		 reason_str,
7226 		 le16_to_cpu(event_data->EnclosureHandle),
7227 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7228 		 le16_to_cpu(event_data->StartSlot));
7229 }
7230 
7231 /**
7232  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7233  * @ioc: per adapter object
7234  * @fw_event: The fw_event_work object
7235  * Context: user.
7236  */
7237 static void
7238 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7239 	struct fw_event_work *fw_event)
7240 {
7241 	Mpi2ConfigReply_t mpi_reply;
7242 	struct _enclosure_node *enclosure_dev = NULL;
7243 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7244 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7245 	int rc;
7246 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7247 
7248 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7249 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7250 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
7251 		     fw_event->event_data);
7252 	if (ioc->shost_recovery)
7253 		return;
7254 
7255 	if (enclosure_handle)
7256 		enclosure_dev =
7257 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7258 						enclosure_handle);
7259 	switch (event_data->ReasonCode) {
7260 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7261 		if (!enclosure_dev) {
7262 			enclosure_dev =
7263 				kzalloc(sizeof(struct _enclosure_node),
7264 					GFP_KERNEL);
7265 			if (!enclosure_dev) {
7266 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
7267 					 __FILE__, __LINE__, __func__);
7268 				return;
7269 			}
7270 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7271 				&enclosure_dev->pg0,
7272 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7273 				enclosure_handle);
7274 
7275 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7276 						MPI2_IOCSTATUS_MASK)) {
7277 				kfree(enclosure_dev);
7278 				return;
7279 			}
7280 
7281 			list_add_tail(&enclosure_dev->list,
7282 							&ioc->enclosure_list);
7283 		}
7284 		break;
7285 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7286 		if (enclosure_dev) {
7287 			list_del(&enclosure_dev->list);
7288 			kfree(enclosure_dev);
7289 		}
7290 		break;
7291 	default:
7292 		break;
7293 	}
7294 }
7295 
7296 /**
7297  * _scsih_sas_broadcast_primitive_event - handle broadcast events
7298  * @ioc: per adapter object
7299  * @fw_event: The fw_event_work object
7300  * Context: user.
7301  */
7302 static void
7303 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7304 	struct fw_event_work *fw_event)
7305 {
7306 	struct scsi_cmnd *scmd;
7307 	struct scsi_device *sdev;
7308 	struct scsiio_tracker *st;
7309 	u16 smid, handle;
7310 	u32 lun;
7311 	struct MPT3SAS_DEVICE *sas_device_priv_data;
7312 	u32 termination_count;
7313 	u32 query_count;
7314 	Mpi2SCSITaskManagementReply_t *mpi_reply;
7315 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7316 		(Mpi2EventDataSasBroadcastPrimitive_t *)
7317 		fw_event->event_data;
7318 	u16 ioc_status;
7319 	unsigned long flags;
7320 	int r;
7321 	u8 max_retries = 0;
7322 	u8 task_abort_retries;
7323 
7324 	mutex_lock(&ioc->tm_cmds.mutex);
7325 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7326 		 __func__, event_data->PhyNum, event_data->PortWidth);
7327 
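	/*
	 * Block I/O to all devices, then walk the outstanding SCSI commands:
	 * QUERY_TASK each one and ABORT_TASK those that are no longer
	 * reported as owned by the IOC or the target, retrying the whole
	 * pass if another broadcast AEN arrives while this one is processed.
	 */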
7328 	_scsih_block_io_all_device(ioc);
7329 
7330 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7331 	mpi_reply = ioc->tm_cmds.reply;
7332  broadcast_aen_retry:
7333 
7334 	/* sanity checks for retrying this loop */
7335 	if (max_retries++ == 5) {
7336 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7337 		goto out;
7338 	} else if (max_retries > 1)
7339 		dewtprintk(ioc,
7340 			   ioc_info(ioc, "%s: %d retry\n",
7341 				    __func__, max_retries - 1));
7342 
7343 	termination_count = 0;
7344 	query_count = 0;
7345 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7346 		if (ioc->shost_recovery)
7347 			goto out;
7348 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7349 		if (!scmd)
7350 			continue;
7351 		st = scsi_cmd_priv(scmd);
7352 		sdev = scmd->device;
7353 		sas_device_priv_data = sdev->hostdata;
7354 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7355 			continue;
7356 		 /* skip hidden raid components */
7357 		if (sas_device_priv_data->sas_target->flags &
7358 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
7359 			continue;
7360 		 /* skip volumes */
7361 		if (sas_device_priv_data->sas_target->flags &
7362 		    MPT_TARGET_FLAGS_VOLUME)
7363 			continue;
7364 		 /* skip PCIe devices */
7365 		if (sas_device_priv_data->sas_target->flags &
7366 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
7367 			continue;
7368 
7369 		handle = sas_device_priv_data->sas_target->handle;
7370 		lun = sas_device_priv_data->lun;
7371 		query_count++;
7372 
7373 		if (ioc->shost_recovery)
7374 			goto out;
7375 
7376 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7377 		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7378 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7379 			st->msix_io, 30, 0);
7380 		if (r == FAILED) {
7381 			sdev_printk(KERN_WARNING, sdev,
7382 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
7383 			    "QUERY_TASK: scmd(%p)\n", scmd);
7384 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7385 			goto broadcast_aen_retry;
7386 		}
7387 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7388 		    & MPI2_IOCSTATUS_MASK;
7389 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7390 			sdev_printk(KERN_WARNING, sdev,
7391 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7392 				ioc_status, scmd);
7393 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7394 			goto broadcast_aen_retry;
7395 		}
7396 
7397 		/* see if IO is still owned by IOC and target */
7398 		if (mpi_reply->ResponseCode ==
7399 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7400 		     mpi_reply->ResponseCode ==
7401 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7402 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7403 			continue;
7404 		}
7405 		task_abort_retries = 0;
7406  tm_retry:
7407 		if (task_abort_retries++ == 60) {
7408 			dewtprintk(ioc,
7409 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7410 					    __func__));
7411 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7412 			goto broadcast_aen_retry;
7413 		}
7414 
7415 		if (ioc->shost_recovery)
7416 			goto out_no_lock;
7417 
7418 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7419 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7420 			st->msix_io, 30, 0);
7421 		if (r == FAILED || st->cb_idx != 0xFF) {
7422 			sdev_printk(KERN_WARNING, sdev,
7423 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7424 			    "scmd(%p)\n", scmd);
7425 			goto tm_retry;
7426 		}
7427 
7428 		if (task_abort_retries > 1)
7429 			sdev_printk(KERN_WARNING, sdev,
7430 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7431 			    " scmd(%p)\n",
7432 			    task_abort_retries - 1, scmd);
7433 
7434 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7435 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7436 	}
7437 
7438 	if (ioc->broadcast_aen_pending) {
7439 		dewtprintk(ioc,
7440 			   ioc_info(ioc,
7441 				    "%s: loop back due to pending AEN\n",
7442 				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
7445 	}
7446 
7447  out:
7448 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7449  out_no_lock:
7450 
7451 	dewtprintk(ioc,
7452 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7453 			    __func__, query_count, termination_count));
7454 
7455 	ioc->broadcast_aen_busy = 0;
7456 	if (!ioc->shost_recovery)
7457 		_scsih_ublock_io_all_device(ioc);
7458 	mutex_unlock(&ioc->tm_cmds.mutex);
7459 }
7460 
7461 /**
7462  * _scsih_sas_discovery_event - handle discovery events
7463  * @ioc: per adapter object
7464  * @fw_event: The fw_event_work object
7465  * Context: user.
7466  */
7467 static void
7468 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7469 	struct fw_event_work *fw_event)
7470 {
7471 	Mpi2EventDataSasDiscovery_t *event_data =
7472 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7473 
7474 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7475 		ioc_info(ioc, "discovery event: (%s)",
7476 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7477 			 "start" : "stop");
7478 		if (event_data->DiscoveryStatus)
7479 			pr_cont("discovery_status(0x%08x)",
7480 				le32_to_cpu(event_data->DiscoveryStatus));
7481 		pr_cont("\n");
7482 	}
7483 
7484 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7485 	    !ioc->sas_hba.num_phys) {
7486 		if (disable_discovery > 0 && ioc->shost_recovery) {
7487 			/* Wait for the reset to complete */
7488 			while (ioc->shost_recovery)
7489 				ssleep(1);
7490 		}
7491 		_scsih_sas_host_add(ioc);
7492 	}
7493 }
7494 
7495 /**
7496  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7497  *						events
7498  * @ioc: per adapter object
7499  * @fw_event: The fw_event_work object
7500  * Context: user.
7501  */
7502 static void
7503 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7504 	struct fw_event_work *fw_event)
7505 {
7506 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7507 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7508 
7509 	switch (event_data->ReasonCode) {
7510 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7511 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7512 			 le16_to_cpu(event_data->DevHandle),
7513 			 (u64)le64_to_cpu(event_data->SASAddress),
7514 			 event_data->PhysicalPort);
7515 		break;
7516 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7517 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7518 			 le16_to_cpu(event_data->DevHandle),
7519 			 (u64)le64_to_cpu(event_data->SASAddress),
7520 			 event_data->PhysicalPort);
7521 		break;
7522 	default:
7523 		break;
7524 	}
7525 }
7526 
7527 /**
7528  * _scsih_pcie_enumeration_event - handle enumeration events
7529  * @ioc: per adapter object
7530  * @fw_event: The fw_event_work object
7531  * Context: user.
7532  */
7533 static void
7534 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7535 	struct fw_event_work *fw_event)
7536 {
7537 	Mpi26EventDataPCIeEnumeration_t *event_data =
7538 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7539 
7540 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7541 		return;
7542 
7543 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7544 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7545 		 "started" : "completed",
7546 		 event_data->Flags);
7547 	if (event_data->EnumerationStatus)
7548 		pr_cont("enumeration_status(0x%08x)",
7549 			le32_to_cpu(event_data->EnumerationStatus));
7550 	pr_cont("\n");
7551 }
7552 
7553 /**
7554  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7555  * @ioc: per adapter object
7556  * @handle: device handle for physical disk
7557  * @phys_disk_num: physical disk number
7558  *
7559  * Return: 0 for success, else failure.
7560  */
7561 static int
7562 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7563 {
7564 	Mpi2RaidActionRequest_t *mpi_request;
7565 	Mpi2RaidActionReply_t *mpi_reply;
7566 	u16 smid;
7567 	u8 issue_reset = 0;
7568 	int rc = 0;
7569 	u16 ioc_status;
7570 	u32 log_info;
7571 
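	/*
	 * Fast path is not applicable to MPI2 (SAS 2.0) controllers,
	 * so the RAID_ACTION is skipped for them.
	 */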
7572 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7573 		return rc;
7574 
7575 	mutex_lock(&ioc->scsih_cmds.mutex);
7576 
7577 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7578 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7579 		rc = -EAGAIN;
7580 		goto out;
7581 	}
7582 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7583 
7584 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7585 	if (!smid) {
7586 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7587 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7588 		rc = -EAGAIN;
7589 		goto out;
7590 	}
7591 
7592 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7593 	ioc->scsih_cmds.smid = smid;
7594 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7595 
7596 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7597 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7598 	mpi_request->PhysDiskNum = phys_disk_num;
7599 
7600 	dewtprintk(ioc,
7601 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7602 			    handle, phys_disk_num));
7603 
7604 	init_completion(&ioc->scsih_cmds.done);
7605 	ioc->put_smid_default(ioc, smid);
7606 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7607 
7608 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7609 		issue_reset =
7610 			mpt3sas_base_check_cmd_timeout(ioc,
7611 				ioc->scsih_cmds.status, mpi_request,
7612 				sizeof(Mpi2RaidActionRequest_t)/4);
7613 		rc = -EFAULT;
7614 		goto out;
7615 	}
7616 
7617 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7618 
7619 		mpi_reply = ioc->scsih_cmds.reply;
7620 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7621 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7622 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
7623 		else
7624 			log_info = 0;
7625 		ioc_status &= MPI2_IOCSTATUS_MASK;
7626 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7627 			dewtprintk(ioc,
7628 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7629 					    ioc_status, log_info));
7630 			rc = -EFAULT;
7631 		} else
7632 			dewtprintk(ioc,
7633 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7634 	}
7635 
7636  out:
7637 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7638 	mutex_unlock(&ioc->scsih_cmds.mutex);
7639 
7640 	if (issue_reset)
7641 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7642 	return rc;
7643 }
7644 
7645 /**
7646  * _scsih_reprobe_lun - reprobing lun
7647  * @sdev: scsi device struct
7648  * @no_uld_attach: sdev->no_uld_attach flag setting
7649  *
7650  **/
7651 static void
7652 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7653 {
7654 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7655 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7656 	    sdev->no_uld_attach ? "hiding" : "exposing");
7657 	WARN_ON(scsi_device_reprobe(sdev));
7658 }
7659 
7660 /**
7661  * _scsih_sas_volume_add - add new volume
7662  * @ioc: per adapter object
7663  * @element: IR config element data
7664  * Context: user.
7665  */
7666 static void
7667 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7668 	Mpi2EventIrConfigElement_t *element)
7669 {
7670 	struct _raid_device *raid_device;
7671 	unsigned long flags;
7672 	u64 wwid;
7673 	u16 handle = le16_to_cpu(element->VolDevHandle);
7674 	int rc;
7675 
7676 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7677 	if (!wwid) {
7678 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7679 			__FILE__, __LINE__, __func__);
7680 		return;
7681 	}
7682 
7683 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7684 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7685 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7686 
7687 	if (raid_device)
7688 		return;
7689 
7690 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7691 	if (!raid_device) {
7692 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7693 			__FILE__, __LINE__, __func__);
7694 		return;
7695 	}
7696 
7697 	raid_device->id = ioc->sas_id++;
7698 	raid_device->channel = RAID_CHANNEL;
7699 	raid_device->handle = handle;
7700 	raid_device->wwid = wwid;
7701 	_scsih_raid_device_add(ioc, raid_device);
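	/*
	 * While initial discovery is still in progress, defer
	 * scsi_add_device(); just record the volume and evaluate it
	 * as a boot device candidate.
	 */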
7702 	if (!ioc->wait_for_discovery_to_complete) {
7703 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7704 		    raid_device->id, 0);
7705 		if (rc)
7706 			_scsih_raid_device_remove(ioc, raid_device);
7707 	} else {
7708 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
7709 		_scsih_determine_boot_device(ioc, raid_device, 1);
7710 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7711 	}
7712 }
7713 
7714 /**
7715  * _scsih_sas_volume_delete - delete volume
7716  * @ioc: per adapter object
7717  * @handle: volume device handle
7718  * Context: user.
7719  */
7720 static void
7721 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7722 {
7723 	struct _raid_device *raid_device;
7724 	unsigned long flags;
7725 	struct MPT3SAS_TARGET *sas_target_priv_data;
7726 	struct scsi_target *starget = NULL;
7727 
7728 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7729 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7730 	if (raid_device) {
7731 		if (raid_device->starget) {
7732 			starget = raid_device->starget;
7733 			sas_target_priv_data = starget->hostdata;
7734 			sas_target_priv_data->deleted = 1;
7735 		}
7736 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7737 			 raid_device->handle, (u64)raid_device->wwid);
7738 		list_del(&raid_device->list);
7739 		kfree(raid_device);
7740 	}
7741 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7742 	if (starget)
7743 		scsi_remove_target(&starget->dev);
7744 }
7745 
7746 /**
7747  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7748  * @ioc: per adapter object
7749  * @element: IR config element data
7750  * Context: user.
7751  */
7752 static void
7753 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7754 	Mpi2EventIrConfigElement_t *element)
7755 {
7756 	struct _sas_device *sas_device;
7757 	struct scsi_target *starget = NULL;
7758 	struct MPT3SAS_TARGET *sas_target_priv_data;
7759 	unsigned long flags;
7760 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7761 
7762 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7763 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7764 	if (sas_device) {
7765 		sas_device->volume_handle = 0;
7766 		sas_device->volume_wwid = 0;
7767 		clear_bit(handle, ioc->pd_handles);
7768 		if (sas_device->starget && sas_device->starget->hostdata) {
7769 			starget = sas_device->starget;
7770 			sas_target_priv_data = starget->hostdata;
7771 			sas_target_priv_data->flags &=
7772 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7773 		}
7774 	}
7775 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7776 	if (!sas_device)
7777 		return;
7778 
7779 	/* exposing raid component */
7780 	if (starget)
7781 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7782 
7783 	sas_device_put(sas_device);
7784 }
7785 
7786 /**
7787  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7788  * @ioc: per adapter object
7789  * @element: IR config element data
7790  * Context: user.
7791  */
7792 static void
7793 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7794 	Mpi2EventIrConfigElement_t *element)
7795 {
7796 	struct _sas_device *sas_device;
7797 	struct scsi_target *starget = NULL;
7798 	struct MPT3SAS_TARGET *sas_target_priv_data;
7799 	unsigned long flags;
7800 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7801 	u16 volume_handle = 0;
7802 	u64 volume_wwid = 0;
7803 
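	/*
	 * Look up the owning volume's handle and WWID so they can be
	 * cached on the sas_device below.
	 */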
7804 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7805 	if (volume_handle)
7806 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7807 		    &volume_wwid);
7808 
7809 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7810 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7811 	if (sas_device) {
7812 		set_bit(handle, ioc->pd_handles);
7813 		if (sas_device->starget && sas_device->starget->hostdata) {
7814 			starget = sas_device->starget;
7815 			sas_target_priv_data = starget->hostdata;
7816 			sas_target_priv_data->flags |=
7817 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
7818 			sas_device->volume_handle = volume_handle;
7819 			sas_device->volume_wwid = volume_wwid;
7820 		}
7821 	}
7822 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7823 	if (!sas_device)
7824 		return;
7825 
7826 	/* hiding raid component */
7827 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7828 
7829 	if (starget)
7830 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7831 
7832 	sas_device_put(sas_device);
7833 }
7834 
7835 /**
7836  * _scsih_sas_pd_delete - delete pd component
7837  * @ioc: per adapter object
7838  * @element: IR config element data
7839  * Context: user.
7840  */
7841 static void
7842 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7843 	Mpi2EventIrConfigElement_t *element)
7844 {
7845 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7846 
7847 	_scsih_device_remove_by_handle(ioc, handle);
7848 }
7849 
7850 /**
 * _scsih_sas_pd_add - add pd component
7852  * @ioc: per adapter object
7853  * @element: IR config element data
7854  * Context: user.
7855  */
7856 static void
7857 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7858 	Mpi2EventIrConfigElement_t *element)
7859 {
7860 	struct _sas_device *sas_device;
7861 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7862 	Mpi2ConfigReply_t mpi_reply;
7863 	Mpi2SasDevicePage0_t sas_device_pg0;
7864 	u32 ioc_status;
7865 	u64 sas_address;
7866 	u16 parent_handle;
7867 
7868 	set_bit(handle, ioc->pd_handles);
7869 
7870 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7871 	if (sas_device) {
7872 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7873 		sas_device_put(sas_device);
7874 		return;
7875 	}
7876 
7877 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7878 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7879 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7880 			__FILE__, __LINE__, __func__);
7881 		return;
7882 	}
7883 
7884 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7885 	    MPI2_IOCSTATUS_MASK;
7886 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7887 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7888 			__FILE__, __LINE__, __func__);
7889 		return;
7890 	}
7891 
7892 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7893 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7894 		mpt3sas_transport_update_links(ioc, sas_address, handle,
7895 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7896 
7897 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7898 	_scsih_add_device(ioc, handle, 0, 1);
7899 }
7900 
7901 /**
7902  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7903  * @ioc: per adapter object
7904  * @event_data: event data payload
7905  * Context: user.
7906  */
7907 static void
7908 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7909 	Mpi2EventDataIrConfigChangeList_t *event_data)
7910 {
7911 	Mpi2EventIrConfigElement_t *element;
7912 	u8 element_type;
7913 	int i;
7914 	char *reason_str = NULL, *element_str = NULL;
7915 
7916 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7917 
7918 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
7919 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
7920 		 "foreign" : "native",
7921 		 event_data->NumElements);
7922 	for (i = 0; i < event_data->NumElements; i++, element++) {
7923 		switch (element->ReasonCode) {
7924 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7925 			reason_str = "add";
7926 			break;
7927 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7928 			reason_str = "remove";
7929 			break;
7930 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7931 			reason_str = "no change";
7932 			break;
7933 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7934 			reason_str = "hide";
7935 			break;
7936 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7937 			reason_str = "unhide";
7938 			break;
7939 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7940 			reason_str = "volume_created";
7941 			break;
7942 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7943 			reason_str = "volume_deleted";
7944 			break;
7945 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7946 			reason_str = "pd_created";
7947 			break;
7948 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7949 			reason_str = "pd_deleted";
7950 			break;
7951 		default:
7952 			reason_str = "unknown reason";
7953 			break;
7954 		}
7955 		element_type = le16_to_cpu(element->ElementFlags) &
7956 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
7957 		switch (element_type) {
7958 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
7959 			element_str = "volume";
7960 			break;
7961 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
7962 			element_str = "phys disk";
7963 			break;
7964 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
7965 			element_str = "hot spare";
7966 			break;
7967 		default:
7968 			element_str = "unknown element";
7969 			break;
7970 		}
7971 		pr_info("\t(%s:%s), vol handle(0x%04x), " \
7972 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
7973 		    reason_str, le16_to_cpu(element->VolDevHandle),
7974 		    le16_to_cpu(element->PhysDiskDevHandle),
7975 		    element->PhysDiskNum);
7976 	}
7977 }
7978 
7979 /**
7980  * _scsih_sas_ir_config_change_event - handle ir configuration change events
7981  * @ioc: per adapter object
7982  * @fw_event: The fw_event_work object
7983  * Context: user.
7984  */
7985 static void
7986 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
7987 	struct fw_event_work *fw_event)
7988 {
7989 	Mpi2EventIrConfigElement_t *element;
7990 	int i;
7991 	u8 foreign_config;
7992 	Mpi2EventDataIrConfigChangeList_t *event_data =
7993 		(Mpi2EventDataIrConfigChangeList_t *)
7994 		fw_event->event_data;
7995 
7996 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
7997 	     (!ioc->hide_ir_msg))
7998 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
7999 
8000 	foreign_config = (le32_to_cpu(event_data->Flags) &
8001 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8002 
8003 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
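	/*
	 * While host recovery is in progress, only the fast-path setting
	 * for newly hidden physical disks is re-applied; the remaining
	 * elements are not processed in this path.
	 */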
8004 	if (ioc->shost_recovery &&
8005 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8006 		for (i = 0; i < event_data->NumElements; i++, element++) {
8007 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8008 				_scsih_ir_fastpath(ioc,
8009 					le16_to_cpu(element->PhysDiskDevHandle),
8010 					element->PhysDiskNum);
8011 		}
8012 		return;
8013 	}
8014 
8015 	for (i = 0; i < event_data->NumElements; i++, element++) {
8016 
8017 		switch (element->ReasonCode) {
8018 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8019 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8020 			if (!foreign_config)
8021 				_scsih_sas_volume_add(ioc, element);
8022 			break;
8023 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8024 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8025 			if (!foreign_config)
8026 				_scsih_sas_volume_delete(ioc,
8027 				    le16_to_cpu(element->VolDevHandle));
8028 			break;
8029 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8030 			if (!ioc->is_warpdrive)
8031 				_scsih_sas_pd_hide(ioc, element);
8032 			break;
8033 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8034 			if (!ioc->is_warpdrive)
8035 				_scsih_sas_pd_expose(ioc, element);
8036 			break;
8037 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8038 			if (!ioc->is_warpdrive)
8039 				_scsih_sas_pd_add(ioc, element);
8040 			break;
8041 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8042 			if (!ioc->is_warpdrive)
8043 				_scsih_sas_pd_delete(ioc, element);
8044 			break;
8045 		}
8046 	}
8047 }
8048 
8049 /**
8050  * _scsih_sas_ir_volume_event - IR volume event
8051  * @ioc: per adapter object
8052  * @fw_event: The fw_event_work object
8053  * Context: user.
8054  */
8055 static void
8056 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8057 	struct fw_event_work *fw_event)
8058 {
8059 	u64 wwid;
8060 	unsigned long flags;
8061 	struct _raid_device *raid_device;
8062 	u16 handle;
8063 	u32 state;
8064 	int rc;
8065 	Mpi2EventDataIrVolume_t *event_data =
8066 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8067 
8068 	if (ioc->shost_recovery)
8069 		return;
8070 
8071 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8072 		return;
8073 
8074 	handle = le16_to_cpu(event_data->VolDevHandle);
8075 	state = le32_to_cpu(event_data->NewValue);
8076 	if (!ioc->hide_ir_msg)
8077 		dewtprintk(ioc,
8078 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8079 				    __func__, handle,
8080 				    le32_to_cpu(event_data->PreviousValue),
8081 				    state));
8082 	switch (state) {
8083 	case MPI2_RAID_VOL_STATE_MISSING:
8084 	case MPI2_RAID_VOL_STATE_FAILED:
8085 		_scsih_sas_volume_delete(ioc, handle);
8086 		break;
8087 
8088 	case MPI2_RAID_VOL_STATE_ONLINE:
8089 	case MPI2_RAID_VOL_STATE_DEGRADED:
8090 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8091 
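		/*
		 * The volume is (or is again) accessible; if it is not
		 * already known, read its WWID and expose it to the SCSI
		 * midlayer.
		 */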
8092 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8093 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8094 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8095 
8096 		if (raid_device)
8097 			break;
8098 
8099 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8100 		if (!wwid) {
8101 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8102 				__FILE__, __LINE__, __func__);
8103 			break;
8104 		}
8105 
8106 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8107 		if (!raid_device) {
8108 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8109 				__FILE__, __LINE__, __func__);
8110 			break;
8111 		}
8112 
8113 		raid_device->id = ioc->sas_id++;
8114 		raid_device->channel = RAID_CHANNEL;
8115 		raid_device->handle = handle;
8116 		raid_device->wwid = wwid;
8117 		_scsih_raid_device_add(ioc, raid_device);
8118 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8119 		    raid_device->id, 0);
8120 		if (rc)
8121 			_scsih_raid_device_remove(ioc, raid_device);
8122 		break;
8123 
8124 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8125 	default:
8126 		break;
8127 	}
8128 }
8129 
8130 /**
8131  * _scsih_sas_ir_physical_disk_event - PD event
8132  * @ioc: per adapter object
8133  * @fw_event: The fw_event_work object
8134  * Context: user.
8135  */
8136 static void
8137 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8138 	struct fw_event_work *fw_event)
8139 {
8140 	u16 handle, parent_handle;
8141 	u32 state;
8142 	struct _sas_device *sas_device;
8143 	Mpi2ConfigReply_t mpi_reply;
8144 	Mpi2SasDevicePage0_t sas_device_pg0;
8145 	u32 ioc_status;
8146 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8147 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8148 	u64 sas_address;
8149 
8150 	if (ioc->shost_recovery)
8151 		return;
8152 
8153 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8154 		return;
8155 
8156 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8157 	state = le32_to_cpu(event_data->NewValue);
8158 
8159 	if (!ioc->hide_ir_msg)
8160 		dewtprintk(ioc,
8161 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8162 				    __func__, handle,
8163 				    le32_to_cpu(event_data->PreviousValue),
8164 				    state));
8165 
8166 	switch (state) {
8167 	case MPI2_RAID_PD_STATE_ONLINE:
8168 	case MPI2_RAID_PD_STATE_DEGRADED:
8169 	case MPI2_RAID_PD_STATE_REBUILDING:
8170 	case MPI2_RAID_PD_STATE_OPTIMAL:
8171 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8172 
8173 		if (!ioc->is_warpdrive)
8174 			set_bit(handle, ioc->pd_handles);
8175 
8176 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8177 		if (sas_device) {
8178 			sas_device_put(sas_device);
8179 			return;
8180 		}
8181 
8182 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8183 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8184 		    handle))) {
8185 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8186 				__FILE__, __LINE__, __func__);
8187 			return;
8188 		}
8189 
8190 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8191 		    MPI2_IOCSTATUS_MASK;
8192 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8193 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8194 				__FILE__, __LINE__, __func__);
8195 			return;
8196 		}
8197 
8198 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8199 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8200 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8201 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8202 
8203 		_scsih_add_device(ioc, handle, 0, 1);
8204 
8205 		break;
8206 
8207 	case MPI2_RAID_PD_STATE_OFFLINE:
8208 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8209 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8210 	default:
8211 		break;
8212 	}
8213 }
8214 
8215 /**
8216  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8217  * @ioc: per adapter object
8218  * @event_data: event data payload
8219  * Context: user.
8220  */
8221 static void
8222 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8223 	Mpi2EventDataIrOperationStatus_t *event_data)
8224 {
8225 	char *reason_str = NULL;
8226 
8227 	switch (event_data->RAIDOperation) {
8228 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8229 		reason_str = "resync";
8230 		break;
8231 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8232 		reason_str = "online capacity expansion";
8233 		break;
8234 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8235 		reason_str = "consistency check";
8236 		break;
8237 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8238 		reason_str = "background init";
8239 		break;
8240 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8241 		reason_str = "make data consistent";
8242 		break;
8243 	}
8244 
8245 	if (!reason_str)
8246 		return;
8247 
8248 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8249 		 reason_str,
8250 		 le16_to_cpu(event_data->VolDevHandle),
8251 		 event_data->PercentComplete);
8252 }
8253 
8254 /**
8255  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8256  * @ioc: per adapter object
8257  * @fw_event: The fw_event_work object
8258  * Context: user.
8259  */
8260 static void
8261 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8262 	struct fw_event_work *fw_event)
8263 {
8264 	Mpi2EventDataIrOperationStatus_t *event_data =
8265 		(Mpi2EventDataIrOperationStatus_t *)
8266 		fw_event->event_data;
8267 	static struct _raid_device *raid_device;
8268 	unsigned long flags;
8269 	u16 handle;
8270 
8271 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8272 	    (!ioc->hide_ir_msg))
8273 		_scsih_sas_ir_operation_status_event_debug(ioc,
8274 		     event_data);
8275 
8276 	/* code added for raid transport support */
8277 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8278 
8279 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8280 		handle = le16_to_cpu(event_data->VolDevHandle);
8281 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8282 		if (raid_device)
8283 			raid_device->percent_complete =
8284 			    event_data->PercentComplete;
8285 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8286 	}
8287 }
8288 
8289 /**
8290  * _scsih_prep_device_scan - initialize parameters prior to device scan
8291  * @ioc: per adapter object
8292  *
8293  * Set the deleted flag prior to device scan.  If the device is found during
8294  * the scan, then we clear the deleted flag.
8295  */
8296 static void
8297 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8298 {
8299 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8300 	struct scsi_device *sdev;
8301 
8302 	shost_for_each_device(sdev, ioc->shost) {
8303 		sas_device_priv_data = sdev->hostdata;
8304 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8305 			sas_device_priv_data->sas_target->deleted = 1;
8306 	}
8307 }
8308 
8309 /**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
8311  * @ioc: per adapter object
8312  * @sas_device_pg0: SAS Device page 0
8313  *
8314  * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8316  */
8317 static void
8318 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8319 Mpi2SasDevicePage0_t *sas_device_pg0)
8320 {
8321 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8322 	struct scsi_target *starget;
8323 	struct _sas_device *sas_device = NULL;
8324 	struct _enclosure_node *enclosure_dev = NULL;
8325 	unsigned long flags;
8326 
8327 	if (sas_device_pg0->EnclosureHandle) {
8328 		enclosure_dev =
8329 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8330 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8331 		if (enclosure_dev == NULL)
8332 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8334 	}
8335 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8336 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8337 		if ((sas_device->sas_address == le64_to_cpu(
8338 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8339 		    le16_to_cpu(sas_device_pg0->Slot))) {
8340 			sas_device->responding = 1;
8341 			starget = sas_device->starget;
8342 			if (starget && starget->hostdata) {
8343 				sas_target_priv_data = starget->hostdata;
8344 				sas_target_priv_data->tm_busy = 0;
8345 				sas_target_priv_data->deleted = 0;
8346 			} else
8347 				sas_target_priv_data = NULL;
8348 			if (starget) {
8349 				starget_printk(KERN_INFO, starget,
8350 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8351 				    le16_to_cpu(sas_device_pg0->DevHandle),
8352 				    (unsigned long long)
8353 				    sas_device->sas_address);
8354 
8355 				if (sas_device->enclosure_handle != 0)
8356 					starget_printk(KERN_INFO, starget,
8357 					 "enclosure logical id(0x%016llx),"
8358 					 " slot(%d)\n",
8359 					 (unsigned long long)
8360 					 sas_device->enclosure_logical_id,
8361 					 sas_device->slot);
8362 			}
8363 			if (le16_to_cpu(sas_device_pg0->Flags) &
8364 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8365 				sas_device->enclosure_level =
8366 				   sas_device_pg0->EnclosureLevel;
8367 				memcpy(&sas_device->connector_name[0],
8368 					&sas_device_pg0->ConnectorName[0], 4);
8369 			} else {
8370 				sas_device->enclosure_level = 0;
8371 				sas_device->connector_name[0] = '\0';
8372 			}
8373 
8374 			sas_device->enclosure_handle =
8375 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8376 			sas_device->is_chassis_slot_valid = 0;
8377 			if (enclosure_dev) {
8378 				sas_device->enclosure_logical_id = le64_to_cpu(
8379 					enclosure_dev->pg0.EnclosureLogicalID);
8380 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8381 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8382 					sas_device->is_chassis_slot_valid = 1;
8383 					sas_device->chassis_slot =
8384 						enclosure_dev->pg0.ChassisSlot;
8385 				}
8386 			}
8387 
8388 			if (sas_device->handle == le16_to_cpu(
8389 			    sas_device_pg0->DevHandle))
8390 				goto out;
8391 			pr_info("\thandle changed from(0x%04x)!!!\n",
8392 			    sas_device->handle);
8393 			sas_device->handle = le16_to_cpu(
8394 			    sas_device_pg0->DevHandle);
8395 			if (sas_target_priv_data)
8396 				sas_target_priv_data->handle =
8397 				    le16_to_cpu(sas_device_pg0->DevHandle);
8398 			goto out;
8399 		}
8400 	}
8401  out:
8402 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8403 }
8404 
8405 /**
8406  * _scsih_create_enclosure_list_after_reset - free the existing enclosure
8407  *	list and rebuild it by scanning all Enclosure Page(0)s
8408  * @ioc: per adapter object
8409  */
8410 static void
8411 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8412 {
8413 	struct _enclosure_node *enclosure_dev;
8414 	Mpi2ConfigReply_t mpi_reply;
8415 	u16 enclosure_handle;
8416 	int rc;
8417 
8418 	/* Free existing enclosure list */
8419 	mpt3sas_free_enclosure_list(ioc);
8420 
8421 	/* Reconstruct the enclosure list after reset */
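	/*
	 * Walk every Enclosure Page 0: start the GET_NEXT_HANDLE iteration at
	 * 0xFFFF to fetch the first enclosure and stop once the firmware
	 * returns an error status.
	 */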
8422 	enclosure_handle = 0xFFFF;
8423 	do {
8424 		enclosure_dev =
8425 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8426 		if (!enclosure_dev) {
8427 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8428 				__FILE__, __LINE__, __func__);
8429 			return;
8430 		}
8431 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8432 				&enclosure_dev->pg0,
8433 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8434 				enclosure_handle);
8435 
8436 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8437 						MPI2_IOCSTATUS_MASK)) {
8438 			kfree(enclosure_dev);
8439 			return;
8440 		}
8441 		list_add_tail(&enclosure_dev->list,
8442 						&ioc->enclosure_list);
8443 		enclosure_handle =
8444 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8445 	} while (1);
8446 }
8447 
8448 /**
8449  * _scsih_search_responding_sas_devices - search for responding SAS end-devices
8450  * @ioc: per adapter object
8451  *
8452  * After host reset, find out whether devices are still responding.
8453  * If not, remove them.
8454  */
8455 static void
8456 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8457 {
8458 	Mpi2SasDevicePage0_t sas_device_pg0;
8459 	Mpi2ConfigReply_t mpi_reply;
8460 	u16 ioc_status;
8461 	u16 handle;
8462 	u32 device_info;
8463 
8464 	ioc_info(ioc, "search for end-devices: start\n");
8465 
8466 	if (list_empty(&ioc->sas_device_list))
8467 		goto out;
8468 
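	/*
	 * Walk SAS Device Page 0 for every device handle the firmware reports
	 * and mark each matching end-device as responding.
	 */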
8469 	handle = 0xFFFF;
8470 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8471 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8472 	    handle))) {
8473 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8474 		    MPI2_IOCSTATUS_MASK;
8475 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8476 			break;
8477 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8478 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8479 		if (!(_scsih_is_end_device(device_info)))
8480 			continue;
8481 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8482 	}
8483 
8484  out:
8485 	ioc_info(ioc, "search for end-devices: complete\n");
8486 }
8487 
8488 /**
8489  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8490  * @ioc: per adapter object
8491  * @pcie_device_pg0: PCIe Device page 0
8492  *
8493  * After host reset, find out whether devices are still responding.
8494  * Used in _scsih_remove_unresponding_devices.
8495  */
8496 static void
8497 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8498 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8499 {
8500 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8501 	struct scsi_target *starget;
8502 	struct _pcie_device *pcie_device;
8503 	unsigned long flags;
8504 
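	/*
	 * Match on WWID and slot; the PCIe device handle may have changed
	 * across the host reset.
	 */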
8505 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8506 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8507 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8508 		    && (pcie_device->slot == le16_to_cpu(
8509 		    pcie_device_pg0->Slot))) {
8510 			pcie_device->responding = 1;
8511 			starget = pcie_device->starget;
8512 			if (starget && starget->hostdata) {
8513 				sas_target_priv_data = starget->hostdata;
8514 				sas_target_priv_data->tm_busy = 0;
8515 				sas_target_priv_data->deleted = 0;
8516 			} else
8517 				sas_target_priv_data = NULL;
8518 			if (starget) {
8519 				starget_printk(KERN_INFO, starget,
8520 				    "handle(0x%04x), wwid(0x%016llx) ",
8521 				    pcie_device->handle,
8522 				    (unsigned long long)pcie_device->wwid);
8523 				if (pcie_device->enclosure_handle != 0)
8524 					starget_printk(KERN_INFO, starget,
8525 					    "enclosure logical id(0x%016llx), "
8526 					    "slot(%d)\n",
8527 					    (unsigned long long)
8528 					    pcie_device->enclosure_logical_id,
8529 					    pcie_device->slot);
8530 			}
8531 
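			/*
			 * Enclosure level and connector name are only used
			 * when the firmware reports them as valid and the
			 * controller is not MPI 2.0 based.
			 */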
8532 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8533 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8534 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8535 				pcie_device->enclosure_level =
8536 				    pcie_device_pg0->EnclosureLevel;
8537 				memcpy(&pcie_device->connector_name[0],
8538 				    &pcie_device_pg0->ConnectorName[0], 4);
8539 			} else {
8540 				pcie_device->enclosure_level = 0;
8541 				pcie_device->connector_name[0] = '\0';
8542 			}
8543 
8544 			if (pcie_device->handle == le16_to_cpu(
8545 			    pcie_device_pg0->DevHandle))
8546 				goto out;
8547 			pr_info("\thandle changed from(0x%04x)!!!\n",
8548 			    pcie_device->handle);
8549 			pcie_device->handle = le16_to_cpu(
8550 			    pcie_device_pg0->DevHandle);
8551 			if (sas_target_priv_data)
8552 				sas_target_priv_data->handle =
8553 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8554 			goto out;
8555 		}
8556 	}
8557 
8558  out:
8559 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8560 }
8561 
8562 /**
8563  * _scsih_search_responding_pcie_devices - search for responding PCIe end-devices
8564  * @ioc: per adapter object
8565  *
8566  * After host reset, find out whether devices are still responding.
8567  * If not, remove them.
8568  */
8569 static void
8570 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8571 {
8572 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8573 	Mpi2ConfigReply_t mpi_reply;
8574 	u16 ioc_status;
8575 	u16 handle;
8576 	u32 device_info;
8577 
8578 	ioc_info(ioc, "search for PCIe end-devices: start\n");
8579 
8580 	if (list_empty(&ioc->pcie_device_list))
8581 		goto out;
8582 
8583 	handle = 0xFFFF;
8584 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8585 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8586 		handle))) {
8587 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8588 		    MPI2_IOCSTATUS_MASK;
8589 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8590 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8591 				 __func__, ioc_status,
8592 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8593 			break;
8594 		}
8595 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8596 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8597 		if (!(_scsih_is_nvme_device(device_info)))
8598 			continue;
8599 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8600 	}
8601 out:
8602 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8603 }
8604 
8605 /**
8606  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8607  * @ioc: per adapter object
8608  * @wwid: world wide identifier for raid volume
8609  * @handle: device handle
8610  *
8611  * After host reset, find out whether devices are still responding.
8612  * Used in _scsih_remove_unresponding_devices.
8613  */
8614 static void
8615 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8616 	u16 handle)
8617 {
8618 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8619 	struct scsi_target *starget;
8620 	struct _raid_device *raid_device;
8621 	unsigned long flags;
8622 
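	/*
	 * Match on WWID; the volume's device handle may have changed across
	 * the host reset.
	 */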
8623 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8624 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8625 		if (raid_device->wwid == wwid && raid_device->starget) {
8626 			starget = raid_device->starget;
8627 			if (starget && starget->hostdata) {
8628 				sas_target_priv_data = starget->hostdata;
8629 				sas_target_priv_data->deleted = 0;
8630 			} else
8631 				sas_target_priv_data = NULL;
8632 			raid_device->responding = 1;
8633 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8634 			starget_printk(KERN_INFO, raid_device->starget,
8635 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
8636 			    (unsigned long long)raid_device->wwid);
8637 
8638 			/*
8639 			 * WARPDRIVE: The handles of the PDs might have changed
8640 			 * across the host reset so re-initialize the
8641 			 * required data for Direct IO
8642 			 */
8643 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
8644 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
8645 			if (raid_device->handle == handle) {
8646 				spin_unlock_irqrestore(&ioc->raid_device_lock,
8647 				    flags);
8648 				return;
8649 			}
8650 			pr_info("\thandle changed from(0x%04x)!!!\n",
8651 			    raid_device->handle);
8652 			raid_device->handle = handle;
8653 			if (sas_target_priv_data)
8654 				sas_target_priv_data->handle = handle;
8655 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8656 			return;
8657 		}
8658 	}
8659 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8660 }
8661 
8662 /**
8663  * _scsih_search_responding_raid_devices - search for responding RAID volumes
8664  * @ioc: per adapter object
8665  *
8666  * After host reset, find out whether devices are still responding.
8667  * If not, remove them.
8668  */
8669 static void
8670 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8671 {
8672 	Mpi2RaidVolPage1_t volume_pg1;
8673 	Mpi2RaidVolPage0_t volume_pg0;
8674 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8675 	Mpi2ConfigReply_t mpi_reply;
8676 	u16 ioc_status;
8677 	u16 handle;
8678 	u8 phys_disk_num;
8679 
8680 	if (!ioc->ir_firmware)
8681 		return;
8682 
8683 	ioc_info(ioc, "search for raid volumes: start\n");
8684 
8685 	if (list_empty(&ioc->raid_device_list))
8686 		goto out;
8687 
8688 	handle = 0xFFFF;
8689 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8690 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8691 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8692 		    MPI2_IOCSTATUS_MASK;
8693 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8694 			break;
8695 		handle = le16_to_cpu(volume_pg1.DevHandle);
8696 
8697 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8698 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8699 		     sizeof(Mpi2RaidVolPage0_t)))
8700 			continue;
8701 
8702 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8703 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8704 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8705 			_scsih_mark_responding_raid_device(ioc,
8706 			    le64_to_cpu(volume_pg1.WWID), handle);
8707 	}
8708 
8709 	/* refresh the pd_handles */
8710 	if (!ioc->is_warpdrive) {
8711 		phys_disk_num = 0xFF;
8712 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8713 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8714 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8715 		    phys_disk_num))) {
8716 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8717 			    MPI2_IOCSTATUS_MASK;
8718 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8719 				break;
8720 			phys_disk_num = pd_pg0.PhysDiskNum;
8721 			handle = le16_to_cpu(pd_pg0.DevHandle);
8722 			set_bit(handle, ioc->pd_handles);
8723 		}
8724 	}
8725  out:
8726 	ioc_info(ioc, "search for raid volumes: complete\n");
8727 }
8728 
8729 /**
8730  * _scsih_mark_responding_expander - mark an expander as responding
8731  * @ioc: per adapter object
8732  * @expander_pg0: SAS Expander Config Page0
8733  *
8734  * After host reset, find out whether devices are still responding.
8735  * Used in _scsih_remove_unresponding_devices.
8736  */
8737 static void
8738 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8739 	Mpi2ExpanderPage0_t *expander_pg0)
8740 {
8741 	struct _sas_node *sas_expander = NULL;
8742 	unsigned long flags;
8743 	int i;
8744 	struct _enclosure_node *enclosure_dev = NULL;
8745 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8746 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8747 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8748 
8749 	if (enclosure_handle)
8750 		enclosure_dev =
8751 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8752 							enclosure_handle);
8753 
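	/*
	 * Match on SAS address and refresh the expander (and per-phy) handle
	 * if the firmware assigned a new one across the reset.
	 */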
8754 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8755 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8756 		if (sas_expander->sas_address != sas_address)
8757 			continue;
8758 		sas_expander->responding = 1;
8759 
8760 		if (enclosure_dev) {
8761 			sas_expander->enclosure_logical_id =
8762 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8763 			sas_expander->enclosure_handle =
8764 			    le16_to_cpu(expander_pg0->EnclosureHandle);
8765 		}
8766 
8767 		if (sas_expander->handle == handle)
8768 			goto out;
8769 		pr_info("\texpander(0x%016llx): handle changed"
8770 		    " from(0x%04x) to (0x%04x)!!!\n",
8771 		    (unsigned long long)sas_expander->sas_address,
8772 		    sas_expander->handle, handle);
8773 		sas_expander->handle = handle;
8774 		for (i = 0 ; i < sas_expander->num_phys ; i++)
8775 			sas_expander->phy[i].handle = handle;
8776 		goto out;
8777 	}
8778  out:
8779 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8780 }
8781 
8782 /**
8783  * _scsih_search_responding_expanders - search for responding expanders
8784  * @ioc: per adapter object
8785  *
8786  * After host reset, find out whether devices are still responding.
8787  * If not, remove them.
8788  */
8789 static void
8790 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8791 {
8792 	Mpi2ExpanderPage0_t expander_pg0;
8793 	Mpi2ConfigReply_t mpi_reply;
8794 	u16 ioc_status;
8795 	u64 sas_address;
8796 	u16 handle;
8797 
8798 	ioc_info(ioc, "search for expanders: start\n");
8799 
8800 	if (list_empty(&ioc->sas_expander_list))
8801 		goto out;
8802 
8803 	handle = 0xFFFF;
8804 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8805 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8806 
8807 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8808 		    MPI2_IOCSTATUS_MASK;
8809 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8810 			break;
8811 
8812 		handle = le16_to_cpu(expander_pg0.DevHandle);
8813 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
8814 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8815 			handle,
8816 		    (unsigned long long)sas_address);
8817 		_scsih_mark_responding_expander(ioc, &expander_pg0);
8818 	}
8819 
8820  out:
8821 	ioc_info(ioc, "search for expanders: complete\n");
8822 }
8823 
8824 /**
8825  * _scsih_remove_unresponding_devices - removing unresponding devices
8826  * @ioc: per adapter object
8827  */
8828 static void
8829 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8830 {
8831 	struct _sas_device *sas_device, *sas_device_next;
8832 	struct _sas_node *sas_expander, *sas_expander_next;
8833 	struct _raid_device *raid_device, *raid_device_next;
8834 	struct _pcie_device *pcie_device, *pcie_device_next;
8835 	struct list_head tmp_list;
8836 	unsigned long flags;
8837 	LIST_HEAD(head);
8838 
8839 	ioc_info(ioc, "removing unresponding devices: start\n");
8840 
8841 	/* removing unresponding end devices */
8842 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
8843 	/*
8844 	 * Iterate, pulling off devices marked as non-responding. We become the
8845 	 * owner for the reference the list had on any object we prune.
8846 	 */
8847 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8848 	list_for_each_entry_safe(sas_device, sas_device_next,
8849 	    &ioc->sas_device_list, list) {
8850 		if (!sas_device->responding)
8851 			list_move_tail(&sas_device->list, &head);
8852 		else
8853 			sas_device->responding = 0;
8854 	}
8855 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8856 
8857 	/*
8858 	 * Now, uninitialize and remove the unresponding devices we pruned.
8859 	 */
8860 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8861 		_scsih_remove_device(ioc, sas_device);
8862 		list_del_init(&sas_device->list);
8863 		sas_device_put(sas_device);
8864 	}
8865 
8866 	ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
8867 	INIT_LIST_HEAD(&head);
8868 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8869 	list_for_each_entry_safe(pcie_device, pcie_device_next,
8870 	    &ioc->pcie_device_list, list) {
8871 		if (!pcie_device->responding)
8872 			list_move_tail(&pcie_device->list, &head);
8873 		else
8874 			pcie_device->responding = 0;
8875 	}
8876 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8877 
8878 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8879 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8880 		list_del_init(&pcie_device->list);
8881 		pcie_device_put(pcie_device);
8882 	}
8883 
8884 	/* removing unresponding volumes */
8885 	if (ioc->ir_firmware) {
8886 		ioc_info(ioc, "removing unresponding devices: volumes\n");
8887 		list_for_each_entry_safe(raid_device, raid_device_next,
8888 		    &ioc->raid_device_list, list) {
8889 			if (!raid_device->responding)
8890 				_scsih_sas_volume_delete(ioc,
8891 				    raid_device->handle);
8892 			else
8893 				raid_device->responding = 0;
8894 		}
8895 	}
8896 
8897 	/* removing unresponding expanders */
8898 	ioc_info(ioc, "removing unresponding devices: expanders\n");
8899 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8900 	INIT_LIST_HEAD(&tmp_list);
8901 	list_for_each_entry_safe(sas_expander, sas_expander_next,
8902 	    &ioc->sas_expander_list, list) {
8903 		if (!sas_expander->responding)
8904 			list_move_tail(&sas_expander->list, &tmp_list);
8905 		else
8906 			sas_expander->responding = 0;
8907 	}
8908 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8909 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8910 	    list) {
8911 		_scsih_expander_node_remove(ioc, sas_expander);
8912 	}
8913 
8914 	ioc_info(ioc, "removing unresponding devices: complete\n");
8915 
8916 	/* unblock devices */
8917 	_scsih_ublock_io_all_device(ioc);
8918 }
8919 
8920 static void
8921 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8922 	struct _sas_node *sas_expander, u16 handle)
8923 {
8924 	Mpi2ExpanderPage1_t expander_pg1;
8925 	Mpi2ConfigReply_t mpi_reply;
8926 	int i;
8927 
8928 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
8929 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8930 		    &expander_pg1, i, handle))) {
8931 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8932 				__FILE__, __LINE__, __func__);
8933 			return;
8934 		}
8935 
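		/* bits 7:4 of NegotiatedLinkRate hold the logical link rate */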
8936 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
8937 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
8938 		    expander_pg1.NegotiatedLinkRate >> 4);
8939 	}
8940 }
8941 
8942 /**
8943  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
8944  * @ioc: per adapter object
8945  */
8946 static void
8947 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
8948 {
8949 	Mpi2ExpanderPage0_t expander_pg0;
8950 	Mpi2SasDevicePage0_t sas_device_pg0;
8951 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8952 	Mpi2RaidVolPage1_t volume_pg1;
8953 	Mpi2RaidVolPage0_t volume_pg0;
8954 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8955 	Mpi2EventIrConfigElement_t element;
8956 	Mpi2ConfigReply_t mpi_reply;
8957 	u8 phys_disk_num;
8958 	u16 ioc_status;
8959 	u16 handle, parent_handle;
8960 	u64 sas_address;
8961 	struct _sas_device *sas_device;
8962 	struct _pcie_device *pcie_device;
8963 	struct _sas_node *expander_device;
8964 	static struct _raid_device *raid_device;
8965 	u8 retry_count;
8966 	unsigned long flags;
8967 
8968 	ioc_info(ioc, "scan devices: start\n");
8969 
8970 	_scsih_sas_host_refresh(ioc);
8971 
8972 	ioc_info(ioc, "\tscan devices: expanders start\n");
8973 
8974 	/* expanders */
8975 	handle = 0xFFFF;
8976 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8977 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8978 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8979 		    MPI2_IOCSTATUS_MASK;
8980 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8981 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
8982 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
8983 			break;
8984 		}
8985 		handle = le16_to_cpu(expander_pg0.DevHandle);
8986 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
8987 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
8988 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
8989 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8990 		if (expander_device)
8991 			_scsih_refresh_expander_links(ioc, expander_device,
8992 			    handle);
8993 		else {
8994 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
8995 				 handle,
8996 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
8997 			_scsih_expander_add(ioc, handle);
8998 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
8999 				 handle,
9000 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
9001 		}
9002 	}
9003 
9004 	ioc_info(ioc, "\tscan devices: expanders complete\n");
9005 
9006 	if (!ioc->ir_firmware)
9007 		goto skip_to_sas;
9008 
9009 	ioc_info(ioc, "\tscan devices: phys disk start\n");
9010 
9011 	/* phys disk */
9012 	phys_disk_num = 0xFF;
9013 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9014 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9015 	    phys_disk_num))) {
9016 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9017 		    MPI2_IOCSTATUS_MASK;
9018 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9019 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9020 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9021 			break;
9022 		}
9023 		phys_disk_num = pd_pg0.PhysDiskNum;
9024 		handle = le16_to_cpu(pd_pg0.DevHandle);
9025 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9026 		if (sas_device) {
9027 			sas_device_put(sas_device);
9028 			continue;
9029 		}
9030 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9031 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9032 		    handle) != 0)
9033 			continue;
9034 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9035 		    MPI2_IOCSTATUS_MASK;
9036 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9037 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9038 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9039 			break;
9040 		}
9041 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9042 		if (!_scsih_get_sas_address(ioc, parent_handle,
9043 		    &sas_address)) {
9044 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9045 				 handle,
9046 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9047 			mpt3sas_transport_update_links(ioc, sas_address,
9048 			    handle, sas_device_pg0.PhyNum,
9049 			    MPI2_SAS_NEG_LINK_RATE_1_5);
9050 			set_bit(handle, ioc->pd_handles);
9051 			retry_count = 0;
9052 			/* This will retry adding the end device.
9053 			 * _scsih_add_device() will decide on retries and
9054 			 * return "1" when it should be retried
9055 			 */
9056 			while (_scsih_add_device(ioc, handle, retry_count++,
9057 			    1)) {
9058 				ssleep(1);
9059 			}
9060 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9061 				 handle,
9062 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9063 		}
9064 	}
9065 
9066 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
9067 
9068 	ioc_info(ioc, "\tscan devices: volumes start\n");
9069 
9070 	/* volumes */
9071 	handle = 0xFFFF;
9072 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9073 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9074 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9075 		    MPI2_IOCSTATUS_MASK;
9076 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9077 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9078 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9079 			break;
9080 		}
9081 		handle = le16_to_cpu(volume_pg1.DevHandle);
9082 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9083 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
9084 		    le64_to_cpu(volume_pg1.WWID));
9085 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9086 		if (raid_device)
9087 			continue;
9088 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9089 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9090 		     sizeof(Mpi2RaidVolPage0_t)))
9091 			continue;
9092 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9093 		    MPI2_IOCSTATUS_MASK;
9094 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9095 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9096 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9097 			break;
9098 		}
9099 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9100 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9101 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9102 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9103 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9104 			element.VolDevHandle = volume_pg1.DevHandle;
9105 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9106 				 volume_pg1.DevHandle);
9107 			_scsih_sas_volume_add(ioc, &element);
9108 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9109 				 volume_pg1.DevHandle);
9110 		}
9111 	}
9112 
9113 	ioc_info(ioc, "\tscan devices: volumes complete\n");
9114 
9115  skip_to_sas:
9116 
9117 	ioc_info(ioc, "\tscan devices: end devices start\n");
9118 
9119 	/* sas devices */
9120 	handle = 0xFFFF;
9121 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9122 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9123 	    handle))) {
9124 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9125 		    MPI2_IOCSTATUS_MASK;
9126 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9127 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9128 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9129 			break;
9130 		}
9131 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9132 		if (!(_scsih_is_end_device(
9133 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
9134 			continue;
9135 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
9136 		    le64_to_cpu(sas_device_pg0.SASAddress));
9137 		if (sas_device) {
9138 			sas_device_put(sas_device);
9139 			continue;
9140 		}
9141 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9142 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9143 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9144 				 handle,
9145 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9146 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9147 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9148 			retry_count = 0;
9149 			/* This will retry adding the end device.
9150 			 * _scsih_add_device() will decide on retries and
9151 			 * return "1" when it should be retried
9152 			 */
9153 			while (_scsih_add_device(ioc, handle, retry_count++,
9154 			    0)) {
9155 				ssleep(1);
9156 			}
9157 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9158 				 handle,
9159 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9160 		}
9161 	}
9162 	ioc_info(ioc, "\tscan devices: end devices complete\n");
9163 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9164 
9165 	/* pcie devices */
9166 	handle = 0xFFFF;
9167 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9168 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9169 		handle))) {
9170 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9171 				& MPI2_IOCSTATUS_MASK;
9172 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9173 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9174 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9175 			break;
9176 		}
9177 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9178 		if (!(_scsih_is_nvme_device(
9179 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9180 			continue;
9181 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9182 				le64_to_cpu(pcie_device_pg0.WWID));
9183 		if (pcie_device) {
9184 			pcie_device_put(pcie_device);
9185 			continue;
9186 		}
9187 		retry_count = 0;
9188 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9189 		_scsih_pcie_add_device(ioc, handle);
9190 
9191 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9192 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9193 	}
9194 	ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
9195 	ioc_info(ioc, "scan devices: complete\n");
9196 }
9197 
9198 /**
9199  * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
9200  * @ioc: per adapter object
9201  *
9202  * The handler for doing any required cleanup or initialization.
9203  */
9204 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9205 {
9206 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9207 }
9208 
9209 /**
9210  * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9211  * @ioc: per adapter object
9212  *
9213  * The handler for doing any required cleanup or initialization.
9214  */
9215 void
9216 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9217 {
9218 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9219 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9220 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9221 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9222 		complete(&ioc->scsih_cmds.done);
9223 	}
9224 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9225 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
9226 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9227 		complete(&ioc->tm_cmds.done);
9228 	}
9229 
9230 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9231 	memset(ioc->device_remove_in_progress, 0,
9232 	       ioc->device_remove_in_progress_sz);
9233 	_scsih_fw_event_cleanup_queue(ioc);
9234 	_scsih_flush_running_cmds(ioc);
9235 }
9236 
9237 /**
9238  * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
9239  * @ioc: per adapter object
9240  *
9241  * The handler for doing any required cleanup or initialization.
9242  */
9243 void
9244 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9245 {
9246 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
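	/*
	 * Rescan after the reset only once the driver has finished loading,
	 * and skip it when discovery is disabled and the SAS host was never
	 * initialized.
	 */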
9247 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9248 					   !ioc->sas_hba.num_phys)) {
9249 		_scsih_prep_device_scan(ioc);
9250 		_scsih_create_enclosure_list_after_reset(ioc);
9251 		_scsih_search_responding_sas_devices(ioc);
9252 		_scsih_search_responding_pcie_devices(ioc);
9253 		_scsih_search_responding_raid_devices(ioc);
9254 		_scsih_search_responding_expanders(ioc);
9255 		_scsih_error_recovery_delete_devices(ioc);
9256 	}
9257 }
9258 
9259 /**
9260  * _mpt3sas_fw_work - delayed task for processing firmware events
9261  * @ioc: per adapter object
9262  * @fw_event: The fw_event_work object
9263  * Context: user.
9264  */
9265 static void
9266 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9267 {
9268 	_scsih_fw_event_del_from_list(ioc, fw_event);
9269 
9270 	/* the queue is being flushed so ignore this event */
9271 	if (ioc->remove_host || ioc->pci_error_recovery) {
9272 		fw_event_work_put(fw_event);
9273 		return;
9274 	}
9275 
9276 	switch (fw_event->event) {
9277 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9278 		mpt3sas_process_trigger_data(ioc,
9279 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9280 			fw_event->event_data);
9281 		break;
9282 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9283 		while (scsi_host_in_recovery(ioc->shost) ||
9284 					 ioc->shost_recovery) {
9285 			/*
9286 			 * If we're unloading, bail. Otherwise, this can become
9287 			 * an infinite loop.
9288 			 */
9289 			if (ioc->remove_host)
9290 				goto out;
9291 			ssleep(1);
9292 		}
9293 		_scsih_remove_unresponding_devices(ioc);
9294 		_scsih_scan_for_devices_after_reset(ioc);
9295 		break;
9296 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9297 		ioc->start_scan = 0;
9298 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9299 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9300 			    missing_delay[1]);
9301 		dewtprintk(ioc,
9302 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9303 		break;
9304 	case MPT3SAS_TURN_ON_PFA_LED:
9305 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9306 		break;
9307 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9308 		_scsih_sas_topology_change_event(ioc, fw_event);
9309 		break;
9310 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9311 		_scsih_sas_device_status_change_event(ioc, fw_event);
9312 		break;
9313 	case MPI2_EVENT_SAS_DISCOVERY:
9314 		_scsih_sas_discovery_event(ioc, fw_event);
9315 		break;
9316 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9317 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9318 		break;
9319 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9320 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9321 		break;
9322 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9323 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9324 		    fw_event);
9325 		break;
9326 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9327 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9328 		break;
9329 	case MPI2_EVENT_IR_VOLUME:
9330 		_scsih_sas_ir_volume_event(ioc, fw_event);
9331 		break;
9332 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9333 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9334 		break;
9335 	case MPI2_EVENT_IR_OPERATION_STATUS:
9336 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9337 		break;
9338 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9339 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9340 		break;
9341 	case MPI2_EVENT_PCIE_ENUMERATION:
9342 		_scsih_pcie_enumeration_event(ioc, fw_event);
9343 		break;
9344 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9345 		_scsih_pcie_topology_change_event(ioc, fw_event);
9346 		return;
9348 	}
9349 out:
9350 	fw_event_work_put(fw_event);
9351 }
9352 
9353 /**
9354  * _firmware_event_work - work queue callback for firmware events
9355  * @work: The fw_event_work object
9356  * Context: user.
9357  *
9358  * Wrapper for the work thread handling firmware events.
9359  */
9360 
9361 static void
9362 _firmware_event_work(struct work_struct *work)
9363 {
9364 	struct fw_event_work *fw_event = container_of(work,
9365 	    struct fw_event_work, work);
9366 
9367 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
9368 }
9369 
9370 /**
9371  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9372  * @ioc: per adapter object
9373  * @msix_index: MSIX table index supplied by the OS
9374  * @reply: reply message frame(lower 32bit addr)
9375  * Context: interrupt.
9376  *
9377  * This function merely adds a new work task into ioc->firmware_event_thread.
9378  * The tasks are worked from _firmware_event_work in user context.
9379  *
9380  * Return: 1 meaning mf should be freed from _base_interrupt
9381  *         0 means the mf is freed from this function.
9382  */
9383 u8
9384 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9385 	u32 reply)
9386 {
9387 	struct fw_event_work *fw_event;
9388 	Mpi2EventNotificationReply_t *mpi_reply;
9389 	u16 event;
9390 	u16 sz;
9391 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9392 
9393 	/* events are ignored while PCI error recovery is in progress */
9394 	if (ioc->pci_error_recovery)
9395 		return 1;
9396 
9397 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9398 
9399 	if (unlikely(!mpi_reply)) {
9400 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9401 			__FILE__, __LINE__, __func__);
9402 		return 1;
9403 	}
9404 
9405 	event = le16_to_cpu(mpi_reply->Event);
9406 
9407 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9408 		mpt3sas_trigger_event(ioc, event, 0);
9409 
9410 	switch (event) {
9411 	/* handle these */
9412 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9413 	{
9414 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9415 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9416 		    mpi_reply->EventData;
9417 
9418 		if (baen_data->Primitive !=
9419 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9420 			return 1;
9421 
9422 		if (ioc->broadcast_aen_busy) {
9423 			ioc->broadcast_aen_pending++;
9424 			return 1;
9425 		} else
9426 			ioc->broadcast_aen_busy = 1;
9427 		break;
9428 	}
9429 
9430 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9431 		_scsih_check_topo_delete_events(ioc,
9432 		    (Mpi2EventDataSasTopologyChangeList_t *)
9433 		    mpi_reply->EventData);
9434 		break;
9435 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9436 		_scsih_check_pcie_topo_remove_events(ioc,
9437 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9438 		    mpi_reply->EventData);
9439 		break;
9440 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9441 		_scsih_check_ir_config_unhide_events(ioc,
9442 		    (Mpi2EventDataIrConfigChangeList_t *)
9443 		    mpi_reply->EventData);
9444 		break;
9445 	case MPI2_EVENT_IR_VOLUME:
9446 		_scsih_check_volume_delete_events(ioc,
9447 		    (Mpi2EventDataIrVolume_t *)
9448 		    mpi_reply->EventData);
9449 		break;
9450 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9451 	{
9452 		Mpi2EventDataLogEntryAdded_t *log_entry;
9453 		u32 *log_code;
9454 
9455 		if (!ioc->is_warpdrive)
9456 			break;
9457 
9458 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9459 		    mpi_reply->EventData;
9460 		log_code = (u32 *)log_entry->LogData;
9461 
9462 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9463 		    != MPT2_WARPDRIVE_LOGENTRY)
9464 			break;
9465 
9466 		switch (le32_to_cpu(*log_code)) {
9467 		case MPT2_WARPDRIVE_LC_SSDT:
9468 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9469 			break;
9470 		case MPT2_WARPDRIVE_LC_SSDLW:
9471 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9472 			break;
9473 		case MPT2_WARPDRIVE_LC_SSDLF:
9474 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9475 			break;
9476 		case MPT2_WARPDRIVE_LC_BRMF:
9477 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9478 			break;
9479 		}
9480 
9481 		break;
9482 	}
9483 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9484 	case MPI2_EVENT_IR_OPERATION_STATUS:
9485 	case MPI2_EVENT_SAS_DISCOVERY:
9486 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9487 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9488 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9489 	case MPI2_EVENT_PCIE_ENUMERATION:
9490 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9491 		break;
9492 
9493 	case MPI2_EVENT_TEMP_THRESHOLD:
9494 		_scsih_temp_threshold_events(ioc,
9495 			(Mpi2EventDataTemperature_t *)
9496 			mpi_reply->EventData);
9497 		break;
9498 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9499 		ActiveCableEventData =
9500 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9501 		switch (ActiveCableEventData->ReasonCode) {
9502 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9503 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9504 				   ActiveCableEventData->ReceptacleID);
9505 			pr_notice("cannot be powered and devices connected\n");
9506 			pr_notice("to this active cable will not be seen\n");
9507 			pr_notice("This active cable requires %d mW of power\n",
9508 			     ActiveCableEventData->ActiveCablePowerRequirement);
9509 			break;
9510 
9511 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9512 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9513 				   ActiveCableEventData->ReceptacleID);
9514 			pr_notice(
9515 			    "is not running at optimal speed(12 Gb/s rate)\n");
9516 			break;
9517 		}
9518 
9519 		break;
9520 
9521 	default: /* ignore the rest */
9522 		return 1;
9523 	}
9524 
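	/* EventDataLength is in units of 32-bit dwords; convert to bytes */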
9525 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9526 	fw_event = alloc_fw_event_work(sz);
9527 	if (!fw_event) {
9528 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9529 			__FILE__, __LINE__, __func__);
9530 		return 1;
9531 	}
9532 
9533 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9534 	fw_event->ioc = ioc;
9535 	fw_event->VF_ID = mpi_reply->VF_ID;
9536 	fw_event->VP_ID = mpi_reply->VP_ID;
9537 	fw_event->event = event;
9538 	_scsih_fw_event_add(ioc, fw_event);
9539 	fw_event_work_put(fw_event);
9540 	return 1;
9541 }
9542 
9543 /**
9544  * _scsih_expander_node_remove - removing expander device from list.
9545  * @ioc: per adapter object
9546  * @sas_expander: the sas_device object
9547  *
9548  * Removing object and freeing associated memory from the
9549  * ioc->sas_expander_list.
9550  */
9551 static void
9552 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9553 	struct _sas_node *sas_expander)
9554 {
9555 	struct _sas_port *mpt3sas_port, *next;
9556 	unsigned long flags;
9557 
9558 	/* remove sibling ports attached to this expander */
9559 	list_for_each_entry_safe(mpt3sas_port, next,
9560 	   &sas_expander->sas_port_list, port_list) {
9561 		if (ioc->shost_recovery)
9562 			return;
9563 		if (mpt3sas_port->remote_identify.device_type ==
9564 		    SAS_END_DEVICE)
9565 			mpt3sas_device_remove_by_sas_address(ioc,
9566 			    mpt3sas_port->remote_identify.sas_address);
9567 		else if (mpt3sas_port->remote_identify.device_type ==
9568 		    SAS_EDGE_EXPANDER_DEVICE ||
9569 		    mpt3sas_port->remote_identify.device_type ==
9570 		    SAS_FANOUT_EXPANDER_DEVICE)
9571 			mpt3sas_expander_remove(ioc,
9572 			    mpt3sas_port->remote_identify.sas_address);
9573 	}
9574 
9575 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9576 	    sas_expander->sas_address_parent);
9577 
9578 	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9579 		 sas_expander->handle, (unsigned long long)
9580 		 sas_expander->sas_address);
9581 
9582 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9583 	list_del(&sas_expander->list);
9584 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9585 
9586 	kfree(sas_expander->phy);
9587 	kfree(sas_expander);
9588 }
9589 
9590 /**
9591  * _scsih_ir_shutdown - IR shutdown notification
9592  * @ioc: per adapter object
9593  *
9594  * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9595  * the host system is shutting down.
9596  */
9597 static void
9598 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9599 {
9600 	Mpi2RaidActionRequest_t *mpi_request;
9601 	Mpi2RaidActionReply_t *mpi_reply;
9602 	u16 smid;
9603 
9604 	/* is IR firmware build loaded ? */
9605 	if (!ioc->ir_firmware)
9606 		return;
9607 
9608 	/* are there any volumes ? */
9609 	if (list_empty(&ioc->raid_device_list))
9610 		return;
9611 
9612 	mutex_lock(&ioc->scsih_cmds.mutex);
9613 
9614 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9615 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9616 		goto out;
9617 	}
9618 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9619 
9620 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9621 	if (!smid) {
9622 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9623 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9624 		goto out;
9625 	}
9626 
9627 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9628 	ioc->scsih_cmds.smid = smid;
9629 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9630 
9631 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9632 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9633 
9634 	if (!ioc->hide_ir_msg)
9635 		ioc_info(ioc, "IR shutdown (sending)\n");
9636 	init_completion(&ioc->scsih_cmds.done);
9637 	ioc->put_smid_default(ioc, smid);
9638 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9639 
9640 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9641 		ioc_err(ioc, "%s: timeout\n", __func__);
9642 		goto out;
9643 	}
9644 
9645 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9646 		mpi_reply = ioc->scsih_cmds.reply;
9647 		if (!ioc->hide_ir_msg)
9648 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9649 				 le16_to_cpu(mpi_reply->IOCStatus),
9650 				 le32_to_cpu(mpi_reply->IOCLogInfo));
9651 	}
9652 
9653  out:
9654 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9655 	mutex_unlock(&ioc->scsih_cmds.mutex);
9656 }
9657 
9658 /**
9659  * scsih_remove - detach and remove the host
9660  * @pdev: PCI device struct
9661  *
9662  * Routine called when unloading the driver.
9663  */
9664 static void scsih_remove(struct pci_dev *pdev)
9665 {
9666 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9667 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9668 	struct _sas_port *mpt3sas_port, *next_port;
9669 	struct _raid_device *raid_device, *next;
9670 	struct MPT3SAS_TARGET *sas_target_priv_data;
9671 	struct _pcie_device *pcie_device, *pcienext;
9672 	struct workqueue_struct	*wq;
9673 	unsigned long flags;
9674 	Mpi2ConfigReply_t mpi_reply;
9675 
9676 	ioc->remove_host = 1;
9677 
9678 	mpt3sas_wait_for_commands_to_complete(ioc);
9679 	_scsih_flush_running_cmds(ioc);
9680 
9681 	_scsih_fw_event_cleanup_queue(ioc);
9682 
9683 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9684 	wq = ioc->firmware_event_thread;
9685 	ioc->firmware_event_thread = NULL;
9686 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9687 	if (wq)
9688 		destroy_workqueue(wq);
9689 	/*
9690 	 * Copy back the unmodified ioc page1 so that on next driver load,
9691 	 * current modified changes on ioc page1 won't take effect.
9692 	 */
9693 	if (ioc->is_aero_ioc)
9694 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9695 				&ioc->ioc_pg1_copy);
9696 	/* release all the volumes */
9697 	_scsih_ir_shutdown(ioc);
9698 	sas_remove_host(shost);
9699 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9700 	    list) {
9701 		if (raid_device->starget) {
9702 			sas_target_priv_data =
9703 			    raid_device->starget->hostdata;
9704 			sas_target_priv_data->deleted = 1;
9705 			scsi_remove_target(&raid_device->starget->dev);
9706 		}
9707 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9708 			 raid_device->handle, (u64)raid_device->wwid);
9709 		_scsih_raid_device_remove(ioc, raid_device);
9710 	}
9711 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9712 		list) {
9713 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9714 		list_del_init(&pcie_device->list);
9715 		pcie_device_put(pcie_device);
9716 	}
9717 
9718 	/* free ports attached to the sas_host */
9719 	list_for_each_entry_safe(mpt3sas_port, next_port,
9720 	   &ioc->sas_hba.sas_port_list, port_list) {
9721 		if (mpt3sas_port->remote_identify.device_type ==
9722 		    SAS_END_DEVICE)
9723 			mpt3sas_device_remove_by_sas_address(ioc,
9724 			    mpt3sas_port->remote_identify.sas_address);
9725 		else if (mpt3sas_port->remote_identify.device_type ==
9726 		    SAS_EDGE_EXPANDER_DEVICE ||
9727 		    mpt3sas_port->remote_identify.device_type ==
9728 		    SAS_FANOUT_EXPANDER_DEVICE)
9729 			mpt3sas_expander_remove(ioc,
9730 			    mpt3sas_port->remote_identify.sas_address);
9731 	}
9732 
9733 	/* free phys attached to the sas_host */
9734 	if (ioc->sas_hba.num_phys) {
9735 		kfree(ioc->sas_hba.phy);
9736 		ioc->sas_hba.phy = NULL;
9737 		ioc->sas_hba.num_phys = 0;
9738 	}
9739 
9740 	mpt3sas_base_detach(ioc);
9741 	spin_lock(&gioc_lock);
9742 	list_del(&ioc->list);
9743 	spin_unlock(&gioc_lock);
9744 	scsi_host_put(shost);
9745 }
9746 
9747 /**
9748  * scsih_shutdown - routine called during system shutdown
9749  * @pdev: PCI device struct
9750  */
9751 static void
9752 scsih_shutdown(struct pci_dev *pdev)
9753 {
9754 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9755 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9756 	struct workqueue_struct	*wq;
9757 	unsigned long flags;
9758 	Mpi2ConfigReply_t mpi_reply;
9759 
9760 	ioc->remove_host = 1;
9761 
9762 	mpt3sas_wait_for_commands_to_complete(ioc);
9763 	_scsih_flush_running_cmds(ioc);
9764 
9765 	_scsih_fw_event_cleanup_queue(ioc);
9766 
9767 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9768 	wq = ioc->firmware_event_thread;
9769 	ioc->firmware_event_thread = NULL;
9770 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9771 	if (wq)
9772 		destroy_workqueue(wq);
9773 	/*
9774 	 * Copy back the unmodified ioc page1 so that on next driver load,
9775 	 * current modified changes on ioc page1 won't take effect.
9776 	 */
9777 	if (ioc->is_aero_ioc)
9778 		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
9779 				&ioc->ioc_pg1_copy);
9780 
9781 	_scsih_ir_shutdown(ioc);
9782 	mpt3sas_base_detach(ioc);
9783 }
9784 
9785 
9786 /**
9787  * _scsih_probe_boot_devices - reports 1st device
9788  * @ioc: per adapter object
9789  *
9790  * If specified in BIOS page 2, this routine reports the 1st
9791  * device to scsi-ml or the sas transport for persistent boot
9792  * device purposes.  Please refer to function _scsih_determine_boot_device()
9793  */
9794 static void
9795 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9796 {
9797 	u32 channel;
9798 	void *device;
9799 	struct _sas_device *sas_device;
9800 	struct _raid_device *raid_device;
9801 	struct _pcie_device *pcie_device;
9802 	u16 handle;
9803 	u64 sas_address_parent;
9804 	u64 sas_address;
9805 	unsigned long flags;
9806 	int rc;
9807 	int tid;
9808 
9809 	/* no BIOS, return immediately */
9810 	if (!ioc->bios_pg3.BiosVersion)
9811 		return;
9812 
9813 	device = NULL;
9814 	if (ioc->req_boot_device.device) {
9815 		device =  ioc->req_boot_device.device;
9816 		channel = ioc->req_boot_device.channel;
9817 	} else if (ioc->req_alt_boot_device.device) {
9818 		device =  ioc->req_alt_boot_device.device;
9819 		channel = ioc->req_alt_boot_device.channel;
9820 	} else if (ioc->current_boot_device.device) {
9821 		device =  ioc->current_boot_device.device;
9822 		channel = ioc->current_boot_device.channel;
9823 	}
9824 
9825 	if (!device)
9826 		return;
9827 
9828 	if (channel == RAID_CHANNEL) {
9829 		raid_device = device;
9830 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9831 		    raid_device->id, 0);
9832 		if (rc)
9833 			_scsih_raid_device_remove(ioc, raid_device);
9834 	} else if (channel == PCIE_CHANNEL) {
9835 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9836 		pcie_device = device;
9837 		tid = pcie_device->id;
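		/*
		 * Move the boot device onto the active list so the regular
		 * device probe does not report it a second time.
		 */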
9838 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9839 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9840 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9841 		if (rc)
9842 			_scsih_pcie_device_remove(ioc, pcie_device);
9843 	} else {
9844 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
9845 		sas_device = device;
9846 		handle = sas_device->handle;
9847 		sas_address_parent = sas_device->sas_address_parent;
9848 		sas_address = sas_device->sas_address;
9849 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
9850 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9851 
9852 		if (ioc->hide_drives)
9853 			return;
9854 		if (!mpt3sas_transport_port_add(ioc, handle,
9855 		    sas_address_parent)) {
9856 			_scsih_sas_device_remove(ioc, sas_device);
9857 		} else if (!sas_device->starget) {
9858 			if (!ioc->is_driver_loading) {
9859 				mpt3sas_transport_port_remove(ioc,
9860 				    sas_address,
9861 				    sas_address_parent);
9862 				_scsih_sas_device_remove(ioc, sas_device);
9863 			}
9864 		}
9865 	}
9866 }
9867 
9868 /**
9869  * _scsih_probe_raid - reporting raid volumes to scsi-ml
9870  * @ioc: per adapter object
9871  *
9872  * Called during initial loading of the driver.
9873  */
9874 static void
9875 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9876 {
9877 	struct _raid_device *raid_device, *raid_next;
9878 	int rc;
9879 
9880 	list_for_each_entry_safe(raid_device, raid_next,
9881 	    &ioc->raid_device_list, list) {
9882 		if (raid_device->starget)
9883 			continue;
9884 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9885 		    raid_device->id, 0);
9886 		if (rc)
9887 			_scsih_raid_device_remove(ioc, raid_device);
9888 	}
9889 }
9890 
9891 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9892 {
9893 	struct _sas_device *sas_device = NULL;
9894 	unsigned long flags;
9895 
9896 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9897 	if (!list_empty(&ioc->sas_device_init_list)) {
9898 		sas_device = list_first_entry(&ioc->sas_device_init_list,
9899 				struct _sas_device, list);
9900 		sas_device_get(sas_device);
9901 	}
9902 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9903 
9904 	return sas_device;
9905 }
9906 
9907 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9908 		struct _sas_device *sas_device)
9909 {
9910 	unsigned long flags;
9911 
9912 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9913 
9914 	/*
9915 	 * Since we dropped the lock during the call to port_add(), we need to
9916 	 * be careful here that somebody else didn't move or delete this item
9917 	 * while we were busy with other things.
9918 	 *
9919 	 * If it was on the list, we need a put() for the reference the list
9920 	 * had. Either way, we need a get() for the destination list.
9921 	 */
9922 	if (!list_empty(&sas_device->list)) {
9923 		list_del_init(&sas_device->list);
9924 		sas_device_put(sas_device);
9925 	}
9926 
9927 	sas_device_get(sas_device);
9928 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
9929 
9930 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9931 }
9932 
9933 /**
9934  * _scsih_probe_sas - reporting sas devices to sas transport
9935  * @ioc: per adapter object
9936  *
9937  * Called during initial loading of the driver.
9938  */
9939 static void
9940 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
9941 {
9942 	struct _sas_device *sas_device;
9943 
9944 	if (ioc->hide_drives)
9945 		return;
9946 
9947 	while ((sas_device = get_next_sas_device(ioc))) {
9948 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
9949 		    sas_device->sas_address_parent)) {
9950 			_scsih_sas_device_remove(ioc, sas_device);
9951 			sas_device_put(sas_device);
9952 			continue;
9953 		} else if (!sas_device->starget) {
9954 			/*
9955 			 * When async scanning is enabled, it's not possible to
9956 			 * remove devices while scanning is turned on due to an
9957 			 * oops in scsi_sysfs_add_sdev()->add_device()->
9958 			 * sysfs_addrm_start()
9959 			 */
9960 			if (!ioc->is_driver_loading) {
9961 				mpt3sas_transport_port_remove(ioc,
9962 				    sas_device->sas_address,
9963 				    sas_device->sas_address_parent);
9964 				_scsih_sas_device_remove(ioc, sas_device);
9965 				sas_device_put(sas_device);
9966 				continue;
9967 			}
9968 		}
9969 		sas_device_make_active(ioc, sas_device);
9970 		sas_device_put(sas_device);
9971 	}
9972 }
9973 
9974 /**
9975  * get_next_pcie_device - Get the next pcie device
9976  * @ioc: per adapter object
9977  *
9978  * Get the next pcie device from pcie_device_init_list list.
9979  *
9980  * Return: pcie device structure if pcie_device_init_list is not empty,
9981  * otherwise NULL
9982  */
9983 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
9984 {
9985 	struct _pcie_device *pcie_device = NULL;
9986 	unsigned long flags;
9987 
9988 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9989 	if (!list_empty(&ioc->pcie_device_init_list)) {
9990 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
9991 				struct _pcie_device, list);
9992 		pcie_device_get(pcie_device);
9993 	}
9994 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9995 
9996 	return pcie_device;
9997 }
9998 
9999 /**
10000  * pcie_device_make_active - Add pcie device to pcie_device_list list
10001  * @ioc: per adapter object
10002  * @pcie_device: pcie device object
10003  *
10004  * Add the pcie device, which has registered with the SCSI Transport Layer,
10005  * to the pcie_device_list
10006  */
10007 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10008 		struct _pcie_device *pcie_device)
10009 {
10010 	unsigned long flags;
10011 
10012 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10013 
10014 	if (!list_empty(&pcie_device->list)) {
10015 		list_del_init(&pcie_device->list);
10016 		pcie_device_put(pcie_device);
10017 	}
10018 	pcie_device_get(pcie_device);
10019 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10020 
10021 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10022 }
10023 
10024 /**
10025  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10026  * @ioc: per adapter object
10027  *
10028  * Called during initial loading of the driver.
10029  */
10030 static void
10031 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10032 {
10033 	struct _pcie_device *pcie_device;
10034 	int rc;
10035 
10036 	/* PCIe Device List */
10037 	while ((pcie_device = get_next_pcie_device(ioc))) {
10038 		if (pcie_device->starget) {
10039 			pcie_device_put(pcie_device);
10040 			continue;
10041 		}
10042 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10043 			pcie_device->id, 0);
10044 		if (rc) {
10045 			_scsih_pcie_device_remove(ioc, pcie_device);
10046 			pcie_device_put(pcie_device);
10047 			continue;
10048 		} else if (!pcie_device->starget) {
10049 			/*
10050 			 * When async scanning is enabled, it's not possible to
10051 			 * remove devices while scanning is turned on due to an
10052 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10053 			 * sysfs_addrm_start()
10054 			 */
10055 			if (!ioc->is_driver_loading) {
10056 				/* TODO -- need to find out whether this
10057 				 * condition will occur or not
10058 				 */
10059 				_scsih_pcie_device_remove(ioc, pcie_device);
10060 				pcie_device_put(pcie_device);
10061 				continue;
10062 			}
10063 		}
10064 		pcie_device_make_active(ioc, pcie_device);
10065 		pcie_device_put(pcie_device);
10066 	}
10067 }
10068 
10069 /**
10070  * _scsih_probe_devices - probing for devices
10071  * @ioc: per adapter object
10072  *
10073  * Called during initial loading of the driver.
10074  */
10075 static void
10076 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10077 {
10078 	u16 volume_mapping_flags;
10079 
10080 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10081 		return;  /* return when IOC doesn't support initiator mode */
10082 
10083 	_scsih_probe_boot_devices(ioc);
10084 
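	/*
	 * With IR firmware the probe order follows the volume mapping mode:
	 * when volumes are mapped to the low target ids, report the RAID
	 * volumes before the bare SAS devices, otherwise report SAS first.
	 */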
10085 	if (ioc->ir_firmware) {
10086 		volume_mapping_flags =
10087 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10088 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10089 		if (volume_mapping_flags ==
10090 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10091 			_scsih_probe_raid(ioc);
10092 			_scsih_probe_sas(ioc);
10093 		} else {
10094 			_scsih_probe_sas(ioc);
10095 			_scsih_probe_raid(ioc);
10096 		}
10097 	} else {
10098 		_scsih_probe_sas(ioc);
10099 		_scsih_probe_pcie(ioc);
10100 	}
10101 }
10102 
10103 /**
10104  * scsih_scan_start - scsi lld callback for .scan_start
10105  * @shost: SCSI host pointer
10106  *
10107  * The shost has the ability to discover targets on its own instead
10108  * of scanning the entire bus.  In our implementation, we will kick off
10109  * firmware discovery.
10110  */
10111 static void
10112 scsih_scan_start(struct Scsi_Host *shost)
10113 {
10114 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10115 	int rc;
10116 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10117 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10118 
10119 	if (disable_discovery > 0)
10120 		return;
10121 
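	/*
	 * start_scan stays set until the port enable completion path clears
	 * it; scsih_scan_finished() polls it to know when discovery is done.
	 */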
10122 	ioc->start_scan = 1;
10123 	rc = mpt3sas_port_enable(ioc);
10124 
10125 	if (rc != 0)
10126 		ioc_info(ioc, "port enable: FAILED\n");
10127 }
10128 
10129 /**
10130  * scsih_scan_finished - scsi lld callback for .scan_finished
10131  * @shost: SCSI host pointer
10132  * @time: elapsed time of the scan in jiffies
10133  *
10134  * This function will be called periodically until it returns 1 with the
10135  * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10136  * we wait for firmware discovery to complete, then return 1.
10137  */
10138 static int
10139 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10140 {
10141 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10142 
10143 	if (disable_discovery > 0) {
10144 		ioc->is_driver_loading = 0;
10145 		ioc->wait_for_discovery_to_complete = 0;
10146 		return 1;
10147 	}
10148 
10149 	if (time >= (300 * HZ)) {
10150 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10151 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10152 		ioc->is_driver_loading = 0;
10153 		return 1;
10154 	}
10155 
10156 	if (ioc->start_scan)
10157 		return 0;
10158 
10159 	if (ioc->start_scan_failed) {
10160 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10161 			 ioc->start_scan_failed);
10162 		ioc->is_driver_loading = 0;
10163 		ioc->wait_for_discovery_to_complete = 0;
10164 		ioc->remove_host = 1;
10165 		return 1;
10166 	}
10167 
10168 	ioc_info(ioc, "port enable: SUCCESS\n");
10169 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10170 
10171 	if (ioc->wait_for_discovery_to_complete) {
10172 		ioc->wait_for_discovery_to_complete = 0;
10173 		_scsih_probe_devices(ioc);
10174 	}
10175 	mpt3sas_base_start_watchdog(ioc);
10176 	ioc->is_driver_loading = 0;
10177 	return 1;
10178 }
10179 
10180 /* shost template for SAS 2.0 HBA devices */
10181 static struct scsi_host_template mpt2sas_driver_template = {
10182 	.module				= THIS_MODULE,
10183 	.name				= "Fusion MPT SAS Host",
10184 	.proc_name			= MPT2SAS_DRIVER_NAME,
10185 	.queuecommand			= scsih_qcmd,
10186 	.target_alloc			= scsih_target_alloc,
10187 	.slave_alloc			= scsih_slave_alloc,
10188 	.slave_configure		= scsih_slave_configure,
10189 	.target_destroy			= scsih_target_destroy,
10190 	.slave_destroy			= scsih_slave_destroy,
10191 	.scan_finished			= scsih_scan_finished,
10192 	.scan_start			= scsih_scan_start,
10193 	.change_queue_depth		= scsih_change_queue_depth,
10194 	.eh_abort_handler		= scsih_abort,
10195 	.eh_device_reset_handler	= scsih_dev_reset,
10196 	.eh_target_reset_handler	= scsih_target_reset,
10197 	.eh_host_reset_handler		= scsih_host_reset,
10198 	.bios_param			= scsih_bios_param,
10199 	.can_queue			= 1,
10200 	.this_id			= -1,
10201 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
10202 	.max_sectors			= 32767,
10203 	.cmd_per_lun			= 7,
10204 	.shost_attrs			= mpt3sas_host_attrs,
10205 	.sdev_attrs			= mpt3sas_dev_attrs,
10206 	.track_queue_depth		= 1,
10207 	.cmd_size			= sizeof(struct scsiio_tracker),
10208 };
10209 
10210 /* raid transport support for SAS 2.0 HBA devices */
10211 static struct raid_function_template mpt2sas_raid_functions = {
10212 	.cookie		= &mpt2sas_driver_template,
10213 	.is_raid	= scsih_is_raid,
10214 	.get_resync	= scsih_get_resync,
10215 	.get_state	= scsih_get_state,
10216 };
10217 
10218 /* shost template for SAS 3.0 HBA devices */
10219 static struct scsi_host_template mpt3sas_driver_template = {
10220 	.module				= THIS_MODULE,
10221 	.name				= "Fusion MPT SAS Host",
10222 	.proc_name			= MPT3SAS_DRIVER_NAME,
10223 	.queuecommand			= scsih_qcmd,
10224 	.target_alloc			= scsih_target_alloc,
10225 	.slave_alloc			= scsih_slave_alloc,
10226 	.slave_configure		= scsih_slave_configure,
10227 	.target_destroy			= scsih_target_destroy,
10228 	.slave_destroy			= scsih_slave_destroy,
10229 	.scan_finished			= scsih_scan_finished,
10230 	.scan_start			= scsih_scan_start,
10231 	.change_queue_depth		= scsih_change_queue_depth,
10232 	.eh_abort_handler		= scsih_abort,
10233 	.eh_device_reset_handler	= scsih_dev_reset,
10234 	.eh_target_reset_handler	= scsih_target_reset,
10235 	.eh_host_reset_handler		= scsih_host_reset,
10236 	.bios_param			= scsih_bios_param,
10237 	.can_queue			= 1,
10238 	.this_id			= -1,
10239 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
10240 	.max_sectors			= 32767,
10241 	.max_segment_size		= 0xffffffff,
10242 	.cmd_per_lun			= 7,
10243 	.shost_attrs			= mpt3sas_host_attrs,
10244 	.sdev_attrs			= mpt3sas_dev_attrs,
10245 	.track_queue_depth		= 1,
10246 	.cmd_size			= sizeof(struct scsiio_tracker),
10247 };
10248 
10249 /* raid transport support for SAS 3.0 HBA devices */
10250 static struct raid_function_template mpt3sas_raid_functions = {
10251 	.cookie		= &mpt3sas_driver_template,
10252 	.is_raid	= scsih_is_raid,
10253 	.get_resync	= scsih_get_resync,
10254 	.get_state	= scsih_get_state,
10255 };
10256 
10257 /**
10258  * _scsih_determine_hba_mpi_version - determine which MPI version class
10259  *					this device belongs to.
10260  * @pdev: PCI device struct
10261  *
10262  * return MPI2_VERSION for SAS 2.0 HBA devices,
10263  *	MPI25_VERSION for SAS 3.0 HBA devices, and
10264  *	MPI26_VERSION for Cutlass, Intruder and newer SAS 3.0/3.5 HBA devices
10265  */
10266 static u16
10267 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10268 {
10269 
10270 	switch (pdev->device) {
10271 	case MPI2_MFGPAGE_DEVID_SSS6200:
10272 	case MPI2_MFGPAGE_DEVID_SAS2004:
10273 	case MPI2_MFGPAGE_DEVID_SAS2008:
10274 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
10275 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
10276 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
10277 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
10278 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
10279 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
10280 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
10281 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
10282 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
10283 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
10284 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
10285 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
10286 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
10287 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
10288 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10289 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10290 		return MPI2_VERSION;
10291 	case MPI25_MFGPAGE_DEVID_SAS3004:
10292 	case MPI25_MFGPAGE_DEVID_SAS3008:
10293 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
10294 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
10295 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
10296 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
10297 		return MPI25_VERSION;
10298 	case MPI26_MFGPAGE_DEVID_SAS3216:
10299 	case MPI26_MFGPAGE_DEVID_SAS3224:
10300 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
10301 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
10302 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
10303 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
10304 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
10305 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
10306 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
10307 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
10308 	case MPI26_MFGPAGE_DEVID_SAS3508:
10309 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
10310 	case MPI26_MFGPAGE_DEVID_SAS3408:
10311 	case MPI26_MFGPAGE_DEVID_SAS3516:
10312 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
10313 	case MPI26_MFGPAGE_DEVID_SAS3416:
10314 	case MPI26_MFGPAGE_DEVID_SAS3616:
10315 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10316 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10317 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10318 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10319 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10320 		return MPI26_VERSION;
10321 	}
10322 	return 0;
10323 }
10324 
10325 /**
10326  * _scsih_probe - attach and add scsi host
10327  * @pdev: PCI device struct
10328  * @id: pci device id
10329  *
10330  * Return: 0 success, anything else error.
10331  */
10332 static int
10333 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10334 {
10335 	struct MPT3SAS_ADAPTER *ioc;
10336 	struct Scsi_Host *shost = NULL;
10337 	int rv;
10338 	u16 hba_mpi_version;
10339 
10340 	/* Determine in which MPI version class this pci device belongs */
10341 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10342 	if (hba_mpi_version == 0)
10343 		return -ENODEV;
10344 
10345 	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
10346 	 * for other generation HBA's return with -ENODEV
10347 	 */
10348 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10349 		return -ENODEV;
10350 
10351 	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
10352 	 * for other generation HBA's return with -ENODEV
10353 	 */
10354 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10355 		|| hba_mpi_version ==  MPI26_VERSION)))
10356 		return -ENODEV;
10357 
10358 	switch (hba_mpi_version) {
10359 	case MPI2_VERSION:
10360 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10361 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10362 		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
10363 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10364 		  sizeof(struct MPT3SAS_ADAPTER));
10365 		if (!shost)
10366 			return -ENODEV;
10367 		ioc = shost_priv(shost);
10368 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10369 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10370 		ioc->id = mpt2_ids++;
10371 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10372 		switch (pdev->device) {
10373 		case MPI2_MFGPAGE_DEVID_SSS6200:
10374 			ioc->is_warpdrive = 1;
10375 			ioc->hide_ir_msg = 1;
10376 			break;
10377 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10378 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10379 			ioc->is_mcpu_endpoint = 1;
10380 			break;
10381 		default:
10382 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10383 			break;
10384 		}
10385 		break;
10386 	case MPI25_VERSION:
10387 	case MPI26_VERSION:
10388 		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
10389 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10390 		  sizeof(struct MPT3SAS_ADAPTER));
10391 		if (!shost)
10392 			return -ENODEV;
10393 		ioc = shost_priv(shost);
10394 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10395 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10396 		ioc->id = mpt3_ids++;
10397 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10398 		switch (pdev->device) {
10399 		case MPI26_MFGPAGE_DEVID_SAS3508:
10400 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10401 		case MPI26_MFGPAGE_DEVID_SAS3408:
10402 		case MPI26_MFGPAGE_DEVID_SAS3516:
10403 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10404 		case MPI26_MFGPAGE_DEVID_SAS3416:
10405 		case MPI26_MFGPAGE_DEVID_SAS3616:
10406 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10407 			ioc->is_gen35_ioc = 1;
10408 			break;
10409 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10410 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10411 			dev_info(&pdev->dev,
10412 			    "HBA is in Configurable Secure mode\n");
10413 			/* fall through */
10414 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10415 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10416 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10417 			break;
10418 		default:
10419 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10420 		}
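		/*
		 * SAS 3.0 C0 (and newer) and all MPI 2.6 controllers use
		 * combined reply queues; gen3.5 parts expose more reply post
		 * host index registers than the gen3 parts do.
		 */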
10421 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10422 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10423 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10424 			ioc->combined_reply_queue = 1;
10425 			if (ioc->is_gen35_ioc)
10426 				ioc->combined_reply_index_count =
10427 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10428 			else
10429 				ioc->combined_reply_index_count =
10430 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10431 		}
10432 		break;
10433 	default:
10434 		return -ENODEV;
10435 	}
10436 
10437 	INIT_LIST_HEAD(&ioc->list);
10438 	spin_lock(&gioc_lock);
10439 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10440 	spin_unlock(&gioc_lock);
10441 	ioc->shost = shost;
10442 	ioc->pdev = pdev;
10443 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10444 	ioc->tm_cb_idx = tm_cb_idx;
10445 	ioc->ctl_cb_idx = ctl_cb_idx;
10446 	ioc->base_cb_idx = base_cb_idx;
10447 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10448 	ioc->transport_cb_idx = transport_cb_idx;
10449 	ioc->scsih_cb_idx = scsih_cb_idx;
10450 	ioc->config_cb_idx = config_cb_idx;
10451 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10452 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10453 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10454 	ioc->logging_level = logging_level;
10455 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10456 	/* misc semaphores and spin locks */
10457 	mutex_init(&ioc->reset_in_progress_mutex);
10458 	/* initializing pci_access_mutex lock */
10459 	mutex_init(&ioc->pci_access_mutex);
10460 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10461 	spin_lock_init(&ioc->scsi_lookup_lock);
10462 	spin_lock_init(&ioc->sas_device_lock);
10463 	spin_lock_init(&ioc->sas_node_lock);
10464 	spin_lock_init(&ioc->fw_event_lock);
10465 	spin_lock_init(&ioc->raid_device_lock);
10466 	spin_lock_init(&ioc->pcie_device_lock);
10467 	spin_lock_init(&ioc->diag_trigger_lock);
10468 
10469 	INIT_LIST_HEAD(&ioc->sas_device_list);
10470 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
10471 	INIT_LIST_HEAD(&ioc->sas_expander_list);
10472 	INIT_LIST_HEAD(&ioc->enclosure_list);
10473 	INIT_LIST_HEAD(&ioc->pcie_device_list);
10474 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10475 	INIT_LIST_HEAD(&ioc->fw_event_list);
10476 	INIT_LIST_HEAD(&ioc->raid_device_list);
10477 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10478 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
10479 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
10480 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10481 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10482 	INIT_LIST_HEAD(&ioc->reply_queue_list);
10483 
10484 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10485 
10486 	/* init shost parameters */
10487 	shost->max_cmd_len = 32;
10488 	shost->max_lun = max_lun;
10489 	shost->transportt = mpt3sas_transport_template;
10490 	shost->unique_id = ioc->id;
10491 
10492 	if (ioc->is_mcpu_endpoint) {
10493 		/* mCPU MPI endpoints support a max IO size of 64K (128 sectors) */
10494 		shost->max_sectors = 128;
10495 		ioc_info(ioc, "The max_sectors value is set to %d\n",
10496 			 shost->max_sectors);
10497 	} else {
10498 		if (max_sectors != 0xFFFF) {
10499 			if (max_sectors < 64) {
10500 				shost->max_sectors = 64;
10501 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10502 					 max_sectors);
10503 			} else if (max_sectors > 32767) {
10504 				shost->max_sectors = 32767;
10505 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
10506 					 max_sectors);
10507 			} else {
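				/* clear bit 0 so max_sectors stays even */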
10508 				shost->max_sectors = max_sectors & 0xFFFE;
10509 				ioc_info(ioc, "The max_sectors value is set to %d\n",
10510 					 shost->max_sectors);
10511 			}
10512 		}
10513 	}
10514 	/* register EEDP capabilities with SCSI layer */
10515 	if (prot_mask > 0)
10516 		scsi_host_set_prot(shost, prot_mask);
10517 	else
10518 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10519 				   | SHOST_DIF_TYPE2_PROTECTION
10520 				   | SHOST_DIF_TYPE3_PROTECTION);
10521 
10522 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10523 
10524 	/* event thread */
10525 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10526 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
10527 	ioc->firmware_event_thread = alloc_ordered_workqueue(
10528 	    ioc->firmware_event_name, 0);
10529 	if (!ioc->firmware_event_thread) {
10530 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10531 			__FILE__, __LINE__, __func__);
10532 		rv = -ENODEV;
10533 		goto out_thread_fail;
10534 	}
10535 
10536 	ioc->is_driver_loading = 1;
10537 	if ((mpt3sas_base_attach(ioc))) {
10538 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10539 			__FILE__, __LINE__, __func__);
10540 		rv = -ENODEV;
10541 		goto out_attach_fail;
10542 	}
10543 
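	/*
	 * WarpDrive: hide the member disks from the SCSI layer unless
	 * manufacturing page 10 asks for them to be exposed, or no RAID
	 * volumes are configured.
	 */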
10544 	if (ioc->is_warpdrive) {
10545 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
10546 			ioc->hide_drives = 0;
10547 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
10548 			ioc->hide_drives = 1;
10549 		else {
10550 			if (mpt3sas_get_num_volumes(ioc))
10551 				ioc->hide_drives = 1;
10552 			else
10553 				ioc->hide_drives = 0;
10554 		}
10555 	} else
10556 		ioc->hide_drives = 0;
10557 
10558 	rv = scsi_add_host(shost, &pdev->dev);
10559 	if (rv) {
10560 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10561 			__FILE__, __LINE__, __func__);
10562 		goto out_add_shost_fail;
10563 	}
10564 
10565 	scsi_scan_host(shost);
10566 	return 0;
10567 out_add_shost_fail:
10568 	mpt3sas_base_detach(ioc);
10569  out_attach_fail:
10570 	destroy_workqueue(ioc->firmware_event_thread);
10571  out_thread_fail:
10572 	spin_lock(&gioc_lock);
10573 	list_del(&ioc->list);
10574 	spin_unlock(&gioc_lock);
10575 	scsi_host_put(shost);
10576 	return rv;
10577 }
10578 
10579 #ifdef CONFIG_PM
10580 /**
10581  * scsih_suspend - power management suspend main entry point
10582  * @pdev: PCI device struct
10583  * @state: PM state change to (usually PCI_D3)
10584  *
10585  * Return: 0 success, anything else error.
10586  */
10587 static int
10588 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10589 {
10590 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10591 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10592 	pci_power_t device_state;
10593 
10594 	mpt3sas_base_stop_watchdog(ioc);
10595 	flush_scheduled_work();
10596 	scsi_block_requests(shost);
10597 	device_state = pci_choose_state(pdev, state);
10598 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10599 		 pdev, pci_name(pdev), device_state);
10600 
10601 	pci_save_state(pdev);
10602 	mpt3sas_base_free_resources(ioc);
10603 	pci_set_power_state(pdev, device_state);
10604 	return 0;
10605 }
10606 
10607 /**
10608  * scsih_resume - power management resume main entry point
10609  * @pdev: PCI device struct
10610  *
10611  * Return: 0 success, anything else error.
10612  */
10613 static int
10614 scsih_resume(struct pci_dev *pdev)
10615 {
10616 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10617 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10618 	pci_power_t device_state = pdev->current_state;
10619 	int r;
10620 
10621 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10622 		 pdev, pci_name(pdev), device_state);
10623 
10624 	pci_set_power_state(pdev, PCI_D0);
10625 	pci_enable_wake(pdev, PCI_D0, 0);
10626 	pci_restore_state(pdev);
10627 	ioc->pdev = pdev;
10628 	r = mpt3sas_base_map_resources(ioc);
10629 	if (r)
10630 		return r;
10631 
10632 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10633 	scsi_unblock_requests(shost);
10634 	mpt3sas_base_start_watchdog(ioc);
10635 	return 0;
10636 }
10637 #endif /* CONFIG_PM */
10638 
10639 /**
10640  * scsih_pci_error_detected - Called when a PCI error is detected.
10641  * @pdev: PCI device struct
10642  * @state: PCI channel state
10643  *
10644  * Description: Called when a PCI error is detected.
10645  *
10646  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
10647  */
10648 static pci_ers_result_t
10649 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10650 {
10651 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10652 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10653 
10654 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10655 
10656 	switch (state) {
10657 	case pci_channel_io_normal:
10658 		return PCI_ERS_RESULT_CAN_RECOVER;
10659 	case pci_channel_io_frozen:
10660 		/* Fatal error, prepare for slot reset */
10661 		ioc->pci_error_recovery = 1;
10662 		scsi_block_requests(ioc->shost);
10663 		mpt3sas_base_stop_watchdog(ioc);
10664 		mpt3sas_base_free_resources(ioc);
10665 		return PCI_ERS_RESULT_NEED_RESET;
10666 	case pci_channel_io_perm_failure:
10667 		/* Permanent error, prepare for device removal */
10668 		ioc->pci_error_recovery = 1;
10669 		mpt3sas_base_stop_watchdog(ioc);
10670 		_scsih_flush_running_cmds(ioc);
10671 		return PCI_ERS_RESULT_DISCONNECT;
10672 	}
10673 	return PCI_ERS_RESULT_NEED_RESET;
10674 }
10675 
10676 /**
10677  * scsih_pci_slot_reset - Called when PCI slot has been reset.
10678  * @pdev: PCI device struct
10679  *
10680  * Description: This routine is called by the pci error recovery
10681  * code after the PCI slot has been reset, just before we
10682  * should resume normal operations.
10683  */
10684 static pci_ers_result_t
10685 scsih_pci_slot_reset(struct pci_dev *pdev)
10686 {
10687 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10688 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10689 	int rc;
10690 
10691 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
10692 
10693 	ioc->pci_error_recovery = 0;
10694 	ioc->pdev = pdev;
10695 	pci_restore_state(pdev);
10696 	rc = mpt3sas_base_map_resources(ioc);
10697 	if (rc)
10698 		return PCI_ERS_RESULT_DISCONNECT;
10699 
10700 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10701 
10702 	ioc_warn(ioc, "hard reset: %s\n",
10703 		 (rc == 0) ? "success" : "failed");
10704 
10705 	if (!rc)
10706 		return PCI_ERS_RESULT_RECOVERED;
10707 	else
10708 		return PCI_ERS_RESULT_DISCONNECT;
10709 }
10710 
10711 /**
10712  * scsih_pci_resume() - resume normal ops after PCI reset
10713  * @pdev: pointer to PCI device
10714  *
10715  * Called when the error recovery driver tells us that it's
10716  * OK to resume normal operation. Use completion to allow
10717  * halted scsi ops to resume.
10718  */
10719 static void
10720 scsih_pci_resume(struct pci_dev *pdev)
10721 {
10722 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10723 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10724 
10725 	ioc_info(ioc, "PCI error: resume callback!!\n");
10726 
10727 	mpt3sas_base_start_watchdog(ioc);
10728 	scsi_unblock_requests(ioc->shost);
10729 }
10730 
10731 /**
10732  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10733  * @pdev: pointer to PCI device
10734  */
10735 static pci_ers_result_t
10736 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10737 {
10738 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10739 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10740 
10741 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10742 
10743 	/* TODO - dump whatever for debugging purposes */
10744 
10745 	/* This is called only if scsih_pci_error_detected returns
10746 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10747 	 * works, no need to reset slot.
10748 	 */
10749 	return PCI_ERS_RESULT_RECOVERED;
10750 }
10751 
10752 /**
10753  * scsih_ncq_prio_supp - Check for NCQ command priority support
10754  * @sdev: scsi device struct
10755  *
10756  * This is called when a user indicates they would like to enable
10757  * ncq command priorities. This works only on SATA devices.
10758  */
10759 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10760 {
10761 	unsigned char *buf;
10762 	bool ncq_prio_supp = false;
10763 
10764 	if (!scsi_device_supports_vpd(sdev))
10765 		return ncq_prio_supp;
10766 
10767 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10768 	if (!buf)
10769 		return ncq_prio_supp;
10770 
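	/*
	 * VPD page 0x89 (ATA Information) embeds the ATA IDENTIFY data;
	 * byte 213 bit 4 maps to IDENTIFY word 76 bit 12, the
	 * "NCQ priority supported" bit.
	 */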
10771 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10772 		ncq_prio_supp = (buf[213] >> 4) & 1;
10773 
10774 	kfree(buf);
10775 	return ncq_prio_supp;
10776 }
10777 /*
10778  * The pci device ids are defined in mpi/mpi2_cnfg.h.
10779  */
10780 static const struct pci_device_id mpt3sas_pci_table[] = {
10781 	/* Spitfire ~ 2004 */
10782 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10783 		PCI_ANY_ID, PCI_ANY_ID },
10784 	/* Falcon ~ 2008 */
10785 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10786 		PCI_ANY_ID, PCI_ANY_ID },
10787 	/* Liberator ~ 2108 */
10788 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10789 		PCI_ANY_ID, PCI_ANY_ID },
10790 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10791 		PCI_ANY_ID, PCI_ANY_ID },
10792 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10793 		PCI_ANY_ID, PCI_ANY_ID },
10794 	/* Meteor ~ 2116 */
10795 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10796 		PCI_ANY_ID, PCI_ANY_ID },
10797 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10798 		PCI_ANY_ID, PCI_ANY_ID },
10799 	/* Thunderbolt ~ 2208 */
10800 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10801 		PCI_ANY_ID, PCI_ANY_ID },
10802 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10803 		PCI_ANY_ID, PCI_ANY_ID },
10804 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10805 		PCI_ANY_ID, PCI_ANY_ID },
10806 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10807 		PCI_ANY_ID, PCI_ANY_ID },
10808 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10809 		PCI_ANY_ID, PCI_ANY_ID },
10810 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10811 		PCI_ANY_ID, PCI_ANY_ID },
10812 	/* Mustang ~ 2308 */
10813 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10814 		PCI_ANY_ID, PCI_ANY_ID },
10815 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10816 		PCI_ANY_ID, PCI_ANY_ID },
10817 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10818 		PCI_ANY_ID, PCI_ANY_ID },
10819 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
10820 		PCI_ANY_ID, PCI_ANY_ID },
10821 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
10822 		PCI_ANY_ID, PCI_ANY_ID },
10823 	/* SSS6200 */
10824 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10825 		PCI_ANY_ID, PCI_ANY_ID },
10826 	/* Fury ~ 3004 and 3008 */
10827 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10828 		PCI_ANY_ID, PCI_ANY_ID },
10829 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10830 		PCI_ANY_ID, PCI_ANY_ID },
10831 	/* Invader ~ 3108 */
10832 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10833 		PCI_ANY_ID, PCI_ANY_ID },
10834 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10835 		PCI_ANY_ID, PCI_ANY_ID },
10836 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10837 		PCI_ANY_ID, PCI_ANY_ID },
10838 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10839 		PCI_ANY_ID, PCI_ANY_ID },
10840 	/* Cutlass ~ 3216 and 3224 */
10841 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10842 		PCI_ANY_ID, PCI_ANY_ID },
10843 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10844 		PCI_ANY_ID, PCI_ANY_ID },
10845 	/* Intruder ~ 3316 and 3324 */
10846 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10847 		PCI_ANY_ID, PCI_ANY_ID },
10848 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10849 		PCI_ANY_ID, PCI_ANY_ID },
10850 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10851 		PCI_ANY_ID, PCI_ANY_ID },
10852 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10853 		PCI_ANY_ID, PCI_ANY_ID },
10854 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10855 		PCI_ANY_ID, PCI_ANY_ID },
10856 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10857 		PCI_ANY_ID, PCI_ANY_ID },
10858 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10859 		PCI_ANY_ID, PCI_ANY_ID },
10860 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10861 		PCI_ANY_ID, PCI_ANY_ID },
10862 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
10863 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10864 		PCI_ANY_ID, PCI_ANY_ID },
10865 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10866 		PCI_ANY_ID, PCI_ANY_ID },
10867 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10868 		PCI_ANY_ID, PCI_ANY_ID },
10869 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10870 		PCI_ANY_ID, PCI_ANY_ID },
10871 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10872 		PCI_ANY_ID, PCI_ANY_ID },
10873 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10874 		PCI_ANY_ID, PCI_ANY_ID },
10875 	/* Mercator ~ 3616*/
10876 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10877 		PCI_ANY_ID, PCI_ANY_ID },
10878 
10879 	/* Aero SI 0x00E1 Configurable Secure
10880 	 * 0x00E2 Hard Secure
10881 	 */
10882 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10883 		PCI_ANY_ID, PCI_ANY_ID },
10884 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10885 		PCI_ANY_ID, PCI_ANY_ID },
10886 
10887 	/* Atlas PCIe Switch Management Port */
10888 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
10889 		PCI_ANY_ID, PCI_ANY_ID },
10890 
10891 	/* Sea SI 0x00E5 Configurable Secure
10892 	 * 0x00E6 Hard Secure
10893 	 */
10894 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10895 		PCI_ANY_ID, PCI_ANY_ID },
10896 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10897 		PCI_ANY_ID, PCI_ANY_ID },
10898 
10899 	{0}     /* Terminating entry */
10900 };
10901 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10902 
10903 static struct pci_error_handlers _mpt3sas_err_handler = {
10904 	.error_detected	= scsih_pci_error_detected,
10905 	.mmio_enabled	= scsih_pci_mmio_enabled,
10906 	.slot_reset	= scsih_pci_slot_reset,
10907 	.resume		= scsih_pci_resume,
10908 };
10909 
10910 static struct pci_driver mpt3sas_driver = {
10911 	.name		= MPT3SAS_DRIVER_NAME,
10912 	.id_table	= mpt3sas_pci_table,
10913 	.probe		= _scsih_probe,
10914 	.remove		= scsih_remove,
10915 	.shutdown	= scsih_shutdown,
10916 	.err_handler	= &_mpt3sas_err_handler,
10917 #ifdef CONFIG_PM
10918 	.suspend	= scsih_suspend,
10919 	.resume		= scsih_resume,
10920 #endif
10921 };
10922 
10923 /**
10924  * scsih_init - register this driver's callback handlers.
10925  *
10926  * Return: 0 success, anything else error.
10927  */
10928 static int
10929 scsih_init(void)
10930 {
10931 	mpt2_ids = 0;
10932 	mpt3_ids = 0;
10933 
10934 	mpt3sas_base_initialize_callback_handler();
10935 
10936 	/* queuecommand callback handler */
10937 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
10938 
10939 	/* task management callback handler */
10940 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
10941 
10942 	/* base internal commands callback handler */
10943 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
10944 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
10945 	    mpt3sas_port_enable_done);
10946 
10947 	/* transport internal commands callback handler */
10948 	transport_cb_idx = mpt3sas_base_register_callback_handler(
10949 	    mpt3sas_transport_done);
10950 
10951 	/* scsih internal commands callback handler */
10952 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
10953 
10954 	/* configuration page API internal commands callback handler */
10955 	config_cb_idx = mpt3sas_base_register_callback_handler(
10956 	    mpt3sas_config_done);
10957 
10958 	/* ctl module callback handler */
10959 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
10960 
10961 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
10962 	    _scsih_tm_tr_complete);
10963 
10964 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
10965 	    _scsih_tm_volume_tr_complete);
10966 
10967 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
10968 	    _scsih_sas_control_complete);
10969 
10970 	return 0;
10971 }
10972 
10973 /**
10974  * scsih_exit - exit point for this driver (when it is a module).
10977  */
10978 static void
10979 scsih_exit(void)
10980 {
10981 
10982 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
10983 	mpt3sas_base_release_callback_handler(tm_cb_idx);
10984 	mpt3sas_base_release_callback_handler(base_cb_idx);
10985 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
10986 	mpt3sas_base_release_callback_handler(transport_cb_idx);
10987 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
10988 	mpt3sas_base_release_callback_handler(config_cb_idx);
10989 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
10990 
10991 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
10992 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
10993 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
10994 
10995 	/* raid transport support */
10996 	if (hbas_to_enumerate != 1)
10997 		raid_class_release(mpt3sas_raid_template);
10998 	if (hbas_to_enumerate != 2)
10999 		raid_class_release(mpt2sas_raid_template);
11000 	sas_release_transport(mpt3sas_transport_template);
11001 }
11002 
11003 /**
11004  * _mpt3sas_init - main entry point for this driver.
11005  *
11006  * Return: 0 success, anything else error.
11007  */
11008 static int __init
11009 _mpt3sas_init(void)
11010 {
11011 	int error;
11012 
11013 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11014 					MPT3SAS_DRIVER_VERSION);
11015 
11016 	mpt3sas_transport_template =
11017 	    sas_attach_transport(&mpt3sas_transport_functions);
11018 	if (!mpt3sas_transport_template)
11019 		return -ENODEV;
11020 
11021 	/* No need to attach the mpt3sas raid functions template
11022 	 * if hbas_to_enumerate is one.
11023 	 */
11024 	if (hbas_to_enumerate != 1) {
11025 		mpt3sas_raid_template =
11026 				raid_class_attach(&mpt3sas_raid_functions);
11027 		if (!mpt3sas_raid_template) {
11028 			sas_release_transport(mpt3sas_transport_template);
11029 			return -ENODEV;
11030 		}
11031 	}
11032 
11033 	/* No need to attach the mpt2sas raid functions template
11034 	 * if hbas_to_enumerate is two.
11035 	 */
11036 	if (hbas_to_enumerate != 2) {
11037 		mpt2sas_raid_template =
11038 				raid_class_attach(&mpt2sas_raid_functions);
11039 		if (!mpt2sas_raid_template) {
11040 			sas_release_transport(mpt3sas_transport_template);
11041 			return -ENODEV;
11042 		}
11043 	}
11044 
11045 	error = scsih_init();
11046 	if (error) {
11047 		scsih_exit();
11048 		return error;
11049 	}
11050 
11051 	mpt3sas_ctl_init(hbas_to_enumerate);
11052 
11053 	error = pci_register_driver(&mpt3sas_driver);
11054 	if (error)
11055 		scsih_exit();
11056 
11057 	return error;
11058 }
11059 
11060 /**
11061  * _mpt3sas_exit - exit point for this driver (when it is a module).
11062  *
11063  */
11064 static void __exit
11065 _mpt3sas_exit(void)
11066 {
11067 	pr_info("mpt3sas version %s unloading\n",
11068 				MPT3SAS_DRIVER_VERSION);
11069 
11070 	mpt3sas_ctl_exit(hbas_to_enumerate);
11071 
11072 	pci_unregister_driver(&mpt3sas_driver);
11073 
11074 	scsih_exit();
11075 }
11076 
11077 module_init(_mpt3sas_init);
11078 module_exit(_mpt3sas_exit);
11079