1 /*
2  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3  *
4  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5  * Copyright (C) 2012-2014  LSI Corporation
6  * Copyright (C) 2013-2014 Avago Technologies
7  *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version 2
12  * of the License, or (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * NO WARRANTY
20  * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21  * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22  * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24  * solely responsible for determining the appropriateness of using and
25  * distributing the Program and assumes all risks associated with its
26  * exercise of rights under this Agreement, including but not limited to
27  * the risks and costs of program errors, damage to or loss of data,
28  * programs or equipment, and unavailability or interruption of operations.
29 
30  * DISCLAIMER OF LIABILITY
31  * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35  * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36  * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37  * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 
39  * You should have received a copy of the GNU General Public License
40  * along with this program; if not, write to the Free Software
41  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42  * USA.
43  */
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/pci-aspm.h>
55 #include <linux/interrupt.h>
56 #include <linux/aer.h>
57 #include <linux/raid_class.h>
58 #include <asm/unaligned.h>
59 
60 #include "mpt3sas_base.h"
61 
62 #define RAID_CHANNEL 1
63 
64 #define PCIE_CHANNEL 2
65 
/* forward prototypes */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 	struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
70 
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 	struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 	u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 	struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
86 
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
92 
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
104 
static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
107 static u8 tm_sas_control_cb_idx = -1;
108 
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 	" bits for enabling additional logging info (default=0)");
113 
114 
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
118 
119 
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0);
MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
123 
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129 
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 		  1 - enumerates only SAS 2.0 generation HBAs\n \
135 		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136 
137 /* diag_buffer_enable is bitwise
138  * bit 0 set = TRACE
139  * bit 1 set = SNAPSHOT
140  * bit 2 set = EXTENDED
141  *
 * Any combination of these bits can be set.
143  */
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151 
152 
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 
158 
159 /* raid transport support */
160 static struct raid_template *mpt3sas_raid_template;
161 static struct raid_template *mpt2sas_raid_template;
162 
163 
164 /**
165  * struct sense_info - common structure for obtaining sense keys
166  * @skey: sense key
167  * @asc: additional sense code
168  * @ascq: additional sense code qualifier
169  */
170 struct sense_info {
171 	u8 skey;
172 	u8 asc;
173 	u8 ascq;
174 };
175 
176 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
177 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
178 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
179 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
180 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
181 /**
182  * struct fw_event_work - firmware event struct
 * @list: linked list framework
184  * @work: work object (ioc->fault_reset_work_q)
185  * @ioc: per adapter object
186  * @device_handle: device handle
187  * @VF_ID: virtual function id
188  * @VP_ID: virtual port id
189  * @ignore: flag meaning this event has been marked to ignore
190  * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
191  * @refcount: kref for this event
192  * @event_data: reply event data payload follows
193  *
 * This object is stored on ioc->fw_event_list.
195  */
196 struct fw_event_work {
197 	struct list_head	list;
198 	struct work_struct	work;
199 
200 	struct MPT3SAS_ADAPTER *ioc;
201 	u16			device_handle;
202 	u8			VF_ID;
203 	u8			VP_ID;
204 	u8			ignore;
205 	u16			event;
206 	struct kref		refcount;
207 	char			event_data[0] __aligned(4);
208 };
209 
210 static void fw_event_work_free(struct kref *r)
211 {
212 	kfree(container_of(r, struct fw_event_work, refcount));
213 }
214 
215 static void fw_event_work_get(struct fw_event_work *fw_work)
216 {
217 	kref_get(&fw_work->refcount);
218 }
219 
220 static void fw_event_work_put(struct fw_event_work *fw_work)
221 {
222 	kref_put(&fw_work->refcount, fw_event_work_free);
223 }
224 
225 static struct fw_event_work *alloc_fw_event_work(int len)
226 {
227 	struct fw_event_work *fw_event;
228 
229 	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
230 	if (!fw_event)
231 		return NULL;
232 
233 	kref_init(&fw_event->refcount);
234 	return fw_event;
235 }
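
/*
 * Illustrative sketch only (not part of the driver flow): how the
 * fw_event_work reference counting above is typically balanced.  The exact
 * queueing target (ioc->firmware_event_thread) and variable names are
 * assumptions for illustration.
 *
 *	fw_event = alloc_fw_event_work(sz);		// refcount == 1
 *	if (!fw_event)
 *		return;
 *	fw_event_work_get(fw_event);	// extra reference for the work queue
 *	queue_work(ioc->firmware_event_thread, &fw_event->work);
 *	...
 *	fw_event_work_put(fw_event);	// work handler drops its reference
 *	fw_event_work_put(fw_event);	// originator drops the initial reference
 */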
236 
237 /**
238  * struct _scsi_io_transfer - scsi io transfer
239  * @handle: sas device handle (assigned by firmware)
240  * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
242  * @data_length: data transfer length
243  * @data_dma: dma pointer to data
244  * @sense: sense data
245  * @lun: lun number
246  * @cdb_length: cdb length
247  * @cdb: cdb contents
248  * @timeout: timeout for this command
249  * @VF_ID: virtual function id
250  * @VP_ID: virtual port id
251  * @valid_reply: flag set for reply message
252  * @sense_length: sense length
253  * @ioc_status: ioc status
254  * @scsi_state: scsi state
 * @scsi_status: scsi status
256  * @log_info: log information
257  * @transfer_length: data length transfer when there is a reply message
258  *
259  * Used for sending internal scsi commands to devices within this module.
260  * Refer to _scsi_send_scsi_io().
261  */
262 struct _scsi_io_transfer {
263 	u16	handle;
264 	u8	is_raid;
265 	enum dma_data_direction dir;
266 	u32	data_length;
267 	dma_addr_t data_dma;
268 	u8	sense[SCSI_SENSE_BUFFERSIZE];
269 	u32	lun;
270 	u8	cdb_length;
271 	u8	cdb[32];
272 	u8	timeout;
273 	u8	VF_ID;
274 	u8	VP_ID;
275 	u8	valid_reply;
  /* the following fields are only valid when 'valid_reply = 1' */
277 	u32	sense_length;
278 	u16	ioc_status;
279 	u8	scsi_state;
280 	u8	scsi_status;
281 	u32	log_info;
282 	u32	transfer_length;
283 };
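
/*
 * Illustrative sketch only: how a caller might fill in _scsi_io_transfer for
 * an internal TEST UNIT READY before handing it to _scsi_send_scsi_io().
 * The field values chosen below are assumptions for illustration, not driver
 * defaults.
 *
 *	struct _scsi_io_transfer *transfer_packet;
 *
 *	transfer_packet = kzalloc(sizeof(*transfer_packet), GFP_KERNEL);
 *	if (!transfer_packet)
 *		return -ENOMEM;
 *	transfer_packet->handle = handle;	// firmware-assigned device handle
 *	transfer_packet->dir = DMA_NONE;	// TEST UNIT READY moves no data
 *	transfer_packet->cdb_length = 6;
 *	transfer_packet->cdb[0] = TEST_UNIT_READY;
 *	transfer_packet->timeout = 10;		// seconds
 *	// on completion, check transfer_packet->valid_reply, ioc_status, etc.
 */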
284 
285 /**
286  * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: string holding the new logging_level value
 * @kp: kernel parameter descriptor
289  *
290  * Note: The logging levels are defined in mpt3sas_debug.h.
291  */
292 static int
293 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
294 {
295 	int ret = param_set_int(val, kp);
296 	struct MPT3SAS_ADAPTER *ioc;
297 
298 	if (ret)
299 		return ret;
300 
301 	pr_info("setting logging_level(0x%08x)\n", logging_level);
302 	spin_lock(&gioc_lock);
303 	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
304 		ioc->logging_level = logging_level;
305 	spin_unlock(&gioc_lock);
306 	return 0;
307 }
308 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
309 	&logging_level, 0644);
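
/*
 * Usage note (illustrative only): because logging_level is wired through
 * module_param_call() above with permission 0644, it can also be changed at
 * runtime; the example value below is arbitrary.
 *
 *	# modprobe mpt3sas logging_level=0x3f8
 *	# echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 *
 * The callback then propagates the new value to every ioc on
 * mpt3sas_ioc_list; the individual bit meanings come from mpt3sas_debug.h.
 */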
310 
311 /**
312  * _scsih_srch_boot_sas_address - search based on sas_address
313  * @sas_address: sas address
314  * @boot_device: boot device object from bios page 2
315  *
316  * Return: 1 when there's a match, 0 means no match.
317  */
318 static inline int
319 _scsih_srch_boot_sas_address(u64 sas_address,
320 	Mpi2BootDeviceSasWwid_t *boot_device)
321 {
322 	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
323 }
324 
325 /**
326  * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in the IDENTIFY frame
328  * @boot_device: boot device object from bios page 2
329  *
330  * Return: 1 when there's a match, 0 means no match.
331  */
332 static inline int
333 _scsih_srch_boot_device_name(u64 device_name,
334 	Mpi2BootDeviceDeviceName_t *boot_device)
335 {
336 	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
337 }
338 
339 /**
340  * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
341  * @enclosure_logical_id: enclosure logical id
342  * @slot_number: slot number
343  * @boot_device: boot device object from bios page 2
344  *
345  * Return: 1 when there's a match, 0 means no match.
346  */
347 static inline int
348 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
349 	Mpi2BootDeviceEnclosureSlot_t *boot_device)
350 {
351 	return (enclosure_logical_id == le64_to_cpu(boot_device->
352 	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
353 	    SlotNumber)) ? 1 : 0;
354 }
355 
356 /**
357  * _scsih_is_boot_device - search for matching boot device.
358  * @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
360  * @enclosure_logical_id: enclosure logical id
361  * @slot: slot number
362  * @form: specifies boot device form
363  * @boot_device: boot device object from bios page 2
364  *
365  * Return: 1 when there's a match, 0 means no match.
366  */
367 static int
368 _scsih_is_boot_device(u64 sas_address, u64 device_name,
369 	u64 enclosure_logical_id, u16 slot, u8 form,
370 	Mpi2BiosPage2BootDevice_t *boot_device)
371 {
372 	int rc = 0;
373 
374 	switch (form) {
375 	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
376 		if (!sas_address)
377 			break;
378 		rc = _scsih_srch_boot_sas_address(
379 		    sas_address, &boot_device->SasWwid);
380 		break;
381 	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
382 		if (!enclosure_logical_id)
383 			break;
384 		rc = _scsih_srch_boot_encl_slot(
385 		    enclosure_logical_id,
386 		    slot, &boot_device->EnclosureSlot);
387 		break;
388 	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
389 		if (!device_name)
390 			break;
391 		rc = _scsih_srch_boot_device_name(
392 		    device_name, &boot_device->DeviceName);
393 		break;
394 	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
395 		break;
396 	}
397 
398 	return rc;
399 }
400 
401 /**
 * _scsih_get_sas_address - get the sas_address for the given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address (returned)
406  *
407  * Return: 0 success, non-zero when failure
408  */
409 static int
410 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
411 	u64 *sas_address)
412 {
413 	Mpi2SasDevicePage0_t sas_device_pg0;
414 	Mpi2ConfigReply_t mpi_reply;
415 	u32 ioc_status;
416 
417 	*sas_address = 0;
418 
419 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
420 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
421 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
422 			__FILE__, __LINE__, __func__);
423 		return -ENXIO;
424 	}
425 
426 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
427 	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
428 		/* For HBA, vSES doesn't return HBA SAS address. Instead return
429 		 * vSES's sas address.
430 		 */
431 		if ((handle <= ioc->sas_hba.num_phys) &&
432 		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
433 		   MPI2_SAS_DEVICE_INFO_SEP)))
434 			*sas_address = ioc->sas_hba.sas_address;
435 		else
436 			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
437 		return 0;
438 	}
439 
440 	/* we hit this because the given parent handle doesn't exist */
441 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
442 		return -ENXIO;
443 
444 	/* else error case */
445 	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
446 		handle, ioc_status, __FILE__, __LINE__, __func__);
447 	return -EIO;
448 }
449 
450 /**
451  * _scsih_determine_boot_device - determine boot device.
452  * @ioc: per adapter object
453  * @device: sas_device or pcie_device object
454  * @channel: SAS or PCIe channel
455  *
 * Determines whether this device should be the first reported device to
 * scsi-ml or the sas transport; this is used to support persistent boot
 * devices. There are primary, alternate, and current entries in bios page 2.
 * The order of priority is primary, alternate, then current.  This routine
 * saves the corresponding device object.
 * The saved data is used later in _scsih_probe_boot_devices().
462  */
463 static void
464 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
465 	u32 channel)
466 {
467 	struct _sas_device *sas_device;
468 	struct _pcie_device *pcie_device;
469 	struct _raid_device *raid_device;
470 	u64 sas_address;
471 	u64 device_name;
472 	u64 enclosure_logical_id;
473 	u16 slot;
474 
475 	 /* only process this function when driver loads */
476 	if (!ioc->is_driver_loading)
477 		return;
478 
	 /* no BIOS, return immediately */
480 	if (!ioc->bios_pg3.BiosVersion)
481 		return;
482 
483 	if (channel == RAID_CHANNEL) {
484 		raid_device = device;
485 		sas_address = raid_device->wwid;
486 		device_name = 0;
487 		enclosure_logical_id = 0;
488 		slot = 0;
489 	} else if (channel == PCIE_CHANNEL) {
490 		pcie_device = device;
491 		sas_address = pcie_device->wwid;
492 		device_name = 0;
493 		enclosure_logical_id = 0;
494 		slot = 0;
495 	} else {
496 		sas_device = device;
497 		sas_address = sas_device->sas_address;
498 		device_name = sas_device->device_name;
499 		enclosure_logical_id = sas_device->enclosure_logical_id;
500 		slot = sas_device->slot;
501 	}
502 
503 	if (!ioc->req_boot_device.device) {
504 		if (_scsih_is_boot_device(sas_address, device_name,
505 		    enclosure_logical_id, slot,
506 		    (ioc->bios_pg2.ReqBootDeviceForm &
507 		    MPI2_BIOSPAGE2_FORM_MASK),
508 		    &ioc->bios_pg2.RequestedBootDevice)) {
509 			dinitprintk(ioc,
510 				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
511 					     __func__, (u64)sas_address));
512 			ioc->req_boot_device.device = device;
513 			ioc->req_boot_device.channel = channel;
514 		}
515 	}
516 
517 	if (!ioc->req_alt_boot_device.device) {
518 		if (_scsih_is_boot_device(sas_address, device_name,
519 		    enclosure_logical_id, slot,
520 		    (ioc->bios_pg2.ReqAltBootDeviceForm &
521 		    MPI2_BIOSPAGE2_FORM_MASK),
522 		    &ioc->bios_pg2.RequestedAltBootDevice)) {
523 			dinitprintk(ioc,
524 				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
525 					     __func__, (u64)sas_address));
526 			ioc->req_alt_boot_device.device = device;
527 			ioc->req_alt_boot_device.channel = channel;
528 		}
529 	}
530 
531 	if (!ioc->current_boot_device.device) {
532 		if (_scsih_is_boot_device(sas_address, device_name,
533 		    enclosure_logical_id, slot,
534 		    (ioc->bios_pg2.CurrentBootDeviceForm &
535 		    MPI2_BIOSPAGE2_FORM_MASK),
536 		    &ioc->bios_pg2.CurrentBootDevice)) {
537 			dinitprintk(ioc,
538 				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
539 					     __func__, (u64)sas_address));
540 			ioc->current_boot_device.device = device;
541 			ioc->current_boot_device.channel = channel;
542 		}
543 	}
544 }
545 
546 static struct _sas_device *
547 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
548 		struct MPT3SAS_TARGET *tgt_priv)
549 {
550 	struct _sas_device *ret;
551 
552 	assert_spin_locked(&ioc->sas_device_lock);
553 
554 	ret = tgt_priv->sas_dev;
555 	if (ret)
556 		sas_device_get(ret);
557 
558 	return ret;
559 }
560 
561 static struct _sas_device *
562 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
563 		struct MPT3SAS_TARGET *tgt_priv)
564 {
565 	struct _sas_device *ret;
566 	unsigned long flags;
567 
568 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
569 	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
570 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
571 
572 	return ret;
573 }
574 
575 static struct _pcie_device *
576 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
577 	struct MPT3SAS_TARGET *tgt_priv)
578 {
579 	struct _pcie_device *ret;
580 
581 	assert_spin_locked(&ioc->pcie_device_lock);
582 
583 	ret = tgt_priv->pcie_dev;
584 	if (ret)
585 		pcie_device_get(ret);
586 
587 	return ret;
588 }
589 
590 /**
591  * mpt3sas_get_pdev_from_target - pcie device search
592  * @ioc: per adapter object
593  * @tgt_priv: starget private object
594  *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * it before returning the pcie_device object.
 *
 * This searches for the pcie_device attached to the target, then returns the
 * pcie_device object.
599  */
600 static struct _pcie_device *
601 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
602 	struct MPT3SAS_TARGET *tgt_priv)
603 {
604 	struct _pcie_device *ret;
605 	unsigned long flags;
606 
607 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
608 	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
609 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
610 
611 	return ret;
612 }
613 
614 struct _sas_device *
615 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
616 					u64 sas_address)
617 {
618 	struct _sas_device *sas_device;
619 
620 	assert_spin_locked(&ioc->sas_device_lock);
621 
622 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
623 		if (sas_device->sas_address == sas_address)
624 			goto found_device;
625 
626 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
627 		if (sas_device->sas_address == sas_address)
628 			goto found_device;
629 
630 	return NULL;
631 
632 found_device:
633 	sas_device_get(sas_device);
634 	return sas_device;
635 }
636 
637 /**
638  * mpt3sas_get_sdev_by_addr - sas device search
639  * @ioc: per adapter object
640  * @sas_address: sas address
 * Context: This function will acquire ioc->sas_device_lock and will release
 * it before returning the sas_device object.
 *
 * This searches for sas_device based on sas_address, then returns the
 * sas_device object.
645  */
646 struct _sas_device *
647 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
648 	u64 sas_address)
649 {
650 	struct _sas_device *sas_device;
651 	unsigned long flags;
652 
653 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
654 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
655 			sas_address);
656 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
657 
658 	return sas_device;
659 }
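
/*
 * Illustrative sketch only: the lookup helpers above return a sas_device with
 * an elevated reference count (see sas_device_get() in the locked variants),
 * so every successful caller must balance the lookup with sas_device_put().
 * Variable names below are assumptions for illustration.
 *
 *	sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address);
 *	if (sas_device) {
 *		// ... use sas_device fields ...
 *		sas_device_put(sas_device);
 *	}
 */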
660 
661 static struct _sas_device *
662 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
663 {
664 	struct _sas_device *sas_device;
665 
666 	assert_spin_locked(&ioc->sas_device_lock);
667 
668 	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
669 		if (sas_device->handle == handle)
670 			goto found_device;
671 
672 	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
673 		if (sas_device->handle == handle)
674 			goto found_device;
675 
676 	return NULL;
677 
678 found_device:
679 	sas_device_get(sas_device);
680 	return sas_device;
681 }
682 
683 /**
684  * mpt3sas_get_sdev_by_handle - sas device search
685  * @ioc: per adapter object
686  * @handle: sas device handle (assigned by firmware)
 * Context: This function will acquire ioc->sas_device_lock and will release
 * it before returning the sas_device object.
 *
 * This searches for sas_device based on handle, then returns the sas_device
 * object.
691  */
692 struct _sas_device *
693 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
694 {
695 	struct _sas_device *sas_device;
696 	unsigned long flags;
697 
698 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
699 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
700 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
701 
702 	return sas_device;
703 }
704 
705 /**
706  * _scsih_display_enclosure_chassis_info - display device location info
707  * @ioc: per adapter object
708  * @sas_device: per sas device object
709  * @sdev: scsi device struct
710  * @starget: scsi target struct
711  */
712 static void
713 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
714 	struct _sas_device *sas_device, struct scsi_device *sdev,
715 	struct scsi_target *starget)
716 {
717 	if (sdev) {
718 		if (sas_device->enclosure_handle != 0)
719 			sdev_printk(KERN_INFO, sdev,
720 			    "enclosure logical id (0x%016llx), slot(%d) \n",
721 			    (unsigned long long)
722 			    sas_device->enclosure_logical_id,
723 			    sas_device->slot);
724 		if (sas_device->connector_name[0] != '\0')
725 			sdev_printk(KERN_INFO, sdev,
726 			    "enclosure level(0x%04x), connector name( %s)\n",
727 			    sas_device->enclosure_level,
728 			    sas_device->connector_name);
729 		if (sas_device->is_chassis_slot_valid)
730 			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
731 			    sas_device->chassis_slot);
732 	} else if (starget) {
733 		if (sas_device->enclosure_handle != 0)
734 			starget_printk(KERN_INFO, starget,
735 			    "enclosure logical id(0x%016llx), slot(%d) \n",
736 			    (unsigned long long)
737 			    sas_device->enclosure_logical_id,
738 			    sas_device->slot);
739 		if (sas_device->connector_name[0] != '\0')
740 			starget_printk(KERN_INFO, starget,
741 			    "enclosure level(0x%04x), connector name( %s)\n",
742 			    sas_device->enclosure_level,
743 			    sas_device->connector_name);
744 		if (sas_device->is_chassis_slot_valid)
745 			starget_printk(KERN_INFO, starget,
746 			    "chassis slot(0x%04x)\n",
747 			    sas_device->chassis_slot);
748 	} else {
749 		if (sas_device->enclosure_handle != 0)
750 			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
751 				 (u64)sas_device->enclosure_logical_id,
752 				 sas_device->slot);
753 		if (sas_device->connector_name[0] != '\0')
754 			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
755 				 sas_device->enclosure_level,
756 				 sas_device->connector_name);
757 		if (sas_device->is_chassis_slot_valid)
758 			ioc_info(ioc, "chassis slot(0x%04x)\n",
759 				 sas_device->chassis_slot);
760 	}
761 }
762 
763 /**
764  * _scsih_sas_device_remove - remove sas_device from list.
765  * @ioc: per adapter object
766  * @sas_device: the sas_device object
767  * Context: This function will acquire ioc->sas_device_lock.
768  *
769  * If sas_device is on the list, remove it and decrement its reference count.
770  */
771 static void
772 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
773 	struct _sas_device *sas_device)
774 {
775 	unsigned long flags;
776 
777 	if (!sas_device)
778 		return;
779 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
780 		 sas_device->handle, (u64)sas_device->sas_address);
781 
782 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
783 
784 	/*
785 	 * The lock serializes access to the list, but we still need to verify
786 	 * that nobody removed the entry while we were waiting on the lock.
787 	 */
788 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
789 	if (!list_empty(&sas_device->list)) {
790 		list_del_init(&sas_device->list);
791 		sas_device_put(sas_device);
792 	}
793 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
794 }
795 
796 /**
797  * _scsih_device_remove_by_handle - removing device object by handle
798  * @ioc: per adapter object
799  * @handle: device handle
800  */
801 static void
802 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
803 {
804 	struct _sas_device *sas_device;
805 	unsigned long flags;
806 
807 	if (ioc->shost_recovery)
808 		return;
809 
810 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
812 	if (sas_device) {
813 		list_del_init(&sas_device->list);
814 		sas_device_put(sas_device);
815 	}
816 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
817 	if (sas_device) {
818 		_scsih_remove_device(ioc, sas_device);
819 		sas_device_put(sas_device);
820 	}
821 }
822 
823 /**
824  * mpt3sas_device_remove_by_sas_address - removing device object by sas address
825  * @ioc: per adapter object
826  * @sas_address: device sas_address
827  */
828 void
829 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
830 	u64 sas_address)
831 {
832 	struct _sas_device *sas_device;
833 	unsigned long flags;
834 
835 	if (ioc->shost_recovery)
836 		return;
837 
838 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
839 	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
840 	if (sas_device) {
841 		list_del_init(&sas_device->list);
842 		sas_device_put(sas_device);
843 	}
844 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
845 	if (sas_device) {
846 		_scsih_remove_device(ioc, sas_device);
847 		sas_device_put(sas_device);
848 	}
849 }
850 
851 /**
852  * _scsih_sas_device_add - insert sas_device to the list.
853  * @ioc: per adapter object
854  * @sas_device: the sas_device object
855  * Context: This function will acquire ioc->sas_device_lock.
856  *
857  * Adding new object to the ioc->sas_device_list.
858  */
859 static void
860 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
861 	struct _sas_device *sas_device)
862 {
863 	unsigned long flags;
864 
865 	dewtprintk(ioc,
866 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
867 			    __func__, sas_device->handle,
868 			    (u64)sas_device->sas_address));
869 
870 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
871 	    NULL, NULL));
872 
873 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
874 	sas_device_get(sas_device);
875 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
876 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
877 
878 	if (ioc->hide_drives) {
879 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
880 		return;
881 	}
882 
883 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
884 	     sas_device->sas_address_parent)) {
885 		_scsih_sas_device_remove(ioc, sas_device);
886 	} else if (!sas_device->starget) {
887 		/*
		 * When async scanning is enabled, it's not possible to remove
889 		 * devices while scanning is turned on due to an oops in
890 		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
891 		 */
892 		if (!ioc->is_driver_loading) {
893 			mpt3sas_transport_port_remove(ioc,
894 			    sas_device->sas_address,
895 			    sas_device->sas_address_parent);
896 			_scsih_sas_device_remove(ioc, sas_device);
897 		}
898 	} else
899 		clear_bit(sas_device->handle, ioc->pend_os_device_add);
900 }
901 
902 /**
903  * _scsih_sas_device_init_add - insert sas_device to the list.
904  * @ioc: per adapter object
905  * @sas_device: the sas_device object
906  * Context: This function will acquire ioc->sas_device_lock.
907  *
908  * Adding new object at driver load time to the ioc->sas_device_init_list.
909  */
910 static void
911 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
912 	struct _sas_device *sas_device)
913 {
914 	unsigned long flags;
915 
916 	dewtprintk(ioc,
917 		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
918 			    __func__, sas_device->handle,
919 			    (u64)sas_device->sas_address));
920 
921 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
922 	    NULL, NULL));
923 
924 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
925 	sas_device_get(sas_device);
926 	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
927 	_scsih_determine_boot_device(ioc, sas_device, 0);
928 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
929 }
930 
931 
932 static struct _pcie_device *
933 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
934 {
935 	struct _pcie_device *pcie_device;
936 
937 	assert_spin_locked(&ioc->pcie_device_lock);
938 
939 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
940 		if (pcie_device->wwid == wwid)
941 			goto found_device;
942 
943 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
944 		if (pcie_device->wwid == wwid)
945 			goto found_device;
946 
947 	return NULL;
948 
949 found_device:
950 	pcie_device_get(pcie_device);
951 	return pcie_device;
952 }
953 
954 
955 /**
956  * mpt3sas_get_pdev_by_wwid - pcie device search
957  * @ioc: per adapter object
958  * @wwid: wwid
959  *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * it before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then returns the pcie_device
 * object.
964  */
965 static struct _pcie_device *
966 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
967 {
968 	struct _pcie_device *pcie_device;
969 	unsigned long flags;
970 
971 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
972 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
973 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
974 
975 	return pcie_device;
976 }
977 
978 
979 static struct _pcie_device *
980 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
981 	int channel)
982 {
983 	struct _pcie_device *pcie_device;
984 
985 	assert_spin_locked(&ioc->pcie_device_lock);
986 
987 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
988 		if (pcie_device->id == id && pcie_device->channel == channel)
989 			goto found_device;
990 
991 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
992 		if (pcie_device->id == id && pcie_device->channel == channel)
993 			goto found_device;
994 
995 	return NULL;
996 
997 found_device:
998 	pcie_device_get(pcie_device);
999 	return pcie_device;
1000 }
1001 
1002 static struct _pcie_device *
1003 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1004 {
1005 	struct _pcie_device *pcie_device;
1006 
1007 	assert_spin_locked(&ioc->pcie_device_lock);
1008 
1009 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1010 		if (pcie_device->handle == handle)
1011 			goto found_device;
1012 
1013 	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1014 		if (pcie_device->handle == handle)
1015 			goto found_device;
1016 
1017 	return NULL;
1018 
1019 found_device:
1020 	pcie_device_get(pcie_device);
1021 	return pcie_device;
1022 }
1023 
1024 
1025 /**
1026  * mpt3sas_get_pdev_by_handle - pcie device search
1027  * @ioc: per adapter object
1028  * @handle: Firmware device handle
1029  *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * it before returning the pcie_device object.
 *
 * This searches for pcie_device based on handle, then returns the pcie_device
 * object.
1035  */
1036 struct _pcie_device *
1037 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1038 {
1039 	struct _pcie_device *pcie_device;
1040 	unsigned long flags;
1041 
1042 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1043 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1044 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1045 
1046 	return pcie_device;
1047 }
1048 
1049 /**
1050  * _scsih_pcie_device_remove - remove pcie_device from list.
1051  * @ioc: per adapter object
1052  * @pcie_device: the pcie_device object
1053  * Context: This function will acquire ioc->pcie_device_lock.
1054  *
1055  * If pcie_device is on the list, remove it and decrement its reference count.
1056  */
1057 static void
1058 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1059 	struct _pcie_device *pcie_device)
1060 {
1061 	unsigned long flags;
1062 	int was_on_pcie_device_list = 0;
1063 
1064 	if (!pcie_device)
1065 		return;
1066 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1067 		 pcie_device->handle, (u64)pcie_device->wwid);
1068 	if (pcie_device->enclosure_handle != 0)
1069 		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1070 			 (u64)pcie_device->enclosure_logical_id,
1071 			 pcie_device->slot);
1072 	if (pcie_device->connector_name[0] != '\0')
1073 		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1074 			 pcie_device->enclosure_level,
1075 			 pcie_device->connector_name);
1076 
1077 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1078 	if (!list_empty(&pcie_device->list)) {
1079 		list_del_init(&pcie_device->list);
1080 		was_on_pcie_device_list = 1;
1081 	}
1082 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1083 	if (was_on_pcie_device_list) {
1084 		kfree(pcie_device->serial_number);
1085 		pcie_device_put(pcie_device);
1086 	}
1087 }
1088 
1089 
1090 /**
1091  * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1092  * @ioc: per adapter object
1093  * @handle: device handle
1094  */
1095 static void
1096 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1097 {
1098 	struct _pcie_device *pcie_device;
1099 	unsigned long flags;
1100 	int was_on_pcie_device_list = 0;
1101 
1102 	if (ioc->shost_recovery)
1103 		return;
1104 
1105 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1106 	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1107 	if (pcie_device) {
1108 		if (!list_empty(&pcie_device->list)) {
1109 			list_del_init(&pcie_device->list);
1110 			was_on_pcie_device_list = 1;
1111 			pcie_device_put(pcie_device);
1112 		}
1113 	}
1114 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1115 	if (was_on_pcie_device_list) {
1116 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1117 		pcie_device_put(pcie_device);
1118 	}
1119 }
1120 
1121 /**
1122  * _scsih_pcie_device_add - add pcie_device object
1123  * @ioc: per adapter object
1124  * @pcie_device: pcie_device object
1125  *
 * This is added to the pcie_device_list linked list.
1127  */
1128 static void
1129 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1130 	struct _pcie_device *pcie_device)
1131 {
1132 	unsigned long flags;
1133 
1134 	dewtprintk(ioc,
1135 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1136 			    __func__,
1137 			    pcie_device->handle, (u64)pcie_device->wwid));
1138 	if (pcie_device->enclosure_handle != 0)
1139 		dewtprintk(ioc,
1140 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1141 				    __func__,
1142 				    (u64)pcie_device->enclosure_logical_id,
1143 				    pcie_device->slot));
1144 	if (pcie_device->connector_name[0] != '\0')
1145 		dewtprintk(ioc,
1146 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1147 				    __func__, pcie_device->enclosure_level,
1148 				    pcie_device->connector_name));
1149 
1150 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1151 	pcie_device_get(pcie_device);
1152 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1153 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1154 
1155 	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1156 		_scsih_pcie_device_remove(ioc, pcie_device);
1157 	} else if (!pcie_device->starget) {
1158 		if (!ioc->is_driver_loading) {
1159 /*TODO-- Need to find out whether this condition will occur or not*/
1160 			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1161 		}
1162 	} else
1163 		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1164 }
1165 
/**
1167  * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1168  * @ioc: per adapter object
1169  * @pcie_device: the pcie_device object
1170  * Context: This function will acquire ioc->pcie_device_lock.
1171  *
1172  * Adding new object at driver load time to the ioc->pcie_device_init_list.
1173  */
1174 static void
1175 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1176 				struct _pcie_device *pcie_device)
1177 {
1178 	unsigned long flags;
1179 
1180 	dewtprintk(ioc,
1181 		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1182 			    __func__,
1183 			    pcie_device->handle, (u64)pcie_device->wwid));
1184 	if (pcie_device->enclosure_handle != 0)
1185 		dewtprintk(ioc,
1186 			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1187 				    __func__,
1188 				    (u64)pcie_device->enclosure_logical_id,
1189 				    pcie_device->slot));
1190 	if (pcie_device->connector_name[0] != '\0')
1191 		dewtprintk(ioc,
1192 			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1193 				    __func__, pcie_device->enclosure_level,
1194 				    pcie_device->connector_name));
1195 
1196 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1197 	pcie_device_get(pcie_device);
1198 	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1199 	_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1200 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1201 }
1202 /**
1203  * _scsih_raid_device_find_by_id - raid device search
1204  * @ioc: per adapter object
1205  * @id: sas device target id
1206  * @channel: sas device channel
1207  * Context: Calling function should acquire ioc->raid_device_lock
1208  *
1209  * This searches for raid_device based on target id, then return raid_device
1210  * object.
1211  */
1212 static struct _raid_device *
1213 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1214 {
1215 	struct _raid_device *raid_device, *r;
1216 
1217 	r = NULL;
1218 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1219 		if (raid_device->id == id && raid_device->channel == channel) {
1220 			r = raid_device;
1221 			goto out;
1222 		}
1223 	}
1224 
1225  out:
1226 	return r;
1227 }
1228 
1229 /**
1230  * mpt3sas_raid_device_find_by_handle - raid device search
1231  * @ioc: per adapter object
1232  * @handle: sas device handle (assigned by firmware)
1233  * Context: Calling function should acquire ioc->raid_device_lock
1234  *
1235  * This searches for raid_device based on handle, then return raid_device
1236  * object.
1237  */
1238 struct _raid_device *
1239 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1240 {
1241 	struct _raid_device *raid_device, *r;
1242 
1243 	r = NULL;
1244 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1245 		if (raid_device->handle != handle)
1246 			continue;
1247 		r = raid_device;
1248 		goto out;
1249 	}
1250 
1251  out:
1252 	return r;
1253 }
1254 
1255 /**
1256  * _scsih_raid_device_find_by_wwid - raid device search
1257  * @ioc: per adapter object
 * @wwid: wwid of the raid device
1259  * Context: Calling function should acquire ioc->raid_device_lock
1260  *
1261  * This searches for raid_device based on wwid, then return raid_device
1262  * object.
1263  */
1264 static struct _raid_device *
1265 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1266 {
1267 	struct _raid_device *raid_device, *r;
1268 
1269 	r = NULL;
1270 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1271 		if (raid_device->wwid != wwid)
1272 			continue;
1273 		r = raid_device;
1274 		goto out;
1275 	}
1276 
1277  out:
1278 	return r;
1279 }
1280 
1281 /**
1282  * _scsih_raid_device_add - add raid_device object
1283  * @ioc: per adapter object
1284  * @raid_device: raid_device object
1285  *
 * This is added to the raid_device_list linked list.
1287  */
1288 static void
1289 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1290 	struct _raid_device *raid_device)
1291 {
1292 	unsigned long flags;
1293 
1294 	dewtprintk(ioc,
1295 		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1296 			    __func__,
1297 			    raid_device->handle, (u64)raid_device->wwid));
1298 
1299 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1300 	list_add_tail(&raid_device->list, &ioc->raid_device_list);
1301 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1302 }
1303 
1304 /**
1305  * _scsih_raid_device_remove - delete raid_device object
1306  * @ioc: per adapter object
1307  * @raid_device: raid_device object
1308  *
1309  */
1310 static void
1311 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1312 	struct _raid_device *raid_device)
1313 {
1314 	unsigned long flags;
1315 
1316 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1317 	list_del(&raid_device->list);
1318 	kfree(raid_device);
1319 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1320 }
1321 
1322 /**
1323  * mpt3sas_scsih_expander_find_by_handle - expander device search
1324  * @ioc: per adapter object
1325  * @handle: expander handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_node_lock.
1327  *
1328  * This searches for expander device based on handle, then returns the
1329  * sas_node object.
1330  */
1331 struct _sas_node *
1332 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1333 {
1334 	struct _sas_node *sas_expander, *r;
1335 
1336 	r = NULL;
1337 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1338 		if (sas_expander->handle != handle)
1339 			continue;
1340 		r = sas_expander;
1341 		goto out;
1342 	}
1343  out:
1344 	return r;
1345 }
1346 
1347 /**
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1349  * @ioc: per adapter object
1350  * @handle: enclosure handle (assigned by firmware)
1351  * Context: Calling function should acquire ioc->sas_device_lock
1352  *
1353  * This searches for enclosure device based on handle, then returns the
1354  * enclosure object.
1355  */
1356 static struct _enclosure_node *
1357 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1358 {
1359 	struct _enclosure_node *enclosure_dev, *r;
1360 
1361 	r = NULL;
1362 	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1363 		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1364 			continue;
1365 		r = enclosure_dev;
1366 		goto out;
1367 	}
1368 out:
1369 	return r;
1370 }
1371 /**
1372  * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1373  * @ioc: per adapter object
1374  * @sas_address: sas address
1375  * Context: Calling function should acquire ioc->sas_node_lock.
1376  *
1377  * This searches for expander device based on sas_address, then returns the
1378  * sas_node object.
1379  */
1380 struct _sas_node *
1381 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1382 	u64 sas_address)
1383 {
1384 	struct _sas_node *sas_expander, *r;
1385 
1386 	r = NULL;
1387 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1388 		if (sas_expander->sas_address != sas_address)
1389 			continue;
1390 		r = sas_expander;
1391 		goto out;
1392 	}
1393  out:
1394 	return r;
1395 }
1396 
1397 /**
1398  * _scsih_expander_node_add - insert expander device to the list.
1399  * @ioc: per adapter object
1400  * @sas_expander: the sas_device object
1401  * Context: This function will acquire ioc->sas_node_lock.
1402  *
1403  * Adding new object to the ioc->sas_expander_list.
1404  */
1405 static void
1406 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1407 	struct _sas_node *sas_expander)
1408 {
1409 	unsigned long flags;
1410 
1411 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
1412 	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1413 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1414 }
1415 
1416 /**
1417  * _scsih_is_end_device - determines if device is an end device
1418  * @device_info: bitfield providing information about the device.
1419  * Context: none
1420  *
1421  * Return: 1 if end device.
1422  */
1423 static int
1424 _scsih_is_end_device(u32 device_info)
1425 {
1426 	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1427 		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1428 		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1429 		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1430 		return 1;
1431 	else
1432 		return 0;
1433 }
1434 
1435 /**
1436  * _scsih_is_nvme_device - determines if device is an nvme device
1437  * @device_info: bitfield providing information about the device.
1438  * Context: none
1439  *
1440  * Return: 1 if nvme device.
1441  */
1442 static int
1443 _scsih_is_nvme_device(u32 device_info)
1444 {
1445 	if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1446 					== MPI26_PCIE_DEVINFO_NVME)
1447 		return 1;
1448 	else
1449 		return 0;
1450 }
1451 
1452 /**
1453  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1454  * @ioc: per adapter object
1455  * @smid: system request message index
1456  *
 * Return: the scmd pointer stored for this smid, or NULL when the smid is
 * out of range or no valid command is associated with it.
1459  */
1460 struct scsi_cmnd *
1461 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1462 {
1463 	struct scsi_cmnd *scmd = NULL;
1464 	struct scsiio_tracker *st;
1465 
1466 	if (smid > 0  &&
1467 	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1468 		u32 unique_tag = smid - 1;
1469 
1470 		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1471 		if (scmd) {
1472 			st = scsi_cmd_priv(scmd);
1473 			if (st->cb_idx == 0xFF || st->smid == 0)
1474 				scmd = NULL;
1475 		}
1476 	}
1477 	return scmd;
1478 }
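
/*
 * Illustrative sketch only: a completion callback might use the smid-to-scmd
 * lookup above roughly as follows.  The callback name and the reply handling
 * shown here are assumptions for illustration, not the driver's actual
 * completion path.
 *
 *	static u8 example_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 *		u8 msix_index, u32 reply)
 *	{
 *		struct scsi_cmnd *scmd;
 *
 *		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 *		if (!scmd)
 *			return 1;	// nothing to complete for this smid
 *		// ... translate the MPI reply into scmd->result ...
 *		scmd->scsi_done(scmd);
 *		return 1;
 *	}
 */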
1479 
1480 /**
1481  * scsih_change_queue_depth - setting device queue depth
1482  * @sdev: scsi device struct
1483  * @qdepth: requested queue depth
1484  *
1485  * Return: queue depth.
1486  */
1487 static int
1488 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1489 {
1490 	struct Scsi_Host *shost = sdev->host;
1491 	int max_depth;
1492 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1493 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1494 	struct MPT3SAS_TARGET *sas_target_priv_data;
1495 	struct _sas_device *sas_device;
1496 	unsigned long flags;
1497 
1498 	max_depth = shost->can_queue;
1499 
1500 	/* limit max device queue for SATA to 32 */
1501 	sas_device_priv_data = sdev->hostdata;
1502 	if (!sas_device_priv_data)
1503 		goto not_sata;
1504 	sas_target_priv_data = sas_device_priv_data->sas_target;
1505 	if (!sas_target_priv_data)
1506 		goto not_sata;
1507 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1508 		goto not_sata;
1509 
1510 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1511 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1512 	if (sas_device) {
1513 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1514 			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1515 
1516 		sas_device_put(sas_device);
1517 	}
1518 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1519 
1520  not_sata:
1521 
1522 	if (!sdev->tagged_supported)
1523 		max_depth = 1;
1524 	if (qdepth > max_depth)
1525 		qdepth = max_depth;
1526 	return scsi_change_queue_depth(sdev, qdepth);
1527 }
1528 
1529 /**
1530  * scsih_target_alloc - target add routine
1531  * @starget: scsi target struct
1532  *
1533  * Return: 0 if ok. Any other return is assumed to be an error and
1534  * the device is ignored.
1535  */
1536 static int
1537 scsih_target_alloc(struct scsi_target *starget)
1538 {
1539 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1540 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1541 	struct MPT3SAS_TARGET *sas_target_priv_data;
1542 	struct _sas_device *sas_device;
1543 	struct _raid_device *raid_device;
1544 	struct _pcie_device *pcie_device;
1545 	unsigned long flags;
1546 	struct sas_rphy *rphy;
1547 
1548 	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1549 				       GFP_KERNEL);
1550 	if (!sas_target_priv_data)
1551 		return -ENOMEM;
1552 
1553 	starget->hostdata = sas_target_priv_data;
1554 	sas_target_priv_data->starget = starget;
1555 	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1556 
1557 	/* RAID volumes */
1558 	if (starget->channel == RAID_CHANNEL) {
1559 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1560 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1561 		    starget->channel);
1562 		if (raid_device) {
1563 			sas_target_priv_data->handle = raid_device->handle;
1564 			sas_target_priv_data->sas_address = raid_device->wwid;
1565 			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1566 			if (ioc->is_warpdrive)
1567 				sas_target_priv_data->raid_device = raid_device;
1568 			raid_device->starget = starget;
1569 		}
1570 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1571 		return 0;
1572 	}
1573 
1574 	/* PCIe devices */
1575 	if (starget->channel == PCIE_CHANNEL) {
1576 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1577 		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1578 			starget->channel);
1579 		if (pcie_device) {
1580 			sas_target_priv_data->handle = pcie_device->handle;
1581 			sas_target_priv_data->sas_address = pcie_device->wwid;
1582 			sas_target_priv_data->pcie_dev = pcie_device;
1583 			pcie_device->starget = starget;
1584 			pcie_device->id = starget->id;
1585 			pcie_device->channel = starget->channel;
1586 			sas_target_priv_data->flags |=
1587 				MPT_TARGET_FLAGS_PCIE_DEVICE;
1588 			if (pcie_device->fast_path)
1589 				sas_target_priv_data->flags |=
1590 					MPT_TARGET_FASTPATH_IO;
1591 		}
1592 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1593 		return 0;
1594 	}
1595 
1596 	/* sas/sata devices */
1597 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1598 	rphy = dev_to_rphy(starget->dev.parent);
1599 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1600 	   rphy->identify.sas_address);
1601 
1602 	if (sas_device) {
1603 		sas_target_priv_data->handle = sas_device->handle;
1604 		sas_target_priv_data->sas_address = sas_device->sas_address;
1605 		sas_target_priv_data->sas_dev = sas_device;
1606 		sas_device->starget = starget;
1607 		sas_device->id = starget->id;
1608 		sas_device->channel = starget->channel;
1609 		if (test_bit(sas_device->handle, ioc->pd_handles))
1610 			sas_target_priv_data->flags |=
1611 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
1612 		if (sas_device->fast_path)
1613 			sas_target_priv_data->flags |=
1614 					MPT_TARGET_FASTPATH_IO;
1615 	}
1616 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1617 
1618 	return 0;
1619 }
1620 
1621 /**
1622  * scsih_target_destroy - target destroy routine
1623  * @starget: scsi target struct
1624  */
1625 static void
1626 scsih_target_destroy(struct scsi_target *starget)
1627 {
1628 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1629 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1630 	struct MPT3SAS_TARGET *sas_target_priv_data;
1631 	struct _sas_device *sas_device;
1632 	struct _raid_device *raid_device;
1633 	struct _pcie_device *pcie_device;
1634 	unsigned long flags;
1635 
1636 	sas_target_priv_data = starget->hostdata;
1637 	if (!sas_target_priv_data)
1638 		return;
1639 
1640 	if (starget->channel == RAID_CHANNEL) {
1641 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1642 		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1643 		    starget->channel);
1644 		if (raid_device) {
1645 			raid_device->starget = NULL;
1646 			raid_device->sdev = NULL;
1647 		}
1648 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1649 		goto out;
1650 	}
1651 
1652 	if (starget->channel == PCIE_CHANNEL) {
1653 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1654 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1655 							sas_target_priv_data);
1656 		if (pcie_device && (pcie_device->starget == starget) &&
1657 			(pcie_device->id == starget->id) &&
1658 			(pcie_device->channel == starget->channel))
1659 			pcie_device->starget = NULL;
1660 
1661 		if (pcie_device) {
1662 			/*
			 * Corresponding get() is in scsih_target_alloc()
1664 			 */
1665 			sas_target_priv_data->pcie_dev = NULL;
1666 			pcie_device_put(pcie_device);
1667 			pcie_device_put(pcie_device);
1668 		}
1669 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1670 		goto out;
1671 	}
1672 
1673 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1674 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1675 	if (sas_device && (sas_device->starget == starget) &&
1676 	    (sas_device->id == starget->id) &&
1677 	    (sas_device->channel == starget->channel))
1678 		sas_device->starget = NULL;
1679 
1680 	if (sas_device) {
1681 		/*
		 * Corresponding get() is in scsih_target_alloc()
1683 		 */
1684 		sas_target_priv_data->sas_dev = NULL;
1685 		sas_device_put(sas_device);
1686 
1687 		sas_device_put(sas_device);
1688 	}
1689 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1690 
1691  out:
1692 	kfree(sas_target_priv_data);
1693 	starget->hostdata = NULL;
1694 }
1695 
1696 /**
1697  * scsih_slave_alloc - device add routine
1698  * @sdev: scsi device struct
1699  *
1700  * Return: 0 if ok. Any other return is assumed to be an error and
1701  * the device is ignored.
1702  */
1703 static int
1704 scsih_slave_alloc(struct scsi_device *sdev)
1705 {
1706 	struct Scsi_Host *shost;
1707 	struct MPT3SAS_ADAPTER *ioc;
1708 	struct MPT3SAS_TARGET *sas_target_priv_data;
1709 	struct MPT3SAS_DEVICE *sas_device_priv_data;
1710 	struct scsi_target *starget;
1711 	struct _raid_device *raid_device;
1712 	struct _sas_device *sas_device;
1713 	struct _pcie_device *pcie_device;
1714 	unsigned long flags;
1715 
1716 	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1717 				       GFP_KERNEL);
1718 	if (!sas_device_priv_data)
1719 		return -ENOMEM;
1720 
1721 	sas_device_priv_data->lun = sdev->lun;
1722 	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1723 
1724 	starget = scsi_target(sdev);
1725 	sas_target_priv_data = starget->hostdata;
1726 	sas_target_priv_data->num_luns++;
1727 	sas_device_priv_data->sas_target = sas_target_priv_data;
1728 	sdev->hostdata = sas_device_priv_data;
1729 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1730 		sdev->no_uld_attach = 1;
1731 
1732 	shost = dev_to_shost(&starget->dev);
1733 	ioc = shost_priv(shost);
1734 	if (starget->channel == RAID_CHANNEL) {
1735 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
1736 		raid_device = _scsih_raid_device_find_by_id(ioc,
1737 		    starget->id, starget->channel);
1738 		if (raid_device)
1739 			raid_device->sdev = sdev; /* raid is single lun */
1740 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1741 	}
1742 	if (starget->channel == PCIE_CHANNEL) {
1743 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1744 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1745 				sas_target_priv_data->sas_address);
1746 		if (pcie_device && (pcie_device->starget == NULL)) {
1747 			sdev_printk(KERN_INFO, sdev,
1748 			    "%s : pcie_device->starget set to starget @ %d\n",
1749 			    __func__, __LINE__);
1750 			pcie_device->starget = starget;
1751 		}
1752 
1753 		if (pcie_device)
1754 			pcie_device_put(pcie_device);
1755 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1756 
1757 	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1758 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1759 		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1760 					sas_target_priv_data->sas_address);
1761 		if (sas_device && (sas_device->starget == NULL)) {
1762 			sdev_printk(KERN_INFO, sdev,
1763 			"%s : sas_device->starget set to starget @ %d\n",
1764 			     __func__, __LINE__);
1765 			sas_device->starget = starget;
1766 		}
1767 
1768 		if (sas_device)
1769 			sas_device_put(sas_device);
1770 
1771 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1772 	}
1773 
1774 	return 0;
1775 }
1776 
1777 /**
1778  * scsih_slave_destroy - device destroy routine
1779  * @sdev: scsi device struct
1780  */
1781 static void
1782 scsih_slave_destroy(struct scsi_device *sdev)
1783 {
1784 	struct MPT3SAS_TARGET *sas_target_priv_data;
1785 	struct scsi_target *starget;
1786 	struct Scsi_Host *shost;
1787 	struct MPT3SAS_ADAPTER *ioc;
1788 	struct _sas_device *sas_device;
1789 	struct _pcie_device *pcie_device;
1790 	unsigned long flags;
1791 
1792 	if (!sdev->hostdata)
1793 		return;
1794 
1795 	starget = scsi_target(sdev);
1796 	sas_target_priv_data = starget->hostdata;
1797 	sas_target_priv_data->num_luns--;
1798 
1799 	shost = dev_to_shost(&starget->dev);
1800 	ioc = shost_priv(shost);
1801 
1802 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1803 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1804 		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1805 				sas_target_priv_data);
1806 		if (pcie_device && !sas_target_priv_data->num_luns)
1807 			pcie_device->starget = NULL;
1808 
1809 		if (pcie_device)
1810 			pcie_device_put(pcie_device);
1811 
1812 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1813 
1814 	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1815 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
1816 		sas_device = __mpt3sas_get_sdev_from_target(ioc,
1817 				sas_target_priv_data);
1818 		if (sas_device && !sas_target_priv_data->num_luns)
1819 			sas_device->starget = NULL;
1820 
1821 		if (sas_device)
1822 			sas_device_put(sas_device);
1823 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1824 	}
1825 
1826 	kfree(sdev->hostdata);
1827 	sdev->hostdata = NULL;
1828 }
1829 
1830 /**
1831  * _scsih_display_sata_capabilities - sata capabilities
1832  * @ioc: per adapter object
1833  * @handle: device handle
1834  * @sdev: scsi device struct
1835  */
1836 static void
1837 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1838 	u16 handle, struct scsi_device *sdev)
1839 {
1840 	Mpi2ConfigReply_t mpi_reply;
1841 	Mpi2SasDevicePage0_t sas_device_pg0;
1842 	u32 ioc_status;
1843 	u16 flags;
1844 	u32 device_info;
1845 
1846 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1847 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1848 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1849 			__FILE__, __LINE__, __func__);
1850 		return;
1851 	}
1852 
1853 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1854 	    MPI2_IOCSTATUS_MASK;
1855 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1856 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1857 			__FILE__, __LINE__, __func__);
1858 		return;
1859 	}
1860 
1861 	flags = le16_to_cpu(sas_device_pg0.Flags);
1862 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1863 
1864 	sdev_printk(KERN_INFO, sdev,
1865 	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1866 	    "sw_preserve(%s)\n",
1867 	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1868 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1869 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1870 	    "n",
1871 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1872 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1873 	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1874 }
1875 
1876 /*
1877  * raid transport support -
 * Enabled for SLES11 and newer. In older kernels the driver will panic when
 * unloading the driver followed by a load - I believe that the subroutine
 * raid_class_release() is not cleaning up properly.
1881  */
1882 
1883 /**
 * scsih_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 *
 * Return: 1 if the device is a raid volume, else 0.
 */
1887 static int
1888 scsih_is_raid(struct device *dev)
1889 {
1890 	struct scsi_device *sdev = to_scsi_device(dev);
1891 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1892 
1893 	if (ioc->is_warpdrive)
1894 		return 0;
1895 	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1896 }
1897 
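/**
 * scsih_is_nvme - return boolean indicating device is an NVMe drive
 * @dev: the device struct object
 *
 * Return: 1 if the device sits on the PCIe channel, else 0.
 */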
1898 static int
1899 scsih_is_nvme(struct device *dev)
1900 {
1901 	struct scsi_device *sdev = to_scsi_device(dev);
1902 
1903 	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1904 }
1905 
1906 /**
1907  * scsih_get_resync - get raid volume resync percent complete
1908  * @dev: the device struct object
1909  */
1910 static void
1911 scsih_get_resync(struct device *dev)
1912 {
1913 	struct scsi_device *sdev = to_scsi_device(dev);
1914 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
1916 	unsigned long flags;
1917 	Mpi2RaidVolPage0_t vol_pg0;
1918 	Mpi2ConfigReply_t mpi_reply;
1919 	u32 volume_status_flags;
1920 	u8 percent_complete;
1921 	u16 handle;
1922 
1923 	percent_complete = 0;
1924 	handle = 0;
1925 	if (ioc->is_warpdrive)
1926 		goto out;
1927 
1928 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1929 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1930 	    sdev->channel);
1931 	if (raid_device) {
1932 		handle = raid_device->handle;
1933 		percent_complete = raid_device->percent_complete;
1934 	}
1935 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1936 
1937 	if (!handle)
1938 		goto out;
1939 
1940 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1941 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1942 	     sizeof(Mpi2RaidVolPage0_t))) {
1943 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1944 			__FILE__, __LINE__, __func__);
1945 		percent_complete = 0;
1946 		goto out;
1947 	}
1948 
1949 	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
1950 	if (!(volume_status_flags &
1951 	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
1952 		percent_complete = 0;
1953 
1954  out:
1955 
1956 	switch (ioc->hba_mpi_version_belonged) {
1957 	case MPI2_VERSION:
1958 		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
1959 		break;
1960 	case MPI25_VERSION:
1961 	case MPI26_VERSION:
1962 		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
1963 		break;
1964 	}
1965 }
1966 
1967 /**
 * scsih_get_state - get raid volume state
1969  * @dev: the device struct object
1970  */
1971 static void
1972 scsih_get_state(struct device *dev)
1973 {
1974 	struct scsi_device *sdev = to_scsi_device(dev);
1975 	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
	struct _raid_device *raid_device;
1977 	unsigned long flags;
1978 	Mpi2RaidVolPage0_t vol_pg0;
1979 	Mpi2ConfigReply_t mpi_reply;
1980 	u32 volstate;
1981 	enum raid_state state = RAID_STATE_UNKNOWN;
1982 	u16 handle = 0;
1983 
1984 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1985 	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1986 	    sdev->channel);
1987 	if (raid_device)
1988 		handle = raid_device->handle;
1989 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1990 
1991 	if (!raid_device)
1992 		goto out;
1993 
1994 	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1995 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1996 	     sizeof(Mpi2RaidVolPage0_t))) {
1997 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
1998 			__FILE__, __LINE__, __func__);
1999 		goto out;
2000 	}
2001 
2002 	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2003 	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2004 		state = RAID_STATE_RESYNCING;
2005 		goto out;
2006 	}
2007 
2008 	switch (vol_pg0.VolumeState) {
2009 	case MPI2_RAID_VOL_STATE_OPTIMAL:
2010 	case MPI2_RAID_VOL_STATE_ONLINE:
2011 		state = RAID_STATE_ACTIVE;
2012 		break;
	case MPI2_RAID_VOL_STATE_DEGRADED:
2014 		state = RAID_STATE_DEGRADED;
2015 		break;
2016 	case MPI2_RAID_VOL_STATE_FAILED:
2017 	case MPI2_RAID_VOL_STATE_MISSING:
2018 		state = RAID_STATE_OFFLINE;
2019 		break;
2020 	}
2021  out:
2022 	switch (ioc->hba_mpi_version_belonged) {
2023 	case MPI2_VERSION:
2024 		raid_set_state(mpt2sas_raid_template, dev, state);
2025 		break;
2026 	case MPI25_VERSION:
2027 	case MPI26_VERSION:
2028 		raid_set_state(mpt3sas_raid_template, dev, state);
2029 		break;
2030 	}
2031 }
2032 
2033 /**
2034  * _scsih_set_level - set raid level
 * @ioc: per adapter object
2036  * @sdev: scsi device struct
2037  * @volume_type: volume type
2038  */
2039 static void
2040 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2041 	struct scsi_device *sdev, u8 volume_type)
2042 {
2043 	enum raid_level level = RAID_LEVEL_UNKNOWN;
2044 
2045 	switch (volume_type) {
2046 	case MPI2_RAID_VOL_TYPE_RAID0:
2047 		level = RAID_LEVEL_0;
2048 		break;
2049 	case MPI2_RAID_VOL_TYPE_RAID10:
2050 		level = RAID_LEVEL_10;
2051 		break;
2052 	case MPI2_RAID_VOL_TYPE_RAID1E:
2053 		level = RAID_LEVEL_1E;
2054 		break;
2055 	case MPI2_RAID_VOL_TYPE_RAID1:
2056 		level = RAID_LEVEL_1;
2057 		break;
2058 	}
2059 
2060 	switch (ioc->hba_mpi_version_belonged) {
2061 	case MPI2_VERSION:
2062 		raid_set_level(mpt2sas_raid_template,
2063 			&sdev->sdev_gendev, level);
2064 		break;
2065 	case MPI25_VERSION:
2066 	case MPI26_VERSION:
2067 		raid_set_level(mpt3sas_raid_template,
2068 			&sdev->sdev_gendev, level);
2069 		break;
2070 	}
2071 }
2072 
2073 
2074 /**
2075  * _scsih_get_volume_capabilities - volume capabilities
2076  * @ioc: per adapter object
2077  * @raid_device: the raid_device object
2078  *
2079  * Return: 0 for success, else 1
2080  */
2081 static int
2082 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2083 	struct _raid_device *raid_device)
2084 {
2085 	Mpi2RaidVolPage0_t *vol_pg0;
2086 	Mpi2RaidPhysDiskPage0_t pd_pg0;
2087 	Mpi2SasDevicePage0_t sas_device_pg0;
2088 	Mpi2ConfigReply_t mpi_reply;
2089 	u16 sz;
2090 	u8 num_pds;
2091 
2092 	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2093 	    &num_pds)) || !num_pds) {
2094 		dfailprintk(ioc,
2095 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2096 				     __FILE__, __LINE__, __func__));
2097 		return 1;
2098 	}
2099 
2100 	raid_device->num_pds = num_pds;
2101 	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2102 	    sizeof(Mpi2RaidVol0PhysDisk_t));
2103 	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2104 	if (!vol_pg0) {
2105 		dfailprintk(ioc,
2106 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2107 				     __FILE__, __LINE__, __func__));
2108 		return 1;
2109 	}
2110 
2111 	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2112 	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2113 		dfailprintk(ioc,
2114 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2115 				     __FILE__, __LINE__, __func__));
2116 		kfree(vol_pg0);
2117 		return 1;
2118 	}
2119 
2120 	raid_device->volume_type = vol_pg0->VolumeType;
2121 
2122 	/* figure out what the underlying devices are by
2123 	 * obtaining the device_info bits for the 1st device
2124 	 */
2125 	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2126 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2127 	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2128 		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2129 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2130 		    le16_to_cpu(pd_pg0.DevHandle)))) {
2131 			raid_device->device_info =
2132 			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2133 		}
2134 	}
2135 
2136 	kfree(vol_pg0);
2137 	return 0;
2138 }
2139 
2140 /**
2141  * _scsih_enable_tlr - setting TLR flags
2142  * @ioc: per adapter object
2143  * @sdev: scsi device struct
2144  *
 * Enable Transaction Layer Retries for tape devices when
 * VPD page 0x90 is present.
2147  *
2148  */
2149 static void
2150 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2151 {
2152 
2153 	/* only for TAPE */
2154 	if (sdev->type != TYPE_TAPE)
2155 		return;
2156 
2157 	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2158 		return;
2159 
2160 	sas_enable_tlr(sdev);
2161 	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2162 	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
}
2166 
2167 /**
2168  * scsih_slave_configure - device configure routine.
2169  * @sdev: scsi device struct
2170  *
2171  * Return: 0 if ok. Any other return is assumed to be an error and
2172  * the device is ignored.
2173  */
2174 static int
2175 scsih_slave_configure(struct scsi_device *sdev)
2176 {
2177 	struct Scsi_Host *shost = sdev->host;
2178 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2179 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2180 	struct MPT3SAS_TARGET *sas_target_priv_data;
2181 	struct _sas_device *sas_device;
2182 	struct _pcie_device *pcie_device;
2183 	struct _raid_device *raid_device;
2184 	unsigned long flags;
2185 	int qdepth;
2186 	u8 ssp_target = 0;
2187 	char *ds = "";
2188 	char *r_level = "";
2189 	u16 handle, volume_handle = 0;
2190 	u64 volume_wwid = 0;
2191 
2192 	qdepth = 1;
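	/* qdepth of 1 is a conservative default; each branch below raises it
	 * once the device type is known */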
2193 	sas_device_priv_data = sdev->hostdata;
2194 	sas_device_priv_data->configured_lun = 1;
2195 	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2196 	sas_target_priv_data = sas_device_priv_data->sas_target;
2197 	handle = sas_target_priv_data->handle;
2198 
2199 	/* raid volume handling */
2200 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2201 
2202 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
2203 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2204 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2205 		if (!raid_device) {
2206 			dfailprintk(ioc,
2207 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2208 					     __FILE__, __LINE__, __func__));
2209 			return 1;
2210 		}
2211 
2212 		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2213 			dfailprintk(ioc,
2214 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2215 					     __FILE__, __LINE__, __func__));
2216 			return 1;
2217 		}
2218 
2219 		/*
2220 		 * WARPDRIVE: Initialize the required data for Direct IO
2221 		 */
2222 		mpt3sas_init_warpdrive_properties(ioc, raid_device);
2223 
2224 		/* RAID Queue Depth Support
2225 		 * IS volume = underlying qdepth of drive type, either
2226 		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2227 		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2228 		 */
2229 		if (raid_device->device_info &
2230 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2231 			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2232 			ds = "SSP";
2233 		} else {
2234 			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2235 			if (raid_device->device_info &
2236 			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2237 				ds = "SATA";
2238 			else
2239 				ds = "STP";
2240 		}
2241 
2242 		switch (raid_device->volume_type) {
2243 		case MPI2_RAID_VOL_TYPE_RAID0:
2244 			r_level = "RAID0";
2245 			break;
2246 		case MPI2_RAID_VOL_TYPE_RAID1E:
2247 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2248 			if (ioc->manu_pg10.OEMIdentifier &&
2249 			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2250 			    MFG10_GF0_R10_DISPLAY) &&
2251 			    !(raid_device->num_pds % 2))
2252 				r_level = "RAID10";
2253 			else
2254 				r_level = "RAID1E";
2255 			break;
2256 		case MPI2_RAID_VOL_TYPE_RAID1:
2257 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2258 			r_level = "RAID1";
2259 			break;
2260 		case MPI2_RAID_VOL_TYPE_RAID10:
2261 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2262 			r_level = "RAID10";
2263 			break;
2264 		case MPI2_RAID_VOL_TYPE_UNKNOWN:
2265 		default:
2266 			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2267 			r_level = "RAIDX";
2268 			break;
2269 		}
2270 
2271 		if (!ioc->hide_ir_msg)
2272 			sdev_printk(KERN_INFO, sdev,
2273 			   "%s: handle(0x%04x), wwid(0x%016llx),"
2274 			    " pd_count(%d), type(%s)\n",
2275 			    r_level, raid_device->handle,
2276 			    (unsigned long long)raid_device->wwid,
2277 			    raid_device->num_pds, ds);
2278 
2279 		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2280 			blk_queue_max_hw_sectors(sdev->request_queue,
2281 						MPT3SAS_RAID_MAX_SECTORS);
2282 			sdev_printk(KERN_INFO, sdev,
2283 					"Set queue's max_sector to: %u\n",
2284 						MPT3SAS_RAID_MAX_SECTORS);
2285 		}
2286 
2287 		scsih_change_queue_depth(sdev, qdepth);
2288 
2289 		/* raid transport support */
2290 		if (!ioc->is_warpdrive)
2291 			_scsih_set_level(ioc, sdev, raid_device->volume_type);
2292 		return 0;
2293 	}
2294 
2295 	/* non-raid handling */
2296 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2297 		if (mpt3sas_config_get_volume_handle(ioc, handle,
2298 		    &volume_handle)) {
2299 			dfailprintk(ioc,
2300 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2301 					     __FILE__, __LINE__, __func__));
2302 			return 1;
2303 		}
2304 		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2305 		    volume_handle, &volume_wwid)) {
2306 			dfailprintk(ioc,
2307 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2308 					     __FILE__, __LINE__, __func__));
2309 			return 1;
2310 		}
2311 	}
2312 
2313 	/* PCIe handling */
2314 	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2315 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2316 		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2317 				sas_device_priv_data->sas_target->sas_address);
2318 		if (!pcie_device) {
2319 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2320 			dfailprintk(ioc,
2321 				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2322 					     __FILE__, __LINE__, __func__));
2323 			return 1;
2324 		}
2325 
2326 		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2327 		ds = "NVMe";
2328 		sdev_printk(KERN_INFO, sdev,
2329 			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2330 			ds, handle, (unsigned long long)pcie_device->wwid,
2331 			pcie_device->port_num);
2332 		if (pcie_device->enclosure_handle != 0)
2333 			sdev_printk(KERN_INFO, sdev,
2334 			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
2335 			ds,
2336 			(unsigned long long)pcie_device->enclosure_logical_id,
2337 			pcie_device->slot);
2338 		if (pcie_device->connector_name[0] != '\0')
2339 			sdev_printk(KERN_INFO, sdev,
2340 				"%s: enclosure level(0x%04x),"
2341 				"connector name( %s)\n", ds,
2342 				pcie_device->enclosure_level,
2343 				pcie_device->connector_name);
2344 
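		/* nvme_mdts is the max data transfer size in bytes; convert
		 * it to 512-byte sectors for the block layer */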
2345 		if (pcie_device->nvme_mdts)
2346 			blk_queue_max_hw_sectors(sdev->request_queue,
2347 					pcie_device->nvme_mdts/512);
2348 
2349 		pcie_device_put(pcie_device);
2350 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2351 		scsih_change_queue_depth(sdev, qdepth);
		/*
		 * Set QUEUE_FLAG_NOMERGES so that IOs are not merged;
		 * merging could otherwise create holes (SG gaps) in the
		 * request.
		 */
2356 		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2357 				sdev->request_queue);
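		/* keep scatter/gather segments within the controller page
		 * size so NVMe PRP entries can be built without gaps */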
2358 		blk_queue_virt_boundary(sdev->request_queue,
2359 				ioc->page_size - 1);
2360 		return 0;
2361 	}
2362 
2363 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
2364 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2365 	   sas_device_priv_data->sas_target->sas_address);
2366 	if (!sas_device) {
2367 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2368 		dfailprintk(ioc,
2369 			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2370 				     __FILE__, __LINE__, __func__));
2371 		return 1;
2372 	}
2373 
2374 	sas_device->volume_handle = volume_handle;
2375 	sas_device->volume_wwid = volume_wwid;
2376 	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2377 		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2378 		ssp_target = 1;
2379 		if (sas_device->device_info &
2380 				MPI2_SAS_DEVICE_INFO_SEP) {
2381 			sdev_printk(KERN_WARNING, sdev,
2382 			"set ignore_delay_remove for handle(0x%04x)\n",
2383 			sas_device_priv_data->sas_target->handle);
2384 			sas_device_priv_data->ignore_delay_remove = 1;
2385 			ds = "SES";
2386 		} else
2387 			ds = "SSP";
2388 	} else {
2389 		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2390 		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2391 			ds = "STP";
2392 		else if (sas_device->device_info &
2393 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2394 			ds = "SATA";
2395 	}
2396 
2397 	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2398 	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2399 	    ds, handle, (unsigned long long)sas_device->sas_address,
2400 	    sas_device->phy, (unsigned long long)sas_device->device_name);
2401 
2402 	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2403 
2404 	sas_device_put(sas_device);
2405 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2406 
2407 	if (!ssp_target)
2408 		_scsih_display_sata_capabilities(ioc, handle, sdev);
2409 
2410 
2411 	scsih_change_queue_depth(sdev, qdepth);
2412 
2413 	if (ssp_target) {
2414 		sas_read_port_mode_page(sdev);
2415 		_scsih_enable_tlr(ioc, sdev);
2416 	}
2417 
2418 	return 0;
2419 }
2420 
2421 /**
2422  * scsih_bios_param - fetch head, sector, cylinder info for a disk
2423  * @sdev: scsi device struct
2424  * @bdev: pointer to block device context
2425  * @capacity: device size (in 512 byte sectors)
2426  * @params: three element array to place output:
2427  *              params[0] number of heads (max 255)
2428  *              params[1] number of sectors (max 63)
2429  *              params[2] number of cylinders
2430  */
2431 static int
2432 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2433 	sector_t capacity, int params[])
2434 {
2435 	int		heads;
2436 	int		sectors;
2437 	sector_t	cylinders;
2438 	ulong		dummy;
2439 
2440 	heads = 64;
2441 	sectors = 32;
2442 
2443 	dummy = heads * sectors;
2444 	cylinders = capacity;
2445 	sector_div(cylinders, dummy);
2446 
2447 	/*
2448 	 * Handle extended translation size for logical drives
	 * > 1GB
2450 	 */
2451 	if ((ulong)capacity >= 0x200000) {
2452 		heads = 255;
2453 		sectors = 63;
2454 		dummy = heads * sectors;
2455 		cylinders = capacity;
2456 		sector_div(cylinders, dummy);
2457 	}
2458 
2459 	/* return result */
2460 	params[0] = heads;
2461 	params[1] = sectors;
2462 	params[2] = cylinders;
2463 
2464 	return 0;
2465 }
2466 
2467 /**
2468  * _scsih_response_code - translation of device response code
2469  * @ioc: per adapter object
2470  * @response_code: response code returned by the device
2471  */
2472 static void
2473 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2474 {
2475 	char *desc;
2476 
2477 	switch (response_code) {
2478 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2479 		desc = "task management request completed";
2480 		break;
2481 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2482 		desc = "invalid frame";
2483 		break;
2484 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2485 		desc = "task management request not supported";
2486 		break;
2487 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2488 		desc = "task management request failed";
2489 		break;
2490 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2491 		desc = "task management request succeeded";
2492 		break;
2493 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2494 		desc = "invalid lun";
2495 		break;
2496 	case 0xA:
2497 		desc = "overlapped tag attempted";
2498 		break;
2499 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2500 		desc = "task queued, however not sent to target";
2501 		break;
2502 	default:
2503 		desc = "unknown";
2504 		break;
2505 	}
2506 	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2507 }
2508 
2509 /**
2510  * _scsih_tm_done - tm completion routine
2511  * @ioc: per adapter object
2512  * @smid: system request message index
2513  * @msix_index: MSIX table index supplied by the OS
2514  * @reply: reply message frame(lower 32bit addr)
2515  * Context: none.
2516  *
2517  * The callback handler when using scsih_issue_tm.
2518  *
2519  * Return: 1 meaning mf should be freed from _base_interrupt
2520  *         0 means the mf is freed from this function.
2521  */
2522 static u8
2523 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2524 {
2525 	MPI2DefaultReply_t *mpi_reply;
2526 
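	/* ignore replies that are not for the currently outstanding tm */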
2527 	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2528 		return 1;
2529 	if (ioc->tm_cmds.smid != smid)
2530 		return 1;
2531 	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2532 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
2533 	if (mpi_reply) {
2534 		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2535 		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2536 	}
2537 	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2538 	complete(&ioc->tm_cmds.done);
2539 	return 1;
2540 }
2541 
2542 /**
2543  * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2544  * @ioc: per adapter object
2545  * @handle: device handle
2546  *
 * During a task management request, we need to freeze the device queue.
2548  */
2549 void
2550 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2551 {
2552 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2553 	struct scsi_device *sdev;
2554 	u8 skip = 0;
2555 
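	/* the handle identifies a single target; once its tm_busy is set,
	 * the remaining devices on the host can be skipped */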
2556 	shost_for_each_device(sdev, ioc->shost) {
2557 		if (skip)
2558 			continue;
2559 		sas_device_priv_data = sdev->hostdata;
2560 		if (!sas_device_priv_data)
2561 			continue;
2562 		if (sas_device_priv_data->sas_target->handle == handle) {
2563 			sas_device_priv_data->sas_target->tm_busy = 1;
2564 			skip = 1;
2565 			ioc->ignore_loginfos = 1;
2566 		}
2567 	}
2568 }
2569 
2570 /**
2571  * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2572  * @ioc: per adapter object
2573  * @handle: device handle
2574  *
 * During a task management request, we need to freeze the device queue.
2576  */
2577 void
2578 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2579 {
2580 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2581 	struct scsi_device *sdev;
2582 	u8 skip = 0;
2583 
2584 	shost_for_each_device(sdev, ioc->shost) {
2585 		if (skip)
2586 			continue;
2587 		sas_device_priv_data = sdev->hostdata;
2588 		if (!sas_device_priv_data)
2589 			continue;
2590 		if (sas_device_priv_data->sas_target->handle == handle) {
2591 			sas_device_priv_data->sas_target->tm_busy = 0;
2592 			skip = 1;
2593 			ioc->ignore_loginfos = 0;
2594 		}
2595 	}
2596 }
2597 
2598 /**
2599  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2600  * @ioc: per adapter struct
2601  * @handle: device handle
2602  * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in mpi2_init.h)
2604  * @smid_task: smid assigned to the task
2605  * @msix_task: MSIX table index supplied by the OS
2606  * @timeout: timeout in seconds
2607  * @tr_method: Target Reset Method
2608  * Context: user
2609  *
2610  * A generic API for sending task management requests to firmware.
2611  *
2612  * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible for checking for outstanding commands.
2614  *
2615  * Return: SUCCESS or FAILED.
2616  */
2617 int
2618 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2619 	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2620 {
2621 	Mpi2SCSITaskManagementRequest_t *mpi_request;
2622 	Mpi2SCSITaskManagementReply_t *mpi_reply;
2623 	u16 smid = 0;
2624 	u32 ioc_state;
2625 	int rc;
2626 
2627 	lockdep_assert_held(&ioc->tm_cmds.mutex);
2628 
2629 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2630 		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2631 		return FAILED;
2632 	}
2633 
2634 	if (ioc->shost_recovery || ioc->remove_host ||
2635 	    ioc->pci_error_recovery) {
2636 		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2637 		return FAILED;
2638 	}
2639 
2640 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
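	/* a busy doorbell or a faulted IOC cannot accept a TM request, so
	 * escalate directly to a full host reset */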
2641 	if (ioc_state & MPI2_DOORBELL_USED) {
2642 		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2643 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2644 		return (!rc) ? SUCCESS : FAILED;
2645 	}
2646 
2647 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2648 		mpt3sas_base_fault_info(ioc, ioc_state &
2649 		    MPI2_DOORBELL_DATA_MASK);
2650 		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2651 		return (!rc) ? SUCCESS : FAILED;
2652 	}
2653 
2654 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2655 	if (!smid) {
2656 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2657 		return FAILED;
2658 	}
2659 
2660 	dtmprintk(ioc,
2661 		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2662 			   handle, type, smid_task, timeout, tr_method));
2663 	ioc->tm_cmds.status = MPT3_CMD_PENDING;
2664 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2665 	ioc->tm_cmds.smid = smid;
2666 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2667 	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2668 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2669 	mpi_request->DevHandle = cpu_to_le16(handle);
2670 	mpi_request->TaskType = type;
2671 	mpi_request->MsgFlags = tr_method;
2672 	mpi_request->TaskMID = cpu_to_le16(smid_task);
2673 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2674 	mpt3sas_scsih_set_tm_flag(ioc, handle);
2675 	init_completion(&ioc->tm_cmds.done);
2676 	mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
2677 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2678 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2679 		if (mpt3sas_base_check_cmd_timeout(ioc,
2680 			ioc->tm_cmds.status, mpi_request,
2681 			sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2682 			rc = mpt3sas_base_hard_reset_handler(ioc,
2683 					FORCE_BIG_HAMMER);
2684 			rc = (!rc) ? SUCCESS : FAILED;
2685 			goto out;
2686 		}
2687 	}
2688 
2689 	/* sync IRQs in case those were busy during flush. */
2690 	mpt3sas_base_sync_reply_irqs(ioc);
2691 
2692 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2693 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2694 		mpi_reply = ioc->tm_cmds.reply;
2695 		dtmprintk(ioc,
2696 			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2697 				   le16_to_cpu(mpi_reply->IOCStatus),
2698 				   le32_to_cpu(mpi_reply->IOCLogInfo),
2699 				   le32_to_cpu(mpi_reply->TerminationCount)));
2700 		if (ioc->logging_level & MPT_DEBUG_TM) {
2701 			_scsih_response_code(ioc, mpi_reply->ResponseCode);
2702 			if (mpi_reply->IOCStatus)
2703 				_debug_dump_mf(mpi_request,
2704 				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2705 		}
2706 	}
2707 	rc = SUCCESS;
2708 
2709 out:
2710 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
2711 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2712 	return rc;
2713 }
2714 
2715 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2716 		u64 lun, u8 type, u16 smid_task, u16 msix_task,
2717 		u8 timeout, u8 tr_method)
2718 {
2719 	int ret;
2720 
2721 	mutex_lock(&ioc->tm_cmds.mutex);
2722 	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2723 			msix_task, timeout, tr_method);
2724 	mutex_unlock(&ioc->tm_cmds.mutex);
2725 
2726 	return ret;
2727 }
2728 
2729 /**
2730  * _scsih_tm_display_info - displays info about the device
2731  * @ioc: per adapter struct
2732  * @scmd: pointer to scsi command object
2733  *
2734  * Called by task management callback handlers.
2735  */
2736 static void
2737 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2738 {
2739 	struct scsi_target *starget = scmd->device->sdev_target;
2740 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2741 	struct _sas_device *sas_device = NULL;
2742 	struct _pcie_device *pcie_device = NULL;
2743 	unsigned long flags;
2744 	char *device_str = NULL;
2745 
2746 	if (!priv_target)
2747 		return;
2748 	if (ioc->hide_ir_msg)
2749 		device_str = "WarpDrive";
2750 	else
2751 		device_str = "volume";
2752 
2753 	scsi_print_command(scmd);
2754 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2755 		starget_printk(KERN_INFO, starget,
2756 			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
2757 			device_str, priv_target->handle,
2758 		    device_str, (unsigned long long)priv_target->sas_address);
2759 
2760 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2761 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2762 		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2763 		if (pcie_device) {
2764 			starget_printk(KERN_INFO, starget,
2765 				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2766 				pcie_device->handle,
2767 				(unsigned long long)pcie_device->wwid,
2768 				pcie_device->port_num);
2769 			if (pcie_device->enclosure_handle != 0)
2770 				starget_printk(KERN_INFO, starget,
2771 					"enclosure logical id(0x%016llx), slot(%d)\n",
2772 					(unsigned long long)
2773 					pcie_device->enclosure_logical_id,
2774 					pcie_device->slot);
2775 			if (pcie_device->connector_name[0] != '\0')
2776 				starget_printk(KERN_INFO, starget,
2777 					"enclosure level(0x%04x), connector name( %s)\n",
2778 					pcie_device->enclosure_level,
2779 					pcie_device->connector_name);
2780 			pcie_device_put(pcie_device);
2781 		}
2782 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2783 
2784 	} else {
2785 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
2786 		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2787 		if (sas_device) {
2788 			if (priv_target->flags &
2789 			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2790 				starget_printk(KERN_INFO, starget,
2791 				    "volume handle(0x%04x), "
2792 				    "volume wwid(0x%016llx)\n",
2793 				    sas_device->volume_handle,
2794 				   (unsigned long long)sas_device->volume_wwid);
2795 			}
2796 			starget_printk(KERN_INFO, starget,
2797 			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2798 			    sas_device->handle,
2799 			    (unsigned long long)sas_device->sas_address,
2800 			    sas_device->phy);
2801 
2802 			_scsih_display_enclosure_chassis_info(NULL, sas_device,
2803 			    NULL, starget);
2804 
2805 			sas_device_put(sas_device);
2806 		}
2807 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2808 	}
2809 }
2810 
2811 /**
2812  * scsih_abort - eh threads main abort routine
2813  * @scmd: pointer to scsi command object
2814  *
2815  * Return: SUCCESS if command aborted else FAILED
2816  */
2817 static int
2818 scsih_abort(struct scsi_cmnd *scmd)
2819 {
2820 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2821 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2822 	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2823 	u16 handle;
2824 	int r;
2825 
2826 	u8 timeout = 30;
2827 	struct _pcie_device *pcie_device = NULL;
2828 	sdev_printk(KERN_INFO, scmd->device,
2829 		"attempting task abort! scmd(%p)\n", scmd);
2830 	_scsih_tm_display_info(ioc, scmd);
2831 
2832 	sas_device_priv_data = scmd->device->hostdata;
2833 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2834 	    ioc->remove_host) {
2835 		sdev_printk(KERN_INFO, scmd->device,
2836 			"device been deleted! scmd(%p)\n", scmd);
2837 		scmd->result = DID_NO_CONNECT << 16;
2838 		scmd->scsi_done(scmd);
2839 		r = SUCCESS;
2840 		goto out;
2841 	}
2842 
2843 	/* check for completed command */
2844 	if (st == NULL || st->cb_idx == 0xFF) {
2845 		scmd->result = DID_RESET << 16;
2846 		r = SUCCESS;
2847 		goto out;
2848 	}
2849 
2850 	/* for hidden raid components and volumes this is not supported */
2851 	if (sas_device_priv_data->sas_target->flags &
2852 	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
2853 	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2854 		scmd->result = DID_RESET << 16;
2855 		r = FAILED;
2856 		goto out;
2857 	}
2858 
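	/* debug aid: halts the firmware only when fwfault_debug is enabled */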
2859 	mpt3sas_halt_firmware(ioc);
2860 
2861 	handle = sas_device_priv_data->sas_target->handle;
2862 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2863 	if (pcie_device && (!ioc->tm_custom_handling))
2864 		timeout = ioc->nvme_abort_timeout;
2865 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2866 		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2867 		st->smid, st->msix_io, timeout, 0);
2868 	/* Command must be cleared after abort */
2869 	if (r == SUCCESS && st->cb_idx != 0xFF)
2870 		r = FAILED;
2871  out:
2872 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2873 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2874 	if (pcie_device)
2875 		pcie_device_put(pcie_device);
2876 	return r;
2877 }
2878 
2879 /**
2880  * scsih_dev_reset - eh threads main device reset routine
2881  * @scmd: pointer to scsi command object
2882  *
 * Return: SUCCESS if the device was reset else FAILED
2884  */
2885 static int
2886 scsih_dev_reset(struct scsi_cmnd *scmd)
2887 {
2888 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2889 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2890 	struct _sas_device *sas_device = NULL;
2891 	struct _pcie_device *pcie_device = NULL;
2892 	u16	handle;
2893 	u8	tr_method = 0;
2894 	u8	tr_timeout = 30;
2895 	int r;
2896 
2897 	struct scsi_target *starget = scmd->device->sdev_target;
2898 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2899 
2900 	sdev_printk(KERN_INFO, scmd->device,
2901 		"attempting device reset! scmd(%p)\n", scmd);
2902 	_scsih_tm_display_info(ioc, scmd);
2903 
2904 	sas_device_priv_data = scmd->device->hostdata;
2905 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2906 	    ioc->remove_host) {
2907 		sdev_printk(KERN_INFO, scmd->device,
2908 			"device been deleted! scmd(%p)\n", scmd);
2909 		scmd->result = DID_NO_CONNECT << 16;
2910 		scmd->scsi_done(scmd);
2911 		r = SUCCESS;
2912 		goto out;
2913 	}
2914 
2915 	/* for hidden raid components obtain the volume_handle */
2916 	handle = 0;
2917 	if (sas_device_priv_data->sas_target->flags &
2918 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2919 		sas_device = mpt3sas_get_sdev_from_target(ioc,
2920 				target_priv_data);
2921 		if (sas_device)
2922 			handle = sas_device->volume_handle;
2923 	} else
2924 		handle = sas_device_priv_data->sas_target->handle;
2925 
2926 	if (!handle) {
2927 		scmd->result = DID_RESET << 16;
2928 		r = FAILED;
2929 		goto out;
2930 	}
2931 
2932 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2933 
2934 	if (pcie_device && (!ioc->tm_custom_handling)) {
2935 		tr_timeout = pcie_device->reset_timeout;
2936 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2937 	} else
2938 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2939 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2940 		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2941 		tr_timeout, tr_method);
2942 	/* Check for busy commands after reset */
2943 	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2944 		r = FAILED;
2945  out:
2946 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
2947 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2948 
2949 	if (sas_device)
2950 		sas_device_put(sas_device);
2951 	if (pcie_device)
2952 		pcie_device_put(pcie_device);
2953 
2954 	return r;
2955 }
2956 
2957 /**
2958  * scsih_target_reset - eh threads main target reset routine
2959  * @scmd: pointer to scsi command object
2960  *
 * Return: SUCCESS if the target was reset else FAILED
2962  */
2963 static int
2964 scsih_target_reset(struct scsi_cmnd *scmd)
2965 {
2966 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2967 	struct MPT3SAS_DEVICE *sas_device_priv_data;
2968 	struct _sas_device *sas_device = NULL;
2969 	struct _pcie_device *pcie_device = NULL;
2970 	u16	handle;
2971 	u8	tr_method = 0;
2972 	u8	tr_timeout = 30;
2973 	int r;
2974 	struct scsi_target *starget = scmd->device->sdev_target;
2975 	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2976 
2977 	starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
2978 		scmd);
2979 	_scsih_tm_display_info(ioc, scmd);
2980 
2981 	sas_device_priv_data = scmd->device->hostdata;
2982 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2983 	    ioc->remove_host) {
2984 		starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
2985 			scmd);
2986 		scmd->result = DID_NO_CONNECT << 16;
2987 		scmd->scsi_done(scmd);
2988 		r = SUCCESS;
2989 		goto out;
2990 	}
2991 
2992 	/* for hidden raid components obtain the volume_handle */
2993 	handle = 0;
2994 	if (sas_device_priv_data->sas_target->flags &
2995 	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
2996 		sas_device = mpt3sas_get_sdev_from_target(ioc,
2997 				target_priv_data);
2998 		if (sas_device)
2999 			handle = sas_device->volume_handle;
3000 	} else
3001 		handle = sas_device_priv_data->sas_target->handle;
3002 
3003 	if (!handle) {
3004 		scmd->result = DID_RESET << 16;
3005 		r = FAILED;
3006 		goto out;
3007 	}
3008 
3009 	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3010 
3011 	if (pcie_device && (!ioc->tm_custom_handling)) {
3012 		tr_timeout = pcie_device->reset_timeout;
3013 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3014 	} else
3015 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3016 	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3017 		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3018 	    tr_timeout, tr_method);
3019 	/* Check for busy commands after reset */
3020 	if (r == SUCCESS && atomic_read(&starget->target_busy))
3021 		r = FAILED;
3022  out:
3023 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3024 	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3025 
3026 	if (sas_device)
3027 		sas_device_put(sas_device);
3028 	if (pcie_device)
3029 		pcie_device_put(pcie_device);
3030 	return r;
3031 }
3032 
3033 
3034 /**
3035  * scsih_host_reset - eh threads main host reset routine
3036  * @scmd: pointer to scsi command object
3037  *
 * Return: SUCCESS if the host was reset else FAILED
3039  */
3040 static int
3041 scsih_host_reset(struct scsi_cmnd *scmd)
3042 {
3043 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3044 	int r, retval;
3045 
3046 	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3047 	scsi_print_command(scmd);
3048 
3049 	if (ioc->is_driver_loading || ioc->remove_host) {
3050 		ioc_info(ioc, "Blocking the host reset\n");
3051 		r = FAILED;
3052 		goto out;
3053 	}
3054 
3055 	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3056 	r = (retval < 0) ? FAILED : SUCCESS;
3057 out:
3058 	ioc_info(ioc, "host reset: %s scmd(%p)\n",
3059 		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3060 
3061 	return r;
3062 }
3063 
3064 /**
3065  * _scsih_fw_event_add - insert and queue up fw_event
3066  * @ioc: per adapter object
3067  * @fw_event: object describing the event
3068  * Context: This function will acquire ioc->fw_event_lock.
3069  *
 * This adds the firmware event object to the linked list and then queues it
 * up to be processed from user context.
3072  */
3073 static void
3074 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3075 {
3076 	unsigned long flags;
3077 
3078 	if (ioc->firmware_event_thread == NULL)
3079 		return;
3080 
3081 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
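	/* get #1: reference held while the event is on fw_event_list */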
3082 	fw_event_work_get(fw_event);
3083 	INIT_LIST_HEAD(&fw_event->list);
3084 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
3085 	INIT_WORK(&fw_event->work, _firmware_event_work);
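	/* get #2: reference held by the queued work */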
3086 	fw_event_work_get(fw_event);
3087 	queue_work(ioc->firmware_event_thread, &fw_event->work);
3088 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3089 }
3090 
3091 /**
3092  * _scsih_fw_event_del_from_list - delete fw_event from the list
3093  * @ioc: per adapter object
3094  * @fw_event: object describing the event
3095  * Context: This function will acquire ioc->fw_event_lock.
3096  *
3097  * If the fw_event is on the fw_event_list, remove it and do a put.
3098  */
3099 static void
3100 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3101 	*fw_event)
3102 {
3103 	unsigned long flags;
3104 
3105 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3106 	if (!list_empty(&fw_event->list)) {
3107 		list_del_init(&fw_event->list);
3108 		fw_event_work_put(fw_event);
3109 	}
3110 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3111 }
3112 
3113 
3114  /**
3115  * mpt3sas_send_trigger_data_event - send event for processing trigger data
3116  * @ioc: per adapter object
3117  * @event_data: trigger event data
3118  */
3119 void
3120 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3121 	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3122 {
3123 	struct fw_event_work *fw_event;
3124 	u16 sz;
3125 
3126 	if (ioc->is_driver_loading)
3127 		return;
3128 	sz = sizeof(*event_data);
3129 	fw_event = alloc_fw_event_work(sz);
3130 	if (!fw_event)
3131 		return;
3132 	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3133 	fw_event->ioc = ioc;
3134 	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3135 	_scsih_fw_event_add(ioc, fw_event);
3136 	fw_event_work_put(fw_event);
3137 }
3138 
3139 /**
3140  * _scsih_error_recovery_delete_devices - remove devices not responding
3141  * @ioc: per adapter object
3142  */
3143 static void
3144 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3145 {
3146 	struct fw_event_work *fw_event;
3147 
3148 	if (ioc->is_driver_loading)
3149 		return;
3150 	fw_event = alloc_fw_event_work(0);
3151 	if (!fw_event)
3152 		return;
3153 	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3154 	fw_event->ioc = ioc;
3155 	_scsih_fw_event_add(ioc, fw_event);
3156 	fw_event_work_put(fw_event);
3157 }
3158 
3159 /**
3160  * mpt3sas_port_enable_complete - port enable completed (fake event)
3161  * @ioc: per adapter object
3162  */
3163 void
3164 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3165 {
3166 	struct fw_event_work *fw_event;
3167 
3168 	fw_event = alloc_fw_event_work(0);
3169 	if (!fw_event)
3170 		return;
3171 	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3172 	fw_event->ioc = ioc;
3173 	_scsih_fw_event_add(ioc, fw_event);
3174 	fw_event_work_put(fw_event);
3175 }
3176 
3177 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3178 {
3179 	unsigned long flags;
3180 	struct fw_event_work *fw_event = NULL;
3181 
3182 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3183 	if (!list_empty(&ioc->fw_event_list)) {
3184 		fw_event = list_first_entry(&ioc->fw_event_list,
3185 				struct fw_event_work, list);
3186 		list_del_init(&fw_event->list);
3187 	}
3188 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3189 
3190 	return fw_event;
3191 }
3192 
3193 /**
3194  * _scsih_fw_event_cleanup_queue - cleanup event queue
3195  * @ioc: per adapter object
3196  *
 * Walk the firmware event queue; cancel any queued work and wait for
 * outstanding events to complete.
3199  */
3200 static void
3201 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3202 {
3203 	struct fw_event_work *fw_event;
3204 
3205 	if (list_empty(&ioc->fw_event_list) ||
3206 	     !ioc->firmware_event_thread || in_interrupt())
3207 		return;
3208 
3209 	while ((fw_event = dequeue_next_fw_event(ioc))) {
3210 		/*
3211 		 * Wait on the fw_event to complete. If this returns 1, then
3212 		 * the event was never executed, and we need a put for the
3213 		 * reference the work had on the fw_event.
3214 		 *
3215 		 * If it did execute, we wait for it to finish, and the put will
3216 		 * happen from _firmware_event_work()
3217 		 */
3218 		if (cancel_work_sync(&fw_event->work))
3219 			fw_event_work_put(fw_event);
3220 
3221 		fw_event_work_put(fw_event);
3222 	}
3223 }
3224 
3225 /**
3226  * _scsih_internal_device_block - block the sdev device
3227  * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Make sure the device is blocked; if the block fails,
 * print an error.
3232  */
3233 static void
3234 _scsih_internal_device_block(struct scsi_device *sdev,
3235 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3236 {
3237 	int r = 0;
3238 
3239 	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3240 	    sas_device_priv_data->sas_target->handle);
3241 	sas_device_priv_data->block = 1;
3242 
3243 	r = scsi_internal_device_block_nowait(sdev);
3244 	if (r == -EINVAL)
3245 		sdev_printk(KERN_WARNING, sdev,
3246 		    "device_block failed with return(%d) for handle(0x%04x)\n",
3247 		    r, sas_device_priv_data->sas_target->handle);
3248 }
3249 
3250 /**
3251  * _scsih_internal_device_unblock - unblock the sdev device
3252  * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Make sure the device is unblocked without error; if not, retry
 * by blocking and then unblocking.
 */
3258 static void
3259 _scsih_internal_device_unblock(struct scsi_device *sdev,
3260 			struct MPT3SAS_DEVICE *sas_device_priv_data)
3261 {
3262 	int r = 0;
3263 
3264 	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3265 	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3266 	sas_device_priv_data->block = 0;
3267 	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3268 	if (r == -EINVAL) {
3269 		/* The device has been set to SDEV_RUNNING by SD layer during
3270 		 * device addition but the request queue is still stopped by
3271 		 * our earlier block call. We need to perform a block again
3272 		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3273 
3274 		sdev_printk(KERN_WARNING, sdev,
3275 		    "device_unblock failed with return(%d) for handle(0x%04x) "
3276 		    "performing a block followed by an unblock\n",
3277 		    r, sas_device_priv_data->sas_target->handle);
3278 		sas_device_priv_data->block = 1;
3279 		r = scsi_internal_device_block_nowait(sdev);
3280 		if (r)
3281 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
3282 			    "failed with return(%d) for handle(0x%04x)\n",
3283 			    r, sas_device_priv_data->sas_target->handle);
3284 
3285 		sas_device_priv_data->block = 0;
3286 		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3287 		if (r)
3288 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3289 			    " failed with return(%d) for handle(0x%04x)\n",
3290 			    r, sas_device_priv_data->sas_target->handle);
3291 	}
3292 }
3293 
3294 /**
3295  * _scsih_ublock_io_all_device - unblock every device
3296  * @ioc: per adapter object
3297  *
3298  * change the device state from block to running
3299  */
3300 static void
3301 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3302 {
3303 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3304 	struct scsi_device *sdev;
3305 
3306 	shost_for_each_device(sdev, ioc->shost) {
3307 		sas_device_priv_data = sdev->hostdata;
3308 		if (!sas_device_priv_data)
3309 			continue;
3310 		if (!sas_device_priv_data->block)
3311 			continue;
3312 
3313 		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3314 			"device_running, handle(0x%04x)\n",
3315 		    sas_device_priv_data->sas_target->handle));
3316 		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3317 	}
3318 }
3319 
3320 
3321 /**
3322  * _scsih_ublock_io_device - prepare device to be deleted
3323  * @ioc: per adapter object
3324  * @sas_address: sas address
3325  *
3326  * unblock then put device in offline state
3327  */
3328 static void
3329 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3330 {
3331 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3332 	struct scsi_device *sdev;
3333 
3334 	shost_for_each_device(sdev, ioc->shost) {
3335 		sas_device_priv_data = sdev->hostdata;
3336 		if (!sas_device_priv_data)
3337 			continue;
3338 		if (sas_device_priv_data->sas_target->sas_address
3339 		    != sas_address)
3340 			continue;
3341 		if (sas_device_priv_data->block)
3342 			_scsih_internal_device_unblock(sdev,
3343 				sas_device_priv_data);
3344 	}
3345 }
3346 
3347 /**
3348  * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3349  * @ioc: per adapter object
3350  *
3351  * During device pull we need to appropriately set the sdev state.
3352  */
3353 static void
3354 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3355 {
3356 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3357 	struct scsi_device *sdev;
3358 
3359 	shost_for_each_device(sdev, ioc->shost) {
3360 		sas_device_priv_data = sdev->hostdata;
3361 		if (!sas_device_priv_data)
3362 			continue;
3363 		if (sas_device_priv_data->block)
3364 			continue;
3365 		if (sas_device_priv_data->ignore_delay_remove) {
3366 			sdev_printk(KERN_INFO, sdev,
3367 			"%s skip device_block for SES handle(0x%04x)\n",
3368 			__func__, sas_device_priv_data->sas_target->handle);
3369 			continue;
3370 		}
3371 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3372 	}
3373 }
3374 
3375 /**
3376  * _scsih_block_io_device - set the device state to SDEV_BLOCK
3377  * @ioc: per adapter object
3378  * @handle: device handle
3379  *
3380  * During device pull we need to appropriately set the sdev state.
3381  */
3382 static void
3383 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3384 {
3385 	struct MPT3SAS_DEVICE *sas_device_priv_data;
3386 	struct scsi_device *sdev;
3387 	struct _sas_device *sas_device;
3388 
3389 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3390 
3391 	shost_for_each_device(sdev, ioc->shost) {
3392 		sas_device_priv_data = sdev->hostdata;
3393 		if (!sas_device_priv_data)
3394 			continue;
3395 		if (sas_device_priv_data->sas_target->handle != handle)
3396 			continue;
3397 		if (sas_device_priv_data->block)
3398 			continue;
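		/* don't block a device whose sas_rphy registration is still
		 * pending */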
3399 		if (sas_device && sas_device->pend_sas_rphy_add)
3400 			continue;
3401 		if (sas_device_priv_data->ignore_delay_remove) {
3402 			sdev_printk(KERN_INFO, sdev,
3403 			"%s skip device_block for SES handle(0x%04x)\n",
3404 			__func__, sas_device_priv_data->sas_target->handle);
3405 			continue;
3406 		}
3407 		_scsih_internal_device_block(sdev, sas_device_priv_data);
3408 	}
3409 
3410 	if (sas_device)
3411 		sas_device_put(sas_device);
3412 }
3413 
3414 /**
3415  * _scsih_block_io_to_children_attached_to_ex
3416  * @ioc: per adapter object
3417  * @sas_expander: the sas_device object
3418  *
 * This routine sets the sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function is called when the expander
 * is pulled.
3422  */
3423 static void
3424 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3425 	struct _sas_node *sas_expander)
3426 {
3427 	struct _sas_port *mpt3sas_port;
3428 	struct _sas_device *sas_device;
3429 	struct _sas_node *expander_sibling;
3430 	unsigned long flags;
3431 
3432 	if (!sas_expander)
3433 		return;
3434 
3435 	list_for_each_entry(mpt3sas_port,
3436 	   &sas_expander->sas_port_list, port_list) {
3437 		if (mpt3sas_port->remote_identify.device_type ==
3438 		    SAS_END_DEVICE) {
3439 			spin_lock_irqsave(&ioc->sas_device_lock, flags);
3440 			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3441 			    mpt3sas_port->remote_identify.sas_address);
3442 			if (sas_device) {
3443 				set_bit(sas_device->handle,
3444 						ioc->blocking_handles);
3445 				sas_device_put(sas_device);
3446 			}
3447 			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3448 		}
3449 	}
3450 
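	/* recurse into child expanders so their attached end devices are
	 * marked for blocking as well */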
3451 	list_for_each_entry(mpt3sas_port,
3452 	   &sas_expander->sas_port_list, port_list) {
3453 
3454 		if (mpt3sas_port->remote_identify.device_type ==
3455 		    SAS_EDGE_EXPANDER_DEVICE ||
3456 		    mpt3sas_port->remote_identify.device_type ==
3457 		    SAS_FANOUT_EXPANDER_DEVICE) {
3458 			expander_sibling =
3459 			    mpt3sas_scsih_expander_find_by_sas_address(
3460 			    ioc, mpt3sas_port->remote_identify.sas_address);
3461 			_scsih_block_io_to_children_attached_to_ex(ioc,
3462 			    expander_sibling);
3463 		}
3464 	}
3465 }
3466 
3467 /**
3468  * _scsih_block_io_to_children_attached_directly
3469  * @ioc: per adapter object
3470  * @event_data: topology change event data
3471  *
 * This routine sets the sdev state to SDEV_BLOCK for all directly
 * attached devices during device pull.
3474  */
3475 static void
3476 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3477 	Mpi2EventDataSasTopologyChangeList_t *event_data)
3478 {
3479 	int i;
3480 	u16 handle;
3481 	u16 reason_code;
3482 
3483 	for (i = 0; i < event_data->NumEntries; i++) {
3484 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3485 		if (!handle)
3486 			continue;
3487 		reason_code = event_data->PHY[i].PhyStatus &
3488 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
3489 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3490 			_scsih_block_io_device(ioc, handle);
3491 	}
3492 }
3493 
3494 /**
3495  * _scsih_block_io_to_pcie_children_attached_directly
3496  * @ioc: per adapter object
3497  * @event_data: topology change event data
3498  *
 * This routine sets the sdev state to SDEV_BLOCK for all directly
 * attached devices during device pull/reconnect.
3501  */
3502 static void
3503 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3504 		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3505 {
3506 	int i;
3507 	u16 handle;
3508 	u16 reason_code;
3509 
3510 	for (i = 0; i < event_data->NumEntries; i++) {
3511 		handle =
3512 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3513 		if (!handle)
3514 			continue;
3515 		reason_code = event_data->PortEntry[i].PortStatus;
3516 		if (reason_code ==
3517 				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3518 			_scsih_block_io_device(ioc, handle);
3519 	}
}

3521 /**
3522  * _scsih_tm_tr_send - send task management request
3523  * @ioc: per adapter object
3524  * @handle: device handle
3525  * Context: interrupt time.
3526  *
 * This code initiates the device removal handshake protocol with the
 * controller firmware.  This function issues a target reset using the
 * high priority request queue; a sas iounit control request
 * (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from the target reset
 * completion.
 *
 * This is designed to send multiple task management requests at the
 * same time to the fifo.  If the fifo is full, the request is appended
 * to a delayed list and processed from a future completion.
3535  */
3536 static void
3537 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3538 {
3539 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3540 	u16 smid;
3541 	struct _sas_device *sas_device = NULL;
3542 	struct _pcie_device *pcie_device = NULL;
3543 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3544 	u64 sas_address = 0;
3545 	unsigned long flags;
3546 	struct _tr_list *delayed_tr;
3547 	u32 ioc_state;
3548 	u8 tr_method = 0;
3549 
3550 	if (ioc->pci_error_recovery) {
3551 		dewtprintk(ioc,
3552 			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3553 				    __func__, handle));
3554 		return;
3555 	}
3556 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3557 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3558 		dewtprintk(ioc,
3559 			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3560 				    __func__, handle));
3561 		return;
3562 	}
3563 
3564 	/* if PD, then return */
3565 	if (test_bit(handle, ioc->pd_handles))
3566 		return;
3567 
3568 	clear_bit(handle, ioc->pend_os_device_add);
3569 
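	/*
	 * Look the device up by handle and mark its target as deleted so
	 * that scsih_qcmd() fails any new IO to it; the address recorded
	 * here is used further down to unblock any already blocked sdevs.
	 */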
3570 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
3571 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3572 	if (sas_device && sas_device->starget &&
3573 	    sas_device->starget->hostdata) {
3574 		sas_target_priv_data = sas_device->starget->hostdata;
3575 		sas_target_priv_data->deleted = 1;
3576 		sas_address = sas_device->sas_address;
3577 	}
3578 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3579 	if (!sas_device) {
3580 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3581 		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3582 		if (pcie_device && pcie_device->starget &&
3583 			pcie_device->starget->hostdata) {
3584 			sas_target_priv_data = pcie_device->starget->hostdata;
3585 			sas_target_priv_data->deleted = 1;
3586 			sas_address = pcie_device->wwid;
3587 		}
3588 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3589 		if (pcie_device && (!ioc->tm_custom_handling))
3590 			tr_method =
3591 			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3592 		else
3593 			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3594 	}
3595 	if (sas_target_priv_data) {
3596 		dewtprintk(ioc,
3597 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3598 				    handle, (u64)sas_address));
3599 		if (sas_device) {
3600 			if (sas_device->enclosure_handle != 0)
3601 				dewtprintk(ioc,
3602 					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3603 						    (u64)sas_device->enclosure_logical_id,
3604 						    sas_device->slot));
3605 			if (sas_device->connector_name[0] != '\0')
3606 				dewtprintk(ioc,
3607 					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3608 						    sas_device->enclosure_level,
3609 						    sas_device->connector_name));
3610 		} else if (pcie_device) {
3611 			if (pcie_device->enclosure_handle != 0)
3612 				dewtprintk(ioc,
3613 					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3614 						    (u64)pcie_device->enclosure_logical_id,
3615 						    pcie_device->slot));
3616 			if (pcie_device->connector_name[0] != '\0')
3617 				dewtprintk(ioc,
3618 					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3619 						    pcie_device->enclosure_level,
3620 						    pcie_device->connector_name));
3621 		}
3622 		_scsih_ublock_io_device(ioc, sas_address);
3623 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3624 	}
3625 
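	/*
	 * Obtain a high-priority smid for the target reset.  If none is
	 * free, queue the handle on delayed_tr_list; the request is
	 * reissued from _scsih_check_for_pending_tm() when an earlier
	 * target reset completes.
	 */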
3626 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3627 	if (!smid) {
3628 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3629 		if (!delayed_tr)
3630 			goto out;
3631 		INIT_LIST_HEAD(&delayed_tr->list);
3632 		delayed_tr->handle = handle;
3633 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3634 		dewtprintk(ioc,
3635 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3636 				    handle));
3637 		goto out;
3638 	}
3639 
3640 	dewtprintk(ioc,
3641 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3642 			    handle, smid, ioc->tm_tr_cb_idx));
3643 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3644 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3645 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3646 	mpi_request->DevHandle = cpu_to_le16(handle);
3647 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3648 	mpi_request->MsgFlags = tr_method;
3649 	set_bit(handle, ioc->device_remove_in_progress);
3650 	mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
3651 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3652 
3653 out:
3654 	if (sas_device)
3655 		sas_device_put(sas_device);
3656 	if (pcie_device)
3657 		pcie_device_put(pcie_device);
3658 }
3659 
3660 /**
 * _scsih_tm_tr_complete - target reset completion routine
3662  * @ioc: per adapter object
3663  * @smid: system request message index
3664  * @msix_index: MSIX table index supplied by the OS
3665  * @reply: reply message frame(lower 32bit addr)
3666  * Context: interrupt time.
3667  *
3668  * This is the target reset completion routine.
3669  * This code is part of the code to initiate the device removal
3670  * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE).
3672  *
3673  * Return: 1 meaning mf should be freed from _base_interrupt
3674  *         0 means the mf is freed from this function.
3675  */
3676 static u8
3677 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3678 	u32 reply)
3679 {
3680 	u16 handle;
3681 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3682 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3683 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3684 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3685 	u16 smid_sas_ctrl;
3686 	u32 ioc_state;
3687 	struct _sc_list *delayed_sc;
3688 
3689 	if (ioc->pci_error_recovery) {
3690 		dewtprintk(ioc,
3691 			   ioc_info(ioc, "%s: host in pci error recovery\n",
3692 				    __func__));
3693 		return 1;
3694 	}
3695 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3696 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3697 		dewtprintk(ioc,
3698 			   ioc_info(ioc, "%s: host is not operational\n",
3699 				    __func__));
3700 		return 1;
3701 	}
3702 	if (unlikely(!mpi_reply)) {
3703 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3704 			__FILE__, __LINE__, __func__);
3705 		return 1;
3706 	}
3707 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3708 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3709 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3710 		dewtprintk(ioc,
3711 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3712 				   handle,
3713 				   le16_to_cpu(mpi_reply->DevHandle), smid));
3714 		return 0;
3715 	}
3716 
3717 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3718 	dewtprintk(ioc,
3719 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3720 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3721 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3722 			    le32_to_cpu(mpi_reply->TerminationCount)));
3723 
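	/*
	 * Second half of the removal handshake: send the SAS IO unit
	 * control (REMOVE_DEVICE) request.  If no smid is free, queue the
	 * handle on delayed_sc_list so it is reissued from
	 * mpt3sas_check_for_pending_internal_cmds().
	 */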
3724 	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3725 	if (!smid_sas_ctrl) {
3726 		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3727 		if (!delayed_sc)
3728 			return _scsih_check_for_pending_tm(ioc, smid);
3729 		INIT_LIST_HEAD(&delayed_sc->list);
3730 		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3731 		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3732 		dewtprintk(ioc,
3733 			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3734 				    handle));
3735 		return _scsih_check_for_pending_tm(ioc, smid);
3736 	}
3737 
3738 	dewtprintk(ioc,
3739 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3740 			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3741 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3742 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3743 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3744 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3745 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
3746 	mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
3747 
3748 	return _scsih_check_for_pending_tm(ioc, smid);
3749 }
3750 
/**
 * _scsih_allow_scmd_to_device - check whether an scmd should be
 *				 issued to the IOC or not
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Return: true if the scmd can be issued to the IOC, otherwise false.
 */
3758 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3759 	struct scsi_cmnd *scmd)
3760 {
3761 
3762 	if (ioc->pci_error_recovery)
3763 		return false;
3764 
3765 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3766 		if (ioc->remove_host)
3767 			return false;
3768 
3769 		return true;
3770 	}
3771 
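	/*
	 * While the host is being removed, only SYNCHRONIZE CACHE and
	 * START STOP commands are still allowed through so devices can be
	 * shut down cleanly; everything else is failed.
	 */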
3772 	if (ioc->remove_host) {
3773 
3774 		switch (scmd->cmnd[0]) {
3775 		case SYNCHRONIZE_CACHE:
3776 		case START_STOP:
3777 			return true;
3778 		default:
3779 			return false;
3780 		}
3781 	}
3782 
3783 	return true;
3784 }
3785 
3786 /**
3787  * _scsih_sas_control_complete - completion routine
3788  * @ioc: per adapter object
3789  * @smid: system request message index
3790  * @msix_index: MSIX table index supplied by the OS
3791  * @reply: reply message frame(lower 32bit addr)
3792  * Context: interrupt time.
3793  *
3794  * This is the sas iounit control completion routine.
3795  * This code is part of the code to initiate the device removal
3796  * handshake protocol with controller firmware.
3797  *
3798  * Return: 1 meaning mf should be freed from _base_interrupt
3799  *         0 means the mf is freed from this function.
3800  */
3801 static u8
3802 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3803 	u8 msix_index, u32 reply)
3804 {
3805 	Mpi2SasIoUnitControlReply_t *mpi_reply =
3806 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3807 
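	/*
	 * On a successful REMOVE_DEVICE completion the
	 * device_remove_in_progress bit for this handle is cleared.
	 */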
3808 	if (likely(mpi_reply)) {
3809 		dewtprintk(ioc,
3810 			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3811 				    le16_to_cpu(mpi_reply->DevHandle), smid,
3812 				    le16_to_cpu(mpi_reply->IOCStatus),
3813 				    le32_to_cpu(mpi_reply->IOCLogInfo)));
3814 		if (le16_to_cpu(mpi_reply->IOCStatus) ==
3815 		     MPI2_IOCSTATUS_SUCCESS) {
3816 			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3817 			    ioc->device_remove_in_progress);
3818 		}
3819 	} else {
3820 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3821 			__FILE__, __LINE__, __func__);
3822 	}
3823 	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3824 }
3825 
3826 /**
3827  * _scsih_tm_tr_volume_send - send target reset request for volumes
3828  * @ioc: per adapter object
3829  * @handle: device handle
3830  * Context: interrupt time.
3831  *
 * This is designed to send multiple task management requests at the
 * same time to the fifo.  If the fifo is full, the request is appended
 * to a delayed list and processed from a future completion.
3835  */
3836 static void
3837 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3838 {
3839 	Mpi2SCSITaskManagementRequest_t *mpi_request;
3840 	u16 smid;
3841 	struct _tr_list *delayed_tr;
3842 
3843 	if (ioc->pci_error_recovery) {
3844 		dewtprintk(ioc,
3845 			   ioc_info(ioc, "%s: host reset in progress!\n",
3846 				    __func__));
3847 		return;
3848 	}
3849 
3850 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3851 	if (!smid) {
3852 		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3853 		if (!delayed_tr)
3854 			return;
3855 		INIT_LIST_HEAD(&delayed_tr->list);
3856 		delayed_tr->handle = handle;
3857 		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3858 		dewtprintk(ioc,
3859 			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3860 				    handle));
3861 		return;
3862 	}
3863 
3864 	dewtprintk(ioc,
3865 		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3866 			    handle, smid, ioc->tm_tr_volume_cb_idx));
3867 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3868 	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3869 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3870 	mpi_request->DevHandle = cpu_to_le16(handle);
3871 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3872 	mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
3873 }
3874 
3875 /**
3876  * _scsih_tm_volume_tr_complete - target reset completion
3877  * @ioc: per adapter object
3878  * @smid: system request message index
3879  * @msix_index: MSIX table index supplied by the OS
3880  * @reply: reply message frame(lower 32bit addr)
3881  * Context: interrupt time.
3882  *
3883  * Return: 1 meaning mf should be freed from _base_interrupt
3884  *         0 means the mf is freed from this function.
3885  */
3886 static u8
3887 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3888 	u8 msix_index, u32 reply)
3889 {
3890 	u16 handle;
3891 	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3892 	Mpi2SCSITaskManagementReply_t *mpi_reply =
3893 	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
3894 
3895 	if (ioc->shost_recovery || ioc->pci_error_recovery) {
3896 		dewtprintk(ioc,
3897 			   ioc_info(ioc, "%s: host reset in progress!\n",
3898 				    __func__));
3899 		return 1;
3900 	}
3901 	if (unlikely(!mpi_reply)) {
3902 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3903 			__FILE__, __LINE__, __func__);
3904 		return 1;
3905 	}
3906 
3907 	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3908 	handle = le16_to_cpu(mpi_request_tm->DevHandle);
3909 	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3910 		dewtprintk(ioc,
3911 			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3912 				   handle, le16_to_cpu(mpi_reply->DevHandle),
3913 				   smid));
3914 		return 0;
3915 	}
3916 
3917 	dewtprintk(ioc,
3918 		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3919 			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3920 			    le32_to_cpu(mpi_reply->IOCLogInfo),
3921 			    le32_to_cpu(mpi_reply->TerminationCount)));
3922 
3923 	return _scsih_check_for_pending_tm(ioc, smid);
3924 }
3925 
3926 /**
3927  * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3928  * @ioc: per adapter object
3929  * @smid: system request message index
3930  * @event: Event ID
3931  * @event_context: used to track events uniquely
3932  *
3933  * Context - processed in interrupt context.
3934  */
3935 static void
3936 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3937 				U32 event_context)
3938 {
3939 	Mpi2EventAckRequest_t *ack_request;
3940 	int i = smid - ioc->internal_smid;
3941 	unsigned long flags;
3942 
	/* Without releasing the smid, just update the
	 * callback index and reuse the same smid for
	 * processing this delayed request.
	 */
3947 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3948 	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
3949 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3950 
3951 	dewtprintk(ioc,
3952 		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
3953 			    le16_to_cpu(event), smid, ioc->base_cb_idx));
3954 	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
3955 	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
3956 	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
3957 	ack_request->Event = event;
3958 	ack_request->EventContext = event_context;
3959 	ack_request->VF_ID = 0;  /* TODO */
3960 	ack_request->VP_ID = 0;
3961 	mpt3sas_base_put_smid_default(ioc, smid);
3962 }
3963 
3964 /**
3965  * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
3966  *				sas_io_unit_ctrl messages
3967  * @ioc: per adapter object
3968  * @smid: system request message index
3969  * @handle: device handle
3970  *
3971  * Context - processed in interrupt context.
3972  */
3973 static void
3974 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3975 					u16 smid, u16 handle)
3976 {
3977 	Mpi2SasIoUnitControlRequest_t *mpi_request;
3978 	u32 ioc_state;
3979 	int i = smid - ioc->internal_smid;
3980 	unsigned long flags;
3981 
3982 	if (ioc->remove_host) {
3983 		dewtprintk(ioc,
3984 			   ioc_info(ioc, "%s: host has been removed\n",
3985 				    __func__));
3986 		return;
3987 	} else if (ioc->pci_error_recovery) {
3988 		dewtprintk(ioc,
3989 			   ioc_info(ioc, "%s: host in pci error recovery\n",
3990 				    __func__));
3991 		return;
3992 	}
3993 	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3994 	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3995 		dewtprintk(ioc,
3996 			   ioc_info(ioc, "%s: host is not operational\n",
3997 				    __func__));
3998 		return;
3999 	}
4000 
	/* Without releasing the smid, just update the
	 * callback index and reuse the same smid for
	 * processing this delayed request.
	 */
4005 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4006 	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4007 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4008 
4009 	dewtprintk(ioc,
4010 		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4011 			    handle, smid, ioc->tm_sas_control_cb_idx));
4012 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4013 	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4014 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4015 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4016 	mpi_request->DevHandle = cpu_to_le16(handle);
4017 	mpt3sas_base_put_smid_default(ioc, smid);
4018 }
4019 
4020 /**
 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4022  * @ioc: per adapter object
4023  * @smid: system request message index
4024  *
4025  * Context: Executed in interrupt context
4026  *
4027  * This will check delayed internal messages list, and process the
4028  * next request.
4029  *
4030  * Return: 1 meaning mf should be freed from _base_interrupt
4031  *         0 means the mf is freed from this function.
4032  */
4033 u8
4034 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4035 {
4036 	struct _sc_list *delayed_sc;
4037 	struct _event_ack_list *delayed_event_ack;
4038 
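	/*
	 * Delayed event ACKs are serviced before delayed sas_io_unit_ctrl
	 * requests; in both cases the current smid is reused, so 0 is
	 * returned and the message frame is not freed by _base_interrupt.
	 */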
4039 	if (!list_empty(&ioc->delayed_event_ack_list)) {
4040 		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4041 						struct _event_ack_list, list);
4042 		_scsih_issue_delayed_event_ack(ioc, smid,
4043 		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4044 		list_del(&delayed_event_ack->list);
4045 		kfree(delayed_event_ack);
4046 		return 0;
4047 	}
4048 
4049 	if (!list_empty(&ioc->delayed_sc_list)) {
4050 		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4051 						struct _sc_list, list);
4052 		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4053 						 delayed_sc->handle);
4054 		list_del(&delayed_sc->list);
4055 		kfree(delayed_sc);
4056 		return 0;
4057 	}
4058 	return 1;
4059 }
4060 
4061 /**
4062  * _scsih_check_for_pending_tm - check for pending task management
4063  * @ioc: per adapter object
4064  * @smid: system request message index
4065  *
 * This will check the delayed target reset list, and feed the
 * next request.
4068  *
4069  * Return: 1 meaning mf should be freed from _base_interrupt
4070  *         0 means the mf is freed from this function.
4071  */
4072 static u8
4073 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4074 {
4075 	struct _tr_list *delayed_tr;
4076 
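	/*
	 * Delayed volume target resets are serviced before delayed device
	 * target resets.  The current smid is freed first because the send
	 * routines allocate their own high-priority smid.
	 */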
4077 	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4078 		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4079 		    struct _tr_list, list);
4080 		mpt3sas_base_free_smid(ioc, smid);
4081 		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4082 		list_del(&delayed_tr->list);
4083 		kfree(delayed_tr);
4084 		return 0;
4085 	}
4086 
4087 	if (!list_empty(&ioc->delayed_tr_list)) {
4088 		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4089 		    struct _tr_list, list);
4090 		mpt3sas_base_free_smid(ioc, smid);
4091 		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4092 		list_del(&delayed_tr->list);
4093 		kfree(delayed_tr);
4094 		return 0;
4095 	}
4096 
4097 	return 1;
4098 }
4099 
4100 /**
4101  * _scsih_check_topo_delete_events - sanity check on topo events
4102  * @ioc: per adapter object
4103  * @event_data: the event data payload
4104  *
 * This routine was added to better handle cable breaks.
4106  *
4107  * This handles the case where driver receives multiple expander
4108  * add and delete events in a single shot.  When there is a delete event
4109  * the routine will void any pending add events waiting in the event queue.
4110  */
4111 static void
4112 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4113 	Mpi2EventDataSasTopologyChangeList_t *event_data)
4114 {
4115 	struct fw_event_work *fw_event;
4116 	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4117 	u16 expander_handle;
4118 	struct _sas_node *sas_expander;
4119 	unsigned long flags;
4120 	int i, reason_code;
4121 	u16 handle;
4122 
4123 	for (i = 0 ; i < event_data->NumEntries; i++) {
4124 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4125 		if (!handle)
4126 			continue;
4127 		reason_code = event_data->PHY[i].PhyStatus &
4128 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4129 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4130 			_scsih_tm_tr_send(ioc, handle);
4131 	}
4132 
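	/*
	 * If the expander handle falls within the HBA phy handle range,
	 * there is no real expander; only block the directly attached
	 * children and return.
	 */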
4133 	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4134 	if (expander_handle < ioc->sas_hba.num_phys) {
4135 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4136 		return;
4137 	}
4138 	if (event_data->ExpStatus ==
4139 	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4140 		/* put expander attached devices into blocking state */
4141 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
4142 		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4143 		    expander_handle);
4144 		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4145 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
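		/*
		 * The recursive walk above marked affected end devices in
		 * the blocking_handles bitmap; now block each one, clearing
		 * its bit as it is processed.
		 */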
4146 		do {
4147 			handle = find_first_bit(ioc->blocking_handles,
4148 			    ioc->facts.MaxDevHandle);
4149 			if (handle < ioc->facts.MaxDevHandle)
4150 				_scsih_block_io_device(ioc, handle);
4151 		} while (test_and_clear_bit(handle, ioc->blocking_handles));
4152 	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4153 		_scsih_block_io_to_children_attached_directly(ioc, event_data);
4154 
4155 	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4156 		return;
4157 
4158 	/* mark ignore flag for pending events */
4159 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4160 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4161 		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4162 		    fw_event->ignore)
4163 			continue;
4164 		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4165 				   fw_event->event_data;
4166 		if (local_event_data->ExpStatus ==
4167 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4168 		    local_event_data->ExpStatus ==
4169 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4170 			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4171 			    expander_handle) {
4172 				dewtprintk(ioc,
4173 					   ioc_info(ioc, "setting ignoring flag\n"));
4174 				fw_event->ignore = 1;
4175 			}
4176 		}
4177 	}
4178 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4179 }
4180 
4181 /**
4182  * _scsih_check_pcie_topo_remove_events - sanity check on topo
4183  * events
4184  * @ioc: per adapter object
4185  * @event_data: the event data payload
4186  *
4187  * This handles the case where driver receives multiple switch
4188  * or device add and delete events in a single shot.  When there
4189  * is a delete event the routine will void any pending add
4190  * events waiting in the event queue.
4191  */
4192 static void
4193 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4194 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4195 {
4196 	struct fw_event_work *fw_event;
4197 	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4198 	unsigned long flags;
4199 	int i, reason_code;
4200 	u16 handle, switch_handle;
4201 
4202 	for (i = 0; i < event_data->NumEntries; i++) {
4203 		handle =
4204 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4205 		if (!handle)
4206 			continue;
4207 		reason_code = event_data->PortEntry[i].PortStatus;
4208 		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4209 			_scsih_tm_tr_send(ioc, handle);
4210 	}
4211 
4212 	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4213 	if (!switch_handle) {
4214 		_scsih_block_io_to_pcie_children_attached_directly(
4215 							ioc, event_data);
4216 		return;
4217 	}
	/* TODO: We are not supporting cascaded PCIe Switch removal yet */
4219 	if ((event_data->SwitchStatus
4220 		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4221 		(event_data->SwitchStatus ==
4222 					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4223 		_scsih_block_io_to_pcie_children_attached_directly(
4224 							ioc, event_data);
4225 
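	/*
	 * Note: the SwitchStatus checks below compare against the SAS
	 * topology ExpStatus definitions rather than dedicated PCIe
	 * switch status definitions.
	 */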
4226 	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4227 		return;
4228 
4229 	/* mark ignore flag for pending events */
4230 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
4231 	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4232 		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4233 			fw_event->ignore)
4234 			continue;
4235 		local_event_data =
4236 			(Mpi26EventDataPCIeTopologyChangeList_t *)
4237 			fw_event->event_data;
4238 		if (local_event_data->SwitchStatus ==
4239 		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4240 		    local_event_data->SwitchStatus ==
4241 		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4242 			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4243 				switch_handle) {
4244 				dewtprintk(ioc,
4245 					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
4246 				fw_event->ignore = 1;
4247 			}
4248 		}
4249 	}
4250 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4251 }
4252 
4253 /**
4254  * _scsih_set_volume_delete_flag - setting volume delete flag
4255  * @ioc: per adapter object
4256  * @handle: device handle
4257  *
 * Sets the target's deleted flag so that new IO to the volume is rejected.
4259  */
4260 static void
4261 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4262 {
4263 	struct _raid_device *raid_device;
4264 	struct MPT3SAS_TARGET *sas_target_priv_data;
4265 	unsigned long flags;
4266 
4267 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4268 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4269 	if (raid_device && raid_device->starget &&
4270 	    raid_device->starget->hostdata) {
4271 		sas_target_priv_data =
4272 		    raid_device->starget->hostdata;
4273 		sas_target_priv_data->deleted = 1;
4274 		dewtprintk(ioc,
4275 			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4276 				    handle, (u64)raid_device->wwid));
4277 	}
4278 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4279 }
4280 
4281 /**
4282  * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4283  * @handle: input handle
4284  * @a: handle for volume a
4285  * @b: handle for volume b
4286  *
4287  * IR firmware only supports two raid volumes.  The purpose of this
 * routine is to store the volume handle in either a or b, provided the
 * given handle is non-zero and has not already been recorded.
4290  */
4291 static void
4292 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4293 {
4294 	if (!handle || handle == *a || handle == *b)
4295 		return;
4296 	if (!*a)
4297 		*a = handle;
4298 	else if (!*b)
4299 		*b = handle;
4300 }
4301 
4302 /**
4303  * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4304  * @ioc: per adapter object
4305  * @event_data: the event data payload
4306  * Context: interrupt time.
4307  *
4308  * This routine will send target reset to volume, followed by target
 * resets to the PDs. This is called when a PD has been removed, or a
 * volume has been deleted or removed. When the target reset is sent
4311  * to volume, the PD target resets need to be queued to start upon
4312  * completion of the volume target reset.
4313  */
4314 static void
4315 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4316 	Mpi2EventDataIrConfigChangeList_t *event_data)
4317 {
4318 	Mpi2EventIrConfigElement_t *element;
4319 	int i;
4320 	u16 handle, volume_handle, a, b;
4321 	struct _tr_list *delayed_tr;
4322 
4323 	a = 0;
4324 	b = 0;
4325 
4326 	if (ioc->is_warpdrive)
4327 		return;
4328 
4329 	/* Volume Resets for Deleted or Removed */
4330 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4331 	for (i = 0; i < event_data->NumElements; i++, element++) {
4332 		if (le32_to_cpu(event_data->Flags) &
4333 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4334 			continue;
4335 		if (element->ReasonCode ==
4336 		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4337 		    element->ReasonCode ==
4338 		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4339 			volume_handle = le16_to_cpu(element->VolDevHandle);
4340 			_scsih_set_volume_delete_flag(ioc, volume_handle);
4341 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4342 		}
4343 	}
4344 
4345 	/* Volume Resets for UNHIDE events */
4346 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4347 	for (i = 0; i < event_data->NumElements; i++, element++) {
4348 		if (le32_to_cpu(event_data->Flags) &
4349 		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4350 			continue;
4351 		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4352 			volume_handle = le16_to_cpu(element->VolDevHandle);
4353 			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4354 		}
4355 	}
4356 
4357 	if (a)
4358 		_scsih_tm_tr_volume_send(ioc, a);
4359 	if (b)
4360 		_scsih_tm_tr_volume_send(ioc, b);
4361 
4362 	/* PD target resets */
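	/*
	 * If a PD belongs to one of the volumes reset above, queue its
	 * target reset on delayed_tr_list so it is issued after the volume
	 * target reset completes; otherwise send it right away.
	 */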
4363 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4364 	for (i = 0; i < event_data->NumElements; i++, element++) {
4365 		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4366 			continue;
4367 		handle = le16_to_cpu(element->PhysDiskDevHandle);
4368 		volume_handle = le16_to_cpu(element->VolDevHandle);
4369 		clear_bit(handle, ioc->pd_handles);
4370 		if (!volume_handle)
4371 			_scsih_tm_tr_send(ioc, handle);
4372 		else if (volume_handle == a || volume_handle == b) {
4373 			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4374 			BUG_ON(!delayed_tr);
4375 			INIT_LIST_HEAD(&delayed_tr->list);
4376 			delayed_tr->handle = handle;
4377 			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4378 			dewtprintk(ioc,
4379 				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4380 					    handle));
4381 		} else
4382 			_scsih_tm_tr_send(ioc, handle);
4383 	}
4384 }
4385 
4386 
4387 /**
4388  * _scsih_check_volume_delete_events - set delete flag for volumes
4389  * @ioc: per adapter object
4390  * @event_data: the event data payload
4391  * Context: interrupt time.
4392  *
 * This will handle the case when the cable connected to the entire
 * volume is pulled.  We take care of setting the deleted flag so that
 * normal IO is not sent.
4396  */
4397 static void
4398 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4399 	Mpi2EventDataIrVolume_t *event_data)
4400 {
4401 	u32 state;
4402 
4403 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4404 		return;
4405 	state = le32_to_cpu(event_data->NewValue);
4406 	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4407 	    MPI2_RAID_VOL_STATE_FAILED)
4408 		_scsih_set_volume_delete_flag(ioc,
4409 		    le16_to_cpu(event_data->VolDevHandle));
4410 }
4411 
4412 /**
4413  * _scsih_temp_threshold_events - display temperature threshold exceeded events
4414  * @ioc: per adapter object
4415  * @event_data: the temp threshold event data
4416  * Context: interrupt time.
4417  */
4418 static void
4419 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4420 	Mpi2EventDataTemperature_t *event_data)
4421 {
4422 	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4423 		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4424 			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4425 			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4426 			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4427 			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4428 			event_data->SensorNum);
4429 		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4430 			event_data->CurrentTemperature);
4431 	}
4432 }
4433 
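/*
 * Serialize ATA pass-through (SATL) commands per device: only one ATA_12 or
 * ATA_16 command may be outstanding at a time.  When @pending is true, the
 * return value is non-zero if an ATA command was already marked pending;
 * when @pending is false, the pending bit is simply cleared.
 */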
4434 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4435 {
4436 	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4437 
4438 	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4439 		return 0;
4440 
4441 	if (pending)
4442 		return test_and_set_bit(0, &priv->ata_command_pending);
4443 
4444 	clear_bit(0, &priv->ata_command_pending);
4445 	return 0;
4446 }
4447 
4448 /**
4449  * _scsih_flush_running_cmds - completing outstanding commands.
4450  * @ioc: per adapter object
4451  *
 * Following a host reset, flush out all pending scmds; the outstanding
 * IO is completed back to the SCSI midlayer rather than being reissued.
4454  */
4455 static void
4456 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4457 {
4458 	struct scsi_cmnd *scmd;
4459 	struct scsiio_tracker *st;
4460 	u16 smid;
4461 	int count = 0;
4462 
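	/*
	 * Walk every active smid, release the driver's per-command state
	 * and DMA mappings, and complete the command back to the midlayer
	 * with DID_RESET, or DID_NO_CONNECT when the host is being removed
	 * or is in PCI error recovery.
	 */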
4463 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4464 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4465 		if (!scmd)
4466 			continue;
4467 		count++;
4468 		_scsih_set_satl_pending(scmd, false);
4469 		st = scsi_cmd_priv(scmd);
4470 		mpt3sas_base_clear_st(ioc, st);
4471 		scsi_dma_unmap(scmd);
4472 		if (ioc->pci_error_recovery || ioc->remove_host)
4473 			scmd->result = DID_NO_CONNECT << 16;
4474 		else
4475 			scmd->result = DID_RESET << 16;
4476 		scmd->scsi_done(scmd);
4477 	}
4478 	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4479 }
4480 
4481 /**
4482  * _scsih_setup_eedp - setup MPI request for EEDP transfer
4483  * @ioc: per adapter object
4484  * @scmd: pointer to scsi command object
4485  * @mpi_request: pointer to the SCSI_IO request message frame
4486  *
 * Supports protection types 1, 2 and 3.
4488  */
4489 static void
4490 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4491 	Mpi25SCSIIORequest_t *mpi_request)
4492 {
4493 	u16 eedp_flags;
4494 	unsigned char prot_op = scsi_get_prot_op(scmd);
4495 	unsigned char prot_type = scsi_get_prot_type(scmd);
4496 	Mpi25SCSIIORequest_t *mpi_request_3v =
4497 	   (Mpi25SCSIIORequest_t *)mpi_request;
4498 
4499 	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4500 		return;
4501 
4502 	if (prot_op ==  SCSI_PROT_READ_STRIP)
4503 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4504 	else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
4505 		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4506 	else
4507 		return;
4508 
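	/*
	 * For Type 1/2 protection the initial reference tag is taken from
	 * the request and both reference and guard tags are checked; for
	 * Type 3 only the guard tag is checked.
	 */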
4509 	switch (prot_type) {
4510 	case SCSI_PROT_DIF_TYPE1:
4511 	case SCSI_PROT_DIF_TYPE2:
4512 
		/*
		 * enable ref/guard checking
		 * auto increment ref tag
		 */
4517 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4518 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4519 		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4520 		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4521 		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
4522 		break;
4523 
4524 	case SCSI_PROT_DIF_TYPE3:
4525 
		/*
		 * enable guard checking
		 */
4529 		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4530 
4531 		break;
4532 	}
4533 
4534 	mpi_request_3v->EEDPBlockSize =
4535 	    cpu_to_le16(scmd->device->sector_size);
4536 
4537 	if (ioc->is_gen35_ioc)
4538 		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4539 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
4540 }
4541 
4542 /**
4543  * _scsih_eedp_error_handling - return sense code for EEDP errors
4544  * @scmd: pointer to scsi command object
4545  * @ioc_status: ioc status
4546  */
4547 static void
4548 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4549 {
4550 	u8 ascq;
4551 
4552 	switch (ioc_status) {
4553 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4554 		ascq = 0x01;
4555 		break;
4556 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4557 		ascq = 0x02;
4558 		break;
4559 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4560 		ascq = 0x03;
4561 		break;
4562 	default:
4563 		ascq = 0x00;
4564 		break;
4565 	}
4566 	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4567 	    ascq);
4568 	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4569 	    SAM_STAT_CHECK_CONDITION;
4570 }
4571 
4572 /**
4573  * scsih_qcmd - main scsi request entry point
4574  * @shost: SCSI host pointer
4575  * @scmd: pointer to scsi command object
4576  *
4577  * The callback index is set inside `ioc->scsi_io_cb_idx`.
4578  *
4579  * Return: 0 on success.  If there's a failure, return either:
4580  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4581  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4582  */
4583 static int
4584 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4585 {
4586 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4587 	struct MPT3SAS_DEVICE *sas_device_priv_data;
4588 	struct MPT3SAS_TARGET *sas_target_priv_data;
4589 	struct _raid_device *raid_device;
4590 	struct request *rq = scmd->request;
4591 	int class;
4592 	Mpi25SCSIIORequest_t *mpi_request;
4593 	struct _pcie_device *pcie_device = NULL;
4594 	u32 mpi_control;
4595 	u16 smid;
4596 	u16 handle;
4597 
4598 	if (ioc->logging_level & MPT_DEBUG_SCSI)
4599 		scsi_print_command(scmd);
4600 
4601 	sas_device_priv_data = scmd->device->hostdata;
4602 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4603 		scmd->result = DID_NO_CONNECT << 16;
4604 		scmd->scsi_done(scmd);
4605 		return 0;
4606 	}
4607 
4608 	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4609 		scmd->result = DID_NO_CONNECT << 16;
4610 		scmd->scsi_done(scmd);
4611 		return 0;
4612 	}
4613 
4614 	sas_target_priv_data = sas_device_priv_data->sas_target;
4615 
4616 	/* invalid device handle */
4617 	handle = sas_target_priv_data->handle;
4618 	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4619 		scmd->result = DID_NO_CONNECT << 16;
4620 		scmd->scsi_done(scmd);
4621 		return 0;
4622 	}
4623 
4624 
4625 	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4626 		/* host recovery or link resets sent via IOCTLs */
4627 		return SCSI_MLQUEUE_HOST_BUSY;
4628 	} else if (sas_target_priv_data->deleted) {
4629 		/* device has been deleted */
4630 		scmd->result = DID_NO_CONNECT << 16;
4631 		scmd->scsi_done(scmd);
4632 		return 0;
4633 	} else if (sas_target_priv_data->tm_busy ||
4634 		   sas_device_priv_data->block) {
4635 		/* device busy with task management */
4636 		return SCSI_MLQUEUE_DEVICE_BUSY;
4637 	}
4638 
4639 	/*
	 * Bug workaround for firmware SATL handling.  The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point.
4643 	 */
4644 	do {
4645 		if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4646 			scmd->result = SAM_STAT_BUSY;
4647 			scmd->scsi_done(scmd);
4648 			return 0;
4649 		}
4650 	} while (_scsih_set_satl_pending(scmd, true));
4651 
4652 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4653 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
4654 	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4655 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4656 	else
4657 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4658 
4659 	/* set tags */
4660 	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* NCQ Prio supported, make sure control indicates high priority */
4662 	if (sas_device_priv_data->ncq_prio_enable) {
4663 		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4664 		if (class == IOPRIO_CLASS_RT)
4665 			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4666 	}
	/* Make sure the device is not a raid volume.
	 * We do not expose raid functionality to the upper layer for warpdrive.
4669 	 */
4670 	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4671 		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
4672 		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4673 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4674 
4675 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4676 	if (!smid) {
4677 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4678 		_scsih_set_satl_pending(scmd, false);
4679 		goto out;
4680 	}
4681 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4682 	memset(mpi_request, 0, ioc->request_sz);
4683 	_scsih_setup_eedp(ioc, scmd, mpi_request);
4684 
4685 	if (scmd->cmd_len == 32)
4686 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4688 	if (sas_device_priv_data->sas_target->flags &
4689 	    MPT_TARGET_FLAGS_RAID_COMPONENT)
4690 		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4691 	else
4692 		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4693 	mpi_request->DevHandle = cpu_to_le16(handle);
4694 	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4695 	mpi_request->Control = cpu_to_le32(mpi_control);
4696 	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4697 	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4698 	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4699 	mpi_request->SenseBufferLowAddress =
4700 	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4701 	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4702 	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4703 	    mpi_request->LUN);
4704 	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4705 
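	/*
	 * Build the scatter-gather list for the data transfer; on failure
	 * the smid is released and the host is reported busy.  Zero-length
	 * transfers get a zero-length SGE.
	 */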
4706 	if (mpi_request->DataLength) {
4707 		pcie_device = sas_target_priv_data->pcie_dev;
4708 		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4709 			mpt3sas_base_free_smid(ioc, smid);
4710 			_scsih_set_satl_pending(scmd, false);
4711 			goto out;
4712 		}
4713 	} else
4714 		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4715 
4716 	raid_device = sas_target_priv_data->raid_device;
4717 	if (raid_device && raid_device->direct_io_enabled)
4718 		mpt3sas_setup_direct_io(ioc, scmd,
4719 			raid_device, mpi_request);
4720 
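	/*
	 * Dispatch the request: fast-path capable SCSI IO uses the fast
	 * path descriptor, other SCSI IO uses the normal SCSI IO
	 * descriptor, and RAID passthrough uses the default descriptor.
	 */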
4721 	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4722 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4723 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4724 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4725 			mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
4726 		} else
4727 			ioc->put_smid_scsi_io(ioc, smid,
4728 			    le16_to_cpu(mpi_request->DevHandle));
4729 	} else
4730 		mpt3sas_base_put_smid_default(ioc, smid);
4731 	return 0;
4732 
4733  out:
4734 	return SCSI_MLQUEUE_HOST_BUSY;
4735 }
4736 
4737 /**
4738  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4739  * @sense_buffer: sense data returned by target
4740  * @data: normalized skey/asc/ascq
4741  */
4742 static void
4743 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4744 {
4745 	if ((sense_buffer[0] & 0x7F) >= 0x72) {
4746 		/* descriptor format */
4747 		data->skey = sense_buffer[1] & 0x0F;
4748 		data->asc = sense_buffer[2];
4749 		data->ascq = sense_buffer[3];
4750 	} else {
4751 		/* fixed format */
4752 		data->skey = sense_buffer[2] & 0x0F;
4753 		data->asc = sense_buffer[12];
4754 		data->ascq = sense_buffer[13];
4755 	}
4756 }
4757 
4758 /**
 * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
4760  * @ioc: per adapter object
4761  * @scmd: pointer to scsi command object
4762  * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
4764  *
4765  * scsi_status - SCSI Status code returned from target device
4766  * scsi_state - state info associated with SCSI_IO determined by ioc
4767  * ioc_status - ioc supplied status info
4768  */
4769 static void
4770 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4771 	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4772 {
4773 	u32 response_info;
4774 	u8 *response_bytes;
4775 	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4776 	    MPI2_IOCSTATUS_MASK;
4777 	u8 scsi_state = mpi_reply->SCSIState;
4778 	u8 scsi_status = mpi_reply->SCSIStatus;
4779 	char *desc_ioc_state = NULL;
4780 	char *desc_scsi_status = NULL;
4781 	char *desc_scsi_state = ioc->tmp_string;
4782 	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4783 	struct _sas_device *sas_device = NULL;
4784 	struct _pcie_device *pcie_device = NULL;
4785 	struct scsi_target *starget = scmd->device->sdev_target;
4786 	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4787 	char *device_str = NULL;
4788 
4789 	if (!priv_target)
4790 		return;
4791 	if (ioc->hide_ir_msg)
4792 		device_str = "WarpDrive";
4793 	else
4794 		device_str = "volume";
4795 
4796 	if (log_info == 0x31170000)
4797 		return;
4798 
4799 	switch (ioc_status) {
4800 	case MPI2_IOCSTATUS_SUCCESS:
4801 		desc_ioc_state = "success";
4802 		break;
4803 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
4804 		desc_ioc_state = "invalid function";
4805 		break;
4806 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4807 		desc_ioc_state = "scsi recovered error";
4808 		break;
4809 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4810 		desc_ioc_state = "scsi invalid dev handle";
4811 		break;
4812 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4813 		desc_ioc_state = "scsi device not there";
4814 		break;
4815 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4816 		desc_ioc_state = "scsi data overrun";
4817 		break;
4818 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4819 		desc_ioc_state = "scsi data underrun";
4820 		break;
4821 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4822 		desc_ioc_state = "scsi io data error";
4823 		break;
4824 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4825 		desc_ioc_state = "scsi protocol error";
4826 		break;
4827 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4828 		desc_ioc_state = "scsi task terminated";
4829 		break;
4830 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4831 		desc_ioc_state = "scsi residual mismatch";
4832 		break;
4833 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4834 		desc_ioc_state = "scsi task mgmt failed";
4835 		break;
4836 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4837 		desc_ioc_state = "scsi ioc terminated";
4838 		break;
4839 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4840 		desc_ioc_state = "scsi ext terminated";
4841 		break;
4842 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4843 		desc_ioc_state = "eedp guard error";
4844 		break;
4845 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4846 		desc_ioc_state = "eedp ref tag error";
4847 		break;
4848 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4849 		desc_ioc_state = "eedp app tag error";
4850 		break;
4851 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4852 		desc_ioc_state = "insufficient power";
4853 		break;
4854 	default:
4855 		desc_ioc_state = "unknown";
4856 		break;
4857 	}
4858 
4859 	switch (scsi_status) {
4860 	case MPI2_SCSI_STATUS_GOOD:
4861 		desc_scsi_status = "good";
4862 		break;
4863 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
4864 		desc_scsi_status = "check condition";
4865 		break;
4866 	case MPI2_SCSI_STATUS_CONDITION_MET:
4867 		desc_scsi_status = "condition met";
4868 		break;
4869 	case MPI2_SCSI_STATUS_BUSY:
4870 		desc_scsi_status = "busy";
4871 		break;
4872 	case MPI2_SCSI_STATUS_INTERMEDIATE:
4873 		desc_scsi_status = "intermediate";
4874 		break;
4875 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4876 		desc_scsi_status = "intermediate condmet";
4877 		break;
4878 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4879 		desc_scsi_status = "reservation conflict";
4880 		break;
4881 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4882 		desc_scsi_status = "command terminated";
4883 		break;
4884 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
4885 		desc_scsi_status = "task set full";
4886 		break;
4887 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
4888 		desc_scsi_status = "aca active";
4889 		break;
4890 	case MPI2_SCSI_STATUS_TASK_ABORTED:
4891 		desc_scsi_status = "task aborted";
4892 		break;
4893 	default:
4894 		desc_scsi_status = "unknown";
4895 		break;
4896 	}
4897 
4898 	desc_scsi_state[0] = '\0';
4899 	if (!scsi_state)
4900 		desc_scsi_state = " ";
4901 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4902 		strcat(desc_scsi_state, "response info ");
4903 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4904 		strcat(desc_scsi_state, "state terminated ");
4905 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4906 		strcat(desc_scsi_state, "no status ");
4907 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4908 		strcat(desc_scsi_state, "autosense failed ");
4909 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4910 		strcat(desc_scsi_state, "autosense valid ");
4911 
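	/*
	 * Dump the decoded information: the failing command, the device
	 * identity (volume, PCIe or SAS device), the ioc/scsi status
	 * strings built above, and, when valid, the sense data and
	 * response info.
	 */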
4912 	scsi_print_command(scmd);
4913 
4914 	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4915 		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4916 			 device_str, (u64)priv_target->sas_address);
4917 	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4918 		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4919 		if (pcie_device) {
4920 			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4921 				 (u64)pcie_device->wwid, pcie_device->port_num);
4922 			if (pcie_device->enclosure_handle != 0)
4923 				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4924 					 (u64)pcie_device->enclosure_logical_id,
4925 					 pcie_device->slot);
4926 			if (pcie_device->connector_name[0])
4927 				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4928 					 pcie_device->enclosure_level,
4929 					 pcie_device->connector_name);
4930 			pcie_device_put(pcie_device);
4931 		}
4932 	} else {
4933 		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4934 		if (sas_device) {
4935 			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4936 				 (u64)sas_device->sas_address, sas_device->phy);
4937 
4938 			_scsih_display_enclosure_chassis_info(ioc, sas_device,
4939 			    NULL, NULL);
4940 
4941 			sas_device_put(sas_device);
4942 		}
4943 	}
4944 
4945 	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4946 		 le16_to_cpu(mpi_reply->DevHandle),
4947 		 desc_ioc_state, ioc_status, smid);
4948 	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
4949 		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
4950 	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
4951 		 le16_to_cpu(mpi_reply->TaskTag),
4952 		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
4953 	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
4954 		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
4955 
4956 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
4957 		struct sense_info data;
4958 		_scsih_normalize_sense(scmd->sense_buffer, &data);
4959 		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
4960 			 data.skey, data.asc, data.ascq,
4961 			 le32_to_cpu(mpi_reply->SenseCount));
4962 	}
4963 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4964 		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
4965 		response_bytes = (u8 *)&response_info;
4966 		_scsih_response_code(ioc, response_bytes[0]);
4967 	}
4968 }
4969 
4970 /**
4971  * _scsih_turn_on_pfa_led - illuminate PFA LED
4972  * @ioc: per adapter object
4973  * @handle: device handle
4974  * Context: process
4975  */
4976 static void
4977 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4978 {
4979 	Mpi2SepReply_t mpi_reply;
4980 	Mpi2SepRequest_t mpi_request;
4981 	struct _sas_device *sas_device;
4982 
4983 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
4984 	if (!sas_device)
4985 		return;
4986 
4987 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
4988 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
4989 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
4990 	mpi_request.SlotStatus =
4991 	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
4992 	mpi_request.DevHandle = cpu_to_le16(handle);
4993 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
4994 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
4995 	    &mpi_request)) != 0) {
4996 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4997 			__FILE__, __LINE__, __func__);
4998 		goto out;
4999 	}
5000 	sas_device->pfa_led_on = 1;
5001 
5002 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5003 		dewtprintk(ioc,
5004 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5005 				    le16_to_cpu(mpi_reply.IOCStatus),
5006 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5007 		goto out;
5008 	}
5009 out:
5010 	sas_device_put(sas_device);
5011 }
5012 
5013 /**
5014  * _scsih_turn_off_pfa_led - turn off Fault LED
5015  * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to be turned off
5017  * Context: process
5018  */
5019 static void
5020 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5021 	struct _sas_device *sas_device)
5022 {
5023 	Mpi2SepReply_t mpi_reply;
5024 	Mpi2SepRequest_t mpi_request;
5025 
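	/*
	 * The slot is addressed by enclosure handle and slot number rather
	 * than by device handle, which may no longer be valid by the time
	 * the LED is turned off.
	 */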
5026 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5027 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5028 	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5029 	mpi_request.SlotStatus = 0;
5030 	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5031 	mpi_request.DevHandle = 0;
5032 	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5033 	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5034 	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5035 		&mpi_request)) != 0) {
5036 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5037 			__FILE__, __LINE__, __func__);
5038 		return;
5039 	}
5040 
5041 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5042 		dewtprintk(ioc,
5043 			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5044 				    le16_to_cpu(mpi_reply.IOCStatus),
5045 				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5046 		return;
5047 	}
5048 }
5049 
5050 /**
5051  * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5052  * @ioc: per adapter object
5053  * @handle: device handle
5054  * Context: interrupt.
5055  */
5056 static void
5057 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5058 {
5059 	struct fw_event_work *fw_event;
5060 
5061 	fw_event = alloc_fw_event_work(0);
5062 	if (!fw_event)
5063 		return;
5064 	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5065 	fw_event->device_handle = handle;
5066 	fw_event->ioc = ioc;
5067 	_scsih_fw_event_add(ioc, fw_event);
5068 	fw_event_work_put(fw_event);
5069 }
5070 
5071 /**
5072  * _scsih_smart_predicted_fault - process smart errors
5073  * @ioc: per adapter object
5074  * @handle: device handle
5075  * Context: interrupt.
5076  */
5077 static void
5078 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5079 {
5080 	struct scsi_target *starget;
5081 	struct MPT3SAS_TARGET *sas_target_priv_data;
5082 	Mpi2EventNotificationReply_t *event_reply;
5083 	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5084 	struct _sas_device *sas_device;
5085 	ssize_t sz;
5086 	unsigned long flags;
5087 
5088 	/* only handle non-raid devices */
5089 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5090 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5091 	if (!sas_device)
5092 		goto out_unlock;
5093 
5094 	starget = sas_device->starget;
5095 	sas_target_priv_data = starget->hostdata;
5096 
5097 	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5098 	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5099 		goto out_unlock;
5100 
5101 	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5102 
5103 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5104 
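	/* On IBM-branded controllers the enclosure PFA LED is driven by the
	 * driver, so queue a delayed event to turn it on for this device.
	 */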
5105 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5106 		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5107 
5108 	/* insert into event log */
5109 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5110 	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5111 	event_reply = kzalloc(sz, GFP_KERNEL);
5112 	if (!event_reply) {
5113 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5114 			__FILE__, __LINE__, __func__);
5115 		goto out;
5116 	}
5117 
5118 	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5119 	event_reply->Event =
5120 	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
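	/* per the MPI specification, MsgLength and EventDataLength are
	 * expressed in units of 32-bit dwords
	 */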
5121 	event_reply->MsgLength = sz/4;
5122 	event_reply->EventDataLength =
5123 	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5124 	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5125 	    event_reply->EventData;
5126 	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5127 	event_data->ASC = 0x5D;
5128 	event_data->DevHandle = cpu_to_le16(handle);
5129 	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5130 	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5131 	kfree(event_reply);
5132 out:
5133 	if (sas_device)
5134 		sas_device_put(sas_device);
5135 	return;
5136 
5137 out_unlock:
5138 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5139 	goto out;
5140 }
5141 
5142 /**
5143  * _scsih_io_done - scsi request callback
5144  * @ioc: per adapter object
5145  * @smid: system request message index
5146  * @msix_index: MSIX table index supplied by the OS
5147  * @reply: reply message frame(lower 32bit addr)
5148  *
5149  * Callback handler when using _scsih_qcmd.
5150  *
5151  * Return: 1 meaning mf should be freed from _base_interrupt
5152  *         0 means the mf is freed from this function.
5153  */
5154 static u8
5155 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5156 {
5157 	Mpi25SCSIIORequest_t *mpi_request;
5158 	Mpi2SCSIIOReply_t *mpi_reply;
5159 	struct scsi_cmnd *scmd;
5160 	struct scsiio_tracker *st;
5161 	u16 ioc_status;
5162 	u32 xfer_cnt;
5163 	u8 scsi_state;
5164 	u8 scsi_status;
5165 	u32 log_info;
5166 	struct MPT3SAS_DEVICE *sas_device_priv_data;
5167 	u32 response_code = 0;
5168 
5169 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5170 
5171 	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5172 	if (scmd == NULL)
5173 		return 1;
5174 
5175 	_scsih_set_satl_pending(scmd, false);
5176 
5177 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5178 
5179 	if (mpi_reply == NULL) {
5180 		scmd->result = DID_OK << 16;
5181 		goto out;
5182 	}
5183 
5184 	sas_device_priv_data = scmd->device->hostdata;
5185 	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5186 	     sas_device_priv_data->sas_target->deleted) {
5187 		scmd->result = DID_NO_CONNECT << 16;
5188 		goto out;
5189 	}
5190 	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5191 
5192 	/*
5193 	 * WARPDRIVE: if direct_io is set, this was a direct I/O;
5194 	 * a failed direct I/O is resubmitted to the volume instead
5195 	 */
5196 	st = scsi_cmd_priv(scmd);
5197 	if (st->direct_io &&
5198 	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5199 	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5200 		st->direct_io = 0;
5201 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5202 		mpi_request->DevHandle =
5203 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5204 		ioc->put_smid_scsi_io(ioc, smid,
5205 		    sas_device_priv_data->sas_target->handle);
5206 		return 0;
5207 	}
5208 	/* turning off TLR */
5209 	scsi_state = mpi_reply->SCSIState;
5210 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5211 		response_code =
5212 		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5213 	if (!sas_device_priv_data->tlr_snoop_check) {
5214 		sas_device_priv_data->tlr_snoop_check++;
5215 		if ((!ioc->is_warpdrive &&
5216 		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5217 		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5218 		    && sas_is_tlr_enabled(scmd->device) &&
5219 		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5220 			sas_disable_tlr(scmd->device);
5221 			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5222 		}
5223 	}
5224 
5225 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5226 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5227 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5228 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5229 	else
5230 		log_info = 0;
5231 	ioc_status &= MPI2_IOCSTATUS_MASK;
5232 	scsi_status = mpi_reply->SCSIStatus;
5233 
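	/* A zero-byte underrun reported with BUSY, RESERVATION CONFLICT or
	 * TASK SET FULL is treated as success so the raw SCSI status is
	 * returned to the midlayer instead of a soft error.
	 */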
5234 	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5235 	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5236 	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5237 	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5238 		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5239 	}
5240 
5241 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5242 		struct sense_info data;
5243 		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5244 		    smid);
5245 		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5246 		    le32_to_cpu(mpi_reply->SenseCount));
5247 		memcpy(scmd->sense_buffer, sense_data, sz);
5248 		_scsih_normalize_sense(scmd->sense_buffer, &data);
5249 		/* failure prediction threshold exceeded */
5250 		if (data.asc == 0x5D)
5251 			_scsih_smart_predicted_fault(ioc,
5252 			    le16_to_cpu(mpi_reply->DevHandle));
5253 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5254 
5255 		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5256 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5257 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5258 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5259 			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5260 	}
5261 	switch (ioc_status) {
5262 	case MPI2_IOCSTATUS_BUSY:
5263 	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5264 		scmd->result = SAM_STAT_BUSY;
5265 		break;
5266 
5267 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5268 		scmd->result = DID_NO_CONNECT << 16;
5269 		break;
5270 
5271 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5272 		if (sas_device_priv_data->block) {
5273 			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5274 			goto out;
5275 		}
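		/* This IOCLogInfo value is treated as a transient target
		 * failure: allow a couple of retries, then offline the
		 * device if it keeps failing.
		 */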
5276 		if (log_info == 0x31110630) {
5277 			if (scmd->retries > 2) {
5278 				scmd->result = DID_NO_CONNECT << 16;
5279 				scsi_device_set_state(scmd->device,
5280 				    SDEV_OFFLINE);
5281 			} else {
5282 				scmd->result = DID_SOFT_ERROR << 16;
5283 				scmd->device->expecting_cc_ua = 1;
5284 			}
5285 			break;
5286 		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5287 			scmd->result = DID_RESET << 16;
5288 			break;
5289 		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5290 		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5291 		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5292 			scmd->result = DID_RESET << 16;
5293 			break;
5294 		}
5295 		scmd->result = DID_SOFT_ERROR << 16;
5296 		break;
5297 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5298 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5299 		scmd->result = DID_RESET << 16;
5300 		break;
5301 
5302 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5303 		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5304 			scmd->result = DID_SOFT_ERROR << 16;
5305 		else
5306 			scmd->result = (DID_OK << 16) | scsi_status;
5307 		break;
5308 
5309 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5310 		scmd->result = (DID_OK << 16) | scsi_status;
5311 
5312 		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5313 			break;
5314 
5315 		if (xfer_cnt < scmd->underflow) {
5316 			if (scsi_status == SAM_STAT_BUSY)
5317 				scmd->result = SAM_STAT_BUSY;
5318 			else
5319 				scmd->result = DID_SOFT_ERROR << 16;
5320 		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5321 		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5322 			scmd->result = DID_SOFT_ERROR << 16;
5323 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5324 			scmd->result = DID_RESET << 16;
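		/* An empty REPORT LUNS response is converted to a CHECK
		 * CONDITION with ILLEGAL REQUEST/INVALID COMMAND OPERATION
		 * CODE (0x20/0x00) so the midlayer falls back to sequential
		 * LUN scanning.
		 */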
5325 		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5326 			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5327 			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5328 			scmd->result = (DRIVER_SENSE << 24) |
5329 			    SAM_STAT_CHECK_CONDITION;
5330 			scmd->sense_buffer[0] = 0x70;
5331 			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5332 			scmd->sense_buffer[12] = 0x20;
5333 			scmd->sense_buffer[13] = 0;
5334 		}
5335 		break;
5336 
5337 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5338 		scsi_set_resid(scmd, 0);
5339 		/* fall through */
5340 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5341 	case MPI2_IOCSTATUS_SUCCESS:
5342 		scmd->result = (DID_OK << 16) | scsi_status;
5343 		if (response_code ==
5344 		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5345 		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5346 		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5347 			scmd->result = DID_SOFT_ERROR << 16;
5348 		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5349 			scmd->result = DID_RESET << 16;
5350 		break;
5351 
5352 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5353 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5354 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5355 		_scsih_eedp_error_handling(scmd, ioc_status);
5356 		break;
5357 
5358 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5359 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5360 	case MPI2_IOCSTATUS_INVALID_SGL:
5361 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5362 	case MPI2_IOCSTATUS_INVALID_FIELD:
5363 	case MPI2_IOCSTATUS_INVALID_STATE:
5364 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5365 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5366 	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5367 	default:
5368 		scmd->result = DID_SOFT_ERROR << 16;
5369 		break;
5370 
5371 	}
5372 
5373 	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5374 		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5375 
5376  out:
5377 
5378 	scsi_dma_unmap(scmd);
5379 	mpt3sas_base_free_smid(ioc, smid);
5380 	scmd->scsi_done(scmd);
5381 	return 0;
5382 }
5383 
5384 /**
5385  * _scsih_sas_host_refresh - refreshing sas host object contents
5386  * @ioc: per adapter object
5387  * Context: user
5388  *
5389  * During port enable, fw will send topology events for every device. It's
5390  * possible that the handles may have changed from the previous setting, so
5391  * this code keeps the handles updated when they change.
5392  */
5393 static void
5394 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5395 {
5396 	u16 sz;
5397 	u16 ioc_status;
5398 	int i;
5399 	Mpi2ConfigReply_t mpi_reply;
5400 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5401 	u16 attached_handle;
5402 	u8 link_rate;
5403 
5404 	dtmprintk(ioc,
5405 		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5406 			   (u64)ioc->sas_hba.sas_address));
5407 
5408 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5409 	    * sizeof(Mpi2SasIOUnit0PhyData_t));
5410 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5411 	if (!sas_iounit_pg0) {
5412 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5413 			__FILE__, __LINE__, __func__);
5414 		return;
5415 	}
5416 
5417 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5418 	    sas_iounit_pg0, sz)) != 0)
5419 		goto out;
5420 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5421 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5422 		goto out;
5423 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
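		/* the upper nibble of NegotiatedLinkRate carries the logical
		 * (negotiated) link rate
		 */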
5424 		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5425 		if (i == 0)
5426 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5427 			    PhyData[0].ControllerDevHandle);
5428 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5429 		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5430 		    AttachedDevHandle);
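		/* a phy with an attached device is reported at a minimum of
		 * 1.5 Gbps so the link is not treated as down
		 */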
5431 		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5432 			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5433 		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5434 		    attached_handle, i, link_rate);
5435 	}
5436  out:
5437 	kfree(sas_iounit_pg0);
5438 }
5439 
5440 /**
5441  * _scsih_sas_host_add - create sas host object
5442  * @ioc: per adapter object
5443  *
5444  * Creating host side data object, stored in ioc->sas_hba
5445  */
5446 static void
5447 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5448 {
5449 	int i;
5450 	Mpi2ConfigReply_t mpi_reply;
5451 	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5452 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5453 	Mpi2SasPhyPage0_t phy_pg0;
5454 	Mpi2SasDevicePage0_t sas_device_pg0;
5455 	Mpi2SasEnclosurePage0_t enclosure_pg0;
5456 	u16 ioc_status;
5457 	u16 sz;
5458 	u8 device_missing_delay;
5459 	u8 num_phys;
5460 
5461 	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5462 	if (!num_phys) {
5463 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5464 			__FILE__, __LINE__, __func__);
5465 		return;
5466 	}
5467 	ioc->sas_hba.phy = kcalloc(num_phys,
5468 	    sizeof(struct _sas_phy), GFP_KERNEL);
5469 	if (!ioc->sas_hba.phy) {
5470 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5471 			__FILE__, __LINE__, __func__);
5472 		goto out;
5473 	}
5474 	ioc->sas_hba.num_phys = num_phys;
5475 
5476 	/* sas_iounit page 0 */
5477 	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5478 	    sizeof(Mpi2SasIOUnit0PhyData_t));
5479 	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5480 	if (!sas_iounit_pg0) {
5481 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5482 			__FILE__, __LINE__, __func__);
5483 		return;
5484 	}
5485 	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5486 	    sas_iounit_pg0, sz))) {
5487 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5488 			__FILE__, __LINE__, __func__);
5489 		goto out;
5490 	}
5491 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5492 	    MPI2_IOCSTATUS_MASK;
5493 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5494 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5495 			__FILE__, __LINE__, __func__);
5496 		goto out;
5497 	}
5498 
5499 	/* sas_iounit page 1 */
5500 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5501 	    sizeof(Mpi2SasIOUnit1PhyData_t));
5502 	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5503 	if (!sas_iounit_pg1) {
5504 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5505 			__FILE__, __LINE__, __func__);
5506 		goto out;
5507 	}
5508 	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5509 	    sas_iounit_pg1, sz))) {
5510 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5511 			__FILE__, __LINE__, __func__);
5512 		goto out;
5513 	}
5514 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5515 	    MPI2_IOCSTATUS_MASK;
5516 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5517 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5518 			__FILE__, __LINE__, __func__);
5519 		goto out;
5520 	}
5521 
5522 	ioc->io_missing_delay =
5523 	    sas_iounit_pg1->IODeviceMissingDelay;
5524 	device_missing_delay =
5525 	    sas_iounit_pg1->ReportDeviceMissingDelay;
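	/* when the UNIT_16 flag is set the delay is expressed in units of
	 * 16 seconds, otherwise in seconds
	 */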
5526 	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5527 		ioc->device_missing_delay = (device_missing_delay &
5528 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5529 	else
5530 		ioc->device_missing_delay = device_missing_delay &
5531 		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5532 
5533 	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5534 	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5535 		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5536 		    i))) {
5537 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5538 				__FILE__, __LINE__, __func__);
5539 			goto out;
5540 		}
5541 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5542 		    MPI2_IOCSTATUS_MASK;
5543 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5544 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5545 				__FILE__, __LINE__, __func__);
5546 			goto out;
5547 		}
5548 
5549 		if (i == 0)
5550 			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5551 			    PhyData[0].ControllerDevHandle);
5552 		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5553 		ioc->sas_hba.phy[i].phy_id = i;
5554 		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5555 		    phy_pg0, ioc->sas_hba.parent_dev);
5556 	}
5557 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5558 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5559 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5560 			__FILE__, __LINE__, __func__);
5561 		goto out;
5562 	}
5563 	ioc->sas_hba.enclosure_handle =
5564 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
5565 	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5566 	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5567 		 ioc->sas_hba.handle,
5568 		 (u64)ioc->sas_hba.sas_address,
5569 		 ioc->sas_hba.num_phys);
5570 
5571 	if (ioc->sas_hba.enclosure_handle) {
5572 		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5573 		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5574 		   ioc->sas_hba.enclosure_handle)))
5575 			ioc->sas_hba.enclosure_logical_id =
5576 			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5577 	}
5578 
5579  out:
5580 	kfree(sas_iounit_pg1);
5581 	kfree(sas_iounit_pg0);
5582 }
5583 
5584 /**
5585  * _scsih_expander_add -  creating expander object
5586  * @ioc: per adapter object
5587  * @handle: expander handle
5588  *
5589  * Creating expander object, stored in ioc->sas_expander_list.
5590  *
5591  * Return: 0 for success, else error.
5592  */
5593 static int
5594 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5595 {
5596 	struct _sas_node *sas_expander;
5597 	struct _enclosure_node *enclosure_dev;
5598 	Mpi2ConfigReply_t mpi_reply;
5599 	Mpi2ExpanderPage0_t expander_pg0;
5600 	Mpi2ExpanderPage1_t expander_pg1;
5601 	u32 ioc_status;
5602 	u16 parent_handle;
5603 	u64 sas_address, sas_address_parent = 0;
5604 	int i;
5605 	unsigned long flags;
5606 	struct _sas_port *mpt3sas_port = NULL;
5607 
5608 	int rc = 0;
5609 
5610 	if (!handle)
5611 		return -1;
5612 
5613 	if (ioc->shost_recovery || ioc->pci_error_recovery)
5614 		return -1;
5615 
5616 	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5617 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5618 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5619 			__FILE__, __LINE__, __func__);
5620 		return -1;
5621 	}
5622 
5623 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5624 	    MPI2_IOCSTATUS_MASK;
5625 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5626 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5627 			__FILE__, __LINE__, __func__);
5628 		return -1;
5629 	}
5630 
5631 	/* handle out of order topology events */
5632 	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5633 	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5634 	    != 0) {
5635 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5636 			__FILE__, __LINE__, __func__);
5637 		return -1;
5638 	}
5639 	if (sas_address_parent != ioc->sas_hba.sas_address) {
5640 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
5641 		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5642 		    sas_address_parent);
5643 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5644 		if (!sas_expander) {
5645 			rc = _scsih_expander_add(ioc, parent_handle);
5646 			if (rc != 0)
5647 				return rc;
5648 		}
5649 	}
5650 
5651 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5652 	sas_address = le64_to_cpu(expander_pg0.SASAddress);
5653 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5654 	    sas_address);
5655 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5656 
5657 	if (sas_expander)
5658 		return 0;
5659 
5660 	sas_expander = kzalloc(sizeof(struct _sas_node),
5661 	    GFP_KERNEL);
5662 	if (!sas_expander) {
5663 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5664 			__FILE__, __LINE__, __func__);
5665 		return -1;
5666 	}
5667 
5668 	sas_expander->handle = handle;
5669 	sas_expander->num_phys = expander_pg0.NumPhys;
5670 	sas_expander->sas_address_parent = sas_address_parent;
5671 	sas_expander->sas_address = sas_address;
5672 
5673 	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5674 		 handle, parent_handle,
5675 		 (u64)sas_expander->sas_address, sas_expander->num_phys);
5676 
5677 	if (!sas_expander->num_phys)
5678 		goto out_fail;
5679 	sas_expander->phy = kcalloc(sas_expander->num_phys,
5680 	    sizeof(struct _sas_phy), GFP_KERNEL);
5681 	if (!sas_expander->phy) {
5682 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5683 			__FILE__, __LINE__, __func__);
5684 		rc = -1;
5685 		goto out_fail;
5686 	}
5687 
5688 	INIT_LIST_HEAD(&sas_expander->sas_port_list);
5689 	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5690 	    sas_address_parent);
5691 	if (!mpt3sas_port) {
5692 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5693 			__FILE__, __LINE__, __func__);
5694 		rc = -1;
5695 		goto out_fail;
5696 	}
5697 	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5698 
5699 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
5700 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5701 		    &expander_pg1, i, handle))) {
5702 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5703 				__FILE__, __LINE__, __func__);
5704 			rc = -1;
5705 			goto out_fail;
5706 		}
5707 		sas_expander->phy[i].handle = handle;
5708 		sas_expander->phy[i].phy_id = i;
5709 
5710 		if ((mpt3sas_transport_add_expander_phy(ioc,
5711 		    &sas_expander->phy[i], expander_pg1,
5712 		    sas_expander->parent_dev))) {
5713 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5714 				__FILE__, __LINE__, __func__);
5715 			rc = -1;
5716 			goto out_fail;
5717 		}
5718 	}
5719 
5720 	if (sas_expander->enclosure_handle) {
5721 		enclosure_dev =
5722 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
5723 						sas_expander->enclosure_handle);
5724 		if (enclosure_dev)
5725 			sas_expander->enclosure_logical_id =
5726 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5727 	}
5728 
5729 	_scsih_expander_node_add(ioc, sas_expander);
5730 	return 0;
5731 
5732  out_fail:
5733 
5734 	if (mpt3sas_port)
5735 		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5736 		    sas_address_parent);
5737 	kfree(sas_expander);
5738 	return rc;
5739 }
5740 
5741 /**
5742  * mpt3sas_expander_remove - removing expander object
5743  * @ioc: per adapter object
5744  * @sas_address: expander sas_address
5745  */
5746 void
5747 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5748 {
5749 	struct _sas_node *sas_expander;
5750 	unsigned long flags;
5751 
5752 	if (ioc->shost_recovery)
5753 		return;
5754 
5755 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
5756 	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5757 	    sas_address);
5758 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5759 	if (sas_expander)
5760 		_scsih_expander_node_remove(ioc, sas_expander);
5761 }
5762 
5763 /**
5764  * _scsih_done -  internal SCSI_IO callback handler.
5765  * @ioc: per adapter object
5766  * @smid: system request message index
5767  * @msix_index: MSIX table index supplied by the OS
5768  * @reply: reply message frame(lower 32bit addr)
5769  *
5770  * Callback handler when sending internally generated SCSI_IO.
5771  * The callback index passed is `ioc->scsih_cb_idx`
5772  *
5773  * Return: 1 meaning mf should be freed from _base_interrupt
5774  *         0 means the mf is freed from this function.
5775  */
5776 static u8
5777 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5778 {
5779 	MPI2DefaultReply_t *mpi_reply;
5780 
5781 	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
5782 	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5783 		return 1;
5784 	if (ioc->scsih_cmds.smid != smid)
5785 		return 1;
5786 	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5787 	if (mpi_reply) {
5788 		memcpy(ioc->scsih_cmds.reply, mpi_reply,
5789 		    mpi_reply->MsgLength*4);
5790 		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5791 	}
5792 	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5793 	complete(&ioc->scsih_cmds.done);
5794 	return 1;
5795 }
5796 
5797 
5798 
5799 
5800 #define MPT3_MAX_LUNS (255)
5801 
5802 
5803 /**
5804  * _scsih_check_access_status - check access flags
5805  * @ioc: per adapter object
5806  * @sas_address: sas address
5807  * @handle: sas device handle
5808  * @access_status: errors returned during discovery of the device
5809  *
5810  * Return: 0 for success, else failure
5811  */
5812 static u8
5813 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5814 	u16 handle, u8 access_status)
5815 {
5816 	u8 rc = 1;
5817 	char *desc = NULL;
5818 
5819 	switch (access_status) {
5820 	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5821 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5822 		rc = 0;
5823 		break;
5824 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5825 		desc = "sata capability failed";
5826 		break;
5827 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5828 		desc = "sata affiliation conflict";
5829 		break;
5830 	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5831 		desc = "route not addressable";
5832 		break;
5833 	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5834 		desc = "smp error not addressable";
5835 		break;
5836 	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5837 		desc = "device blocked";
5838 		break;
5839 	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5840 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5841 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5842 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5843 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5844 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5845 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5846 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5847 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5848 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5849 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5850 	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5851 		desc = "sata initialization failed";
5852 		break;
5853 	default:
5854 		desc = "unknown";
5855 		break;
5856 	}
5857 
5858 	if (!rc)
5859 		return 0;
5860 
5861 	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5862 		desc, (u64)sas_address, handle);
5863 	return rc;
5864 }
5865 
5866 /**
5867  * _scsih_check_device - checking device responsiveness
5868  * @ioc: per adapter object
5869  * @parent_sas_address: sas address of parent expander or sas host
5870  * @handle: attached device handle
5871  * @phy_number: phy number
5872  * @link_rate: new link rate
5873  */
5874 static void
5875 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5876 	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5877 {
5878 	Mpi2ConfigReply_t mpi_reply;
5879 	Mpi2SasDevicePage0_t sas_device_pg0;
5880 	struct _sas_device *sas_device;
5881 	struct _enclosure_node *enclosure_dev = NULL;
5882 	u32 ioc_status;
5883 	unsigned long flags;
5884 	u64 sas_address;
5885 	struct scsi_target *starget;
5886 	struct MPT3SAS_TARGET *sas_target_priv_data;
5887 	u32 device_info;
5888 
5889 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5890 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5891 		return;
5892 
5893 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5894 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5895 		return;
5896 
5897 	/* wide port handling: we only need to handle the device once, for the
5898 	 * phy that matches SAS device page 0
5899 	 */
5900 	if (phy_number != sas_device_pg0.PhyNum)
5901 		return;
5902 
5903 	/* check if this is end device */
5904 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5905 	if (!(_scsih_is_end_device(device_info)))
5906 		return;
5907 
5908 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5909 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5910 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5911 	    sas_address);
5912 
5913 	if (!sas_device)
5914 		goto out_unlock;
5915 
5916 	if (unlikely(sas_device->handle != handle)) {
5917 		starget = sas_device->starget;
5918 		sas_target_priv_data = starget->hostdata;
5919 		starget_printk(KERN_INFO, starget,
5920 			"handle changed from(0x%04x) to (0x%04x)!!!\n",
5921 			sas_device->handle, handle);
5922 		sas_target_priv_data->handle = handle;
5923 		sas_device->handle = handle;
5924 		if (le16_to_cpu(sas_device_pg0.Flags) &
5925 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5926 			sas_device->enclosure_level =
5927 				sas_device_pg0.EnclosureLevel;
5928 			memcpy(sas_device->connector_name,
5929 				sas_device_pg0.ConnectorName, 4);
5930 			sas_device->connector_name[4] = '\0';
5931 		} else {
5932 			sas_device->enclosure_level = 0;
5933 			sas_device->connector_name[0] = '\0';
5934 		}
5935 
5936 		sas_device->enclosure_handle =
5937 				le16_to_cpu(sas_device_pg0.EnclosureHandle);
5938 		sas_device->is_chassis_slot_valid = 0;
5939 		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5940 						sas_device->enclosure_handle);
5941 		if (enclosure_dev) {
5942 			sas_device->enclosure_logical_id =
5943 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5944 			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
5945 			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
5946 				sas_device->is_chassis_slot_valid = 1;
5947 				sas_device->chassis_slot =
5948 					enclosure_dev->pg0.ChassisSlot;
5949 			}
5950 		}
5951 	}
5952 
5953 	/* check if device is present */
5954 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
5955 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5956 		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
5957 			handle);
5958 		goto out_unlock;
5959 	}
5960 
5961 	/* check if there were any issues with discovery */
5962 	if (_scsih_check_access_status(ioc, sas_address, handle,
5963 	    sas_device_pg0.AccessStatus))
5964 		goto out_unlock;
5965 
5966 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5967 	_scsih_ublock_io_device(ioc, sas_address);
5968 
5969 	if (sas_device)
5970 		sas_device_put(sas_device);
5971 	return;
5972 
5973 out_unlock:
5974 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5975 	if (sas_device)
5976 		sas_device_put(sas_device);
5977 }
5978 
5979 /**
5980  * _scsih_add_device -  creating sas device object
5981  * @ioc: per adapter object
5982  * @handle: sas device handle
5983  * @phy_num: phy number end device attached to
5984  * @is_pd: is this hidden raid component
5985  *
5986  * Creating end device object, stored in ioc->sas_device_list.
5987  *
5988  * Return: 0 for success, non-zero for failure.
5989  */
5990 static int
5991 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5992 	u8 is_pd)
5993 {
5994 	Mpi2ConfigReply_t mpi_reply;
5995 	Mpi2SasDevicePage0_t sas_device_pg0;
5996 	struct _sas_device *sas_device;
5997 	struct _enclosure_node *enclosure_dev = NULL;
5998 	u32 ioc_status;
5999 	u64 sas_address;
6000 	u32 device_info;
6001 
6002 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6003 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6004 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6005 			__FILE__, __LINE__, __func__);
6006 		return -1;
6007 	}
6008 
6009 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6010 	    MPI2_IOCSTATUS_MASK;
6011 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6012 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6013 			__FILE__, __LINE__, __func__);
6014 		return -1;
6015 	}
6016 
6017 	/* check if this is end device */
6018 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6019 	if (!(_scsih_is_end_device(device_info)))
6020 		return -1;
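	/* note the handle in pend_os_device_add; the bit stays set until the
	 * device has been registered with the SCSI midlayer
	 */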
6021 	set_bit(handle, ioc->pend_os_device_add);
6022 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6023 
6024 	/* check if device is present */
6025 	if (!(le16_to_cpu(sas_device_pg0.Flags) &
6026 	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6027 		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6028 			handle);
6029 		return -1;
6030 	}
6031 
6032 	/* check if there were any issues with discovery */
6033 	if (_scsih_check_access_status(ioc, sas_address, handle,
6034 	    sas_device_pg0.AccessStatus))
6035 		return -1;
6036 
6037 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
6038 					sas_address);
6039 	if (sas_device) {
6040 		clear_bit(handle, ioc->pend_os_device_add);
6041 		sas_device_put(sas_device);
6042 		return -1;
6043 	}
6044 
6045 	if (sas_device_pg0.EnclosureHandle) {
6046 		enclosure_dev =
6047 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6048 			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
6049 		if (enclosure_dev == NULL)
6050 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6051 				 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6052 	}
6053 
6054 	sas_device = kzalloc(sizeof(struct _sas_device),
6055 	    GFP_KERNEL);
6056 	if (!sas_device) {
6057 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6058 			__FILE__, __LINE__, __func__);
6059 		return 0;
6060 	}
6061 
6062 	kref_init(&sas_device->refcount);
6063 	sas_device->handle = handle;
6064 	if (_scsih_get_sas_address(ioc,
6065 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
6066 	    &sas_device->sas_address_parent) != 0)
6067 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6068 			__FILE__, __LINE__, __func__);
6069 	sas_device->enclosure_handle =
6070 	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
6071 	if (sas_device->enclosure_handle != 0)
6072 		sas_device->slot =
6073 		    le16_to_cpu(sas_device_pg0.Slot);
6074 	sas_device->device_info = device_info;
6075 	sas_device->sas_address = sas_address;
6076 	sas_device->phy = sas_device_pg0.PhyNum;
6077 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6078 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6079 
6080 	if (le16_to_cpu(sas_device_pg0.Flags)
6081 		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6082 		sas_device->enclosure_level =
6083 			sas_device_pg0.EnclosureLevel;
6084 		memcpy(sas_device->connector_name,
6085 			sas_device_pg0.ConnectorName, 4);
6086 		sas_device->connector_name[4] = '\0';
6087 	} else {
6088 		sas_device->enclosure_level = 0;
6089 		sas_device->connector_name[0] = '\0';
6090 	}
6091 	/* get enclosure_logical_id & chassis_slot*/
6092 	sas_device->is_chassis_slot_valid = 0;
6093 	if (enclosure_dev) {
6094 		sas_device->enclosure_logical_id =
6095 		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6096 		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6097 		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6098 			sas_device->is_chassis_slot_valid = 1;
6099 			sas_device->chassis_slot =
6100 					enclosure_dev->pg0.ChassisSlot;
6101 		}
6102 	}
6103 
6104 	/* get device name */
6105 	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6106 
6107 	if (ioc->wait_for_discovery_to_complete)
6108 		_scsih_sas_device_init_add(ioc, sas_device);
6109 	else
6110 		_scsih_sas_device_add(ioc, sas_device);
6111 
6112 	sas_device_put(sas_device);
6113 	return 0;
6114 }
6115 
6116 /**
6117  * _scsih_remove_device -  removing sas device object
6118  * @ioc: per adapter object
6119  * @sas_device: the sas_device object
6120  */
6121 static void
6122 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6123 	struct _sas_device *sas_device)
6124 {
6125 	struct MPT3SAS_TARGET *sas_target_priv_data;
6126 
6127 	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6128 	     (sas_device->pfa_led_on)) {
6129 		_scsih_turn_off_pfa_led(ioc, sas_device);
6130 		sas_device->pfa_led_on = 0;
6131 	}
6132 
6133 	dewtprintk(ioc,
6134 		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6135 			    __func__,
6136 			    sas_device->handle, (u64)sas_device->sas_address));
6137 
6138 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6139 	    NULL, NULL));
6140 
6141 	if (sas_device->starget && sas_device->starget->hostdata) {
6142 		sas_target_priv_data = sas_device->starget->hostdata;
6143 		sas_target_priv_data->deleted = 1;
6144 		_scsih_ublock_io_device(ioc, sas_device->sas_address);
6145 		sas_target_priv_data->handle =
6146 		     MPT3SAS_INVALID_DEVICE_HANDLE;
6147 	}
6148 
6149 	if (!ioc->hide_drives)
6150 		mpt3sas_transport_port_remove(ioc,
6151 		    sas_device->sas_address,
6152 		    sas_device->sas_address_parent);
6153 
6154 	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6155 		 sas_device->handle, (u64)sas_device->sas_address);
6156 
6157 	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6158 
6159 	dewtprintk(ioc,
6160 		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6161 			    __func__,
6162 			    sas_device->handle, (u64)sas_device->sas_address));
6163 	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6164 	    NULL, NULL));
6165 }
6166 
6167 /**
6168  * _scsih_sas_topology_change_event_debug - debug for topology event
6169  * @ioc: per adapter object
6170  * @event_data: event data payload
6171  * Context: user.
6172  */
6173 static void
6174 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6175 	Mpi2EventDataSasTopologyChangeList_t *event_data)
6176 {
6177 	int i;
6178 	u16 handle;
6179 	u16 reason_code;
6180 	u8 phy_number;
6181 	char *status_str = NULL;
6182 	u8 link_rate, prev_link_rate;
6183 
6184 	switch (event_data->ExpStatus) {
6185 	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6186 		status_str = "add";
6187 		break;
6188 	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6189 		status_str = "remove";
6190 		break;
6191 	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6192 	case 0:
6193 		status_str =  "responding";
6194 		break;
6195 	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6196 		status_str = "remove delay";
6197 		break;
6198 	default:
6199 		status_str = "unknown status";
6200 		break;
6201 	}
6202 	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6203 	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6204 	    "start_phy(%02d), count(%d)\n",
6205 	    le16_to_cpu(event_data->ExpanderDevHandle),
6206 	    le16_to_cpu(event_data->EnclosureHandle),
6207 	    event_data->StartPhyNum, event_data->NumEntries);
6208 	for (i = 0; i < event_data->NumEntries; i++) {
6209 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6210 		if (!handle)
6211 			continue;
6212 		phy_number = event_data->StartPhyNum + i;
6213 		reason_code = event_data->PHY[i].PhyStatus &
6214 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
6215 		switch (reason_code) {
6216 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6217 			status_str = "target add";
6218 			break;
6219 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6220 			status_str = "target remove";
6221 			break;
6222 		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6223 			status_str = "delay target remove";
6224 			break;
6225 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6226 			status_str = "link rate change";
6227 			break;
6228 		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6229 			status_str = "target responding";
6230 			break;
6231 		default:
6232 			status_str = "unknown";
6233 			break;
6234 		}
6235 		link_rate = event_data->PHY[i].LinkRate >> 4;
6236 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6237 		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6238 		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6239 		    handle, status_str, link_rate, prev_link_rate);
6240 
6241 	}
6242 }
6243 
6244 /**
6245  * _scsih_sas_topology_change_event - handle topology changes
6246  * @ioc: per adapter object
6247  * @fw_event: The fw_event_work object
6248  * Context: user.
6249  *
6250  */
6251 static int
6252 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6253 	struct fw_event_work *fw_event)
6254 {
6255 	int i;
6256 	u16 parent_handle, handle;
6257 	u16 reason_code;
6258 	u8 phy_number, max_phys;
6259 	struct _sas_node *sas_expander;
6260 	u64 sas_address;
6261 	unsigned long flags;
6262 	u8 link_rate, prev_link_rate;
6263 	Mpi2EventDataSasTopologyChangeList_t *event_data =
6264 		(Mpi2EventDataSasTopologyChangeList_t *)
6265 		fw_event->event_data;
6266 
6267 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6268 		_scsih_sas_topology_change_event_debug(ioc, event_data);
6269 
6270 	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6271 		return 0;
6272 
6273 	if (!ioc->sas_hba.num_phys)
6274 		_scsih_sas_host_add(ioc);
6275 	else
6276 		_scsih_sas_host_refresh(ioc);
6277 
6278 	if (fw_event->ignore) {
6279 		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6280 		return 0;
6281 	}
6282 
6283 	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6284 
6285 	/* handle expander add */
6286 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6287 		if (_scsih_expander_add(ioc, parent_handle) != 0)
6288 			return 0;
6289 
6290 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6291 	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6292 	    parent_handle);
6293 	if (sas_expander) {
6294 		sas_address = sas_expander->sas_address;
6295 		max_phys = sas_expander->num_phys;
6296 	} else if (parent_handle < ioc->sas_hba.num_phys) {
6297 		sas_address = ioc->sas_hba.sas_address;
6298 		max_phys = ioc->sas_hba.num_phys;
6299 	} else {
6300 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6301 		return 0;
6302 	}
6303 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6304 
6305 	/* handle siblings events */
6306 	for (i = 0; i < event_data->NumEntries; i++) {
6307 		if (fw_event->ignore) {
6308 			dewtprintk(ioc,
6309 				   ioc_info(ioc, "ignoring expander event\n"));
6310 			return 0;
6311 		}
6312 		if (ioc->remove_host || ioc->pci_error_recovery)
6313 			return 0;
6314 		phy_number = event_data->StartPhyNum + i;
6315 		if (phy_number >= max_phys)
6316 			continue;
6317 		reason_code = event_data->PHY[i].PhyStatus &
6318 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
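		/* vacant phys are skipped unless the attached device is being
		 * reported as no longer responding
		 */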
6319 		if ((event_data->PHY[i].PhyStatus &
6320 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6321 		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6322 				continue;
6323 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6324 		if (!handle)
6325 			continue;
6326 		link_rate = event_data->PHY[i].LinkRate >> 4;
6327 		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6328 		switch (reason_code) {
6329 		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6330 
6331 			if (ioc->shost_recovery)
6332 				break;
6333 
6334 			if (link_rate == prev_link_rate)
6335 				break;
6336 
6337 			mpt3sas_transport_update_links(ioc, sas_address,
6338 			    handle, phy_number, link_rate);
6339 
6340 			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6341 				break;
6342 
6343 			_scsih_check_device(ioc, sas_address, handle,
6344 			    phy_number, link_rate);
6345 
6346 			if (!test_bit(handle, ioc->pend_os_device_add))
6347 				break;
6348 
6349 			/* fall through */
6350 
6351 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6352 
6353 			if (ioc->shost_recovery)
6354 				break;
6355 
6356 			mpt3sas_transport_update_links(ioc, sas_address,
6357 			    handle, phy_number, link_rate);
6358 
6359 			_scsih_add_device(ioc, handle, phy_number, 0);
6360 
6361 			break;
6362 		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6363 
6364 			_scsih_device_remove_by_handle(ioc, handle);
6365 			break;
6366 		}
6367 	}
6368 
6369 	/* handle expander removal */
6370 	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6371 	    sas_expander)
6372 		mpt3sas_expander_remove(ioc, sas_address);
6373 
6374 	return 0;
6375 }
6376 
6377 /**
6378  * _scsih_sas_device_status_change_event_debug - debug for device event
6379  * @ioc: per adapter object
6380  * @event_data: event data payload
6381  * Context: user.
6382  */
6383 static void
6384 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6385 	Mpi2EventDataSasDeviceStatusChange_t *event_data)
6386 {
6387 	char *reason_str = NULL;
6388 
6389 	switch (event_data->ReasonCode) {
6390 	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6391 		reason_str = "smart data";
6392 		break;
6393 	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6394 		reason_str = "unsupported device discovered";
6395 		break;
6396 	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6397 		reason_str = "internal device reset";
6398 		break;
6399 	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6400 		reason_str = "internal task abort";
6401 		break;
6402 	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6403 		reason_str = "internal task abort set";
6404 		break;
6405 	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6406 		reason_str = "internal clear task set";
6407 		break;
6408 	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6409 		reason_str = "internal query task";
6410 		break;
6411 	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6412 		reason_str = "sata init failure";
6413 		break;
6414 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6415 		reason_str = "internal device reset complete";
6416 		break;
6417 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6418 		reason_str = "internal task abort complete";
6419 		break;
6420 	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6421 		reason_str = "internal async notification";
6422 		break;
6423 	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6424 		reason_str = "expander reduced functionality";
6425 		break;
6426 	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6427 		reason_str = "expander reduced functionality complete";
6428 		break;
6429 	default:
6430 		reason_str = "unknown reason";
6431 		break;
6432 	}
6433 	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6434 		 reason_str, le16_to_cpu(event_data->DevHandle),
6435 		 (u64)le64_to_cpu(event_data->SASAddress),
6436 		 le16_to_cpu(event_data->TaskTag));
6437 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6438 		pr_cont(", ASC(0x%x), ASCQ(0x%x)",
6439 			event_data->ASC, event_data->ASCQ);
6440 	pr_cont("\n");
6441 }
6442 
6443 /**
6444  * _scsih_sas_device_status_change_event - handle device status change
6445  * @ioc: per adapter object
6446  * @fw_event: The fw_event_work object
6447  * Context: user.
6448  */
6449 static void
6450 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6451 	struct fw_event_work *fw_event)
6452 {
6453 	struct MPT3SAS_TARGET *target_priv_data;
6454 	struct _sas_device *sas_device;
6455 	u64 sas_address;
6456 	unsigned long flags;
6457 	Mpi2EventDataSasDeviceStatusChange_t *event_data =
6458 		(Mpi2EventDataSasDeviceStatusChange_t *)
6459 		fw_event->event_data;
6460 
6461 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6462 		_scsih_sas_device_status_change_event_debug(ioc,
6463 		     event_data);
6464 
6465 	/* In MPI Revision K (0xC), the internal device reset complete was
6466 	 * implemented, so avoid setting tm_busy flag for older firmware.
6467 	 */
6468 	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6469 		return;
6470 
6471 	if (event_data->ReasonCode !=
6472 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6473 	   event_data->ReasonCode !=
6474 	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6475 		return;
6476 
6477 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
6478 	sas_address = le64_to_cpu(event_data->SASAddress);
6479 	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6480 	    sas_address);
6481 
6482 	if (!sas_device || !sas_device->starget)
6483 		goto out;
6484 
6485 	target_priv_data = sas_device->starget->hostdata;
6486 	if (!target_priv_data)
6487 		goto out;
6488 
6489 	if (event_data->ReasonCode ==
6490 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6491 		target_priv_data->tm_busy = 1;
6492 	else
6493 		target_priv_data->tm_busy = 0;
6494 
6495 out:
6496 	if (sas_device)
6497 		sas_device_put(sas_device);
6498 
6499 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6500 }
6501 
6502 
6503 /**
6504  * _scsih_check_pcie_access_status - check access flags
6505  * @ioc: per adapter object
6506  * @wwid: wwid
6507  * @handle: sas device handle
6508  * @access_status: errors returned during discovery of the device
6509  *
6510  * Return: 0 for success, else failure
6511  */
6512 static u8
6513 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6514 	u16 handle, u8 access_status)
6515 {
6516 	u8 rc = 1;
6517 	char *desc = NULL;
6518 
6519 	switch (access_status) {
6520 	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6521 	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6522 		rc = 0;
6523 		break;
6524 	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6525 		desc = "PCIe device capability failed";
6526 		break;
6527 	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6528 		desc = "PCIe device blocked";
6529 		break;
6530 	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6531 		desc = "PCIe device mem space access failed";
6532 		break;
6533 	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6534 		desc = "PCIe device unsupported";
6535 		break;
6536 	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6537 		desc = "PCIe device MSIx Required";
6538 		break;
6539 	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6540 		desc = "PCIe device init fail max";
6541 		break;
6542 	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6543 		desc = "PCIe device status unknown";
6544 		break;
6545 	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6546 		desc = "nvme ready timeout";
6547 		break;
6548 	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6549 		desc = "nvme device configuration unsupported";
6550 		break;
6551 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6552 		desc = "nvme identify failed";
6553 		break;
6554 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6555 		desc = "nvme qconfig failed";
6556 		break;
6557 	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6558 		desc = "nvme qcreation failed";
6559 		break;
6560 	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6561 		desc = "nvme eventcfg failed";
6562 		break;
6563 	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6564 		desc = "nvme get feature stat failed";
6565 		break;
6566 	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6567 		desc = "nvme idle timeout";
6568 		break;
6569 	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6570 		desc = "nvme failure status";
6571 		break;
6572 	default:
6573 		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6574 			access_status, (u64)wwid, handle);
6575 		return rc;
6576 	}
6577 
6578 	if (!rc)
6579 		return rc;
6580 
6581 	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6582 		 desc, (u64)wwid, handle);
6583 	return rc;
6584 }
6585 
6586 /**
6587  * _scsih_pcie_device_remove_from_sml -  removing pcie device
6588  * from SML and freeing up associated memory
6589  * @ioc: per adapter object
6590  * @pcie_device: the pcie_device object
6591  */
6592 static void
6593 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6594 	struct _pcie_device *pcie_device)
6595 {
6596 	struct MPT3SAS_TARGET *sas_target_priv_data;
6597 
6598 	dewtprintk(ioc,
6599 		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6600 			    __func__,
6601 			    pcie_device->handle, (u64)pcie_device->wwid));
6602 	if (pcie_device->enclosure_handle != 0)
6603 		dewtprintk(ioc,
6604 			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6605 				    __func__,
6606 				    (u64)pcie_device->enclosure_logical_id,
6607 				    pcie_device->slot));
6608 	if (pcie_device->connector_name[0] != '\0')
6609 		dewtprintk(ioc,
6610 			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6611 				    __func__,
6612 				    pcie_device->enclosure_level,
6613 				    pcie_device->connector_name));
6614 
6615 	if (pcie_device->starget && pcie_device->starget->hostdata) {
6616 		sas_target_priv_data = pcie_device->starget->hostdata;
6617 		sas_target_priv_data->deleted = 1;
6618 		_scsih_ublock_io_device(ioc, pcie_device->wwid);
6619 		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6620 	}
6621 
6622 	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6623 		 pcie_device->handle, (u64)pcie_device->wwid);
6624 	if (pcie_device->enclosure_handle != 0)
6625 		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6626 			 (u64)pcie_device->enclosure_logical_id,
6627 			 pcie_device->slot);
6628 	if (pcie_device->connector_name[0] != '\0')
6629 		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
6630 			 pcie_device->enclosure_level,
6631 			 pcie_device->connector_name);
6632 
6633 	if (pcie_device->starget)
6634 		scsi_remove_target(&pcie_device->starget->dev);
6635 	dewtprintk(ioc,
6636 		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6637 			    __func__,
6638 			    pcie_device->handle, (u64)pcie_device->wwid));
6639 	if (pcie_device->enclosure_handle != 0)
6640 		dewtprintk(ioc,
6641 			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6642 				    __func__,
6643 				    (u64)pcie_device->enclosure_logical_id,
6644 				    pcie_device->slot));
6645 	if (pcie_device->connector_name[0] != '\0')
6646 		dewtprintk(ioc,
6647 			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
6648 				    __func__,
6649 				    pcie_device->enclosure_level,
6650 				    pcie_device->connector_name));
6651 
6652 	kfree(pcie_device->serial_number);
6653 }
6654 
6655 
6656 /**
6657  * _scsih_pcie_check_device - checking device responsiveness
6658  * @ioc: per adapter object
6659  * @handle: attached device handle
6660  */
6661 static void
6662 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6663 {
6664 	Mpi2ConfigReply_t mpi_reply;
6665 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6666 	u32 ioc_status;
6667 	struct _pcie_device *pcie_device;
6668 	u64 wwid;
6669 	unsigned long flags;
6670 	struct scsi_target *starget;
6671 	struct MPT3SAS_TARGET *sas_target_priv_data;
6672 	u32 device_info;
6673 
6674 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6675 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6676 		return;
6677 
6678 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6679 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6680 		return;
6681 
6682 	/* check if this is end device */
6683 	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6684 	if (!(_scsih_is_nvme_device(device_info)))
6685 		return;
6686 
6687 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6688 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6689 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6690 
6691 	if (!pcie_device) {
6692 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6693 		return;
6694 	}
6695 
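	/*
	 * Firmware may assign a new device handle to the same WWID (for
	 * example across a controller reset), so resync the cached handle
	 * and enclosure data before checking the device state.
	 */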
6696 	if (unlikely(pcie_device->handle != handle)) {
6697 		starget = pcie_device->starget;
6698 		sas_target_priv_data = starget->hostdata;
6699 		starget_printk(KERN_INFO, starget,
6700 		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
6701 		    pcie_device->handle, handle);
6702 		sas_target_priv_data->handle = handle;
6703 		pcie_device->handle = handle;
6704 
6705 		if (le32_to_cpu(pcie_device_pg0.Flags) &
6706 		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6707 			pcie_device->enclosure_level =
6708 			    pcie_device_pg0.EnclosureLevel;
6709 			memcpy(&pcie_device->connector_name[0],
6710 			    &pcie_device_pg0.ConnectorName[0], 4);
6711 		} else {
6712 			pcie_device->enclosure_level = 0;
6713 			pcie_device->connector_name[0] = '\0';
6714 		}
6715 	}
6716 
6717 	/* check if device is present */
6718 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6719 	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_info(ioc, "device is not present handle(0x%04x)!!!\n",
			 handle);
6722 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6723 		pcie_device_put(pcie_device);
6724 		return;
6725 	}
6726 
6727 	/* check if there were any issues with discovery */
6728 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6729 	    pcie_device_pg0.AccessStatus)) {
6730 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6731 		pcie_device_put(pcie_device);
6732 		return;
6733 	}
6734 
6735 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6736 	pcie_device_put(pcie_device);
6737 
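	/* Device is present and accessible again; resume I/O to it. */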
6738 	_scsih_ublock_io_device(ioc, wwid);
6739 
6740 	return;
6741 }
6742 
6743 /**
 * _scsih_pcie_add_device - creating pcie device object
6745  * @ioc: per adapter object
6746  * @handle: pcie device handle
6747  *
6748  * Creating end device object, stored in ioc->pcie_device_list.
6749  *
6750  * Return: 1 means queue the event later, 0 means complete the event
6751  */
6752 static int
6753 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6754 {
6755 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
6756 	Mpi26PCIeDevicePage2_t pcie_device_pg2;
6757 	Mpi2ConfigReply_t mpi_reply;
6758 	struct _pcie_device *pcie_device;
6759 	struct _enclosure_node *enclosure_dev;
6760 	u32 ioc_status;
6761 	u64 wwid;
6762 
6763 	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6764 	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6765 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6766 			__FILE__, __LINE__, __func__);
6767 		return 0;
6768 	}
6769 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6770 	    MPI2_IOCSTATUS_MASK;
6771 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6772 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6773 			__FILE__, __LINE__, __func__);
6774 		return 0;
6775 	}
6776 
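	/*
	 * Track this handle in pend_os_device_add until the device is
	 * actually exposed to the OS; topology events consult this bitmap
	 * to convert a missed add into a device add.
	 */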
6777 	set_bit(handle, ioc->pend_os_device_add);
6778 	wwid = le64_to_cpu(pcie_device_pg0.WWID);
6779 
6780 	/* check if device is present */
6781 	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6782 		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6784 			handle);
6785 		return 0;
6786 	}
6787 
6788 	/* check if there were any issues with discovery */
6789 	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6790 	    pcie_device_pg0.AccessStatus))
6791 		return 0;
6792 
6793 	if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
6794 		return 0;
6795 
6796 	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6797 	if (pcie_device) {
6798 		clear_bit(handle, ioc->pend_os_device_add);
6799 		pcie_device_put(pcie_device);
6800 		return 0;
6801 	}
6802 
6803 	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6804 	if (!pcie_device) {
6805 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6806 			__FILE__, __LINE__, __func__);
6807 		return 0;
6808 	}
6809 
6810 	kref_init(&pcie_device->refcount);
6811 	pcie_device->id = ioc->pcie_target_id++;
6812 	pcie_device->channel = PCIE_CHANNEL;
6813 	pcie_device->handle = handle;
6814 	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6815 	pcie_device->wwid = wwid;
6816 	pcie_device->port_num = pcie_device_pg0.PortNum;
6817 	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6818 	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6819 
6820 	pcie_device->enclosure_handle =
6821 	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6822 	if (pcie_device->enclosure_handle != 0)
6823 		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6824 
6825 	if (le32_to_cpu(pcie_device_pg0.Flags) &
6826 	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6827 		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6828 		memcpy(&pcie_device->connector_name[0],
6829 		    &pcie_device_pg0.ConnectorName[0], 4);
6830 	} else {
6831 		pcie_device->enclosure_level = 0;
6832 		pcie_device->connector_name[0] = '\0';
6833 	}
6834 
6835 	/* get enclosure_logical_id */
6836 	if (pcie_device->enclosure_handle) {
6837 		enclosure_dev =
6838 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
6839 						pcie_device->enclosure_handle);
6840 		if (enclosure_dev)
6841 			pcie_device->enclosure_logical_id =
6842 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6843 	}
6844 	/* TODO -- Add device name once FW supports it */
6845 	if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
		&pcie_device_pg2, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)) {
6847 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6848 			__FILE__, __LINE__, __func__);
6849 		kfree(pcie_device);
6850 		return 0;
6851 	}
6852 
6853 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6854 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6855 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6856 			__FILE__, __LINE__, __func__);
6857 		kfree(pcie_device);
6858 		return 0;
6859 	}
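	/*
	 * PCIe Device Page 2 supplies the NVMe maximum data transfer size
	 * and the controller reset timeout; fall back to a 30 second
	 * timeout when the firmware reports none.
	 */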
6860 	pcie_device->nvme_mdts =
6861 		le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6862 	if (pcie_device_pg2.ControllerResetTO)
6863 		pcie_device->reset_timeout =
6864 			pcie_device_pg2.ControllerResetTO;
6865 	else
6866 		pcie_device->reset_timeout = 30;
6867 
6868 	if (ioc->wait_for_discovery_to_complete)
6869 		_scsih_pcie_device_init_add(ioc, pcie_device);
6870 	else
6871 		_scsih_pcie_device_add(ioc, pcie_device);
6872 
6873 	pcie_device_put(pcie_device);
6874 	return 0;
6875 }
6876 
6877 /**
6878  * _scsih_pcie_topology_change_event_debug - debug for topology
6879  * event
6880  * @ioc: per adapter object
6881  * @event_data: event data payload
6882  * Context: user.
6883  */
6884 static void
6885 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6886 	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6887 {
6888 	int i;
6889 	u16 handle;
6890 	u16 reason_code;
6891 	u8 port_number;
6892 	char *status_str = NULL;
6893 	u8 link_rate, prev_link_rate;
6894 
6895 	switch (event_data->SwitchStatus) {
6896 	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6897 		status_str = "add";
6898 		break;
6899 	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6900 		status_str = "remove";
6901 		break;
6902 	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6903 	case 0:
		status_str = "responding";
6905 		break;
6906 	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6907 		status_str = "remove delay";
6908 		break;
6909 	default:
6910 		status_str = "unknown status";
6911 		break;
6912 	}
6913 	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
		"start_port(%02d), count(%d)\n",
6916 		le16_to_cpu(event_data->SwitchDevHandle),
6917 		le16_to_cpu(event_data->EnclosureHandle),
6918 		event_data->StartPortNum, event_data->NumEntries);
6919 	for (i = 0; i < event_data->NumEntries; i++) {
6920 		handle =
6921 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
6922 		if (!handle)
6923 			continue;
6924 		port_number = event_data->StartPortNum + i;
6925 		reason_code = event_data->PortEntry[i].PortStatus;
6926 		switch (reason_code) {
6927 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
6928 			status_str = "target add";
6929 			break;
6930 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
6931 			status_str = "target remove";
6932 			break;
6933 		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
6934 			status_str = "delay target remove";
6935 			break;
6936 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
6937 			status_str = "link rate change";
6938 			break;
6939 		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
6940 			status_str = "target responding";
6941 			break;
6942 		default:
6943 			status_str = "unknown";
6944 			break;
6945 		}
6946 		link_rate = event_data->PortEntry[i].CurrentPortInfo &
6947 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6948 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
6949 			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6950 		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
6951 			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
6952 			handle, status_str, link_rate, prev_link_rate);
6953 	}
6954 }
6955 
6956 /**
6957  * _scsih_pcie_topology_change_event - handle PCIe topology
6958  *  changes
6959  * @ioc: per adapter object
6960  * @fw_event: The fw_event_work object
6961  * Context: user.
6962  *
6963  */
6964 static void
6965 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6966 	struct fw_event_work *fw_event)
6967 {
6968 	int i;
6969 	u16 handle;
6970 	u16 reason_code;
6971 	u8 link_rate, prev_link_rate;
6972 	unsigned long flags;
6973 	int rc;
6974 	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
6975 		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
6976 	struct _pcie_device *pcie_device;
6977 
6978 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6979 		_scsih_pcie_topology_change_event_debug(ioc, event_data);
6980 
6981 	if (ioc->shost_recovery || ioc->remove_host ||
6982 		ioc->pci_error_recovery)
6983 		return;
6984 
6985 	if (fw_event->ignore) {
6986 		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
6987 		return;
6988 	}
6989 
6990 	/* handle siblings events */
6991 	for (i = 0; i < event_data->NumEntries; i++) {
6992 		if (fw_event->ignore) {
6993 			dewtprintk(ioc,
6994 				   ioc_info(ioc, "ignoring switch event\n"));
6995 			return;
6996 		}
6997 		if (ioc->remove_host || ioc->pci_error_recovery)
6998 			return;
6999 		reason_code = event_data->PortEntry[i].PortStatus;
7000 		handle =
7001 			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7002 		if (!handle)
7003 			continue;
7004 
7005 		link_rate = event_data->PortEntry[i].CurrentPortInfo
7006 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7007 		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7008 			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7009 
7010 		switch (reason_code) {
7011 		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7012 			if (ioc->shost_recovery)
7013 				break;
7014 			if (link_rate == prev_link_rate)
7015 				break;
7016 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7017 				break;
7018 
7019 			_scsih_pcie_check_device(ioc, handle);
7020 
			/* The code after this point handles the case where
			 * a device has been added but keeps returning BUSY
			 * for some time.  Then, before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
7027 			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7028 			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7029 			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7030 
7031 			if (pcie_device) {
7032 				pcie_device_put(pcie_device);
7033 				break;
7034 			}
7035 
7036 			if (!test_bit(handle, ioc->pend_os_device_add))
7037 				break;
7038 
7039 			dewtprintk(ioc,
7040 				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7041 					    handle));
7042 			event_data->PortEntry[i].PortStatus &= 0xF0;
7043 			event_data->PortEntry[i].PortStatus |=
7044 				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7045 			/* fall through */
7046 		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7047 			if (ioc->shost_recovery)
7048 				break;
7049 			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7050 				break;
7051 
7052 			rc = _scsih_pcie_add_device(ioc, handle);
7053 			if (!rc) {
7054 				/* mark entry vacant */
				/* TODO: This needs to be reviewed and fixed;
				 * we don't have a way to mark an event entry
				 * as vacant.
				 */
7059 				event_data->PortEntry[i].PortStatus |=
7060 					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7061 			}
7062 			break;
7063 		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7064 			_scsih_pcie_device_remove_by_handle(ioc, handle);
7065 			break;
7066 		}
7067 	}
7068 }
7069 
7070 /**
7071  * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
7073  * @event_data: event data payload
7074  * Context: user.
7075  */
7076 static void
7077 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7078 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7079 {
7080 	char *reason_str = NULL;
7081 
7082 	switch (event_data->ReasonCode) {
7083 	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7084 		reason_str = "smart data";
7085 		break;
7086 	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7087 		reason_str = "unsupported device discovered";
7088 		break;
7089 	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7090 		reason_str = "internal device reset";
7091 		break;
7092 	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7093 		reason_str = "internal task abort";
7094 		break;
7095 	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7096 		reason_str = "internal task abort set";
7097 		break;
7098 	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7099 		reason_str = "internal clear task set";
7100 		break;
7101 	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7102 		reason_str = "internal query task";
7103 		break;
7104 	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7105 		reason_str = "device init failure";
7106 		break;
7107 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7108 		reason_str = "internal device reset complete";
7109 		break;
7110 	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7111 		reason_str = "internal task abort complete";
7112 		break;
7113 	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7114 		reason_str = "internal async notification";
7115 		break;
7116 	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7117 		reason_str = "pcie hot reset failed";
7118 		break;
7119 	default:
7120 		reason_str = "unknown reason";
7121 		break;
7122 	}
7123 
7124 	ioc_info(ioc, "PCIE device status change: (%s)\n"
7125 		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7126 		 reason_str, le16_to_cpu(event_data->DevHandle),
7127 		 (u64)le64_to_cpu(event_data->WWID),
7128 		 le16_to_cpu(event_data->TaskTag));
7129 	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7130 		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7131 			event_data->ASC, event_data->ASCQ);
7132 	pr_cont("\n");
7133 }
7134 
7135 /**
7136  * _scsih_pcie_device_status_change_event - handle device status
7137  * change
7138  * @ioc: per adapter object
7139  * @fw_event: The fw_event_work object
7140  * Context: user.
7141  */
7142 static void
7143 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7144 	struct fw_event_work *fw_event)
7145 {
7146 	struct MPT3SAS_TARGET *target_priv_data;
7147 	struct _pcie_device *pcie_device;
7148 	u64 wwid;
7149 	unsigned long flags;
7150 	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7151 		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7152 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7153 		_scsih_pcie_device_status_change_event_debug(ioc,
7154 			event_data);
7155 
7156 	if (event_data->ReasonCode !=
7157 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7158 		event_data->ReasonCode !=
7159 		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7160 		return;
7161 
7162 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7163 	wwid = le64_to_cpu(event_data->WWID);
7164 	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7165 
7166 	if (!pcie_device || !pcie_device->starget)
7167 		goto out;
7168 
7169 	target_priv_data = pcie_device->starget->hostdata;
7170 	if (!target_priv_data)
7171 		goto out;
7172 
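	/*
	 * Flag the target busy for the duration of an internal device
	 * reset so new I/O is held off until the reset-complete event
	 * clears the flag.
	 */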
7173 	if (event_data->ReasonCode ==
7174 		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7175 		target_priv_data->tm_busy = 1;
7176 	else
7177 		target_priv_data->tm_busy = 0;
7178 out:
7179 	if (pcie_device)
7180 		pcie_device_put(pcie_device);
7181 
7182 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7183 }
7184 
7185 /**
7186  * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7187  * event
7188  * @ioc: per adapter object
7189  * @event_data: event data payload
7190  * Context: user.
7191  */
7192 static void
7193 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7194 	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7195 {
7196 	char *reason_str = NULL;
7197 
7198 	switch (event_data->ReasonCode) {
7199 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7200 		reason_str = "enclosure add";
7201 		break;
7202 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7203 		reason_str = "enclosure remove";
7204 		break;
7205 	default:
7206 		reason_str = "unknown reason";
7207 		break;
7208 	}
7209 
7210 	ioc_info(ioc, "enclosure status change: (%s)\n"
7211 		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7212 		 reason_str,
7213 		 le16_to_cpu(event_data->EnclosureHandle),
7214 		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7215 		 le16_to_cpu(event_data->StartSlot));
7216 }
7217 
7218 /**
7219  * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7220  * @ioc: per adapter object
7221  * @fw_event: The fw_event_work object
7222  * Context: user.
7223  */
7224 static void
7225 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7226 	struct fw_event_work *fw_event)
7227 {
7228 	Mpi2ConfigReply_t mpi_reply;
7229 	struct _enclosure_node *enclosure_dev = NULL;
7230 	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7231 		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7232 	int rc;
7233 	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7234 
7235 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7236 		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7237 		     (Mpi2EventDataSasEnclDevStatusChange_t *)
7238 		     fw_event->event_data);
7239 	if (ioc->shost_recovery)
7240 		return;
7241 
7242 	if (enclosure_handle)
7243 		enclosure_dev =
7244 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7245 						enclosure_handle);
7246 	switch (event_data->ReasonCode) {
7247 	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7248 		if (!enclosure_dev) {
7249 			enclosure_dev =
7250 				kzalloc(sizeof(struct _enclosure_node),
7251 					GFP_KERNEL);
7252 			if (!enclosure_dev) {
7253 				ioc_info(ioc, "failure at %s:%d/%s()!\n",
7254 					 __FILE__, __LINE__, __func__);
7255 				return;
7256 			}
7257 			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7258 				&enclosure_dev->pg0,
7259 				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7260 				enclosure_handle);
7261 
7262 			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7263 						MPI2_IOCSTATUS_MASK)) {
7264 				kfree(enclosure_dev);
7265 				return;
7266 			}
7267 
7268 			list_add_tail(&enclosure_dev->list,
7269 							&ioc->enclosure_list);
7270 		}
7271 		break;
7272 	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7273 		if (enclosure_dev) {
7274 			list_del(&enclosure_dev->list);
7275 			kfree(enclosure_dev);
7276 		}
7277 		break;
7278 	default:
7279 		break;
7280 	}
7281 }
7282 
7283 /**
7284  * _scsih_sas_broadcast_primitive_event - handle broadcast events
7285  * @ioc: per adapter object
7286  * @fw_event: The fw_event_work object
7287  * Context: user.
7288  */
7289 static void
7290 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7291 	struct fw_event_work *fw_event)
7292 {
7293 	struct scsi_cmnd *scmd;
7294 	struct scsi_device *sdev;
7295 	struct scsiio_tracker *st;
7296 	u16 smid, handle;
7297 	u32 lun;
7298 	struct MPT3SAS_DEVICE *sas_device_priv_data;
7299 	u32 termination_count;
7300 	u32 query_count;
7301 	Mpi2SCSITaskManagementReply_t *mpi_reply;
7302 	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7303 		(Mpi2EventDataSasBroadcastPrimitive_t *)
7304 		fw_event->event_data;
7305 	u16 ioc_status;
7306 	unsigned long flags;
7307 	int r;
7308 	u8 max_retries = 0;
7309 	u8 task_abort_retries;
7310 
7311 	mutex_lock(&ioc->tm_cmds.mutex);
7312 	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7313 		 __func__, event_data->PhyNum, event_data->PortWidth);
7314 
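	/*
	 * A broadcast primitive signals that a target may have aborted
	 * outstanding commands.  Block I/O to all devices, then QUERY_TASK
	 * each pending SCSI command and ABORT_TASK those the target no
	 * longer owns.
	 */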
7315 	_scsih_block_io_all_device(ioc);
7316 
7317 	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7318 	mpi_reply = ioc->tm_cmds.reply;
7319  broadcast_aen_retry:
7320 
7321 	/* sanity checks for retrying this loop */
7322 	if (max_retries++ == 5) {
7323 		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7324 		goto out;
7325 	} else if (max_retries > 1)
7326 		dewtprintk(ioc,
7327 			   ioc_info(ioc, "%s: %d retry\n",
7328 				    __func__, max_retries - 1));
7329 
7330 	termination_count = 0;
7331 	query_count = 0;
7332 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7333 		if (ioc->shost_recovery)
7334 			goto out;
7335 		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7336 		if (!scmd)
7337 			continue;
7338 		st = scsi_cmd_priv(scmd);
7339 		sdev = scmd->device;
7340 		sas_device_priv_data = sdev->hostdata;
7341 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7342 			continue;
7343 		 /* skip hidden raid components */
7344 		if (sas_device_priv_data->sas_target->flags &
7345 		    MPT_TARGET_FLAGS_RAID_COMPONENT)
7346 			continue;
7347 		 /* skip volumes */
7348 		if (sas_device_priv_data->sas_target->flags &
7349 		    MPT_TARGET_FLAGS_VOLUME)
7350 			continue;
7351 		 /* skip PCIe devices */
7352 		if (sas_device_priv_data->sas_target->flags &
7353 		    MPT_TARGET_FLAGS_PCIE_DEVICE)
7354 			continue;
7355 
7356 		handle = sas_device_priv_data->sas_target->handle;
7357 		lun = sas_device_priv_data->lun;
7358 		query_count++;
7359 
7360 		if (ioc->shost_recovery)
7361 			goto out;
7362 
7363 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7364 		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7365 			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7366 			st->msix_io, 30, 0);
7367 		if (r == FAILED) {
7368 			sdev_printk(KERN_WARNING, sdev,
7369 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
7370 			    "QUERY_TASK: scmd(%p)\n", scmd);
7371 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7372 			goto broadcast_aen_retry;
7373 		}
7374 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7375 		    & MPI2_IOCSTATUS_MASK;
7376 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7377 			sdev_printk(KERN_WARNING, sdev,
7378 				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7379 				ioc_status, scmd);
7380 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7381 			goto broadcast_aen_retry;
7382 		}
7383 
7384 		/* see if IO is still owned by IOC and target */
7385 		if (mpi_reply->ResponseCode ==
7386 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7387 		     mpi_reply->ResponseCode ==
7388 		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7389 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7390 			continue;
7391 		}
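		/*
		 * The target no longer owns this I/O, so abort it.
		 * ABORT_TASK is retried up to 60 times before this pass
		 * gives up and restarts the broadcast scan.
		 */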
7392 		task_abort_retries = 0;
7393  tm_retry:
7394 		if (task_abort_retries++ == 60) {
7395 			dewtprintk(ioc,
7396 				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7397 					    __func__));
7398 			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7399 			goto broadcast_aen_retry;
7400 		}
7401 
7402 		if (ioc->shost_recovery)
7403 			goto out_no_lock;
7404 
7405 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7406 			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7407 			st->msix_io, 30, 0);
7408 		if (r == FAILED || st->cb_idx != 0xFF) {
7409 			sdev_printk(KERN_WARNING, sdev,
7410 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7411 			    "scmd(%p)\n", scmd);
7412 			goto tm_retry;
7413 		}
7414 
7415 		if (task_abort_retries > 1)
7416 			sdev_printk(KERN_WARNING, sdev,
7417 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7418 			    " scmd(%p)\n",
7419 			    task_abort_retries - 1, scmd);
7420 
7421 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7422 		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7423 	}
7424 
7425 	if (ioc->broadcast_aen_pending) {
7426 		dewtprintk(ioc,
7427 			   ioc_info(ioc,
7428 				    "%s: loop back due to pending AEN\n",
7429 				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
7432 	}
7433 
7434  out:
7435 	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7436  out_no_lock:
7437 
7438 	dewtprintk(ioc,
7439 		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7440 			    __func__, query_count, termination_count));
7441 
7442 	ioc->broadcast_aen_busy = 0;
7443 	if (!ioc->shost_recovery)
7444 		_scsih_ublock_io_all_device(ioc);
7445 	mutex_unlock(&ioc->tm_cmds.mutex);
7446 }
7447 
7448 /**
7449  * _scsih_sas_discovery_event - handle discovery events
7450  * @ioc: per adapter object
7451  * @fw_event: The fw_event_work object
7452  * Context: user.
7453  */
7454 static void
7455 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7456 	struct fw_event_work *fw_event)
7457 {
7458 	Mpi2EventDataSasDiscovery_t *event_data =
7459 		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7460 
7461 	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7462 		ioc_info(ioc, "discovery event: (%s)",
7463 			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7464 			 "start" : "stop");
7465 		if (event_data->DiscoveryStatus)
7466 			pr_cont("discovery_status(0x%08x)",
7467 				le32_to_cpu(event_data->DiscoveryStatus));
7468 		pr_cont("\n");
7469 	}
7470 
7471 	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7472 	    !ioc->sas_hba.num_phys) {
7473 		if (disable_discovery > 0 && ioc->shost_recovery) {
7474 			/* Wait for the reset to complete */
7475 			while (ioc->shost_recovery)
7476 				ssleep(1);
7477 		}
7478 		_scsih_sas_host_add(ioc);
7479 	}
7480 }
7481 
7482 /**
7483  * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7484  *						events
7485  * @ioc: per adapter object
7486  * @fw_event: The fw_event_work object
7487  * Context: user.
7488  */
7489 static void
7490 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7491 	struct fw_event_work *fw_event)
7492 {
7493 	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7494 		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7495 
7496 	switch (event_data->ReasonCode) {
7497 	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7498 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7499 			 le16_to_cpu(event_data->DevHandle),
7500 			 (u64)le64_to_cpu(event_data->SASAddress),
7501 			 event_data->PhysicalPort);
7502 		break;
7503 	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7504 		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7505 			 le16_to_cpu(event_data->DevHandle),
7506 			 (u64)le64_to_cpu(event_data->SASAddress),
7507 			 event_data->PhysicalPort);
7508 		break;
7509 	default:
7510 		break;
7511 	}
7512 }
7513 
7514 /**
7515  * _scsih_pcie_enumeration_event - handle enumeration events
7516  * @ioc: per adapter object
7517  * @fw_event: The fw_event_work object
7518  * Context: user.
7519  */
7520 static void
7521 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7522 	struct fw_event_work *fw_event)
7523 {
7524 	Mpi26EventDataPCIeEnumeration_t *event_data =
7525 		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7526 
7527 	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7528 		return;
7529 
7530 	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7531 		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7532 		 "started" : "completed",
7533 		 event_data->Flags);
7534 	if (event_data->EnumerationStatus)
7535 		pr_cont("enumeration_status(0x%08x)",
7536 			le32_to_cpu(event_data->EnumerationStatus));
7537 	pr_cont("\n");
7538 }
7539 
7540 /**
7541  * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7542  * @ioc: per adapter object
7543  * @handle: device handle for physical disk
7544  * @phys_disk_num: physical disk number
7545  *
7546  * Return: 0 for success, else failure.
7547  */
7548 static int
7549 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7550 {
7551 	Mpi2RaidActionRequest_t *mpi_request;
7552 	Mpi2RaidActionReply_t *mpi_reply;
7553 	u16 smid;
7554 	u8 issue_reset = 0;
7555 	int rc = 0;
7556 	u16 ioc_status;
7557 	u32 log_info;
7558 
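	/* The PHYSDISK_HIDDEN RAID action is not issued on MPI 2.0 (SAS 2.0)
	 * controllers.
	 */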
7559 	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7560 		return rc;
7561 
7562 	mutex_lock(&ioc->scsih_cmds.mutex);
7563 
7564 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7565 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7566 		rc = -EAGAIN;
7567 		goto out;
7568 	}
7569 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7570 
7571 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7572 	if (!smid) {
7573 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7574 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7575 		rc = -EAGAIN;
7576 		goto out;
7577 	}
7578 
7579 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7580 	ioc->scsih_cmds.smid = smid;
7581 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7582 
7583 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7584 	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7585 	mpi_request->PhysDiskNum = phys_disk_num;
7586 
7587 	dewtprintk(ioc,
7588 		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7589 			    handle, phys_disk_num));
7590 
7591 	init_completion(&ioc->scsih_cmds.done);
7592 	mpt3sas_base_put_smid_default(ioc, smid);
7593 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7594 
7595 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7596 		issue_reset =
7597 			mpt3sas_base_check_cmd_timeout(ioc,
7598 				ioc->scsih_cmds.status, mpi_request,
7599 				sizeof(Mpi2RaidActionRequest_t)/4);
7600 		rc = -EFAULT;
7601 		goto out;
7602 	}
7603 
7604 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7605 
7606 		mpi_reply = ioc->scsih_cmds.reply;
7607 		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7608 		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7609 			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
7610 		else
7611 			log_info = 0;
7612 		ioc_status &= MPI2_IOCSTATUS_MASK;
7613 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7614 			dewtprintk(ioc,
7615 				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7616 					    ioc_status, log_info));
7617 			rc = -EFAULT;
7618 		} else
7619 			dewtprintk(ioc,
7620 				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7621 	}
7622 
7623  out:
7624 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7625 	mutex_unlock(&ioc->scsih_cmds.mutex);
7626 
7627 	if (issue_reset)
7628 		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7629 	return rc;
7630 }
7631 
7632 /**
7633  * _scsih_reprobe_lun - reprobing lun
7634  * @sdev: scsi device struct
7635  * @no_uld_attach: sdev->no_uld_attach flag setting
 */
7638 static void
7639 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7640 {
7641 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7642 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7643 	    sdev->no_uld_attach ? "hiding" : "exposing");
7644 	WARN_ON(scsi_device_reprobe(sdev));
7645 }
7646 
7647 /**
7648  * _scsih_sas_volume_add - add new volume
7649  * @ioc: per adapter object
7650  * @element: IR config element data
7651  * Context: user.
7652  */
7653 static void
7654 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7655 	Mpi2EventIrConfigElement_t *element)
7656 {
7657 	struct _raid_device *raid_device;
7658 	unsigned long flags;
7659 	u64 wwid;
7660 	u16 handle = le16_to_cpu(element->VolDevHandle);
7661 	int rc;
7662 
7663 	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7664 	if (!wwid) {
7665 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7666 			__FILE__, __LINE__, __func__);
7667 		return;
7668 	}
7669 
7670 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7671 	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7672 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7673 
7674 	if (raid_device)
7675 		return;
7676 
7677 	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7678 	if (!raid_device) {
7679 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7680 			__FILE__, __LINE__, __func__);
7681 		return;
7682 	}
7683 
7684 	raid_device->id = ioc->sas_id++;
7685 	raid_device->channel = RAID_CHANNEL;
7686 	raid_device->handle = handle;
7687 	raid_device->wwid = wwid;
7688 	_scsih_raid_device_add(ioc, raid_device);
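	/*
	 * While initial discovery is still in progress, only note the
	 * volume as a potential boot device; it is exposed to the SCSI
	 * midlayer once discovery completes.  Otherwise add it now.
	 */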
7689 	if (!ioc->wait_for_discovery_to_complete) {
7690 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7691 		    raid_device->id, 0);
7692 		if (rc)
7693 			_scsih_raid_device_remove(ioc, raid_device);
7694 	} else {
7695 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
7696 		_scsih_determine_boot_device(ioc, raid_device, 1);
7697 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7698 	}
7699 }
7700 
7701 /**
7702  * _scsih_sas_volume_delete - delete volume
7703  * @ioc: per adapter object
7704  * @handle: volume device handle
7705  * Context: user.
7706  */
7707 static void
7708 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7709 {
7710 	struct _raid_device *raid_device;
7711 	unsigned long flags;
7712 	struct MPT3SAS_TARGET *sas_target_priv_data;
7713 	struct scsi_target *starget = NULL;
7714 
7715 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
7716 	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7717 	if (raid_device) {
7718 		if (raid_device->starget) {
7719 			starget = raid_device->starget;
7720 			sas_target_priv_data = starget->hostdata;
7721 			sas_target_priv_data->deleted = 1;
7722 		}
7723 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7724 			 raid_device->handle, (u64)raid_device->wwid);
7725 		list_del(&raid_device->list);
7726 		kfree(raid_device);
7727 	}
7728 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7729 	if (starget)
7730 		scsi_remove_target(&starget->dev);
7731 }
7732 
7733 /**
7734  * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7735  * @ioc: per adapter object
7736  * @element: IR config element data
7737  * Context: user.
7738  */
7739 static void
7740 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7741 	Mpi2EventIrConfigElement_t *element)
7742 {
7743 	struct _sas_device *sas_device;
7744 	struct scsi_target *starget = NULL;
7745 	struct MPT3SAS_TARGET *sas_target_priv_data;
7746 	unsigned long flags;
7747 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7748 
7749 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7750 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7751 	if (sas_device) {
7752 		sas_device->volume_handle = 0;
7753 		sas_device->volume_wwid = 0;
7754 		clear_bit(handle, ioc->pd_handles);
7755 		if (sas_device->starget && sas_device->starget->hostdata) {
7756 			starget = sas_device->starget;
7757 			sas_target_priv_data = starget->hostdata;
7758 			sas_target_priv_data->flags &=
7759 			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7760 		}
7761 	}
7762 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7763 	if (!sas_device)
7764 		return;
7765 
7766 	/* exposing raid component */
7767 	if (starget)
7768 		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7769 
7770 	sas_device_put(sas_device);
7771 }
7772 
7773 /**
7774  * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7775  * @ioc: per adapter object
7776  * @element: IR config element data
7777  * Context: user.
7778  */
7779 static void
7780 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7781 	Mpi2EventIrConfigElement_t *element)
7782 {
7783 	struct _sas_device *sas_device;
7784 	struct scsi_target *starget = NULL;
7785 	struct MPT3SAS_TARGET *sas_target_priv_data;
7786 	unsigned long flags;
7787 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7788 	u16 volume_handle = 0;
7789 	u64 volume_wwid = 0;
7790 
7791 	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7792 	if (volume_handle)
7793 		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7794 		    &volume_wwid);
7795 
7796 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7797 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7798 	if (sas_device) {
7799 		set_bit(handle, ioc->pd_handles);
7800 		if (sas_device->starget && sas_device->starget->hostdata) {
7801 			starget = sas_device->starget;
7802 			sas_target_priv_data = starget->hostdata;
7803 			sas_target_priv_data->flags |=
7804 			    MPT_TARGET_FLAGS_RAID_COMPONENT;
7805 			sas_device->volume_handle = volume_handle;
7806 			sas_device->volume_wwid = volume_wwid;
7807 		}
7808 	}
7809 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7810 	if (!sas_device)
7811 		return;
7812 
7813 	/* hiding raid component */
7814 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7815 
7816 	if (starget)
7817 		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7818 
7819 	sas_device_put(sas_device);
7820 }
7821 
7822 /**
7823  * _scsih_sas_pd_delete - delete pd component
7824  * @ioc: per adapter object
7825  * @element: IR config element data
7826  * Context: user.
7827  */
7828 static void
7829 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7830 	Mpi2EventIrConfigElement_t *element)
7831 {
7832 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7833 
7834 	_scsih_device_remove_by_handle(ioc, handle);
7835 }
7836 
7837 /**
 * _scsih_sas_pd_add - add pd component
7839  * @ioc: per adapter object
7840  * @element: IR config element data
7841  * Context: user.
7842  */
7843 static void
7844 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7845 	Mpi2EventIrConfigElement_t *element)
7846 {
7847 	struct _sas_device *sas_device;
7848 	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7849 	Mpi2ConfigReply_t mpi_reply;
7850 	Mpi2SasDevicePage0_t sas_device_pg0;
7851 	u32 ioc_status;
7852 	u64 sas_address;
7853 	u16 parent_handle;
7854 
7855 	set_bit(handle, ioc->pd_handles);
7856 
7857 	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7858 	if (sas_device) {
7859 		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7860 		sas_device_put(sas_device);
7861 		return;
7862 	}
7863 
7864 	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7865 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7866 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7867 			__FILE__, __LINE__, __func__);
7868 		return;
7869 	}
7870 
7871 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7872 	    MPI2_IOCSTATUS_MASK;
7873 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7874 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7875 			__FILE__, __LINE__, __func__);
7876 		return;
7877 	}
7878 
7879 	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7880 	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7881 		mpt3sas_transport_update_links(ioc, sas_address, handle,
7882 		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7883 
7884 	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7885 	_scsih_add_device(ioc, handle, 0, 1);
7886 }
7887 
7888 /**
7889  * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7890  * @ioc: per adapter object
7891  * @event_data: event data payload
7892  * Context: user.
7893  */
7894 static void
7895 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7896 	Mpi2EventDataIrConfigChangeList_t *event_data)
7897 {
7898 	Mpi2EventIrConfigElement_t *element;
7899 	u8 element_type;
7900 	int i;
7901 	char *reason_str = NULL, *element_str = NULL;
7902 
7903 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7904 
7905 	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
7906 		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
7907 		 "foreign" : "native",
7908 		 event_data->NumElements);
7909 	for (i = 0; i < event_data->NumElements; i++, element++) {
7910 		switch (element->ReasonCode) {
7911 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7912 			reason_str = "add";
7913 			break;
7914 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7915 			reason_str = "remove";
7916 			break;
7917 		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7918 			reason_str = "no change";
7919 			break;
7920 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7921 			reason_str = "hide";
7922 			break;
7923 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7924 			reason_str = "unhide";
7925 			break;
7926 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7927 			reason_str = "volume_created";
7928 			break;
7929 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7930 			reason_str = "volume_deleted";
7931 			break;
7932 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7933 			reason_str = "pd_created";
7934 			break;
7935 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7936 			reason_str = "pd_deleted";
7937 			break;
7938 		default:
7939 			reason_str = "unknown reason";
7940 			break;
7941 		}
7942 		element_type = le16_to_cpu(element->ElementFlags) &
7943 		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
7944 		switch (element_type) {
7945 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
7946 			element_str = "volume";
7947 			break;
7948 		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
7949 			element_str = "phys disk";
7950 			break;
7951 		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
7952 			element_str = "hot spare";
7953 			break;
7954 		default:
7955 			element_str = "unknown element";
7956 			break;
7957 		}
		pr_info("\t(%s:%s), vol handle(0x%04x), "
7959 		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
7960 		    reason_str, le16_to_cpu(element->VolDevHandle),
7961 		    le16_to_cpu(element->PhysDiskDevHandle),
7962 		    element->PhysDiskNum);
7963 	}
7964 }
7965 
7966 /**
7967  * _scsih_sas_ir_config_change_event - handle ir configuration change events
7968  * @ioc: per adapter object
7969  * @fw_event: The fw_event_work object
7970  * Context: user.
7971  */
7972 static void
7973 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
7974 	struct fw_event_work *fw_event)
7975 {
7976 	Mpi2EventIrConfigElement_t *element;
7977 	int i;
7978 	u8 foreign_config;
7979 	Mpi2EventDataIrConfigChangeList_t *event_data =
7980 		(Mpi2EventDataIrConfigChangeList_t *)
7981 		fw_event->event_data;
7982 
7983 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
7984 	     (!ioc->hide_ir_msg))
7985 		_scsih_sas_ir_config_change_event_debug(ioc, event_data);
7986 
7987 	foreign_config = (le32_to_cpu(event_data->Flags) &
7988 	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
7989 
7990 	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7991 	if (ioc->shost_recovery &&
7992 	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7993 		for (i = 0; i < event_data->NumElements; i++, element++) {
7994 			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
7995 				_scsih_ir_fastpath(ioc,
7996 					le16_to_cpu(element->PhysDiskDevHandle),
7997 					element->PhysDiskNum);
7998 		}
7999 		return;
8000 	}
8001 
8002 	for (i = 0; i < event_data->NumElements; i++, element++) {
8003 
8004 		switch (element->ReasonCode) {
8005 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8006 		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8007 			if (!foreign_config)
8008 				_scsih_sas_volume_add(ioc, element);
8009 			break;
8010 		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8011 		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8012 			if (!foreign_config)
8013 				_scsih_sas_volume_delete(ioc,
8014 				    le16_to_cpu(element->VolDevHandle));
8015 			break;
8016 		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8017 			if (!ioc->is_warpdrive)
8018 				_scsih_sas_pd_hide(ioc, element);
8019 			break;
8020 		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8021 			if (!ioc->is_warpdrive)
8022 				_scsih_sas_pd_expose(ioc, element);
8023 			break;
8024 		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8025 			if (!ioc->is_warpdrive)
8026 				_scsih_sas_pd_add(ioc, element);
8027 			break;
8028 		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8029 			if (!ioc->is_warpdrive)
8030 				_scsih_sas_pd_delete(ioc, element);
8031 			break;
8032 		}
8033 	}
8034 }
8035 
8036 /**
8037  * _scsih_sas_ir_volume_event - IR volume event
8038  * @ioc: per adapter object
8039  * @fw_event: The fw_event_work object
8040  * Context: user.
8041  */
8042 static void
8043 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8044 	struct fw_event_work *fw_event)
8045 {
8046 	u64 wwid;
8047 	unsigned long flags;
8048 	struct _raid_device *raid_device;
8049 	u16 handle;
8050 	u32 state;
8051 	int rc;
8052 	Mpi2EventDataIrVolume_t *event_data =
8053 		(Mpi2EventDataIrVolume_t *) fw_event->event_data;
8054 
8055 	if (ioc->shost_recovery)
8056 		return;
8057 
8058 	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8059 		return;
8060 
8061 	handle = le16_to_cpu(event_data->VolDevHandle);
8062 	state = le32_to_cpu(event_data->NewValue);
8063 	if (!ioc->hide_ir_msg)
8064 		dewtprintk(ioc,
8065 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8066 				    __func__, handle,
8067 				    le32_to_cpu(event_data->PreviousValue),
8068 				    state));
8069 	switch (state) {
8070 	case MPI2_RAID_VOL_STATE_MISSING:
8071 	case MPI2_RAID_VOL_STATE_FAILED:
8072 		_scsih_sas_volume_delete(ioc, handle);
8073 		break;
8074 
8075 	case MPI2_RAID_VOL_STATE_ONLINE:
8076 	case MPI2_RAID_VOL_STATE_DEGRADED:
8077 	case MPI2_RAID_VOL_STATE_OPTIMAL:
8078 
8079 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8080 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8081 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8082 
8083 		if (raid_device)
8084 			break;
8085 
8086 		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8087 		if (!wwid) {
8088 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8089 				__FILE__, __LINE__, __func__);
8090 			break;
8091 		}
8092 
8093 		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8094 		if (!raid_device) {
8095 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8096 				__FILE__, __LINE__, __func__);
8097 			break;
8098 		}
8099 
8100 		raid_device->id = ioc->sas_id++;
8101 		raid_device->channel = RAID_CHANNEL;
8102 		raid_device->handle = handle;
8103 		raid_device->wwid = wwid;
8104 		_scsih_raid_device_add(ioc, raid_device);
8105 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8106 		    raid_device->id, 0);
8107 		if (rc)
8108 			_scsih_raid_device_remove(ioc, raid_device);
8109 		break;
8110 
8111 	case MPI2_RAID_VOL_STATE_INITIALIZING:
8112 	default:
8113 		break;
8114 	}
8115 }
8116 
8117 /**
8118  * _scsih_sas_ir_physical_disk_event - PD event
8119  * @ioc: per adapter object
8120  * @fw_event: The fw_event_work object
8121  * Context: user.
8122  */
8123 static void
8124 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8125 	struct fw_event_work *fw_event)
8126 {
8127 	u16 handle, parent_handle;
8128 	u32 state;
8129 	struct _sas_device *sas_device;
8130 	Mpi2ConfigReply_t mpi_reply;
8131 	Mpi2SasDevicePage0_t sas_device_pg0;
8132 	u32 ioc_status;
8133 	Mpi2EventDataIrPhysicalDisk_t *event_data =
8134 		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8135 	u64 sas_address;
8136 
8137 	if (ioc->shost_recovery)
8138 		return;
8139 
8140 	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8141 		return;
8142 
8143 	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8144 	state = le32_to_cpu(event_data->NewValue);
8145 
8146 	if (!ioc->hide_ir_msg)
8147 		dewtprintk(ioc,
8148 			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8149 				    __func__, handle,
8150 				    le32_to_cpu(event_data->PreviousValue),
8151 				    state));
8152 
8153 	switch (state) {
8154 	case MPI2_RAID_PD_STATE_ONLINE:
8155 	case MPI2_RAID_PD_STATE_DEGRADED:
8156 	case MPI2_RAID_PD_STATE_REBUILDING:
8157 	case MPI2_RAID_PD_STATE_OPTIMAL:
8158 	case MPI2_RAID_PD_STATE_HOT_SPARE:
8159 
8160 		if (!ioc->is_warpdrive)
8161 			set_bit(handle, ioc->pd_handles);
8162 
8163 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8164 		if (sas_device) {
8165 			sas_device_put(sas_device);
8166 			return;
8167 		}
8168 
8169 		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8170 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8171 		    handle))) {
8172 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8173 				__FILE__, __LINE__, __func__);
8174 			return;
8175 		}
8176 
8177 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8178 		    MPI2_IOCSTATUS_MASK;
8179 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8180 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8181 				__FILE__, __LINE__, __func__);
8182 			return;
8183 		}
8184 
8185 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8186 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8187 			mpt3sas_transport_update_links(ioc, sas_address, handle,
8188 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8189 
8190 		_scsih_add_device(ioc, handle, 0, 1);
8191 
8192 		break;
8193 
8194 	case MPI2_RAID_PD_STATE_OFFLINE:
8195 	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8196 	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8197 	default:
8198 		break;
8199 	}
8200 }
8201 
8202 /**
8203  * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8204  * @ioc: per adapter object
8205  * @event_data: event data payload
8206  * Context: user.
8207  */
8208 static void
8209 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8210 	Mpi2EventDataIrOperationStatus_t *event_data)
8211 {
8212 	char *reason_str = NULL;
8213 
8214 	switch (event_data->RAIDOperation) {
8215 	case MPI2_EVENT_IR_RAIDOP_RESYNC:
8216 		reason_str = "resync";
8217 		break;
8218 	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8219 		reason_str = "online capacity expansion";
8220 		break;
8221 	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8222 		reason_str = "consistency check";
8223 		break;
8224 	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8225 		reason_str = "background init";
8226 		break;
8227 	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8228 		reason_str = "make data consistent";
8229 		break;
8230 	}
8231 
8232 	if (!reason_str)
8233 		return;
8234 
8235 	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8236 		 reason_str,
8237 		 le16_to_cpu(event_data->VolDevHandle),
8238 		 event_data->PercentComplete);
8239 }
8240 
8241 /**
8242  * _scsih_sas_ir_operation_status_event - handle RAID operation events
8243  * @ioc: per adapter object
8244  * @fw_event: The fw_event_work object
8245  * Context: user.
8246  */
8247 static void
8248 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8249 	struct fw_event_work *fw_event)
8250 {
8251 	Mpi2EventDataIrOperationStatus_t *event_data =
8252 		(Mpi2EventDataIrOperationStatus_t *)
8253 		fw_event->event_data;
	struct _raid_device *raid_device;
8255 	unsigned long flags;
8256 	u16 handle;
8257 
8258 	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8259 	    (!ioc->hide_ir_msg))
8260 		_scsih_sas_ir_operation_status_event_debug(ioc,
8261 		     event_data);
8262 
8263 	/* code added for raid transport support */
8264 	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8265 
8266 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
8267 		handle = le16_to_cpu(event_data->VolDevHandle);
8268 		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8269 		if (raid_device)
8270 			raid_device->percent_complete =
8271 			    event_data->PercentComplete;
8272 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8273 	}
8274 }
8275 
8276 /**
8277  * _scsih_prep_device_scan - initialize parameters prior to device scan
8278  * @ioc: per adapter object
8279  *
8280  * Set the deleted flag prior to device scan.  If the device is found during
8281  * the scan, then we clear the deleted flag.
8282  */
8283 static void
8284 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8285 {
8286 	struct MPT3SAS_DEVICE *sas_device_priv_data;
8287 	struct scsi_device *sdev;
8288 
8289 	shost_for_each_device(sdev, ioc->shost) {
8290 		sas_device_priv_data = sdev->hostdata;
8291 		if (sas_device_priv_data && sas_device_priv_data->sas_target)
8292 			sas_device_priv_data->sas_target->deleted = 1;
8293 	}
8294 }
8295 
8296 /**
 * _scsih_mark_responding_sas_device - mark a sas_device as responding
8298  * @ioc: per adapter object
8299  * @sas_device_pg0: SAS Device page 0
8300  *
8301  * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8303  */
8304 static void
8305 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
8307 {
8308 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8309 	struct scsi_target *starget;
8310 	struct _sas_device *sas_device = NULL;
8311 	struct _enclosure_node *enclosure_dev = NULL;
8312 	unsigned long flags;
8313 
8314 	if (sas_device_pg0->EnclosureHandle) {
8315 		enclosure_dev =
8316 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8317 				le16_to_cpu(sas_device_pg0->EnclosureHandle));
8318 		if (enclosure_dev == NULL)
8319 			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8321 	}
8322 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8323 	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8324 		if ((sas_device->sas_address == le64_to_cpu(
8325 		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
8326 		    le16_to_cpu(sas_device_pg0->Slot))) {
8327 			sas_device->responding = 1;
8328 			starget = sas_device->starget;
8329 			if (starget && starget->hostdata) {
8330 				sas_target_priv_data = starget->hostdata;
8331 				sas_target_priv_data->tm_busy = 0;
8332 				sas_target_priv_data->deleted = 0;
8333 			} else
8334 				sas_target_priv_data = NULL;
8335 			if (starget) {
8336 				starget_printk(KERN_INFO, starget,
8337 				    "handle(0x%04x), sas_addr(0x%016llx)\n",
8338 				    le16_to_cpu(sas_device_pg0->DevHandle),
8339 				    (unsigned long long)
8340 				    sas_device->sas_address);
8341 
8342 				if (sas_device->enclosure_handle != 0)
8343 					starget_printk(KERN_INFO, starget,
8344 					 "enclosure logical id(0x%016llx),"
8345 					 " slot(%d)\n",
8346 					 (unsigned long long)
8347 					 sas_device->enclosure_logical_id,
8348 					 sas_device->slot);
8349 			}
8350 			if (le16_to_cpu(sas_device_pg0->Flags) &
8351 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8352 				sas_device->enclosure_level =
8353 				   sas_device_pg0->EnclosureLevel;
8354 				memcpy(&sas_device->connector_name[0],
8355 					&sas_device_pg0->ConnectorName[0], 4);
8356 			} else {
8357 				sas_device->enclosure_level = 0;
8358 				sas_device->connector_name[0] = '\0';
8359 			}
8360 
8361 			sas_device->enclosure_handle =
8362 				le16_to_cpu(sas_device_pg0->EnclosureHandle);
8363 			sas_device->is_chassis_slot_valid = 0;
8364 			if (enclosure_dev) {
8365 				sas_device->enclosure_logical_id = le64_to_cpu(
8366 					enclosure_dev->pg0.EnclosureLogicalID);
8367 				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8368 				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8369 					sas_device->is_chassis_slot_valid = 1;
8370 					sas_device->chassis_slot =
8371 						enclosure_dev->pg0.ChassisSlot;
8372 				}
8373 			}
8374 
8375 			if (sas_device->handle == le16_to_cpu(
8376 			    sas_device_pg0->DevHandle))
8377 				goto out;
8378 			pr_info("\thandle changed from(0x%04x)!!!\n",
8379 			    sas_device->handle);
8380 			sas_device->handle = le16_to_cpu(
8381 			    sas_device_pg0->DevHandle);
8382 			if (sas_target_priv_data)
8383 				sas_target_priv_data->handle =
8384 				    le16_to_cpu(sas_device_pg0->DevHandle);
8385 			goto out;
8386 		}
8387 	}
8388  out:
8389 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8390 }
8391 
8392 /**
 * _scsih_create_enclosure_list_after_reset - free the existing enclosure
 *	list and rebuild it by scanning all Enclosure Page(0)s
8395  * @ioc: per adapter object
8396  */
8397 static void
8398 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8399 {
8400 	struct _enclosure_node *enclosure_dev;
8401 	Mpi2ConfigReply_t mpi_reply;
8402 	u16 enclosure_handle;
8403 	int rc;
8404 
8405 	/* Free existing enclosure list */
8406 	mpt3sas_free_enclosure_list(ioc);
8407 
	/* Reconstruct the enclosure list after reset */
8409 	enclosure_handle = 0xFFFF;
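	/*
	 * Starting the GET_NEXT_HANDLE walk with 0xFFFF asks the IOC for the
	 * first enclosure page; each returned EnclosureHandle is fed back in
	 * until the IOC indicates there are no more enclosures.
	 */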
8410 	do {
8411 		enclosure_dev =
8412 			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8413 		if (!enclosure_dev) {
8414 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8415 				__FILE__, __LINE__, __func__);
8416 			return;
8417 		}
8418 		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8419 				&enclosure_dev->pg0,
8420 				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8421 				enclosure_handle);
8422 
8423 		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8424 						MPI2_IOCSTATUS_MASK)) {
8425 			kfree(enclosure_dev);
8426 			return;
8427 		}
8428 		list_add_tail(&enclosure_dev->list,
8429 						&ioc->enclosure_list);
8430 		enclosure_handle =
8431 			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8432 	} while (1);
8433 }
8434 
8435 /**
 * _scsih_search_responding_sas_devices - search for responding sas end devices
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
8441  */
8442 static void
8443 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8444 {
8445 	Mpi2SasDevicePage0_t sas_device_pg0;
8446 	Mpi2ConfigReply_t mpi_reply;
8447 	u16 ioc_status;
8448 	u16 handle;
8449 	u32 device_info;
8450 
8451 	ioc_info(ioc, "search for end-devices: start\n");
8452 
8453 	if (list_empty(&ioc->sas_device_list))
8454 		goto out;
8455 
8456 	handle = 0xFFFF;
8457 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8458 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8459 	    handle))) {
8460 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8461 		    MPI2_IOCSTATUS_MASK;
8462 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8463 			break;
8464 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
8465 		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8466 		if (!(_scsih_is_end_device(device_info)))
8467 			continue;
8468 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8469 	}
8470 
8471  out:
8472 	ioc_info(ioc, "search for end-devices: complete\n");
8473 }
8474 
8475 /**
8476  * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8477  * @ioc: per adapter object
8478  * @pcie_device_pg0: PCIe Device page 0
8479  *
8480  * After host reset, find out whether devices are still responding.
8481  * Used in _scsih_remove_unresponding_devices.
8482  */
8483 static void
8484 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8485 	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8486 {
8487 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8488 	struct scsi_target *starget;
8489 	struct _pcie_device *pcie_device;
8490 	unsigned long flags;
8491 
8492 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8493 	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8494 		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8495 		    && (pcie_device->slot == le16_to_cpu(
8496 		    pcie_device_pg0->Slot))) {
8497 			pcie_device->responding = 1;
8498 			starget = pcie_device->starget;
8499 			if (starget && starget->hostdata) {
8500 				sas_target_priv_data = starget->hostdata;
8501 				sas_target_priv_data->tm_busy = 0;
8502 				sas_target_priv_data->deleted = 0;
8503 			} else
8504 				sas_target_priv_data = NULL;
8505 			if (starget) {
8506 				starget_printk(KERN_INFO, starget,
8507 				    "handle(0x%04x), wwid(0x%016llx) ",
8508 				    pcie_device->handle,
8509 				    (unsigned long long)pcie_device->wwid);
8510 				if (pcie_device->enclosure_handle != 0)
8511 					starget_printk(KERN_INFO, starget,
8512 					    "enclosure logical id(0x%016llx), "
8513 					    "slot(%d)\n",
8514 					    (unsigned long long)
8515 					    pcie_device->enclosure_logical_id,
8516 					    pcie_device->slot);
8517 			}
8518 
8519 			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8520 			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8521 			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8522 				pcie_device->enclosure_level =
8523 				    pcie_device_pg0->EnclosureLevel;
8524 				memcpy(&pcie_device->connector_name[0],
8525 				    &pcie_device_pg0->ConnectorName[0], 4);
8526 			} else {
8527 				pcie_device->enclosure_level = 0;
8528 				pcie_device->connector_name[0] = '\0';
8529 			}
8530 
8531 			if (pcie_device->handle == le16_to_cpu(
8532 			    pcie_device_pg0->DevHandle))
8533 				goto out;
8534 			pr_info("\thandle changed from(0x%04x)!!!\n",
8535 			    pcie_device->handle);
8536 			pcie_device->handle = le16_to_cpu(
8537 			    pcie_device_pg0->DevHandle);
8538 			if (sas_target_priv_data)
8539 				sas_target_priv_data->handle =
8540 				    le16_to_cpu(pcie_device_pg0->DevHandle);
8541 			goto out;
8542 		}
8543 	}
8544 
8545  out:
8546 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8547 }
8548 
8549 /**
 * _scsih_search_responding_pcie_devices - search for responding pcie end devices
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
8555  */
8556 static void
8557 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8558 {
8559 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8560 	Mpi2ConfigReply_t mpi_reply;
8561 	u16 ioc_status;
8562 	u16 handle;
8563 	u32 device_info;
8564 
8565 	ioc_info(ioc, "search for end-devices: start\n");
8566 
8567 	if (list_empty(&ioc->pcie_device_list))
8568 		goto out;
8569 
8570 	handle = 0xFFFF;
8571 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8572 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8573 		handle))) {
8574 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8575 		    MPI2_IOCSTATUS_MASK;
8576 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8577 			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8578 				 __func__, ioc_status,
8579 				 le32_to_cpu(mpi_reply.IOCLogInfo));
8580 			break;
8581 		}
8582 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8583 		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8584 		if (!(_scsih_is_nvme_device(device_info)))
8585 			continue;
8586 		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8587 	}
8588 out:
8589 	ioc_info(ioc, "search for PCIe end-devices: complete\n");
8590 }
8591 
8592 /**
8593  * _scsih_mark_responding_raid_device - mark a raid_device as responding
8594  * @ioc: per adapter object
8595  * @wwid: world wide identifier for raid volume
8596  * @handle: device handle
8597  *
8598  * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8600  */
8601 static void
8602 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8603 	u16 handle)
8604 {
8605 	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8606 	struct scsi_target *starget;
8607 	struct _raid_device *raid_device;
8608 	unsigned long flags;
8609 
8610 	spin_lock_irqsave(&ioc->raid_device_lock, flags);
8611 	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8612 		if (raid_device->wwid == wwid && raid_device->starget) {
8613 			starget = raid_device->starget;
8614 			if (starget && starget->hostdata) {
8615 				sas_target_priv_data = starget->hostdata;
8616 				sas_target_priv_data->deleted = 0;
8617 			} else
8618 				sas_target_priv_data = NULL;
8619 			raid_device->responding = 1;
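			/*
			 * Drop the lock here: the warpdrive property refresh
			 * below can issue config page requests (which may
			 * sleep), so it must not run under the spinlock.
			 */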
8620 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8621 			starget_printk(KERN_INFO, raid_device->starget,
8622 			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
8623 			    (unsigned long long)raid_device->wwid);
8624 
8625 			/*
8626 			 * WARPDRIVE: The handles of the PDs might have changed
8627 			 * across the host reset so re-initialize the
8628 			 * required data for Direct IO
8629 			 */
8630 			mpt3sas_init_warpdrive_properties(ioc, raid_device);
8631 			spin_lock_irqsave(&ioc->raid_device_lock, flags);
8632 			if (raid_device->handle == handle) {
8633 				spin_unlock_irqrestore(&ioc->raid_device_lock,
8634 				    flags);
8635 				return;
8636 			}
8637 			pr_info("\thandle changed from(0x%04x)!!!\n",
8638 			    raid_device->handle);
8639 			raid_device->handle = handle;
8640 			if (sas_target_priv_data)
8641 				sas_target_priv_data->handle = handle;
8642 			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8643 			return;
8644 		}
8645 	}
8646 	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8647 }
8648 
8649 /**
 * _scsih_search_responding_raid_devices - search for responding raid volumes
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
8655  */
8656 static void
8657 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8658 {
8659 	Mpi2RaidVolPage1_t volume_pg1;
8660 	Mpi2RaidVolPage0_t volume_pg0;
8661 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8662 	Mpi2ConfigReply_t mpi_reply;
8663 	u16 ioc_status;
8664 	u16 handle;
8665 	u8 phys_disk_num;
8666 
8667 	if (!ioc->ir_firmware)
8668 		return;
8669 
8670 	ioc_info(ioc, "search for raid volumes: start\n");
8671 
8672 	if (list_empty(&ioc->raid_device_list))
8673 		goto out;
8674 
8675 	handle = 0xFFFF;
8676 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8677 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8678 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8679 		    MPI2_IOCSTATUS_MASK;
8680 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8681 			break;
8682 		handle = le16_to_cpu(volume_pg1.DevHandle);
8683 
8684 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8685 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8686 		     sizeof(Mpi2RaidVolPage0_t)))
8687 			continue;
8688 
8689 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8690 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8691 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8692 			_scsih_mark_responding_raid_device(ioc,
8693 			    le64_to_cpu(volume_pg1.WWID), handle);
8694 	}
8695 
8696 	/* refresh the pd_handles */
8697 	if (!ioc->is_warpdrive) {
8698 		phys_disk_num = 0xFF;
8699 		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
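		/*
		 * The handles of hidden raid components may have changed
		 * across the reset, so rebuild the pd_handles bitmap by
		 * walking the physical disk pages.
		 */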
8700 		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8701 		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8702 		    phys_disk_num))) {
8703 			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8704 			    MPI2_IOCSTATUS_MASK;
8705 			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8706 				break;
8707 			phys_disk_num = pd_pg0.PhysDiskNum;
8708 			handle = le16_to_cpu(pd_pg0.DevHandle);
8709 			set_bit(handle, ioc->pd_handles);
8710 		}
8711 	}
8712  out:
8713 	ioc_info(ioc, "search for responding raid volumes: complete\n");
8714 }
8715 
8716 /**
 * _scsih_mark_responding_expander - mark an expander as responding
 * @ioc: per adapter object
 * @expander_pg0: SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8723  */
8724 static void
8725 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8726 	Mpi2ExpanderPage0_t *expander_pg0)
8727 {
8728 	struct _sas_node *sas_expander = NULL;
8729 	unsigned long flags;
8730 	int i;
8731 	struct _enclosure_node *enclosure_dev = NULL;
8732 	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8733 	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8734 	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8735 
8736 	if (enclosure_handle)
8737 		enclosure_dev =
8738 			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8739 							enclosure_handle);
8740 
8741 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8742 	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8743 		if (sas_expander->sas_address != sas_address)
8744 			continue;
8745 		sas_expander->responding = 1;
8746 
8747 		if (enclosure_dev) {
8748 			sas_expander->enclosure_logical_id =
8749 			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8750 			sas_expander->enclosure_handle =
8751 			    le16_to_cpu(expander_pg0->EnclosureHandle);
8752 		}
8753 
8754 		if (sas_expander->handle == handle)
8755 			goto out;
8756 		pr_info("\texpander(0x%016llx): handle changed" \
8757 		    " from(0x%04x) to (0x%04x)!!!\n",
8758 		    (unsigned long long)sas_expander->sas_address,
8759 		    sas_expander->handle, handle);
8760 		sas_expander->handle = handle;
8761 		for (i = 0 ; i < sas_expander->num_phys ; i++)
8762 			sas_expander->phy[i].handle = handle;
8763 		goto out;
8764 	}
8765  out:
8766 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8767 }
8768 
8769 /**
 * _scsih_search_responding_expanders - search for responding expanders
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not, remove them.
8775  */
8776 static void
8777 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8778 {
8779 	Mpi2ExpanderPage0_t expander_pg0;
8780 	Mpi2ConfigReply_t mpi_reply;
8781 	u16 ioc_status;
8782 	u64 sas_address;
8783 	u16 handle;
8784 
8785 	ioc_info(ioc, "search for expanders: start\n");
8786 
8787 	if (list_empty(&ioc->sas_expander_list))
8788 		goto out;
8789 
8790 	handle = 0xFFFF;
8791 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8792 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8793 
8794 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8795 		    MPI2_IOCSTATUS_MASK;
8796 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8797 			break;
8798 
8799 		handle = le16_to_cpu(expander_pg0.DevHandle);
8800 		sas_address = le64_to_cpu(expander_pg0.SASAddress);
8801 		pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8802 			handle,
8803 		    (unsigned long long)sas_address);
8804 		_scsih_mark_responding_expander(ioc, &expander_pg0);
8805 	}
8806 
8807  out:
8808 	ioc_info(ioc, "search for expanders: complete\n");
8809 }
8810 
8811 /**
8812  * _scsih_remove_unresponding_devices - removing unresponding devices
8813  * @ioc: per adapter object
8814  */
8815 static void
8816 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8817 {
8818 	struct _sas_device *sas_device, *sas_device_next;
8819 	struct _sas_node *sas_expander, *sas_expander_next;
8820 	struct _raid_device *raid_device, *raid_device_next;
8821 	struct _pcie_device *pcie_device, *pcie_device_next;
8822 	struct list_head tmp_list;
8823 	unsigned long flags;
8824 	LIST_HEAD(head);
8825 
8826 	ioc_info(ioc, "removing unresponding devices: start\n");
8827 
8828 	/* removing unresponding end devices */
8829 	ioc_info(ioc, "removing unresponding devices: end-devices\n");
8830 	/*
8831 	 * Iterate, pulling off devices marked as non-responding. We become the
8832 	 * owner for the reference the list had on any object we prune.
8833 	 */
8834 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
8835 	list_for_each_entry_safe(sas_device, sas_device_next,
8836 	    &ioc->sas_device_list, list) {
8837 		if (!sas_device->responding)
8838 			list_move_tail(&sas_device->list, &head);
8839 		else
8840 			sas_device->responding = 0;
8841 	}
8842 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8843 
8844 	/*
8845 	 * Now, uninitialize and remove the unresponding devices we pruned.
8846 	 */
8847 	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8848 		_scsih_remove_device(ioc, sas_device);
8849 		list_del_init(&sas_device->list);
8850 		sas_device_put(sas_device);
8851 	}
8852 
8853 	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
8854 	INIT_LIST_HEAD(&head);
8855 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8856 	list_for_each_entry_safe(pcie_device, pcie_device_next,
8857 	    &ioc->pcie_device_list, list) {
8858 		if (!pcie_device->responding)
8859 			list_move_tail(&pcie_device->list, &head);
8860 		else
8861 			pcie_device->responding = 0;
8862 	}
8863 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8864 
8865 	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8866 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8867 		list_del_init(&pcie_device->list);
8868 		pcie_device_put(pcie_device);
8869 	}
8870 
8871 	/* removing unresponding volumes */
8872 	if (ioc->ir_firmware) {
8873 		ioc_info(ioc, "removing unresponding devices: volumes\n");
8874 		list_for_each_entry_safe(raid_device, raid_device_next,
8875 		    &ioc->raid_device_list, list) {
8876 			if (!raid_device->responding)
8877 				_scsih_sas_volume_delete(ioc,
8878 				    raid_device->handle);
8879 			else
8880 				raid_device->responding = 0;
8881 		}
8882 	}
8883 
8884 	/* removing unresponding expanders */
8885 	ioc_info(ioc, "removing unresponding devices: expanders\n");
8886 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
8887 	INIT_LIST_HEAD(&tmp_list);
8888 	list_for_each_entry_safe(sas_expander, sas_expander_next,
8889 	    &ioc->sas_expander_list, list) {
8890 		if (!sas_expander->responding)
8891 			list_move_tail(&sas_expander->list, &tmp_list);
8892 		else
8893 			sas_expander->responding = 0;
8894 	}
8895 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8896 	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8897 	    list) {
8898 		_scsih_expander_node_remove(ioc, sas_expander);
8899 	}
8900 
8901 	ioc_info(ioc, "removing unresponding devices: complete\n");
8902 
8903 	/* unblock devices */
8904 	_scsih_ublock_io_all_device(ioc);
8905 }
8906 
8907 static void
8908 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8909 	struct _sas_node *sas_expander, u16 handle)
8910 {
8911 	Mpi2ExpanderPage1_t expander_pg1;
8912 	Mpi2ConfigReply_t mpi_reply;
8913 	int i;
8914 
8915 	for (i = 0 ; i < sas_expander->num_phys ; i++) {
8916 		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8917 		    &expander_pg1, i, handle))) {
8918 			ioc_err(ioc, "failure at %s:%d/%s()!\n",
8919 				__FILE__, __LINE__, __func__);
8920 			return;
8921 		}
8922 
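		/*
		 * NegotiatedLinkRate carries the physical rate in the low
		 * nibble and the logical rate in the high nibble; shift the
		 * logical rate down before reporting it to the transport
		 * layer.
		 */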
8923 		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
8924 		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
8925 		    expander_pg1.NegotiatedLinkRate >> 4);
8926 	}
8927 }
8928 
8929 /**
8930  * _scsih_scan_for_devices_after_reset - scan for devices after host reset
8931  * @ioc: per adapter object
8932  */
8933 static void
8934 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
8935 {
8936 	Mpi2ExpanderPage0_t expander_pg0;
8937 	Mpi2SasDevicePage0_t sas_device_pg0;
8938 	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8939 	Mpi2RaidVolPage1_t volume_pg1;
8940 	Mpi2RaidVolPage0_t volume_pg0;
8941 	Mpi2RaidPhysDiskPage0_t pd_pg0;
8942 	Mpi2EventIrConfigElement_t element;
8943 	Mpi2ConfigReply_t mpi_reply;
8944 	u8 phys_disk_num;
8945 	u16 ioc_status;
8946 	u16 handle, parent_handle;
8947 	u64 sas_address;
8948 	struct _sas_device *sas_device;
8949 	struct _pcie_device *pcie_device;
8950 	struct _sas_node *expander_device;
	struct _raid_device *raid_device;
8952 	u8 retry_count;
8953 	unsigned long flags;
8954 
8955 	ioc_info(ioc, "scan devices: start\n");
8956 
8957 	_scsih_sas_host_refresh(ioc);
8958 
8959 	ioc_info(ioc, "\tscan devices: expanders start\n");
8960 
8961 	/* expanders */
8962 	handle = 0xFFFF;
8963 	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8964 	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8965 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8966 		    MPI2_IOCSTATUS_MASK;
8967 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8968 			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
8969 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
8970 			break;
8971 		}
8972 		handle = le16_to_cpu(expander_pg0.DevHandle);
8973 		spin_lock_irqsave(&ioc->sas_node_lock, flags);
8974 		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
8975 		    ioc, le64_to_cpu(expander_pg0.SASAddress));
8976 		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8977 		if (expander_device)
8978 			_scsih_refresh_expander_links(ioc, expander_device,
8979 			    handle);
8980 		else {
8981 			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
8982 				 handle,
8983 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
8984 			_scsih_expander_add(ioc, handle);
8985 			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
8986 				 handle,
8987 				 (u64)le64_to_cpu(expander_pg0.SASAddress));
8988 		}
8989 	}
8990 
8991 	ioc_info(ioc, "\tscan devices: expanders complete\n");
8992 
8993 	if (!ioc->ir_firmware)
8994 		goto skip_to_sas;
8995 
8996 	ioc_info(ioc, "\tscan devices: phys disk start\n");
8997 
8998 	/* phys disk */
8999 	phys_disk_num = 0xFF;
9000 	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9001 	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9002 	    phys_disk_num))) {
9003 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9004 		    MPI2_IOCSTATUS_MASK;
9005 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9006 			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9007 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9008 			break;
9009 		}
9010 		phys_disk_num = pd_pg0.PhysDiskNum;
9011 		handle = le16_to_cpu(pd_pg0.DevHandle);
9012 		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9013 		if (sas_device) {
9014 			sas_device_put(sas_device);
9015 			continue;
9016 		}
9017 		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9018 		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9019 		    handle) != 0)
9020 			continue;
9021 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9022 		    MPI2_IOCSTATUS_MASK;
9023 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9024 			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9025 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9026 			break;
9027 		}
9028 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9029 		if (!_scsih_get_sas_address(ioc, parent_handle,
9030 		    &sas_address)) {
9031 			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9032 				 handle,
9033 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9034 			mpt3sas_transport_update_links(ioc, sas_address,
9035 			    handle, sas_device_pg0.PhyNum,
9036 			    MPI2_SAS_NEG_LINK_RATE_1_5);
9037 			set_bit(handle, ioc->pd_handles);
9038 			retry_count = 0;
9039 			/* This will retry adding the end device.
9040 			 * _scsih_add_device() will decide on retries and
9041 			 * return "1" when it should be retried
9042 			 */
9043 			while (_scsih_add_device(ioc, handle, retry_count++,
9044 			    1)) {
9045 				ssleep(1);
9046 			}
9047 			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9048 				 handle,
9049 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9050 		}
9051 	}
9052 
9053 	ioc_info(ioc, "\tscan devices: phys disk complete\n");
9054 
9055 	ioc_info(ioc, "\tscan devices: volumes start\n");
9056 
9057 	/* volumes */
9058 	handle = 0xFFFF;
9059 	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9060 	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9061 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9062 		    MPI2_IOCSTATUS_MASK;
9063 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9064 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9065 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9066 			break;
9067 		}
9068 		handle = le16_to_cpu(volume_pg1.DevHandle);
9069 		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9070 		raid_device = _scsih_raid_device_find_by_wwid(ioc,
9071 		    le64_to_cpu(volume_pg1.WWID));
9072 		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9073 		if (raid_device)
9074 			continue;
9075 		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9076 		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9077 		     sizeof(Mpi2RaidVolPage0_t)))
9078 			continue;
9079 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9080 		    MPI2_IOCSTATUS_MASK;
9081 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9082 			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9083 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9084 			break;
9085 		}
9086 		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9087 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9088 		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9089 			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9090 			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9091 			element.VolDevHandle = volume_pg1.DevHandle;
9092 			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9093 				 volume_pg1.DevHandle);
9094 			_scsih_sas_volume_add(ioc, &element);
9095 			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9096 				 volume_pg1.DevHandle);
9097 		}
9098 	}
9099 
9100 	ioc_info(ioc, "\tscan devices: volumes complete\n");
9101 
9102  skip_to_sas:
9103 
9104 	ioc_info(ioc, "\tscan devices: end devices start\n");
9105 
9106 	/* sas devices */
9107 	handle = 0xFFFF;
9108 	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9109 	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9110 	    handle))) {
9111 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9112 		    MPI2_IOCSTATUS_MASK;
9113 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9114 			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9115 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9116 			break;
9117 		}
9118 		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9119 		if (!(_scsih_is_end_device(
9120 		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
9121 			continue;
9122 		sas_device = mpt3sas_get_sdev_by_addr(ioc,
9123 		    le64_to_cpu(sas_device_pg0.SASAddress));
9124 		if (sas_device) {
9125 			sas_device_put(sas_device);
9126 			continue;
9127 		}
9128 		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9129 		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9130 			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9131 				 handle,
9132 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9133 			mpt3sas_transport_update_links(ioc, sas_address, handle,
9134 			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9135 			retry_count = 0;
9136 			/* This will retry adding the end device.
9137 			 * _scsih_add_device() will decide on retries and
9138 			 * return "1" when it should be retried
9139 			 */
9140 			while (_scsih_add_device(ioc, handle, retry_count++,
9141 			    0)) {
9142 				ssleep(1);
9143 			}
9144 			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9145 				 handle,
9146 				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9147 		}
9148 	}
9149 	ioc_info(ioc, "\tscan devices: end devices complete\n");
9150 	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9151 
9152 	/* pcie devices */
9153 	handle = 0xFFFF;
9154 	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9155 		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9156 		handle))) {
9157 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9158 				& MPI2_IOCSTATUS_MASK;
9159 		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9160 			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9161 				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9162 			break;
9163 		}
9164 		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9165 		if (!(_scsih_is_nvme_device(
9166 			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9167 			continue;
9168 		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9169 				le64_to_cpu(pcie_device_pg0.WWID));
9170 		if (pcie_device) {
9171 			pcie_device_put(pcie_device);
9172 			continue;
9173 		}
9174 		retry_count = 0;
9175 		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9176 		_scsih_pcie_add_device(ioc, handle);
9177 
9178 		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9179 			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9180 	}
9181 	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
9182 	ioc_info(ioc, "scan devices: complete\n");
9183 }
9184 
9185 /**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
9187  * @ioc: per adapter object
9188  *
9189  * The handler for doing any required cleanup or initialization.
9190  */
9191 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9192 {
9193 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9194 }
9195 
9196 /**
9197  * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9198  * @ioc: per adapter object
9199  *
9200  * The handler for doing any required cleanup or initialization.
9201  */
9202 void
9203 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9204 {
9205 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9206 	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9207 		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9208 		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9209 		complete(&ioc->scsih_cmds.done);
9210 	}
9211 	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9212 		ioc->tm_cmds.status |= MPT3_CMD_RESET;
9213 		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9214 		complete(&ioc->tm_cmds.done);
9215 	}
9216 
9217 	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9218 	memset(ioc->device_remove_in_progress, 0,
9219 	       ioc->device_remove_in_progress_sz);
9220 	_scsih_fw_event_cleanup_queue(ioc);
9221 	_scsih_flush_running_cmds(ioc);
9222 }
9223 
9224 /**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
9226  * @ioc: per adapter object
9227  *
9228  * The handler for doing any required cleanup or initialization.
9229  */
9230 void
9231 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9232 {
9233 	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9234 	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9235 					   !ioc->sas_hba.num_phys)) {
9236 		_scsih_prep_device_scan(ioc);
9237 		_scsih_create_enclosure_list_after_reset(ioc);
9238 		_scsih_search_responding_sas_devices(ioc);
9239 		_scsih_search_responding_pcie_devices(ioc);
9240 		_scsih_search_responding_raid_devices(ioc);
9241 		_scsih_search_responding_expanders(ioc);
9242 		_scsih_error_recovery_delete_devices(ioc);
9243 	}
9244 }
9245 
9246 /**
9247  * _mpt3sas_fw_work - delayed task for processing firmware events
9248  * @ioc: per adapter object
9249  * @fw_event: The fw_event_work object
9250  * Context: user.
9251  */
9252 static void
9253 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9254 {
9255 	_scsih_fw_event_del_from_list(ioc, fw_event);
9256 
9257 	/* the queue is being flushed so ignore this event */
9258 	if (ioc->remove_host || ioc->pci_error_recovery) {
9259 		fw_event_work_put(fw_event);
9260 		return;
9261 	}
9262 
9263 	switch (fw_event->event) {
9264 	case MPT3SAS_PROCESS_TRIGGER_DIAG:
9265 		mpt3sas_process_trigger_data(ioc,
9266 			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9267 			fw_event->event_data);
9268 		break;
9269 	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9270 		while (scsi_host_in_recovery(ioc->shost) ||
9271 					 ioc->shost_recovery) {
9272 			/*
9273 			 * If we're unloading, bail. Otherwise, this can become
9274 			 * an infinite loop.
9275 			 */
9276 			if (ioc->remove_host)
9277 				goto out;
9278 			ssleep(1);
9279 		}
9280 		_scsih_remove_unresponding_devices(ioc);
9281 		_scsih_scan_for_devices_after_reset(ioc);
9282 		break;
9283 	case MPT3SAS_PORT_ENABLE_COMPLETE:
9284 		ioc->start_scan = 0;
9285 		if (missing_delay[0] != -1 && missing_delay[1] != -1)
9286 			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9287 			    missing_delay[1]);
9288 		dewtprintk(ioc,
9289 			   ioc_info(ioc, "port enable: complete from worker thread\n"));
9290 		break;
9291 	case MPT3SAS_TURN_ON_PFA_LED:
9292 		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9293 		break;
9294 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9295 		_scsih_sas_topology_change_event(ioc, fw_event);
9296 		break;
9297 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9298 		_scsih_sas_device_status_change_event(ioc, fw_event);
9299 		break;
9300 	case MPI2_EVENT_SAS_DISCOVERY:
9301 		_scsih_sas_discovery_event(ioc, fw_event);
9302 		break;
9303 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9304 		_scsih_sas_device_discovery_error_event(ioc, fw_event);
9305 		break;
9306 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9307 		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
9308 		break;
9309 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9310 		_scsih_sas_enclosure_dev_status_change_event(ioc,
9311 		    fw_event);
9312 		break;
9313 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9314 		_scsih_sas_ir_config_change_event(ioc, fw_event);
9315 		break;
9316 	case MPI2_EVENT_IR_VOLUME:
9317 		_scsih_sas_ir_volume_event(ioc, fw_event);
9318 		break;
9319 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9320 		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
9321 		break;
9322 	case MPI2_EVENT_IR_OPERATION_STATUS:
9323 		_scsih_sas_ir_operation_status_event(ioc, fw_event);
9324 		break;
9325 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9326 		_scsih_pcie_device_status_change_event(ioc, fw_event);
9327 		break;
9328 	case MPI2_EVENT_PCIE_ENUMERATION:
9329 		_scsih_pcie_enumeration_event(ioc, fw_event);
9330 		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		return;
	}
9336 out:
9337 	fw_event_work_put(fw_event);
9338 }
9339 
9340 /**
 * _firmware_event_work - work routine for processing firmware events
 * @work: The fw_event_work object
 * Context: user.
 *
 * Wrapper for the work thread handling firmware events.
9346  */
9347 
9348 static void
9349 _firmware_event_work(struct work_struct *work)
9350 {
9351 	struct fw_event_work *fw_event = container_of(work,
9352 	    struct fw_event_work, work);
9353 
9354 	_mpt3sas_fw_work(fw_event->ioc, fw_event);
9355 }
9356 
9357 /**
9358  * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9359  * @ioc: per adapter object
9360  * @msix_index: MSIX table index supplied by the OS
9361  * @reply: reply message frame(lower 32bit addr)
9362  * Context: interrupt.
9363  *
9364  * This function merely adds a new work task into ioc->firmware_event_thread.
9365  * The tasks are worked from _firmware_event_work in user context.
9366  *
9367  * Return: 1 meaning mf should be freed from _base_interrupt
9368  *         0 means the mf is freed from this function.
9369  */
9370 u8
9371 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9372 	u32 reply)
9373 {
9374 	struct fw_event_work *fw_event;
9375 	Mpi2EventNotificationReply_t *mpi_reply;
9376 	u16 event;
9377 	u16 sz;
9378 	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9379 
9380 	/* events turned off due to host reset */
9381 	if (ioc->pci_error_recovery)
9382 		return 1;
9383 
9384 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9385 
9386 	if (unlikely(!mpi_reply)) {
9387 		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9388 			__FILE__, __LINE__, __func__);
9389 		return 1;
9390 	}
9391 
9392 	event = le16_to_cpu(mpi_reply->Event);
9393 
9394 	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9395 		mpt3sas_trigger_event(ioc, event, 0);
9396 
9397 	switch (event) {
9398 	/* handle these */
9399 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9400 	{
9401 		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9402 		    (Mpi2EventDataSasBroadcastPrimitive_t *)
9403 		    mpi_reply->EventData;
9404 
9405 		if (baen_data->Primitive !=
9406 		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9407 			return 1;
9408 
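		/*
		 * Only one broadcast primitive is processed at a time; any
		 * further primitives that arrive meanwhile are only counted
		 * in broadcast_aen_pending so the handler can rerun once the
		 * current one completes.
		 */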
9409 		if (ioc->broadcast_aen_busy) {
9410 			ioc->broadcast_aen_pending++;
9411 			return 1;
9412 		} else
9413 			ioc->broadcast_aen_busy = 1;
9414 		break;
9415 	}
9416 
9417 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9418 		_scsih_check_topo_delete_events(ioc,
9419 		    (Mpi2EventDataSasTopologyChangeList_t *)
9420 		    mpi_reply->EventData);
9421 		break;
9422 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9423 	_scsih_check_pcie_topo_remove_events(ioc,
9424 		    (Mpi26EventDataPCIeTopologyChangeList_t *)
9425 		    mpi_reply->EventData);
9426 		break;
9427 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9428 		_scsih_check_ir_config_unhide_events(ioc,
9429 		    (Mpi2EventDataIrConfigChangeList_t *)
9430 		    mpi_reply->EventData);
9431 		break;
9432 	case MPI2_EVENT_IR_VOLUME:
9433 		_scsih_check_volume_delete_events(ioc,
9434 		    (Mpi2EventDataIrVolume_t *)
9435 		    mpi_reply->EventData);
9436 		break;
9437 	case MPI2_EVENT_LOG_ENTRY_ADDED:
9438 	{
9439 		Mpi2EventDataLogEntryAdded_t *log_entry;
9440 		u32 *log_code;
9441 
9442 		if (!ioc->is_warpdrive)
9443 			break;
9444 
9445 		log_entry = (Mpi2EventDataLogEntryAdded_t *)
9446 		    mpi_reply->EventData;
9447 		log_code = (u32 *)log_entry->LogData;
9448 
9449 		if (le16_to_cpu(log_entry->LogEntryQualifier)
9450 		    != MPT2_WARPDRIVE_LOGENTRY)
9451 			break;
9452 
9453 		switch (le32_to_cpu(*log_code)) {
9454 		case MPT2_WARPDRIVE_LC_SSDT:
9455 			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9456 			break;
9457 		case MPT2_WARPDRIVE_LC_SSDLW:
9458 			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9459 			break;
9460 		case MPT2_WARPDRIVE_LC_SSDLF:
9461 			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9462 			break;
9463 		case MPT2_WARPDRIVE_LC_BRMF:
9464 			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9465 			break;
9466 		}
9467 
9468 		break;
9469 	}
9470 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9471 	case MPI2_EVENT_IR_OPERATION_STATUS:
9472 	case MPI2_EVENT_SAS_DISCOVERY:
9473 	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9474 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9475 	case MPI2_EVENT_IR_PHYSICAL_DISK:
9476 	case MPI2_EVENT_PCIE_ENUMERATION:
9477 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9478 		break;
9479 
9480 	case MPI2_EVENT_TEMP_THRESHOLD:
9481 		_scsih_temp_threshold_events(ioc,
9482 			(Mpi2EventDataTemperature_t *)
9483 			mpi_reply->EventData);
9484 		break;
9485 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9486 		ActiveCableEventData =
9487 		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9488 		switch (ActiveCableEventData->ReasonCode) {
9489 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9490 			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9491 				   ActiveCableEventData->ReceptacleID);
9492 			pr_notice("cannot be powered and devices connected\n");
9493 			pr_notice("to this active cable will not be seen\n");
9494 			pr_notice("This active cable requires %d mW of power\n",
9495 			     ActiveCableEventData->ActiveCablePowerRequirement);
9496 			break;
9497 
9498 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9499 			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9500 				   ActiveCableEventData->ReceptacleID);
9501 			pr_notice(
9502 			    "is not running at optimal speed(12 Gb/s rate)\n");
9503 			break;
9504 		}
9505 
9506 		break;
9507 
9508 	default: /* ignore the rest */
9509 		return 1;
9510 	}
9511 
9512 	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9513 	fw_event = alloc_fw_event_work(sz);
9514 	if (!fw_event) {
9515 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9516 			__FILE__, __LINE__, __func__);
9517 		return 1;
9518 	}
9519 
9520 	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9521 	fw_event->ioc = ioc;
9522 	fw_event->VF_ID = mpi_reply->VF_ID;
9523 	fw_event->VP_ID = mpi_reply->VP_ID;
9524 	fw_event->event = event;
9525 	_scsih_fw_event_add(ioc, fw_event);
9526 	fw_event_work_put(fw_event);
9527 	return 1;
9528 }
9529 
9530 /**
9531  * _scsih_expander_node_remove - removing expander device from list.
9532  * @ioc: per adapter object
9533  * @sas_expander: the sas_device object
9534  *
9535  * Removing object and freeing associated memory from the
9536  * ioc->sas_expander_list.
9537  */
9538 static void
9539 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9540 	struct _sas_node *sas_expander)
9541 {
9542 	struct _sas_port *mpt3sas_port, *next;
9543 	unsigned long flags;
9544 
9545 	/* remove sibling ports attached to this expander */
9546 	list_for_each_entry_safe(mpt3sas_port, next,
9547 	   &sas_expander->sas_port_list, port_list) {
9548 		if (ioc->shost_recovery)
9549 			return;
9550 		if (mpt3sas_port->remote_identify.device_type ==
9551 		    SAS_END_DEVICE)
9552 			mpt3sas_device_remove_by_sas_address(ioc,
9553 			    mpt3sas_port->remote_identify.sas_address);
9554 		else if (mpt3sas_port->remote_identify.device_type ==
9555 		    SAS_EDGE_EXPANDER_DEVICE ||
9556 		    mpt3sas_port->remote_identify.device_type ==
9557 		    SAS_FANOUT_EXPANDER_DEVICE)
9558 			mpt3sas_expander_remove(ioc,
9559 			    mpt3sas_port->remote_identify.sas_address);
9560 	}
9561 
9562 	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9563 	    sas_expander->sas_address_parent);
9564 
9565 	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9566 		 sas_expander->handle, (unsigned long long)
9567 		 sas_expander->sas_address);
9568 
9569 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
9570 	list_del(&sas_expander->list);
9571 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9572 
9573 	kfree(sas_expander->phy);
9574 	kfree(sas_expander);
9575 }
9576 
9577 /**
9578  * _scsih_ir_shutdown - IR shutdown notification
9579  * @ioc: per adapter object
9580  *
 * Sends a RAID Action request to alert the Integrated RAID subsystem of the
 * IOC that the host system is shutting down.
9583  */
9584 static void
9585 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9586 {
9587 	Mpi2RaidActionRequest_t *mpi_request;
9588 	Mpi2RaidActionReply_t *mpi_reply;
9589 	u16 smid;
9590 
9591 	/* is IR firmware build loaded ? */
9592 	if (!ioc->ir_firmware)
9593 		return;
9594 
9595 	/* are there any volumes ? */
9596 	if (list_empty(&ioc->raid_device_list))
9597 		return;
9598 
9599 	mutex_lock(&ioc->scsih_cmds.mutex);
9600 
9601 	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9602 		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9603 		goto out;
9604 	}
9605 	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9606 
9607 	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9608 	if (!smid) {
9609 		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9610 		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9611 		goto out;
9612 	}
9613 
9614 	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9615 	ioc->scsih_cmds.smid = smid;
9616 	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9617 
9618 	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9619 	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9620 
9621 	if (!ioc->hide_ir_msg)
9622 		ioc_info(ioc, "IR shutdown (sending)\n");
9623 	init_completion(&ioc->scsih_cmds.done);
9624 	mpt3sas_base_put_smid_default(ioc, smid);
9625 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9626 
9627 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9628 		ioc_err(ioc, "%s: timeout\n", __func__);
9629 		goto out;
9630 	}
9631 
9632 	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9633 		mpi_reply = ioc->scsih_cmds.reply;
9634 		if (!ioc->hide_ir_msg)
9635 			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9636 				 le16_to_cpu(mpi_reply->IOCStatus),
9637 				 le32_to_cpu(mpi_reply->IOCLogInfo));
9638 	}
9639 
9640  out:
9641 	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9642 	mutex_unlock(&ioc->scsih_cmds.mutex);
9643 }
9644 
9645 /**
 * scsih_remove - detach and remove the scsi host
9647  * @pdev: PCI device struct
9648  *
9649  * Routine called when unloading the driver.
9650  */
9651 static void scsih_remove(struct pci_dev *pdev)
9652 {
9653 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9654 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9655 	struct _sas_port *mpt3sas_port, *next_port;
9656 	struct _raid_device *raid_device, *next;
9657 	struct MPT3SAS_TARGET *sas_target_priv_data;
9658 	struct _pcie_device *pcie_device, *pcienext;
9659 	struct workqueue_struct	*wq;
9660 	unsigned long flags;
9661 
9662 	ioc->remove_host = 1;
9663 
9664 	mpt3sas_wait_for_commands_to_complete(ioc);
9665 	_scsih_flush_running_cmds(ioc);
9666 
9667 	_scsih_fw_event_cleanup_queue(ioc);
9668 
9669 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9670 	wq = ioc->firmware_event_thread;
9671 	ioc->firmware_event_thread = NULL;
9672 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9673 	if (wq)
9674 		destroy_workqueue(wq);
9675 
9676 	/* release all the volumes */
9677 	_scsih_ir_shutdown(ioc);
9678 	sas_remove_host(shost);
9679 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9680 	    list) {
9681 		if (raid_device->starget) {
9682 			sas_target_priv_data =
9683 			    raid_device->starget->hostdata;
9684 			sas_target_priv_data->deleted = 1;
9685 			scsi_remove_target(&raid_device->starget->dev);
9686 		}
9687 		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9688 			 raid_device->handle, (u64)raid_device->wwid);
9689 		_scsih_raid_device_remove(ioc, raid_device);
9690 	}
9691 	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9692 		list) {
9693 		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9694 		list_del_init(&pcie_device->list);
9695 		pcie_device_put(pcie_device);
9696 	}
9697 
9698 	/* free ports attached to the sas_host */
9699 	list_for_each_entry_safe(mpt3sas_port, next_port,
9700 	   &ioc->sas_hba.sas_port_list, port_list) {
9701 		if (mpt3sas_port->remote_identify.device_type ==
9702 		    SAS_END_DEVICE)
9703 			mpt3sas_device_remove_by_sas_address(ioc,
9704 			    mpt3sas_port->remote_identify.sas_address);
9705 		else if (mpt3sas_port->remote_identify.device_type ==
9706 		    SAS_EDGE_EXPANDER_DEVICE ||
9707 		    mpt3sas_port->remote_identify.device_type ==
9708 		    SAS_FANOUT_EXPANDER_DEVICE)
9709 			mpt3sas_expander_remove(ioc,
9710 			    mpt3sas_port->remote_identify.sas_address);
9711 	}
9712 
9713 	/* free phys attached to the sas_host */
9714 	if (ioc->sas_hba.num_phys) {
9715 		kfree(ioc->sas_hba.phy);
9716 		ioc->sas_hba.phy = NULL;
9717 		ioc->sas_hba.num_phys = 0;
9718 	}
9719 
9720 	mpt3sas_base_detach(ioc);
9721 	spin_lock(&gioc_lock);
9722 	list_del(&ioc->list);
9723 	spin_unlock(&gioc_lock);
9724 	scsi_host_put(shost);
9725 }
9726 
9727 /**
 * scsih_shutdown - routine called during system shutdown
9729  * @pdev: PCI device struct
9730  */
9731 static void
9732 scsih_shutdown(struct pci_dev *pdev)
9733 {
9734 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
9735 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9736 	struct workqueue_struct	*wq;
9737 	unsigned long flags;
9738 
9739 	ioc->remove_host = 1;
9740 
9741 	mpt3sas_wait_for_commands_to_complete(ioc);
9742 	_scsih_flush_running_cmds(ioc);
9743 
9744 	_scsih_fw_event_cleanup_queue(ioc);
9745 
9746 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
9747 	wq = ioc->firmware_event_thread;
9748 	ioc->firmware_event_thread = NULL;
9749 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9750 	if (wq)
9751 		destroy_workqueue(wq);
9752 
9753 	_scsih_ir_shutdown(ioc);
9754 	mpt3sas_base_detach(ioc);
9755 }
9756 
9757 
9758 /**
9759  * _scsih_probe_boot_devices - reports 1st device
9760  * @ioc: per adapter object
9761  *
 * If specified in bios page 2, this routine reports the 1st device
 * to scsi-ml or the sas transport layer for persistent boot device
 * purposes.  Please refer to function _scsih_determine_boot_device().
9765  */
9766 static void
9767 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9768 {
9769 	u32 channel;
9770 	void *device;
9771 	struct _sas_device *sas_device;
9772 	struct _raid_device *raid_device;
9773 	struct _pcie_device *pcie_device;
9774 	u16 handle;
9775 	u64 sas_address_parent;
9776 	u64 sas_address;
9777 	unsigned long flags;
9778 	int rc;
9779 	int tid;
9780 
	/* no BIOS, return immediately */
9782 	if (!ioc->bios_pg3.BiosVersion)
9783 		return;
9784 
9785 	device = NULL;
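	/*
	 * Boot device selection follows bios page 2 priority: the requested
	 * boot device first, then the requested alternate boot device, and
	 * finally the current boot device.
	 */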
9786 	if (ioc->req_boot_device.device) {
9787 		device =  ioc->req_boot_device.device;
9788 		channel = ioc->req_boot_device.channel;
9789 	} else if (ioc->req_alt_boot_device.device) {
9790 		device =  ioc->req_alt_boot_device.device;
9791 		channel = ioc->req_alt_boot_device.channel;
9792 	} else if (ioc->current_boot_device.device) {
9793 		device =  ioc->current_boot_device.device;
9794 		channel = ioc->current_boot_device.channel;
9795 	}
9796 
9797 	if (!device)
9798 		return;
9799 
9800 	if (channel == RAID_CHANNEL) {
9801 		raid_device = device;
9802 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9803 		    raid_device->id, 0);
9804 		if (rc)
9805 			_scsih_raid_device_remove(ioc, raid_device);
9806 	} else if (channel == PCIE_CHANNEL) {
9807 		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9808 		pcie_device = device;
9809 		tid = pcie_device->id;
9810 		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9811 		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9812 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9813 		if (rc)
9814 			_scsih_pcie_device_remove(ioc, pcie_device);
9815 	} else {
9816 		spin_lock_irqsave(&ioc->sas_device_lock, flags);
9817 		sas_device = device;
9818 		handle = sas_device->handle;
9819 		sas_address_parent = sas_device->sas_address_parent;
9820 		sas_address = sas_device->sas_address;
9821 		list_move_tail(&sas_device->list, &ioc->sas_device_list);
9822 		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9823 
9824 		if (ioc->hide_drives)
9825 			return;
9826 		if (!mpt3sas_transport_port_add(ioc, handle,
9827 		    sas_address_parent)) {
9828 			_scsih_sas_device_remove(ioc, sas_device);
9829 		} else if (!sas_device->starget) {
9830 			if (!ioc->is_driver_loading) {
9831 				mpt3sas_transport_port_remove(ioc,
9832 				    sas_address,
9833 				    sas_address_parent);
9834 				_scsih_sas_device_remove(ioc, sas_device);
9835 			}
9836 		}
9837 	}
9838 }
9839 
9840 /**
9841  * _scsih_probe_raid - reporting raid volumes to scsi-ml
9842  * @ioc: per adapter object
9843  *
9844  * Called during initial loading of the driver.
9845  */
9846 static void
9847 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9848 {
9849 	struct _raid_device *raid_device, *raid_next;
9850 	int rc;
9851 
9852 	list_for_each_entry_safe(raid_device, raid_next,
9853 	    &ioc->raid_device_list, list) {
9854 		if (raid_device->starget)
9855 			continue;
9856 		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9857 		    raid_device->id, 0);
9858 		if (rc)
9859 			_scsih_raid_device_remove(ioc, raid_device);
9860 	}
9861 }
9862 
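/**
 * get_next_sas_device - Get the next sas device
 * @ioc: per adapter object
 *
 * Get the next sas device from the sas_device_init_list list.
 *
 * Return: sas device structure if sas_device_init_list is not empty,
 * otherwise NULL.
 */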
9863 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9864 {
9865 	struct _sas_device *sas_device = NULL;
9866 	unsigned long flags;
9867 
9868 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9869 	if (!list_empty(&ioc->sas_device_init_list)) {
9870 		sas_device = list_first_entry(&ioc->sas_device_init_list,
9871 				struct _sas_device, list);
9872 		sas_device_get(sas_device);
9873 	}
9874 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9875 
9876 	return sas_device;
9877 }
9878 
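/**
 * sas_device_make_active - Add sas device to the sas_device_list list
 * @ioc: per adapter object
 * @sas_device: sas device object
 *
 * Move the sas device that has just been reported to the transport layer
 * from the sas_device_init_list list to the sas_device_list list.
 */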
9879 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9880 		struct _sas_device *sas_device)
9881 {
9882 	unsigned long flags;
9883 
9884 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
9885 
9886 	/*
9887 	 * Since we dropped the lock during the call to port_add(), we need to
9888 	 * be careful here that somebody else didn't move or delete this item
9889 	 * while we were busy with other things.
9890 	 *
9891 	 * If it was on the list, we need a put() for the reference the list
9892 	 * had. Either way, we need a get() for the destination list.
9893 	 */
9894 	if (!list_empty(&sas_device->list)) {
9895 		list_del_init(&sas_device->list);
9896 		sas_device_put(sas_device);
9897 	}
9898 
9899 	sas_device_get(sas_device);
9900 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
9901 
9902 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9903 }
9904 
9905 /**
9906  * _scsih_probe_sas - reporting sas devices to sas transport
9907  * @ioc: per adapter object
9908  *
9909  * Called during initial loading of the driver.
9910  */
9911 static void
9912 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
9913 {
9914 	struct _sas_device *sas_device;
9915 
9916 	if (ioc->hide_drives)
9917 		return;
9918 
9919 	while ((sas_device = get_next_sas_device(ioc))) {
9920 		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
9921 		    sas_device->sas_address_parent)) {
9922 			_scsih_sas_device_remove(ioc, sas_device);
9923 			sas_device_put(sas_device);
9924 			continue;
9925 		} else if (!sas_device->starget) {
9926 			/*
			 * When async scanning is enabled, it's not possible to
9928 			 * remove devices while scanning is turned on due to an
9929 			 * oops in scsi_sysfs_add_sdev()->add_device()->
9930 			 * sysfs_addrm_start()
9931 			 */
9932 			if (!ioc->is_driver_loading) {
9933 				mpt3sas_transport_port_remove(ioc,
9934 				    sas_device->sas_address,
9935 				    sas_device->sas_address_parent);
9936 				_scsih_sas_device_remove(ioc, sas_device);
9937 				sas_device_put(sas_device);
9938 				continue;
9939 			}
9940 		}
9941 		sas_device_make_active(ioc, sas_device);
9942 		sas_device_put(sas_device);
9943 	}
9944 }
9945 
9946 /**
9947  * get_next_pcie_device - Get the next pcie device
9948  * @ioc: per adapter object
9949  *
9950  * Get the next pcie device from pcie_device_init_list list.
9951  *
 * Return: pcie device structure if pcie_device_init_list is not empty,
 * otherwise NULL.
9954  */
9955 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
9956 {
9957 	struct _pcie_device *pcie_device = NULL;
9958 	unsigned long flags;
9959 
9960 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9961 	if (!list_empty(&ioc->pcie_device_init_list)) {
9962 		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
9963 				struct _pcie_device, list);
9964 		pcie_device_get(pcie_device);
9965 	}
9966 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9967 
9968 	return pcie_device;
9969 }
9970 
9971 /**
9972  * pcie_device_make_active - Add pcie device to pcie_device_list list
9973  * @ioc: per adapter object
9974  * @pcie_device: pcie device object
9975  *
 * Add the pcie device which has registered with the SCSI Transport Layer
 * to the pcie_device_list list.
9978  */
9979 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9980 		struct _pcie_device *pcie_device)
9981 {
9982 	unsigned long flags;
9983 
9984 	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9985 
9986 	if (!list_empty(&pcie_device->list)) {
9987 		list_del_init(&pcie_device->list);
9988 		pcie_device_put(pcie_device);
9989 	}
9990 	pcie_device_get(pcie_device);
9991 	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
9992 
9993 	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9994 }
9995 
9996 /**
9997  * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
9998  * @ioc: per adapter object
9999  *
10000  * Called during initial loading of the driver.
10001  */
10002 static void
10003 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10004 {
10005 	struct _pcie_device *pcie_device;
10006 	int rc;
10007 
10008 	/* PCIe Device List */
10009 	while ((pcie_device = get_next_pcie_device(ioc))) {
10010 		if (pcie_device->starget) {
10011 			pcie_device_put(pcie_device);
10012 			continue;
10013 		}
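		/* report the device to scsi-ml on the dedicated PCIe channel */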
10014 		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10015 			pcie_device->id, 0);
10016 		if (rc) {
10017 			_scsih_pcie_device_remove(ioc, pcie_device);
10018 			pcie_device_put(pcie_device);
10019 			continue;
10020 		} else if (!pcie_device->starget) {
10021 			/*
			 * When async scanning is enabled, it's not possible to
10023 			 * remove devices while scanning is turned on due to an
10024 			 * oops in scsi_sysfs_add_sdev()->add_device()->
10025 			 * sysfs_addrm_start()
10026 			 */
10027 			if (!ioc->is_driver_loading) {
10028 			/* TODO-- Need to find out whether this condition will
10029 			 * occur or not
10030 			 */
10031 				_scsih_pcie_device_remove(ioc, pcie_device);
10032 				pcie_device_put(pcie_device);
10033 				continue;
10034 			}
10035 		}
10036 		pcie_device_make_active(ioc, pcie_device);
10037 		pcie_device_put(pcie_device);
10038 	}
10039 }
10040 
10041 /**
10042  * _scsih_probe_devices - probing for devices
10043  * @ioc: per adapter object
10044  *
10045  * Called during initial loading of the driver.
10046  */
10047 static void
10048 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10049 {
10050 	u16 volume_mapping_flags;
10051 
10052 	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10053 		return;  /* return when IOC doesn't support initiator mode */
10054 
10055 	_scsih_probe_boot_devices(ioc);
10056 
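	/*
	 * With low volume mapping, report RAID volumes before bare SAS
	 * drives (presumably so volumes land on the low target IDs);
	 * otherwise report drives first.
	 */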
10057 	if (ioc->ir_firmware) {
10058 		volume_mapping_flags =
10059 		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10060 		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10061 		if (volume_mapping_flags ==
10062 		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10063 			_scsih_probe_raid(ioc);
10064 			_scsih_probe_sas(ioc);
10065 		} else {
10066 			_scsih_probe_sas(ioc);
10067 			_scsih_probe_raid(ioc);
10068 		}
10069 	} else {
10070 		_scsih_probe_sas(ioc);
10071 		_scsih_probe_pcie(ioc);
10072 	}
10073 }
10074 
10075 /**
10076  * scsih_scan_start - scsi lld callback for .scan_start
10077  * @shost: SCSI host pointer
10078  *
10079  * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implementation, we will kick off
10081  * firmware discovery.
10082  */
10083 static void
10084 scsih_scan_start(struct Scsi_Host *shost)
10085 {
10086 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10087 	int rc;
10088 	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10089 		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10090 
10091 	if (disable_discovery > 0)
10092 		return;
10093 
10094 	ioc->start_scan = 1;
10095 	rc = mpt3sas_port_enable(ioc);
10096 
10097 	if (rc != 0)
10098 		ioc_info(ioc, "port enable: FAILED\n");
10099 }
10100 
10101 /**
10102  * scsih_scan_finished - scsi lld callback for .scan_finished
10103  * @shost: SCSI host pointer
10104  * @time: elapsed time of the scan in jiffies
10105  *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10108  * we wait for firmware discovery to complete, then return 1.
10109  */
10110 static int
10111 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10112 {
10113 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10114 
10115 	if (disable_discovery > 0) {
10116 		ioc->is_driver_loading = 0;
10117 		ioc->wait_for_discovery_to_complete = 0;
10118 		return 1;
10119 	}
10120 
10121 	if (time >= (300 * HZ)) {
10122 		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10123 		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10124 		ioc->is_driver_loading = 0;
10125 		return 1;
10126 	}
10127 
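	/* start_scan is cleared by the port enable completion callback */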
10128 	if (ioc->start_scan)
10129 		return 0;
10130 
10131 	if (ioc->start_scan_failed) {
10132 		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10133 			 ioc->start_scan_failed);
10134 		ioc->is_driver_loading = 0;
10135 		ioc->wait_for_discovery_to_complete = 0;
10136 		ioc->remove_host = 1;
10137 		return 1;
10138 	}
10139 
10140 	ioc_info(ioc, "port enable: SUCCESS\n");
10141 	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10142 
10143 	if (ioc->wait_for_discovery_to_complete) {
10144 		ioc->wait_for_discovery_to_complete = 0;
10145 		_scsih_probe_devices(ioc);
10146 	}
10147 	mpt3sas_base_start_watchdog(ioc);
10148 	ioc->is_driver_loading = 0;
10149 	return 1;
10150 }
10151 
10152 /* shost template for SAS 2.0 HBA devices */
10153 static struct scsi_host_template mpt2sas_driver_template = {
10154 	.module				= THIS_MODULE,
10155 	.name				= "Fusion MPT SAS Host",
10156 	.proc_name			= MPT2SAS_DRIVER_NAME,
10157 	.queuecommand			= scsih_qcmd,
10158 	.target_alloc			= scsih_target_alloc,
10159 	.slave_alloc			= scsih_slave_alloc,
10160 	.slave_configure		= scsih_slave_configure,
10161 	.target_destroy			= scsih_target_destroy,
10162 	.slave_destroy			= scsih_slave_destroy,
10163 	.scan_finished			= scsih_scan_finished,
10164 	.scan_start			= scsih_scan_start,
10165 	.change_queue_depth		= scsih_change_queue_depth,
10166 	.eh_abort_handler		= scsih_abort,
10167 	.eh_device_reset_handler	= scsih_dev_reset,
10168 	.eh_target_reset_handler	= scsih_target_reset,
10169 	.eh_host_reset_handler		= scsih_host_reset,
10170 	.bios_param			= scsih_bios_param,
10171 	.can_queue			= 1,
10172 	.this_id			= -1,
10173 	.sg_tablesize			= MPT2SAS_SG_DEPTH,
10174 	.max_sectors			= 32767,
10175 	.cmd_per_lun			= 7,
10176 	.shost_attrs			= mpt3sas_host_attrs,
10177 	.sdev_attrs			= mpt3sas_dev_attrs,
10178 	.track_queue_depth		= 1,
10179 	.cmd_size			= sizeof(struct scsiio_tracker),
10180 };
10181 
10182 /* raid transport support for SAS 2.0 HBA devices */
10183 static struct raid_function_template mpt2sas_raid_functions = {
10184 	.cookie		= &mpt2sas_driver_template,
10185 	.is_raid	= scsih_is_raid,
10186 	.get_resync	= scsih_get_resync,
10187 	.get_state	= scsih_get_state,
10188 };
10189 
10190 /* shost template for SAS 3.0 HBA devices */
10191 static struct scsi_host_template mpt3sas_driver_template = {
10192 	.module				= THIS_MODULE,
10193 	.name				= "Fusion MPT SAS Host",
10194 	.proc_name			= MPT3SAS_DRIVER_NAME,
10195 	.queuecommand			= scsih_qcmd,
10196 	.target_alloc			= scsih_target_alloc,
10197 	.slave_alloc			= scsih_slave_alloc,
10198 	.slave_configure		= scsih_slave_configure,
10199 	.target_destroy			= scsih_target_destroy,
10200 	.slave_destroy			= scsih_slave_destroy,
10201 	.scan_finished			= scsih_scan_finished,
10202 	.scan_start			= scsih_scan_start,
10203 	.change_queue_depth		= scsih_change_queue_depth,
10204 	.eh_abort_handler		= scsih_abort,
10205 	.eh_device_reset_handler	= scsih_dev_reset,
10206 	.eh_target_reset_handler	= scsih_target_reset,
10207 	.eh_host_reset_handler		= scsih_host_reset,
10208 	.bios_param			= scsih_bios_param,
10209 	.can_queue			= 1,
10210 	.this_id			= -1,
10211 	.sg_tablesize			= MPT3SAS_SG_DEPTH,
10212 	.max_sectors			= 32767,
10213 	.cmd_per_lun			= 7,
10214 	.shost_attrs			= mpt3sas_host_attrs,
10215 	.sdev_attrs			= mpt3sas_dev_attrs,
10216 	.track_queue_depth		= 1,
10217 	.cmd_size			= sizeof(struct scsiio_tracker),
10218 };
10219 
10220 /* raid transport support for SAS 3.0 HBA devices */
10221 static struct raid_function_template mpt3sas_raid_functions = {
10222 	.cookie		= &mpt3sas_driver_template,
10223 	.is_raid	= scsih_is_raid,
10224 	.get_resync	= scsih_get_resync,
10225 	.get_state	= scsih_get_state,
10226 };
10227 
10228 /**
 * _scsih_determine_hba_mpi_version - determine which MPI version class
 *					this device belongs to.
 * @pdev: PCI device struct
 *
 * Return: MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices.
10236  */
10237 static u16
10238 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10239 {
10240 
10241 	switch (pdev->device) {
10242 	case MPI2_MFGPAGE_DEVID_SSS6200:
10243 	case MPI2_MFGPAGE_DEVID_SAS2004:
10244 	case MPI2_MFGPAGE_DEVID_SAS2008:
10245 	case MPI2_MFGPAGE_DEVID_SAS2108_1:
10246 	case MPI2_MFGPAGE_DEVID_SAS2108_2:
10247 	case MPI2_MFGPAGE_DEVID_SAS2108_3:
10248 	case MPI2_MFGPAGE_DEVID_SAS2116_1:
10249 	case MPI2_MFGPAGE_DEVID_SAS2116_2:
10250 	case MPI2_MFGPAGE_DEVID_SAS2208_1:
10251 	case MPI2_MFGPAGE_DEVID_SAS2208_2:
10252 	case MPI2_MFGPAGE_DEVID_SAS2208_3:
10253 	case MPI2_MFGPAGE_DEVID_SAS2208_4:
10254 	case MPI2_MFGPAGE_DEVID_SAS2208_5:
10255 	case MPI2_MFGPAGE_DEVID_SAS2208_6:
10256 	case MPI2_MFGPAGE_DEVID_SAS2308_1:
10257 	case MPI2_MFGPAGE_DEVID_SAS2308_2:
10258 	case MPI2_MFGPAGE_DEVID_SAS2308_3:
10259 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10260 	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10261 		return MPI2_VERSION;
10262 	case MPI25_MFGPAGE_DEVID_SAS3004:
10263 	case MPI25_MFGPAGE_DEVID_SAS3008:
10264 	case MPI25_MFGPAGE_DEVID_SAS3108_1:
10265 	case MPI25_MFGPAGE_DEVID_SAS3108_2:
10266 	case MPI25_MFGPAGE_DEVID_SAS3108_5:
10267 	case MPI25_MFGPAGE_DEVID_SAS3108_6:
10268 		return MPI25_VERSION;
10269 	case MPI26_MFGPAGE_DEVID_SAS3216:
10270 	case MPI26_MFGPAGE_DEVID_SAS3224:
10271 	case MPI26_MFGPAGE_DEVID_SAS3316_1:
10272 	case MPI26_MFGPAGE_DEVID_SAS3316_2:
10273 	case MPI26_MFGPAGE_DEVID_SAS3316_3:
10274 	case MPI26_MFGPAGE_DEVID_SAS3316_4:
10275 	case MPI26_MFGPAGE_DEVID_SAS3324_1:
10276 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
10277 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
10278 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
10279 	case MPI26_MFGPAGE_DEVID_SAS3508:
10280 	case MPI26_MFGPAGE_DEVID_SAS3508_1:
10281 	case MPI26_MFGPAGE_DEVID_SAS3408:
10282 	case MPI26_MFGPAGE_DEVID_SAS3516:
10283 	case MPI26_MFGPAGE_DEVID_SAS3516_1:
10284 	case MPI26_MFGPAGE_DEVID_SAS3416:
10285 	case MPI26_MFGPAGE_DEVID_SAS3616:
10286 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10287 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10288 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10289 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10290 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10291 		return MPI26_VERSION;
10292 	}
10293 	return 0;
10294 }
10295 
10296 /**
10297  * _scsih_probe - attach and add scsi host
10298  * @pdev: PCI device struct
10299  * @id: pci device id
10300  *
10301  * Return: 0 success, anything else error.
10302  */
10303 static int
10304 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10305 {
10306 	struct MPT3SAS_ADAPTER *ioc;
10307 	struct Scsi_Host *shost = NULL;
10308 	int rv;
10309 	u16 hba_mpi_version;
10310 
10311 	/* Determine in which MPI version class this pci device belongs */
10312 	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10313 	if (hba_mpi_version == 0)
10314 		return -ENODEV;
10315 
	/* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
	 * for other generation HBAs return -ENODEV.
	 */
10319 	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
10320 		return -ENODEV;
10321 
	/* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
	 * for other generation HBAs return -ENODEV.
	 */
10325 	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
10326 		|| hba_mpi_version ==  MPI26_VERSION)))
10327 		return -ENODEV;
10328 
10329 	switch (hba_mpi_version) {
10330 	case MPI2_VERSION:
10331 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10332 			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
		/* Use the mpt2sas driver host template for SAS 2.0 HBAs */
10334 		shost = scsi_host_alloc(&mpt2sas_driver_template,
10335 		  sizeof(struct MPT3SAS_ADAPTER));
10336 		if (!shost)
10337 			return -ENODEV;
10338 		ioc = shost_priv(shost);
10339 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10340 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10341 		ioc->id = mpt2_ids++;
10342 		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10343 		switch (pdev->device) {
10344 		case MPI2_MFGPAGE_DEVID_SSS6200:
10345 			ioc->is_warpdrive = 1;
10346 			ioc->hide_ir_msg = 1;
10347 			break;
10348 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10349 		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10350 			ioc->is_mcpu_endpoint = 1;
10351 			break;
10352 		default:
10353 			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10354 			break;
10355 		}
10356 		break;
10357 	case MPI25_VERSION:
10358 	case MPI26_VERSION:
		/* Use the mpt3sas driver host template for SAS 3.0 HBAs */
10360 		shost = scsi_host_alloc(&mpt3sas_driver_template,
10361 		  sizeof(struct MPT3SAS_ADAPTER));
10362 		if (!shost)
10363 			return -ENODEV;
10364 		ioc = shost_priv(shost);
10365 		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10366 		ioc->hba_mpi_version_belonged = hba_mpi_version;
10367 		ioc->id = mpt3_ids++;
10368 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10369 		switch (pdev->device) {
10370 		case MPI26_MFGPAGE_DEVID_SAS3508:
10371 		case MPI26_MFGPAGE_DEVID_SAS3508_1:
10372 		case MPI26_MFGPAGE_DEVID_SAS3408:
10373 		case MPI26_MFGPAGE_DEVID_SAS3516:
10374 		case MPI26_MFGPAGE_DEVID_SAS3516_1:
10375 		case MPI26_MFGPAGE_DEVID_SAS3416:
10376 		case MPI26_MFGPAGE_DEVID_SAS3616:
10377 		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10378 			ioc->is_gen35_ioc = 1;
10379 			break;
10380 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10381 		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10382 			dev_info(&pdev->dev,
10383 			    "HBA is in Configurable Secure mode\n");
10384 			/* fall through */
10385 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10386 		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10387 			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10388 			break;
10389 		default:
10390 			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10391 		}
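		/*
		 * SAS 3.0 C0 and later, and all MPI2.6 IOCs, support multiple
		 * reply post host index registers (combined reply queue mode).
		 */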
10392 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10393 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10394 			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10395 			ioc->combined_reply_queue = 1;
10396 			if (ioc->is_gen35_ioc)
10397 				ioc->combined_reply_index_count =
10398 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10399 			else
10400 				ioc->combined_reply_index_count =
10401 				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10402 		}
10403 		break;
10404 	default:
10405 		return -ENODEV;
10406 	}
10407 
10408 	INIT_LIST_HEAD(&ioc->list);
10409 	spin_lock(&gioc_lock);
10410 	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10411 	spin_unlock(&gioc_lock);
10412 	ioc->shost = shost;
10413 	ioc->pdev = pdev;
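	/* per-adapter copies of the callback indexes registered in scsih_init() */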
10414 	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10415 	ioc->tm_cb_idx = tm_cb_idx;
10416 	ioc->ctl_cb_idx = ctl_cb_idx;
10417 	ioc->base_cb_idx = base_cb_idx;
10418 	ioc->port_enable_cb_idx = port_enable_cb_idx;
10419 	ioc->transport_cb_idx = transport_cb_idx;
10420 	ioc->scsih_cb_idx = scsih_cb_idx;
10421 	ioc->config_cb_idx = config_cb_idx;
10422 	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10423 	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10424 	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10425 	ioc->logging_level = logging_level;
10426 	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10427 	/* misc semaphores and spin locks */
10428 	mutex_init(&ioc->reset_in_progress_mutex);
10429 	/* initializing pci_access_mutex lock */
10430 	mutex_init(&ioc->pci_access_mutex);
10431 	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10432 	spin_lock_init(&ioc->scsi_lookup_lock);
10433 	spin_lock_init(&ioc->sas_device_lock);
10434 	spin_lock_init(&ioc->sas_node_lock);
10435 	spin_lock_init(&ioc->fw_event_lock);
10436 	spin_lock_init(&ioc->raid_device_lock);
10437 	spin_lock_init(&ioc->pcie_device_lock);
10438 	spin_lock_init(&ioc->diag_trigger_lock);
10439 
10440 	INIT_LIST_HEAD(&ioc->sas_device_list);
10441 	INIT_LIST_HEAD(&ioc->sas_device_init_list);
10442 	INIT_LIST_HEAD(&ioc->sas_expander_list);
10443 	INIT_LIST_HEAD(&ioc->enclosure_list);
10444 	INIT_LIST_HEAD(&ioc->pcie_device_list);
10445 	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10446 	INIT_LIST_HEAD(&ioc->fw_event_list);
10447 	INIT_LIST_HEAD(&ioc->raid_device_list);
10448 	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10449 	INIT_LIST_HEAD(&ioc->delayed_tr_list);
10450 	INIT_LIST_HEAD(&ioc->delayed_sc_list);
10451 	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10452 	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10453 	INIT_LIST_HEAD(&ioc->reply_queue_list);
10454 
10455 	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10456 
10457 	/* init shost parameters */
10458 	shost->max_cmd_len = 32;
10459 	shost->max_lun = max_lun;
10460 	shost->transportt = mpt3sas_transport_template;
10461 	shost->unique_id = ioc->id;
10462 
10463 	if (ioc->is_mcpu_endpoint) {
		/* mCPU MPI supports a 64K max IO */
10465 		shost->max_sectors = 128;
10466 		ioc_info(ioc, "The max_sectors value is set to %d\n",
10467 			 shost->max_sectors);
10468 	} else {
10469 		if (max_sectors != 0xFFFF) {
10470 			if (max_sectors < 64) {
10471 				shost->max_sectors = 64;
10472 				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10473 					 max_sectors);
10474 			} else if (max_sectors > 32767) {
10475 				shost->max_sectors = 32767;
				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
10477 					 max_sectors);
10478 			} else {
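				/* clear bit 0 so max_sectors stays even */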
10479 				shost->max_sectors = max_sectors & 0xFFFE;
10480 				ioc_info(ioc, "The max_sectors value is set to %d\n",
10481 					 shost->max_sectors);
10482 			}
10483 		}
10484 	}
10485 	/* register EEDP capabilities with SCSI layer */
10486 	if (prot_mask > 0)
10487 		scsi_host_set_prot(shost, prot_mask);
10488 	else
10489 		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10490 				   | SHOST_DIF_TYPE2_PROTECTION
10491 				   | SHOST_DIF_TYPE3_PROTECTION);
10492 
10493 	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10494 
10495 	/* event thread */
10496 	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10497 	    "fw_event_%s%d", ioc->driver_name, ioc->id);
10498 	ioc->firmware_event_thread = alloc_ordered_workqueue(
10499 	    ioc->firmware_event_name, 0);
10500 	if (!ioc->firmware_event_thread) {
10501 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10502 			__FILE__, __LINE__, __func__);
10503 		rv = -ENODEV;
10504 		goto out_thread_fail;
10505 	}
10506 
10507 	ioc->is_driver_loading = 1;
10508 	if ((mpt3sas_base_attach(ioc))) {
10509 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10510 			__FILE__, __LINE__, __func__);
10511 		rv = -ENODEV;
10512 		goto out_attach_fail;
10513 	}
10514 
10515 	if (ioc->is_warpdrive) {
10516 		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
10517 			ioc->hide_drives = 0;
10518 		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
10519 			ioc->hide_drives = 1;
10520 		else {
10521 			if (mpt3sas_get_num_volumes(ioc))
10522 				ioc->hide_drives = 1;
10523 			else
10524 				ioc->hide_drives = 0;
10525 		}
10526 	} else
10527 		ioc->hide_drives = 0;
10528 
10529 	rv = scsi_add_host(shost, &pdev->dev);
10530 	if (rv) {
10531 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
10532 			__FILE__, __LINE__, __func__);
10533 		goto out_add_shost_fail;
10534 	}
10535 
10536 	scsi_scan_host(shost);
10537 	return 0;
10538 out_add_shost_fail:
10539 	mpt3sas_base_detach(ioc);
10540  out_attach_fail:
10541 	destroy_workqueue(ioc->firmware_event_thread);
10542  out_thread_fail:
10543 	spin_lock(&gioc_lock);
10544 	list_del(&ioc->list);
10545 	spin_unlock(&gioc_lock);
10546 	scsi_host_put(shost);
10547 	return rv;
10548 }
10549 
10550 #ifdef CONFIG_PM
10551 /**
10552  * scsih_suspend - power management suspend main entry point
10553  * @pdev: PCI device struct
 * @state: PM state to change to (usually PCI_D3)
10555  *
10556  * Return: 0 success, anything else error.
10557  */
10558 static int
10559 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10560 {
10561 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10562 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10563 	pci_power_t device_state;
10564 
10565 	mpt3sas_base_stop_watchdog(ioc);
10566 	flush_scheduled_work();
10567 	scsi_block_requests(shost);
10568 	device_state = pci_choose_state(pdev, state);
10569 	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10570 		 pdev, pci_name(pdev), device_state);
10571 
10572 	pci_save_state(pdev);
10573 	mpt3sas_base_free_resources(ioc);
10574 	pci_set_power_state(pdev, device_state);
10575 	return 0;
10576 }
10577 
10578 /**
10579  * scsih_resume - power management resume main entry point
10580  * @pdev: PCI device struct
10581  *
10582  * Return: 0 success, anything else error.
10583  */
10584 static int
10585 scsih_resume(struct pci_dev *pdev)
10586 {
10587 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10588 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10589 	pci_power_t device_state = pdev->current_state;
10590 	int r;
10591 
10592 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10593 		 pdev, pci_name(pdev), device_state);
10594 
10595 	pci_set_power_state(pdev, PCI_D0);
10596 	pci_enable_wake(pdev, PCI_D0, 0);
10597 	pci_restore_state(pdev);
10598 	ioc->pdev = pdev;
10599 	r = mpt3sas_base_map_resources(ioc);
10600 	if (r)
10601 		return r;
10602 
10603 	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10604 	scsi_unblock_requests(shost);
10605 	mpt3sas_base_start_watchdog(ioc);
10606 	return 0;
10607 }
10608 #endif /* CONFIG_PM */
10609 
10610 /**
10611  * scsih_pci_error_detected - Called when a PCI error is detected.
10612  * @pdev: PCI device struct
10613  * @state: PCI channel state
10614  *
10615  * Description: Called when a PCI error is detected.
10616  *
 * Return: PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET, or
 * PCI_ERS_RESULT_DISCONNECT.
10618  */
10619 static pci_ers_result_t
10620 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10621 {
10622 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10623 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10624 
10625 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10626 
10627 	switch (state) {
10628 	case pci_channel_io_normal:
10629 		return PCI_ERS_RESULT_CAN_RECOVER;
10630 	case pci_channel_io_frozen:
10631 		/* Fatal error, prepare for slot reset */
10632 		ioc->pci_error_recovery = 1;
10633 		scsi_block_requests(ioc->shost);
10634 		mpt3sas_base_stop_watchdog(ioc);
10635 		mpt3sas_base_free_resources(ioc);
10636 		return PCI_ERS_RESULT_NEED_RESET;
10637 	case pci_channel_io_perm_failure:
10638 		/* Permanent error, prepare for device removal */
10639 		ioc->pci_error_recovery = 1;
10640 		mpt3sas_base_stop_watchdog(ioc);
10641 		_scsih_flush_running_cmds(ioc);
10642 		return PCI_ERS_RESULT_DISCONNECT;
10643 	}
10644 	return PCI_ERS_RESULT_NEED_RESET;
10645 }
10646 
10647 /**
10648  * scsih_pci_slot_reset - Called when PCI slot has been reset.
10649  * @pdev: PCI device struct
10650  *
10651  * Description: This routine is called by the pci error recovery
10652  * code after the PCI slot has been reset, just before we
10653  * should resume normal operations.
10654  */
10655 static pci_ers_result_t
10656 scsih_pci_slot_reset(struct pci_dev *pdev)
10657 {
10658 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10659 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10660 	int rc;
10661 
10662 	ioc_info(ioc, "PCI error: slot reset callback!!\n");
10663 
10664 	ioc->pci_error_recovery = 0;
10665 	ioc->pdev = pdev;
10666 	pci_restore_state(pdev);
10667 	rc = mpt3sas_base_map_resources(ioc);
10668 	if (rc)
10669 		return PCI_ERS_RESULT_DISCONNECT;
10670 
10671 	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10672 
10673 	ioc_warn(ioc, "hard reset: %s\n",
10674 		 (rc == 0) ? "success" : "failed");
10675 
10676 	if (!rc)
10677 		return PCI_ERS_RESULT_RECOVERED;
10678 	else
10679 		return PCI_ERS_RESULT_DISCONNECT;
10680 }
10681 
10682 /**
10683  * scsih_pci_resume() - resume normal ops after PCI reset
10684  * @pdev: pointer to PCI device
10685  *
 * Called when the error recovery driver tells us that it's
10687  * OK to resume normal operation. Use completion to allow
10688  * halted scsi ops to resume.
10689  */
10690 static void
10691 scsih_pci_resume(struct pci_dev *pdev)
10692 {
10693 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10694 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10695 
10696 	ioc_info(ioc, "PCI error: resume callback!!\n");
10697 
10698 	mpt3sas_base_start_watchdog(ioc);
10699 	scsi_unblock_requests(ioc->shost);
10700 }
10701 
10702 /**
10703  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10704  * @pdev: pointer to PCI device
10705  */
10706 static pci_ers_result_t
10707 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10708 {
10709 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
10710 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10711 
10712 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10713 
10714 	/* TODO - dump whatever for debugging purposes */
10715 
	/* This is called only if scsih_pci_error_detected returns
10717 	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10718 	 * works, no need to reset slot.
10719 	 */
10720 	return PCI_ERS_RESULT_RECOVERED;
10721 }
10722 
10723 /**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
10725  * @sdev: scsi device struct
10726  *
10727  * This is called when a user indicates they would like to enable
 * NCQ command priorities. This works only on SATA devices.
10729  */
10730 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10731 {
10732 	unsigned char *buf;
10733 	bool ncq_prio_supp = false;
10734 
10735 	if (!scsi_device_supports_vpd(sdev))
10736 		return ncq_prio_supp;
10737 
10738 	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10739 	if (!buf)
10740 		return ncq_prio_supp;
10741 
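	/*
	 * VPD page 0x89 (ATA Information) embeds the ATA IDENTIFY DEVICE
	 * data at byte offset 60; byte 213 bit 4 is therefore IDENTIFY
	 * word 76 bit 12, the NCQ priority supported bit.
	 */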
10742 	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10743 		ncq_prio_supp = (buf[213] >> 4) & 1;
10744 
10745 	kfree(buf);
10746 	return ncq_prio_supp;
10747 }
10748 /*
10749  * The pci device ids are defined in mpi/mpi2_cnfg.h.
10750  */
10751 static const struct pci_device_id mpt3sas_pci_table[] = {
10752 	/* Spitfire ~ 2004 */
10753 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10754 		PCI_ANY_ID, PCI_ANY_ID },
10755 	/* Falcon ~ 2008 */
10756 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10757 		PCI_ANY_ID, PCI_ANY_ID },
10758 	/* Liberator ~ 2108 */
10759 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10760 		PCI_ANY_ID, PCI_ANY_ID },
10761 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10762 		PCI_ANY_ID, PCI_ANY_ID },
10763 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10764 		PCI_ANY_ID, PCI_ANY_ID },
10765 	/* Meteor ~ 2116 */
10766 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10767 		PCI_ANY_ID, PCI_ANY_ID },
10768 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10769 		PCI_ANY_ID, PCI_ANY_ID },
10770 	/* Thunderbolt ~ 2208 */
10771 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10772 		PCI_ANY_ID, PCI_ANY_ID },
10773 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10774 		PCI_ANY_ID, PCI_ANY_ID },
10775 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10776 		PCI_ANY_ID, PCI_ANY_ID },
10777 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10778 		PCI_ANY_ID, PCI_ANY_ID },
10779 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10780 		PCI_ANY_ID, PCI_ANY_ID },
10781 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10782 		PCI_ANY_ID, PCI_ANY_ID },
10783 	/* Mustang ~ 2308 */
10784 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10785 		PCI_ANY_ID, PCI_ANY_ID },
10786 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10787 		PCI_ANY_ID, PCI_ANY_ID },
10788 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10789 		PCI_ANY_ID, PCI_ANY_ID },
10790 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
10791 		PCI_ANY_ID, PCI_ANY_ID },
10792 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
10793 		PCI_ANY_ID, PCI_ANY_ID },
10794 	/* SSS6200 */
10795 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10796 		PCI_ANY_ID, PCI_ANY_ID },
10797 	/* Fury ~ 3004 and 3008 */
10798 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10799 		PCI_ANY_ID, PCI_ANY_ID },
10800 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10801 		PCI_ANY_ID, PCI_ANY_ID },
10802 	/* Invader ~ 3108 */
10803 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10804 		PCI_ANY_ID, PCI_ANY_ID },
10805 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10806 		PCI_ANY_ID, PCI_ANY_ID },
10807 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10808 		PCI_ANY_ID, PCI_ANY_ID },
10809 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10810 		PCI_ANY_ID, PCI_ANY_ID },
10811 	/* Cutlass ~ 3216 and 3224 */
10812 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10813 		PCI_ANY_ID, PCI_ANY_ID },
10814 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10815 		PCI_ANY_ID, PCI_ANY_ID },
10816 	/* Intruder ~ 3316 and 3324 */
10817 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10818 		PCI_ANY_ID, PCI_ANY_ID },
10819 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10820 		PCI_ANY_ID, PCI_ANY_ID },
10821 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10822 		PCI_ANY_ID, PCI_ANY_ID },
10823 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10824 		PCI_ANY_ID, PCI_ANY_ID },
10825 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10826 		PCI_ANY_ID, PCI_ANY_ID },
10827 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10828 		PCI_ANY_ID, PCI_ANY_ID },
10829 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10830 		PCI_ANY_ID, PCI_ANY_ID },
10831 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10832 		PCI_ANY_ID, PCI_ANY_ID },
10833 	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
10834 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10835 		PCI_ANY_ID, PCI_ANY_ID },
10836 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10837 		PCI_ANY_ID, PCI_ANY_ID },
10838 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10839 		PCI_ANY_ID, PCI_ANY_ID },
10840 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10841 		PCI_ANY_ID, PCI_ANY_ID },
10842 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10843 		PCI_ANY_ID, PCI_ANY_ID },
10844 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10845 		PCI_ANY_ID, PCI_ANY_ID },
10846 	/* Mercator ~ 3616*/
10847 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10848 		PCI_ANY_ID, PCI_ANY_ID },
10849 
10850 	/* Aero SI 0x00E1 Configurable Secure
10851 	 * 0x00E2 Hard Secure
10852 	 */
10853 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10854 		PCI_ANY_ID, PCI_ANY_ID },
10855 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10856 		PCI_ANY_ID, PCI_ANY_ID },
10857 
10858 	/* Atlas PCIe Switch Management Port */
10859 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
10860 		PCI_ANY_ID, PCI_ANY_ID },
10861 
10862 	/* Sea SI 0x00E5 Configurable Secure
10863 	 * 0x00E6 Hard Secure
10864 	 */
10865 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10866 		PCI_ANY_ID, PCI_ANY_ID },
10867 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10868 		PCI_ANY_ID, PCI_ANY_ID },
10869 
10870 	{0}     /* Terminating entry */
10871 };
10872 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10873 
10874 static struct pci_error_handlers _mpt3sas_err_handler = {
10875 	.error_detected	= scsih_pci_error_detected,
10876 	.mmio_enabled	= scsih_pci_mmio_enabled,
10877 	.slot_reset	= scsih_pci_slot_reset,
10878 	.resume		= scsih_pci_resume,
10879 };
10880 
10881 static struct pci_driver mpt3sas_driver = {
10882 	.name		= MPT3SAS_DRIVER_NAME,
10883 	.id_table	= mpt3sas_pci_table,
10884 	.probe		= _scsih_probe,
10885 	.remove		= scsih_remove,
10886 	.shutdown	= scsih_shutdown,
10887 	.err_handler	= &_mpt3sas_err_handler,
10888 #ifdef CONFIG_PM
10889 	.suspend	= scsih_suspend,
10890 	.resume		= scsih_resume,
10891 #endif
10892 };
10893 
10894 /**
10895  * scsih_init - main entry point for this driver.
10896  *
10897  * Return: 0 success, anything else error.
10898  */
10899 static int
10900 scsih_init(void)
10901 {
10902 	mpt2_ids = 0;
10903 	mpt3_ids = 0;
10904 
10905 	mpt3sas_base_initialize_callback_handler();
10906 
	/* queuecommand callback handler */
10908 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
10909 
10910 	/* task management callback handler */
10911 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
10912 
10913 	/* base internal commands callback handler */
10914 	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
10915 	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
10916 	    mpt3sas_port_enable_done);
10917 
10918 	/* transport internal commands callback handler */
10919 	transport_cb_idx = mpt3sas_base_register_callback_handler(
10920 	    mpt3sas_transport_done);
10921 
10922 	/* scsih internal commands callback handler */
10923 	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
10924 
10925 	/* configuration page API internal commands callback handler */
10926 	config_cb_idx = mpt3sas_base_register_callback_handler(
10927 	    mpt3sas_config_done);
10928 
10929 	/* ctl module callback handler */
10930 	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
10931 
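	/* target reset TM (device removal) internal commands callback handler */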
10932 	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
10933 	    _scsih_tm_tr_complete);
10934 
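	/* volume target reset TM internal commands callback handler */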
10935 	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
10936 	    _scsih_tm_volume_tr_complete);
10937 
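	/* SAS IO unit control internal commands callback handler */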
10938 	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
10939 	    _scsih_sas_control_complete);
10940 
10941 	return 0;
10942 }
10943 
10944 /**
10945  * scsih_exit - exit point for this driver (when it is a module).
10948  */
10949 static void
10950 scsih_exit(void)
10951 {
10952 
10953 	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
10954 	mpt3sas_base_release_callback_handler(tm_cb_idx);
10955 	mpt3sas_base_release_callback_handler(base_cb_idx);
10956 	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
10957 	mpt3sas_base_release_callback_handler(transport_cb_idx);
10958 	mpt3sas_base_release_callback_handler(scsih_cb_idx);
10959 	mpt3sas_base_release_callback_handler(config_cb_idx);
10960 	mpt3sas_base_release_callback_handler(ctl_cb_idx);
10961 
10962 	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
10963 	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
10964 	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
10965 
	/* raid transport support */
10967 	if (hbas_to_enumerate != 1)
10968 		raid_class_release(mpt3sas_raid_template);
10969 	if (hbas_to_enumerate != 2)
10970 		raid_class_release(mpt2sas_raid_template);
10971 	sas_release_transport(mpt3sas_transport_template);
10972 }
10973 
10974 /**
10975  * _mpt3sas_init - main entry point for this driver.
10976  *
10977  * Return: 0 success, anything else error.
10978  */
10979 static int __init
10980 _mpt3sas_init(void)
10981 {
10982 	int error;
10983 
10984 	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
10985 					MPT3SAS_DRIVER_VERSION);
10986 
10987 	mpt3sas_transport_template =
10988 	    sas_attach_transport(&mpt3sas_transport_functions);
10989 	if (!mpt3sas_transport_template)
10990 		return -ENODEV;
10991 
	/* No need to attach the mpt3sas raid functions template
	 * if the hbas_to_enumerate value is one.
	 */
10995 	if (hbas_to_enumerate != 1) {
10996 		mpt3sas_raid_template =
10997 				raid_class_attach(&mpt3sas_raid_functions);
10998 		if (!mpt3sas_raid_template) {
10999 			sas_release_transport(mpt3sas_transport_template);
11000 			return -ENODEV;
11001 		}
11002 	}
11003 
	/* No need to attach the mpt2sas raid functions template
	 * if the hbas_to_enumerate value is two.
	 */
11007 	if (hbas_to_enumerate != 2) {
11008 		mpt2sas_raid_template =
11009 				raid_class_attach(&mpt2sas_raid_functions);
11010 		if (!mpt2sas_raid_template) {
11011 			sas_release_transport(mpt3sas_transport_template);
11012 			return -ENODEV;
11013 		}
11014 	}
11015 
11016 	error = scsih_init();
11017 	if (error) {
11018 		scsih_exit();
11019 		return error;
11020 	}
11021 
11022 	mpt3sas_ctl_init(hbas_to_enumerate);
11023 
11024 	error = pci_register_driver(&mpt3sas_driver);
11025 	if (error)
11026 		scsih_exit();
11027 
11028 	return error;
11029 }
11030 
11031 /**
11032  * _mpt3sas_exit - exit point for this driver (when it is a module).
11033  *
11034  */
11035 static void __exit
11036 _mpt3sas_exit(void)
11037 {
11038 	pr_info("mpt3sas version %s unloading\n",
11039 				MPT3SAS_DRIVER_VERSION);
11040 
11041 	mpt3sas_ctl_exit(hbas_to_enumerate);
11042 
11043 	pci_unregister_driver(&mpt3sas_driver);
11044 
11045 	scsih_exit();
11046 }
11047 
11048 module_init(_mpt3sas_init);
11049 module_exit(_mpt3sas_exit);
11050