1 /* 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 3 * 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c 5 * Copyright (C) 2012-2014 LSI Corporation 6 * Copyright (C) 2013-2014 Avago Technologies 7 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

/*
 * Driver-internal virtual channel numbers used to distinguish RAID volume
 * and PCIe device objects (see _scsih_determine_boot_device()).
 */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

/* forward declarations */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/*
 * Callback indexes registered with the base driver.  0xFF (the u8 view of
 * -1) means "not yet assigned" — NOTE(review): assignment happens outside
 * this chunk, presumably at module init.
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* instance counters for SAS 2.0 and SAS 3.0 generation HBAs */
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
static u32 logging_level;
/* the matching module_param_call() lives next to _scsih_set_debug_level() */
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
		1 - enumerates only SAS 2.0 generation HBAs\n \
		2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Any combination of these bits may be set.
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");

static int multipath_on_hba = -1;
module_param(multipath_on_hba, int, 0);
MODULE_PARM_DESC(multipath_on_hba,
	"Multipath support to add same target device\n\t\t"
	"as many times as it is visible to HBA from various paths\n\t\t"
	"(by default:\n\t\t"
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");

static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable,
	"Shared host tagset enable/disable Default: enable(1)");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;


/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};
/*
 * Driver-internal pseudo event codes queued as struct fw_event_work events.
 * NOTE(review): values look chosen to stay clear of firmware MPI2_EVENT_*
 * codes — confirm against mpi2_ioc.h.
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;

	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	char event_data[] __aligned(4);
};

/* kref release callback: frees the whole event, payload included */
static void fw_event_work_free(struct kref *r)
{
	kfree(container_of(r, struct fw_event_work, refcount));
}

/* take an additional reference on a firmware event */
static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}

/* drop a reference; frees the event when the last reference goes away */
static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}

/*
 * Allocate a firmware event with @len bytes of trailing payload, refcount
 * initialized to one.  GFP_ATOMIC: NOTE(review) — presumably callable from
 * atomic/interrupt context; confirm against callers.
 */
static struct fw_event_work *alloc_fw_event_work(int len)
{
	struct fw_event_work *fw_event;

	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
	if (!fw_event)
		return NULL;

	kref_init(&fw_event->refcount);
	return fw_event;
}

/**
 * struct _scsi_io_transfer - scsi io transfer
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};

/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
310 */ 311 static int 312 _scsih_set_debug_level(const char *val, const struct kernel_param *kp) 313 { 314 int ret = param_set_int(val, kp); 315 struct MPT3SAS_ADAPTER *ioc; 316 317 if (ret) 318 return ret; 319 320 pr_info("setting logging_level(0x%08x)\n", logging_level); 321 spin_lock(&gioc_lock); 322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) 323 ioc->logging_level = logging_level; 324 spin_unlock(&gioc_lock); 325 return 0; 326 } 327 module_param_call(logging_level, _scsih_set_debug_level, param_get_int, 328 &logging_level, 0644); 329 330 /** 331 * _scsih_srch_boot_sas_address - search based on sas_address 332 * @sas_address: sas address 333 * @boot_device: boot device object from bios page 2 334 * 335 * Return: 1 when there's a match, 0 means no match. 336 */ 337 static inline int 338 _scsih_srch_boot_sas_address(u64 sas_address, 339 Mpi2BootDeviceSasWwid_t *boot_device) 340 { 341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; 342 } 343 344 /** 345 * _scsih_srch_boot_device_name - search based on device name 346 * @device_name: device name specified in INDENTIFY fram 347 * @boot_device: boot device object from bios page 2 348 * 349 * Return: 1 when there's a match, 0 means no match. 350 */ 351 static inline int 352 _scsih_srch_boot_device_name(u64 device_name, 353 Mpi2BootDeviceDeviceName_t *boot_device) 354 { 355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; 356 } 357 358 /** 359 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot 360 * @enclosure_logical_id: enclosure logical id 361 * @slot_number: slot number 362 * @boot_device: boot device object from bios page 2 363 * 364 * Return: 1 when there's a match, 0 means no match. 
365 */ 366 static inline int 367 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, 368 Mpi2BootDeviceEnclosureSlot_t *boot_device) 369 { 370 return (enclosure_logical_id == le64_to_cpu(boot_device-> 371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> 372 SlotNumber)) ? 1 : 0; 373 } 374 375 /** 376 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided 377 * port number from port list 378 * @ioc: per adapter object 379 * @port_id: port number 380 * @bypass_dirty_port_flag: when set look the matching hba port entry even 381 * if hba port entry is marked as dirty. 382 * 383 * Search for hba port entry corresponding to provided port number, 384 * if available return port object otherwise return NULL. 385 */ 386 struct hba_port * 387 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, 388 u8 port_id, u8 bypass_dirty_port_flag) 389 { 390 struct hba_port *port, *port_next; 391 392 /* 393 * When multipath_on_hba is disabled then 394 * search the hba_port entry using default 395 * port id i.e. 255 396 */ 397 if (!ioc->multipath_on_hba) 398 port_id = MULTIPATH_DISABLED_PORT_ID; 399 400 list_for_each_entry_safe(port, port_next, 401 &ioc->port_table_list, list) { 402 if (port->port_id != port_id) 403 continue; 404 if (bypass_dirty_port_flag) 405 return port; 406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT) 407 continue; 408 return port; 409 } 410 411 /* 412 * Allocate hba_port object for default port id (i.e. 255) 413 * when multipath_on_hba is disabled for the HBA. 414 * And add this object to port_table_list. 
415 */ 416 if (!ioc->multipath_on_hba) { 417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); 418 if (!port) 419 return NULL; 420 421 port->port_id = port_id; 422 ioc_info(ioc, 423 "hba_port entry: %p, port: %d is added to hba_port list\n", 424 port, port->port_id); 425 list_add_tail(&port->list, 426 &ioc->port_table_list); 427 return port; 428 } 429 return NULL; 430 } 431 432 /** 433 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number 434 * @ioc: per adapter object 435 * @port: hba_port object 436 * @phy: phy number 437 * 438 * Return virtual_phy object corresponding to phy number. 439 */ 440 struct virtual_phy * 441 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, 442 struct hba_port *port, u32 phy) 443 { 444 struct virtual_phy *vphy, *vphy_next; 445 446 if (!port->vphys_mask) 447 return NULL; 448 449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { 450 if (vphy->phy_mask & (1 << phy)) 451 return vphy; 452 } 453 return NULL; 454 } 455 456 /** 457 * _scsih_is_boot_device - search for matching boot device. 458 * @sas_address: sas address 459 * @device_name: device name specified in INDENTIFY fram 460 * @enclosure_logical_id: enclosure logical id 461 * @slot: slot number 462 * @form: specifies boot device form 463 * @boot_device: boot device object from bios page 2 464 * 465 * Return: 1 when there's a match, 0 means no match. 
/**
 * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
	u64 enclosure_logical_id, u16 slot, u8 form,
	Mpi2BiosPage2BootDevice_t *boot_device)
{
	int rc = 0;

	/* dispatch on the BIOS-specified addressing form; a zero key for the
	 * selected form means "no device", so fall through with rc = 0 */
	switch (form) {
	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
		if (!sas_address)
			break;
		rc = _scsih_srch_boot_sas_address(
		    sas_address, &boot_device->SasWwid);
		break;
	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
		if (!enclosure_logical_id)
			break;
		rc = _scsih_srch_boot_encl_slot(
		    enclosure_logical_id,
		    slot, &boot_device->EnclosureSlot);
		break;
	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
		if (!device_name)
			break;
		rc = _scsih_srch_boot_device_name(
		    device_name, &boot_device->DeviceName);
		break;
	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
		break;
	}

	return rc;
}

/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address
 *
 * Return: 0 success, non-zero when failure
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		   MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}

/**
 * _scsih_determine_boot_device - determine boot device.
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current. This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* extract the matching keys; @channel selects how @device is typed */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* each slot records only the first matching device (primary,
	 * alternate, current — checked independently) */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}

/*
 * Return the target's cached sas_device with an extra reference taken.
 * Caller must hold ioc->sas_device_lock.
 */
static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;

	assert_spin_locked(&ioc->sas_device_lock);

	ret = tgt_priv->sas_dev;
	if (ret)
		sas_device_get(ret);

	return ret;
}

/* locked wrapper around __mpt3sas_get_sdev_from_target() */
static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return ret;
}

/*
 * Return the target's cached pcie_device with an extra reference taken.
 * Caller must hold ioc->pcie_device_lock.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;

	assert_spin_locked(&ioc->pcie_device_lock);

	ret = tgt_priv->pcie_dev;
	if (ret)
		pcie_device_get(ret);

	return ret;
}

/**
 * mpt3sas_get_pdev_from_target - pcie device search
699 */ 700 static struct _pcie_device * 701 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, 702 struct MPT3SAS_TARGET *tgt_priv) 703 { 704 struct _pcie_device *ret; 705 unsigned long flags; 706 707 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv); 709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 710 711 return ret; 712 } 713 714 715 /** 716 * __mpt3sas_get_sdev_by_rphy - sas device search 717 * @ioc: per adapter object 718 * @rphy: sas_rphy pointer 719 * 720 * Context: This function will acquire ioc->sas_device_lock and will release 721 * before returning the sas_device object. 722 * 723 * This searches for sas_device from rphy object 724 * then return sas_device object. 725 */ 726 struct _sas_device * 727 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, 728 struct sas_rphy *rphy) 729 { 730 struct _sas_device *sas_device; 731 732 assert_spin_locked(&ioc->sas_device_lock); 733 734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 735 if (sas_device->rphy != rphy) 736 continue; 737 sas_device_get(sas_device); 738 return sas_device; 739 } 740 741 sas_device = NULL; 742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { 743 if (sas_device->rphy != rphy) 744 continue; 745 sas_device_get(sas_device); 746 return sas_device; 747 } 748 749 return NULL; 750 } 751 752 /** 753 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided 754 * sas address from sas_device_list list 755 * @ioc: per adapter object 756 * @sas_address: device sas address 757 * @port: port number 758 * 759 * Search for _sas_device object corresponding to provided sas address, 760 * if available return _sas_device object address otherwise return NULL. 
761 */ 762 struct _sas_device * 763 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, 764 u64 sas_address, struct hba_port *port) 765 { 766 struct _sas_device *sas_device; 767 768 if (!port) 769 return NULL; 770 771 assert_spin_locked(&ioc->sas_device_lock); 772 773 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 774 if (sas_device->sas_address != sas_address) 775 continue; 776 if (sas_device->port != port) 777 continue; 778 sas_device_get(sas_device); 779 return sas_device; 780 } 781 782 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { 783 if (sas_device->sas_address != sas_address) 784 continue; 785 if (sas_device->port != port) 786 continue; 787 sas_device_get(sas_device); 788 return sas_device; 789 } 790 791 return NULL; 792 } 793 794 /** 795 * mpt3sas_get_sdev_by_addr - sas device search 796 * @ioc: per adapter object 797 * @sas_address: sas address 798 * @port: hba port entry 799 * Context: Calling function should acquire ioc->sas_device_lock 800 * 801 * This searches for sas_device based on sas_address & port number, 802 * then return sas_device object. 
/**
 * mpt3sas_get_sdev_by_addr - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 * Context: Acquires ioc->sas_device_lock internally; callers must NOT
 *	    already hold it.
 *
 * This searches for sas_device based on sas_address & port number,
 * then return sas_device object.
 */
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/*
 * Handle-based lookup over both device lists; returns the device with a
 * reference taken, or NULL.  Caller must hold ioc->sas_device_lock.
 */
static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	return NULL;

 found_device:
	sas_device_get(sas_device);
	return sas_device;
}

/**
 * mpt3sas_get_sdev_by_handle - sas device search
/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Acquires ioc->sas_device_lock internally; callers must NOT
 *	    already hold it.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object.
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Prints through sdev_printk() when @sdev is set, else starget_printk()
 * when @starget is set, else ioc_info(); only fields the device actually
 * reports (enclosure handle, connector name, chassis slot) are printed.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}

/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* skip while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	/* unlink under the lock (dropping the list's reference), then do the
	 * heavier teardown outside the lock using the lookup reference */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Return nothing.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	/* skip while host reset is in progress */
	if (ioc->shost_recovery)
		return;

	/* same unlink-then-teardown pattern as
	 * _scsih_device_remove_by_handle() */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * _scsih_sas_device_add - insert sas_device to the list.
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* the list holds its own reference */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden raid component: do not expose through the transport layer */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	/* on transport registration failure undo the list insertion above */
	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* The init list holds its own reference on the device. */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	/* Evaluate whether this device is the BIOS-requested boot device. */
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/*
 * __mpt3sas_get_pdev_by_wwid - lookup a pcie_device by wwid on both the
 * main and init lists.  Caller must hold ioc->pcie_device_lock; on success
 * a reference is taken on the returned object.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	return NULL;

 found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then return pcie_device object.
1127 */ 1128 static struct _pcie_device * 1129 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1130 { 1131 struct _pcie_device *pcie_device; 1132 unsigned long flags; 1133 1134 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1135 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 1136 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1137 1138 return pcie_device; 1139 } 1140 1141 1142 static struct _pcie_device * 1143 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, 1144 int channel) 1145 { 1146 struct _pcie_device *pcie_device; 1147 1148 assert_spin_locked(&ioc->pcie_device_lock); 1149 1150 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1151 if (pcie_device->id == id && pcie_device->channel == channel) 1152 goto found_device; 1153 1154 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1155 if (pcie_device->id == id && pcie_device->channel == channel) 1156 goto found_device; 1157 1158 return NULL; 1159 1160 found_device: 1161 pcie_device_get(pcie_device); 1162 return pcie_device; 1163 } 1164 1165 static struct _pcie_device * 1166 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1167 { 1168 struct _pcie_device *pcie_device; 1169 1170 assert_spin_locked(&ioc->pcie_device_lock); 1171 1172 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1173 if (pcie_device->handle == handle) 1174 goto found_device; 1175 1176 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1177 if (pcie_device->handle == handle) 1178 goto found_device; 1179 1180 return NULL; 1181 1182 found_device: 1183 pcie_device_get(pcie_device); 1184 return pcie_device; 1185 } 1186 1187 1188 /** 1189 * mpt3sas_get_pdev_by_handle - pcie device search 1190 * @ioc: per adapter object 1191 * @handle: Firmware device handle 1192 * 1193 * Context: This function will acquire ioc->pcie_device_lock and will release 1194 * before returning the pcie_device object. 
 *
 * This searches for pcie_device based on handle, then return pcie_device
 * object.
 */
struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Reference is taken by the locked helper; caller must put() it. */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
 * @ioc: per adapter object
 * Context: This function will acquire ioc->pcie_device_lock
 *
 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
 * which has reported maximum among all available NVMe drives.
 * Minimum max_shutdown_latency will be six seconds.
 */
static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	/* Floor value used when no drive reports a larger latency. */
	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (pcie_device->shutdown_latency) {
			if (shutdown_latency < pcie_device->shutdown_latency)
				shutdown_latency =
				    pcie_device->shutdown_latency;
		}
	}
	ioc->max_shutdown_latency = shutdown_latency;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* Drop the reference that the list was holding. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}


/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks the device up by firmware handle, unlinks it from the list under
 * the lock, then removes it from the SCSI mid-layer outside the lock.
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* Lookup takes its own reference on success. */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* Release the list's reference; lookup ref remains. */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* Drop the lookup reference. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}

/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* The list holds its own reference on the device. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		/* Blocked devices stay on the list but are not exposed. */
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		/* Mid-layer registration failed: undo the list insertion. */
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* The init list holds its own reference on the device. */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	/* Blocked devices cannot be a boot device candidate. */
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then return raid_device
 * object.
1431 */ 1432 static struct _raid_device * 1433 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) 1434 { 1435 struct _raid_device *raid_device, *r; 1436 1437 r = NULL; 1438 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1439 if (raid_device->id == id && raid_device->channel == channel) { 1440 r = raid_device; 1441 goto out; 1442 } 1443 } 1444 1445 out: 1446 return r; 1447 } 1448 1449 /** 1450 * mpt3sas_raid_device_find_by_handle - raid device search 1451 * @ioc: per adapter object 1452 * @handle: sas device handle (assigned by firmware) 1453 * Context: Calling function should acquire ioc->raid_device_lock 1454 * 1455 * This searches for raid_device based on handle, then return raid_device 1456 * object. 1457 */ 1458 struct _raid_device * 1459 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1460 { 1461 struct _raid_device *raid_device, *r; 1462 1463 r = NULL; 1464 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1465 if (raid_device->handle != handle) 1466 continue; 1467 r = raid_device; 1468 goto out; 1469 } 1470 1471 out: 1472 return r; 1473 } 1474 1475 /** 1476 * _scsih_raid_device_find_by_wwid - raid device search 1477 * @ioc: per adapter object 1478 * @wwid: ? 1479 * Context: Calling function should acquire ioc->raid_device_lock 1480 * 1481 * This searches for raid_device based on wwid, then return raid_device 1482 * object. 
1483 */ 1484 static struct _raid_device * 1485 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1486 { 1487 struct _raid_device *raid_device, *r; 1488 1489 r = NULL; 1490 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1491 if (raid_device->wwid != wwid) 1492 continue; 1493 r = raid_device; 1494 goto out; 1495 } 1496 1497 out: 1498 return r; 1499 } 1500 1501 /** 1502 * _scsih_raid_device_add - add raid_device object 1503 * @ioc: per adapter object 1504 * @raid_device: raid_device object 1505 * 1506 * This is added to the raid_device_list link list. 1507 */ 1508 static void 1509 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, 1510 struct _raid_device *raid_device) 1511 { 1512 unsigned long flags; 1513 1514 dewtprintk(ioc, 1515 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", 1516 __func__, 1517 raid_device->handle, (u64)raid_device->wwid)); 1518 1519 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1520 list_add_tail(&raid_device->list, &ioc->raid_device_list); 1521 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1522 } 1523 1524 /** 1525 * _scsih_raid_device_remove - delete raid_device object 1526 * @ioc: per adapter object 1527 * @raid_device: raid_device object 1528 * 1529 */ 1530 static void 1531 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, 1532 struct _raid_device *raid_device) 1533 { 1534 unsigned long flags; 1535 1536 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1537 list_del(&raid_device->list); 1538 kfree(raid_device); 1539 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1540 } 1541 1542 /** 1543 * mpt3sas_scsih_expander_find_by_handle - expander device search 1544 * @ioc: per adapter object 1545 * @handle: expander handle (assigned by firmware) 1546 * Context: Calling function should acquire ioc->sas_device_lock 1547 * 1548 * This searches for expander device based on handle, then returns the 1549 * sas_node object. 
1550 */ 1551 struct _sas_node * 1552 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1553 { 1554 struct _sas_node *sas_expander, *r; 1555 1556 r = NULL; 1557 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1558 if (sas_expander->handle != handle) 1559 continue; 1560 r = sas_expander; 1561 goto out; 1562 } 1563 out: 1564 return r; 1565 } 1566 1567 /** 1568 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search 1569 * @ioc: per adapter object 1570 * @handle: enclosure handle (assigned by firmware) 1571 * Context: Calling function should acquire ioc->sas_device_lock 1572 * 1573 * This searches for enclosure device based on handle, then returns the 1574 * enclosure object. 1575 */ 1576 static struct _enclosure_node * 1577 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1578 { 1579 struct _enclosure_node *enclosure_dev, *r; 1580 1581 r = NULL; 1582 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { 1583 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) 1584 continue; 1585 r = enclosure_dev; 1586 goto out; 1587 } 1588 out: 1589 return r; 1590 } 1591 /** 1592 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1593 * @ioc: per adapter object 1594 * @sas_address: sas address 1595 * @port: hba port entry 1596 * Context: Calling function should acquire ioc->sas_node_lock. 1597 * 1598 * This searches for expander device based on sas_address & port number, 1599 * then returns the sas_node object. 
1600 */ 1601 struct _sas_node * 1602 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 1603 u64 sas_address, struct hba_port *port) 1604 { 1605 struct _sas_node *sas_expander, *r = NULL; 1606 1607 if (!port) 1608 return r; 1609 1610 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1611 if (sas_expander->sas_address != sas_address) 1612 continue; 1613 if (sas_expander->port != port) 1614 continue; 1615 r = sas_expander; 1616 goto out; 1617 } 1618 out: 1619 return r; 1620 } 1621 1622 /** 1623 * _scsih_expander_node_add - insert expander device to the list. 1624 * @ioc: per adapter object 1625 * @sas_expander: the sas_device object 1626 * Context: This function will acquire ioc->sas_node_lock. 1627 * 1628 * Adding new object to the ioc->sas_expander_list. 1629 */ 1630 static void 1631 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, 1632 struct _sas_node *sas_expander) 1633 { 1634 unsigned long flags; 1635 1636 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1637 list_add_tail(&sas_expander->list, &ioc->sas_expander_list); 1638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1639 } 1640 1641 /** 1642 * _scsih_is_end_device - determines if device is an end device 1643 * @device_info: bitfield providing information about the device. 1644 * Context: none 1645 * 1646 * Return: 1 if end device. 1647 */ 1648 static int 1649 _scsih_is_end_device(u32 device_info) 1650 { 1651 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && 1652 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | 1653 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | 1654 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) 1655 return 1; 1656 else 1657 return 0; 1658 } 1659 1660 /** 1661 * _scsih_is_nvme_pciescsi_device - determines if 1662 * device is an pcie nvme/scsi device 1663 * @device_info: bitfield providing information about the device. 1664 * Context: none 1665 * 1666 * Returns 1 if device is pcie device type nvme/scsi. 
1667 */ 1668 static int 1669 _scsih_is_nvme_pciescsi_device(u32 device_info) 1670 { 1671 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) 1672 == MPI26_PCIE_DEVINFO_NVME) || 1673 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) 1674 == MPI26_PCIE_DEVINFO_SCSI)) 1675 return 1; 1676 else 1677 return 0; 1678 } 1679 1680 /** 1681 * _scsih_scsi_lookup_find_by_target - search for matching channel:id 1682 * @ioc: per adapter object 1683 * @id: target id 1684 * @channel: channel 1685 * Context: This function will acquire ioc->scsi_lookup_lock. 1686 * 1687 * This will search for a matching channel:id in the scsi_lookup array, 1688 * returning 1 if found. 1689 */ 1690 static u8 1691 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, 1692 int channel) 1693 { 1694 int smid; 1695 struct scsi_cmnd *scmd; 1696 1697 for (smid = 1; 1698 smid <= ioc->shost->can_queue; smid++) { 1699 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 1700 if (!scmd) 1701 continue; 1702 if (scmd->device->id == id && 1703 scmd->device->channel == channel) 1704 return 1; 1705 } 1706 return 0; 1707 } 1708 1709 /** 1710 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun 1711 * @ioc: per adapter object 1712 * @id: target id 1713 * @lun: lun number 1714 * @channel: channel 1715 * Context: This function will acquire ioc->scsi_lookup_lock. 1716 * 1717 * This will search for a matching channel:id:lun in the scsi_lookup array, 1718 * returning 1 if found. 
 */
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
	unsigned int lun, int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel &&
		    scmd->device->lun == lun)
			return 1;
	}
	return 0;
}

/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;

	/* Internal SCSIIO smids at the top of the range carry no scmd. */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* Rebuild the blk-mq unique tag: hwqueue number | tag. */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Tracker not armed means the command is not ours. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}

/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not subject to the SATA queue-depth cap. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		/* Drop the lookup reference. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	/* Untagged devices can only have a single command in flight. */
	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}

/**
 * mpt3sas_scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing.
 */
void
mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* With enable_sdev_max_qd, always expand to the host maximum. */
	if (ioc->enable_sdev_max_qd)
		qdepth = shost->can_queue;

	scsih_change_queue_depth(sdev, qdepth);
}

/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates the per-target private data and binds it to the matching
 * raid volume, pcie device, or sas/sata device object.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/*
		 * The lookup reference is kept for the lifetime of the
		 * target (via sas_target_priv_data->pcie_dev) and released
		 * in scsih_target_destroy().
		 */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/*
	 * The lookup reference is kept for the lifetime of the target
	 * (via sas_target_priv_data->sas_dev) and released in
	 * scsih_target_destroy().
	 */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unbinds the target from its device object and frees the per-target
 * private data allocated in scsih_target_alloc().
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc();
			 * one put() for this lookup, one for the reference
			 * held since target_alloc.
			 */
			sas_target_priv_data->pcie_dev = NULL;
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc();
		 * one put() for this lookup, one for the reference
		 * held since target_alloc.
		 */
		sas_target_priv_data->sas_dev = NULL;
		sas_device_put(sas_device);

		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * scsih_slave_alloc - 
device add routine
 * @sdev: scsi device struct
 *
 * Allocates the per-LUN private data and attaches it to the device;
 * also records the sdev/starget back-pointers on the matching device
 * object.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* Hide raw RAID members from upper-layer drivers. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		/* Drop the lookup reference. */
		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		/* Drop the lookup reference. */
		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}

/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* Clear the back-pointer only when the last LUN goes away. */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		/* Drop the lookup reference. */
		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device
= __mpt3sas_get_sdev_from_target(ioc, 2151 sas_target_priv_data); 2152 if (sas_device && !sas_target_priv_data->num_luns) 2153 sas_device->starget = NULL; 2154 2155 if (sas_device) 2156 sas_device_put(sas_device); 2157 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2158 } 2159 2160 kfree(sdev->hostdata); 2161 sdev->hostdata = NULL; 2162 } 2163 2164 /** 2165 * _scsih_display_sata_capabilities - sata capabilities 2166 * @ioc: per adapter object 2167 * @handle: device handle 2168 * @sdev: scsi device struct 2169 */ 2170 static void 2171 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, 2172 u16 handle, struct scsi_device *sdev) 2173 { 2174 Mpi2ConfigReply_t mpi_reply; 2175 Mpi2SasDevicePage0_t sas_device_pg0; 2176 u32 ioc_status; 2177 u16 flags; 2178 u32 device_info; 2179 2180 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 2181 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 2182 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2183 __FILE__, __LINE__, __func__); 2184 return; 2185 } 2186 2187 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 2188 MPI2_IOCSTATUS_MASK; 2189 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2190 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2191 __FILE__, __LINE__, __func__); 2192 return; 2193 } 2194 2195 flags = le16_to_cpu(sas_device_pg0.Flags); 2196 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 2197 2198 sdev_printk(KERN_INFO, sdev, 2199 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " 2200 "sw_preserve(%s)\n", 2201 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", 2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", 2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : 2204 "n", 2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", 2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", 2207 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? 
"y" : "n"); 2208 } 2209 2210 /* 2211 * raid transport support - 2212 * Enabled for SLES11 and newer, in older kernels the driver will panic when 2213 * unloading the driver followed by a load - I believe that the subroutine 2214 * raid_class_release() is not cleaning up properly. 2215 */ 2216 2217 /** 2218 * scsih_is_raid - return boolean indicating device is raid volume 2219 * @dev: the device struct object 2220 */ 2221 static int 2222 scsih_is_raid(struct device *dev) 2223 { 2224 struct scsi_device *sdev = to_scsi_device(dev); 2225 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2226 2227 if (ioc->is_warpdrive) 2228 return 0; 2229 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 2230 } 2231 2232 static int 2233 scsih_is_nvme(struct device *dev) 2234 { 2235 struct scsi_device *sdev = to_scsi_device(dev); 2236 2237 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; 2238 } 2239 2240 /** 2241 * scsih_get_resync - get raid volume resync percent complete 2242 * @dev: the device struct object 2243 */ 2244 static void 2245 scsih_get_resync(struct device *dev) 2246 { 2247 struct scsi_device *sdev = to_scsi_device(dev); 2248 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2249 static struct _raid_device *raid_device; 2250 unsigned long flags; 2251 Mpi2RaidVolPage0_t vol_pg0; 2252 Mpi2ConfigReply_t mpi_reply; 2253 u32 volume_status_flags; 2254 u8 percent_complete; 2255 u16 handle; 2256 2257 percent_complete = 0; 2258 handle = 0; 2259 if (ioc->is_warpdrive) 2260 goto out; 2261 2262 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2263 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2264 sdev->channel); 2265 if (raid_device) { 2266 handle = raid_device->handle; 2267 percent_complete = raid_device->percent_complete; 2268 } 2269 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2270 2271 if (!handle) 2272 goto out; 2273 2274 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2275 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2276 
sizeof(Mpi2RaidVolPage0_t))) { 2277 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2278 __FILE__, __LINE__, __func__); 2279 percent_complete = 0; 2280 goto out; 2281 } 2282 2283 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2284 if (!(volume_status_flags & 2285 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) 2286 percent_complete = 0; 2287 2288 out: 2289 2290 switch (ioc->hba_mpi_version_belonged) { 2291 case MPI2_VERSION: 2292 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 2293 break; 2294 case MPI25_VERSION: 2295 case MPI26_VERSION: 2296 raid_set_resync(mpt3sas_raid_template, dev, percent_complete); 2297 break; 2298 } 2299 } 2300 2301 /** 2302 * scsih_get_state - get raid volume level 2303 * @dev: the device struct object 2304 */ 2305 static void 2306 scsih_get_state(struct device *dev) 2307 { 2308 struct scsi_device *sdev = to_scsi_device(dev); 2309 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2310 static struct _raid_device *raid_device; 2311 unsigned long flags; 2312 Mpi2RaidVolPage0_t vol_pg0; 2313 Mpi2ConfigReply_t mpi_reply; 2314 u32 volstate; 2315 enum raid_state state = RAID_STATE_UNKNOWN; 2316 u16 handle = 0; 2317 2318 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2319 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2320 sdev->channel); 2321 if (raid_device) 2322 handle = raid_device->handle; 2323 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2324 2325 if (!raid_device) 2326 goto out; 2327 2328 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2329 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2330 sizeof(Mpi2RaidVolPage0_t))) { 2331 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2332 __FILE__, __LINE__, __func__); 2333 goto out; 2334 } 2335 2336 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2337 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { 2338 state = RAID_STATE_RESYNCING; 2339 goto out; 2340 } 2341 2342 switch (vol_pg0.VolumeState) { 2343 case 
MPI2_RAID_VOL_STATE_OPTIMAL: 2344 case MPI2_RAID_VOL_STATE_ONLINE: 2345 state = RAID_STATE_ACTIVE; 2346 break; 2347 case MPI2_RAID_VOL_STATE_DEGRADED: 2348 state = RAID_STATE_DEGRADED; 2349 break; 2350 case MPI2_RAID_VOL_STATE_FAILED: 2351 case MPI2_RAID_VOL_STATE_MISSING: 2352 state = RAID_STATE_OFFLINE; 2353 break; 2354 } 2355 out: 2356 switch (ioc->hba_mpi_version_belonged) { 2357 case MPI2_VERSION: 2358 raid_set_state(mpt2sas_raid_template, dev, state); 2359 break; 2360 case MPI25_VERSION: 2361 case MPI26_VERSION: 2362 raid_set_state(mpt3sas_raid_template, dev, state); 2363 break; 2364 } 2365 } 2366 2367 /** 2368 * _scsih_set_level - set raid level 2369 * @ioc: ? 2370 * @sdev: scsi device struct 2371 * @volume_type: volume type 2372 */ 2373 static void 2374 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, 2375 struct scsi_device *sdev, u8 volume_type) 2376 { 2377 enum raid_level level = RAID_LEVEL_UNKNOWN; 2378 2379 switch (volume_type) { 2380 case MPI2_RAID_VOL_TYPE_RAID0: 2381 level = RAID_LEVEL_0; 2382 break; 2383 case MPI2_RAID_VOL_TYPE_RAID10: 2384 level = RAID_LEVEL_10; 2385 break; 2386 case MPI2_RAID_VOL_TYPE_RAID1E: 2387 level = RAID_LEVEL_1E; 2388 break; 2389 case MPI2_RAID_VOL_TYPE_RAID1: 2390 level = RAID_LEVEL_1; 2391 break; 2392 } 2393 2394 switch (ioc->hba_mpi_version_belonged) { 2395 case MPI2_VERSION: 2396 raid_set_level(mpt2sas_raid_template, 2397 &sdev->sdev_gendev, level); 2398 break; 2399 case MPI25_VERSION: 2400 case MPI26_VERSION: 2401 raid_set_level(mpt3sas_raid_template, 2402 &sdev->sdev_gendev, level); 2403 break; 2404 } 2405 } 2406 2407 2408 /** 2409 * _scsih_get_volume_capabilities - volume capabilities 2410 * @ioc: per adapter object 2411 * @raid_device: the raid_device object 2412 * 2413 * Return: 0 for success, else 1 2414 */ 2415 static int 2416 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, 2417 struct _raid_device *raid_device) 2418 { 2419 Mpi2RaidVolPage0_t *vol_pg0; 2420 Mpi2RaidPhysDiskPage0_t pd_pg0; 2421 
Mpi2SasDevicePage0_t sas_device_pg0; 2422 Mpi2ConfigReply_t mpi_reply; 2423 u16 sz; 2424 u8 num_pds; 2425 2426 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, 2427 &num_pds)) || !num_pds) { 2428 dfailprintk(ioc, 2429 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2430 __FILE__, __LINE__, __func__)); 2431 return 1; 2432 } 2433 2434 raid_device->num_pds = num_pds; 2435 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * 2436 sizeof(Mpi2RaidVol0PhysDisk_t)); 2437 vol_pg0 = kzalloc(sz, GFP_KERNEL); 2438 if (!vol_pg0) { 2439 dfailprintk(ioc, 2440 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2441 __FILE__, __LINE__, __func__)); 2442 return 1; 2443 } 2444 2445 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, 2446 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { 2447 dfailprintk(ioc, 2448 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2449 __FILE__, __LINE__, __func__)); 2450 kfree(vol_pg0); 2451 return 1; 2452 } 2453 2454 raid_device->volume_type = vol_pg0->VolumeType; 2455 2456 /* figure out what the underlying devices are by 2457 * obtaining the device_info bits for the 1st device 2458 */ 2459 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 2460 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 2461 vol_pg0->PhysDisk[0].PhysDiskNum))) { 2462 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 2463 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 2464 le16_to_cpu(pd_pg0.DevHandle)))) { 2465 raid_device->device_info = 2466 le32_to_cpu(sas_device_pg0.DeviceInfo); 2467 } 2468 } 2469 2470 kfree(vol_pg0); 2471 return 0; 2472 } 2473 2474 /** 2475 * _scsih_enable_tlr - setting TLR flags 2476 * @ioc: per adapter object 2477 * @sdev: scsi device struct 2478 * 2479 * Enabling Transaction Layer Retries for tape devices when 2480 * vpd page 0x90 is present 2481 * 2482 */ 2483 static void 2484 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) 2485 { 2486 2487 /* only for TAPE */ 2488 if (sdev->type != TYPE_TAPE) 
2489 return; 2490 2491 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) 2492 return; 2493 2494 sas_enable_tlr(sdev); 2495 sdev_printk(KERN_INFO, sdev, "TLR %s\n", 2496 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); 2497 return; 2498 2499 } 2500 2501 /** 2502 * scsih_slave_configure - device configure routine. 2503 * @sdev: scsi device struct 2504 * 2505 * Return: 0 if ok. Any other return is assumed to be an error and 2506 * the device is ignored. 2507 */ 2508 static int 2509 scsih_slave_configure(struct scsi_device *sdev) 2510 { 2511 struct Scsi_Host *shost = sdev->host; 2512 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2513 struct MPT3SAS_DEVICE *sas_device_priv_data; 2514 struct MPT3SAS_TARGET *sas_target_priv_data; 2515 struct _sas_device *sas_device; 2516 struct _pcie_device *pcie_device; 2517 struct _raid_device *raid_device; 2518 unsigned long flags; 2519 int qdepth; 2520 u8 ssp_target = 0; 2521 char *ds = ""; 2522 char *r_level = ""; 2523 u16 handle, volume_handle = 0; 2524 u64 volume_wwid = 0; 2525 2526 qdepth = 1; 2527 sas_device_priv_data = sdev->hostdata; 2528 sas_device_priv_data->configured_lun = 1; 2529 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; 2530 sas_target_priv_data = sas_device_priv_data->sas_target; 2531 handle = sas_target_priv_data->handle; 2532 2533 /* raid volume handling */ 2534 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { 2535 2536 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2537 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 2538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2539 if (!raid_device) { 2540 dfailprintk(ioc, 2541 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2542 __FILE__, __LINE__, __func__)); 2543 return 1; 2544 } 2545 2546 if (_scsih_get_volume_capabilities(ioc, raid_device)) { 2547 dfailprintk(ioc, 2548 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2549 __FILE__, __LINE__, __func__)); 2550 return 1; 2551 } 2552 2553 /* 2554 * WARPDRIVE: 
Initialize the required data for Direct IO 2555 */ 2556 mpt3sas_init_warpdrive_properties(ioc, raid_device); 2557 2558 /* RAID Queue Depth Support 2559 * IS volume = underlying qdepth of drive type, either 2560 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH 2561 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) 2562 */ 2563 if (raid_device->device_info & 2564 MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2565 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2566 ds = "SSP"; 2567 } else { 2568 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2569 if (raid_device->device_info & 2570 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2571 ds = "SATA"; 2572 else 2573 ds = "STP"; 2574 } 2575 2576 switch (raid_device->volume_type) { 2577 case MPI2_RAID_VOL_TYPE_RAID0: 2578 r_level = "RAID0"; 2579 break; 2580 case MPI2_RAID_VOL_TYPE_RAID1E: 2581 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2582 if (ioc->manu_pg10.OEMIdentifier && 2583 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & 2584 MFG10_GF0_R10_DISPLAY) && 2585 !(raid_device->num_pds % 2)) 2586 r_level = "RAID10"; 2587 else 2588 r_level = "RAID1E"; 2589 break; 2590 case MPI2_RAID_VOL_TYPE_RAID1: 2591 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2592 r_level = "RAID1"; 2593 break; 2594 case MPI2_RAID_VOL_TYPE_RAID10: 2595 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2596 r_level = "RAID10"; 2597 break; 2598 case MPI2_RAID_VOL_TYPE_UNKNOWN: 2599 default: 2600 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2601 r_level = "RAIDX"; 2602 break; 2603 } 2604 2605 if (!ioc->hide_ir_msg) 2606 sdev_printk(KERN_INFO, sdev, 2607 "%s: handle(0x%04x), wwid(0x%016llx)," 2608 " pd_count(%d), type(%s)\n", 2609 r_level, raid_device->handle, 2610 (unsigned long long)raid_device->wwid, 2611 raid_device->num_pds, ds); 2612 2613 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { 2614 blk_queue_max_hw_sectors(sdev->request_queue, 2615 MPT3SAS_RAID_MAX_SECTORS); 2616 sdev_printk(KERN_INFO, sdev, 2617 "Set queue's max_sector to: %u\n", 2618 MPT3SAS_RAID_MAX_SECTORS); 2619 } 2620 2621 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2622 
2623 /* raid transport support */ 2624 if (!ioc->is_warpdrive) 2625 _scsih_set_level(ioc, sdev, raid_device->volume_type); 2626 return 0; 2627 } 2628 2629 /* non-raid handling */ 2630 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { 2631 if (mpt3sas_config_get_volume_handle(ioc, handle, 2632 &volume_handle)) { 2633 dfailprintk(ioc, 2634 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2635 __FILE__, __LINE__, __func__)); 2636 return 1; 2637 } 2638 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, 2639 volume_handle, &volume_wwid)) { 2640 dfailprintk(ioc, 2641 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2642 __FILE__, __LINE__, __func__)); 2643 return 1; 2644 } 2645 } 2646 2647 /* PCIe handling */ 2648 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 2649 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 2650 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, 2651 sas_device_priv_data->sas_target->sas_address); 2652 if (!pcie_device) { 2653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2654 dfailprintk(ioc, 2655 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2656 __FILE__, __LINE__, __func__)); 2657 return 1; 2658 } 2659 2660 qdepth = MPT3SAS_NVME_QUEUE_DEPTH; 2661 ds = "NVMe"; 2662 sdev_printk(KERN_INFO, sdev, 2663 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", 2664 ds, handle, (unsigned long long)pcie_device->wwid, 2665 pcie_device->port_num); 2666 if (pcie_device->enclosure_handle != 0) 2667 sdev_printk(KERN_INFO, sdev, 2668 "%s: enclosure logical id(0x%016llx), slot(%d)\n", 2669 ds, 2670 (unsigned long long)pcie_device->enclosure_logical_id, 2671 pcie_device->slot); 2672 if (pcie_device->connector_name[0] != '\0') 2673 sdev_printk(KERN_INFO, sdev, 2674 "%s: enclosure level(0x%04x)," 2675 "connector name( %s)\n", ds, 2676 pcie_device->enclosure_level, 2677 pcie_device->connector_name); 2678 2679 if (pcie_device->nvme_mdts) 2680 blk_queue_max_hw_sectors(sdev->request_queue, 2681 pcie_device->nvme_mdts/512); 2682 2683 
pcie_device_put(pcie_device); 2684 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2685 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2686 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be 2687 ** merged and can eliminate holes created during merging 2688 ** operation. 2689 **/ 2690 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, 2691 sdev->request_queue); 2692 blk_queue_virt_boundary(sdev->request_queue, 2693 ioc->page_size - 1); 2694 return 0; 2695 } 2696 2697 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2698 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 2699 sas_device_priv_data->sas_target->sas_address, 2700 sas_device_priv_data->sas_target->port); 2701 if (!sas_device) { 2702 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2703 dfailprintk(ioc, 2704 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2705 __FILE__, __LINE__, __func__)); 2706 return 1; 2707 } 2708 2709 sas_device->volume_handle = volume_handle; 2710 sas_device->volume_wwid = volume_wwid; 2711 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2712 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2713 ssp_target = 1; 2714 if (sas_device->device_info & 2715 MPI2_SAS_DEVICE_INFO_SEP) { 2716 sdev_printk(KERN_WARNING, sdev, 2717 "set ignore_delay_remove for handle(0x%04x)\n", 2718 sas_device_priv_data->sas_target->handle); 2719 sas_device_priv_data->ignore_delay_remove = 1; 2720 ds = "SES"; 2721 } else 2722 ds = "SSP"; 2723 } else { 2724 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2725 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) 2726 ds = "STP"; 2727 else if (sas_device->device_info & 2728 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2729 ds = "SATA"; 2730 } 2731 2732 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ 2733 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 2734 ds, handle, (unsigned long long)sas_device->sas_address, 2735 sas_device->phy, (unsigned long long)sas_device->device_name); 2736 2737 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); 2738 
2739 sas_device_put(sas_device); 2740 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2741 2742 if (!ssp_target) 2743 _scsih_display_sata_capabilities(ioc, handle, sdev); 2744 2745 2746 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2747 2748 if (ssp_target) { 2749 sas_read_port_mode_page(sdev); 2750 _scsih_enable_tlr(ioc, sdev); 2751 } 2752 2753 return 0; 2754 } 2755 2756 /** 2757 * scsih_bios_param - fetch head, sector, cylinder info for a disk 2758 * @sdev: scsi device struct 2759 * @bdev: pointer to block device context 2760 * @capacity: device size (in 512 byte sectors) 2761 * @params: three element array to place output: 2762 * params[0] number of heads (max 255) 2763 * params[1] number of sectors (max 63) 2764 * params[2] number of cylinders 2765 */ 2766 static int 2767 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2768 sector_t capacity, int params[]) 2769 { 2770 int heads; 2771 int sectors; 2772 sector_t cylinders; 2773 ulong dummy; 2774 2775 heads = 64; 2776 sectors = 32; 2777 2778 dummy = heads * sectors; 2779 cylinders = capacity; 2780 sector_div(cylinders, dummy); 2781 2782 /* 2783 * Handle extended translation size for logical drives 2784 * > 1Gb 2785 */ 2786 if ((ulong)capacity >= 0x200000) { 2787 heads = 255; 2788 sectors = 63; 2789 dummy = heads * sectors; 2790 cylinders = capacity; 2791 sector_div(cylinders, dummy); 2792 } 2793 2794 /* return result */ 2795 params[0] = heads; 2796 params[1] = sectors; 2797 params[2] = cylinders; 2798 2799 return 0; 2800 } 2801 2802 /** 2803 * _scsih_response_code - translation of device response code 2804 * @ioc: per adapter object 2805 * @response_code: response code returned by the device 2806 */ 2807 static void 2808 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) 2809 { 2810 char *desc; 2811 2812 switch (response_code) { 2813 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 2814 desc = "task management request completed"; 2815 break; 2816 case 
MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 2817 desc = "invalid frame"; 2818 break; 2819 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 2820 desc = "task management request not supported"; 2821 break; 2822 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 2823 desc = "task management request failed"; 2824 break; 2825 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 2826 desc = "task management request succeeded"; 2827 break; 2828 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 2829 desc = "invalid lun"; 2830 break; 2831 case 0xA: 2832 desc = "overlapped tag attempted"; 2833 break; 2834 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 2835 desc = "task queued, however not sent to target"; 2836 break; 2837 default: 2838 desc = "unknown"; 2839 break; 2840 } 2841 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); 2842 } 2843 2844 /** 2845 * _scsih_tm_done - tm completion routine 2846 * @ioc: per adapter object 2847 * @smid: system request message index 2848 * @msix_index: MSIX table index supplied by the OS 2849 * @reply: reply message frame(lower 32bit addr) 2850 * Context: none. 2851 * 2852 * The callback handler when using scsih_issue_tm. 2853 * 2854 * Return: 1 meaning mf should be freed from _base_interrupt 2855 * 0 means the mf is freed from this function. 
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore completions that do not belong to the pending TM */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}

/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/* 'skip'+'continue' (instead of break) so that the iterator
	 * still walks to completion and releases its device references */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}

/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During task management request, we need to freeze the device queue.
2911 */ 2912 void 2913 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 2914 { 2915 struct MPT3SAS_DEVICE *sas_device_priv_data; 2916 struct scsi_device *sdev; 2917 u8 skip = 0; 2918 2919 shost_for_each_device(sdev, ioc->shost) { 2920 if (skip) 2921 continue; 2922 sas_device_priv_data = sdev->hostdata; 2923 if (!sas_device_priv_data) 2924 continue; 2925 if (sas_device_priv_data->sas_target->handle == handle) { 2926 sas_device_priv_data->sas_target->tm_busy = 0; 2927 skip = 1; 2928 ioc->ignore_loginfos = 0; 2929 } 2930 } 2931 } 2932 2933 /** 2934 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status 2935 * @ioc: per adapter object 2936 * @channel: the channel assigned by the OS 2937 * @id: the id assigned by the OS 2938 * @lun: lun number 2939 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2940 * @smid_task: smid assigned to the task 2941 * 2942 * Look whether TM has aborted the timed out SCSI command, if 2943 * TM has aborted the IO then return SUCCESS else return FAILED. 
2944 */ 2945 static int 2946 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel, 2947 uint id, uint lun, u8 type, u16 smid_task) 2948 { 2949 2950 if (smid_task <= ioc->shost->can_queue) { 2951 switch (type) { 2952 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 2953 if (!(_scsih_scsi_lookup_find_by_target(ioc, 2954 id, channel))) 2955 return SUCCESS; 2956 break; 2957 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 2958 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 2959 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id, 2960 lun, channel))) 2961 return SUCCESS; 2962 break; 2963 default: 2964 return SUCCESS; 2965 } 2966 } else if (smid_task == ioc->scsih_cmds.smid) { 2967 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) || 2968 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED)) 2969 return SUCCESS; 2970 } else if (smid_task == ioc->ctl_cmds.smid) { 2971 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) || 2972 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED)) 2973 return SUCCESS; 2974 } 2975 2976 return FAILED; 2977 } 2978 2979 /** 2980 * scsih_tm_post_processing - post processing of target & LUN reset 2981 * @ioc: per adapter object 2982 * @handle: device handle 2983 * @channel: the channel assigned by the OS 2984 * @id: the id assigned by the OS 2985 * @lun: lun number 2986 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2987 * @smid_task: smid assigned to the task 2988 * 2989 * Post processing of target & LUN reset. Due to interrupt latency 2990 * issue it possible that interrupt for aborted IO might not be 2991 * received yet. So before returning failure status, poll the 2992 * reply descriptor pools for the reply of timed out SCSI command. 2993 * Return FAILED status if reply for timed out is not received 2994 * otherwise return SUCCESS. 
2995 */ 2996 static int 2997 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle, 2998 uint channel, uint id, uint lun, u8 type, u16 smid_task) 2999 { 3000 int rc; 3001 3002 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); 3003 if (rc == SUCCESS) 3004 return rc; 3005 3006 ioc_info(ioc, 3007 "Poll ReplyDescriptor queues for completion of" 3008 " smid(%d), task_type(0x%02x), handle(0x%04x)\n", 3009 smid_task, type, handle); 3010 3011 /* 3012 * Due to interrupt latency issues, driver may receive interrupt for 3013 * TM first and then for aborted SCSI IO command. So, poll all the 3014 * ReplyDescriptor pools before returning the FAILED status to SML. 3015 */ 3016 mpt3sas_base_mask_interrupts(ioc); 3017 mpt3sas_base_sync_reply_irqs(ioc, 1); 3018 mpt3sas_base_unmask_interrupts(ioc); 3019 3020 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); 3021 } 3022 3023 /** 3024 * mpt3sas_scsih_issue_tm - main routine for sending tm requests 3025 * @ioc: per adapter struct 3026 * @handle: device handle 3027 * @channel: the channel assigned by the OS 3028 * @id: the id assigned by the OS 3029 * @lun: lun number 3030 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 3031 * @smid_task: smid assigned to the task 3032 * @msix_task: MSIX table index supplied by the OS 3033 * @timeout: timeout in seconds 3034 * @tr_method: Target Reset Method 3035 * Context: user 3036 * 3037 * A generic API for sending task management requests to firmware. 3038 * 3039 * The callback index is set inside `ioc->tm_cb_idx`. 3040 * The caller is responsible to check for outstanding commands. 3041 * 3042 * Return: SUCCESS or FAILED. 
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* caller must hold tm_cmds.mutex (see issue_locked_tm wrapper) */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* an unhealthy IOC (doorbell in use, fault or coredump state) is
	 * handled with a hard reset instead of sending the TM */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* MsgFlags carries the reset method only for abort/query types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the target's queue while the TM is outstanding */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
			    FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

 out:
	/* always unfreeze the target queue and release the TM slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_scsih_issue_locked_tm - issue_tm with tm_cmds.mutex held
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Convenience wrapper: takes tm_cmds.mutex around
 * mpt3sas_scsih_issue_tm().
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}

/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.
3211 */ 3212 static void 3213 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) 3214 { 3215 struct scsi_target *starget = scmd->device->sdev_target; 3216 struct MPT3SAS_TARGET *priv_target = starget->hostdata; 3217 struct _sas_device *sas_device = NULL; 3218 struct _pcie_device *pcie_device = NULL; 3219 unsigned long flags; 3220 char *device_str = NULL; 3221 3222 if (!priv_target) 3223 return; 3224 if (ioc->hide_ir_msg) 3225 device_str = "WarpDrive"; 3226 else 3227 device_str = "volume"; 3228 3229 scsi_print_command(scmd); 3230 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { 3231 starget_printk(KERN_INFO, starget, 3232 "%s handle(0x%04x), %s wwid(0x%016llx)\n", 3233 device_str, priv_target->handle, 3234 device_str, (unsigned long long)priv_target->sas_address); 3235 3236 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 3237 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 3238 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target); 3239 if (pcie_device) { 3240 starget_printk(KERN_INFO, starget, 3241 "handle(0x%04x), wwid(0x%016llx), port(%d)\n", 3242 pcie_device->handle, 3243 (unsigned long long)pcie_device->wwid, 3244 pcie_device->port_num); 3245 if (pcie_device->enclosure_handle != 0) 3246 starget_printk(KERN_INFO, starget, 3247 "enclosure logical id(0x%016llx), slot(%d)\n", 3248 (unsigned long long) 3249 pcie_device->enclosure_logical_id, 3250 pcie_device->slot); 3251 if (pcie_device->connector_name[0] != '\0') 3252 starget_printk(KERN_INFO, starget, 3253 "enclosure level(0x%04x), connector name( %s)\n", 3254 pcie_device->enclosure_level, 3255 pcie_device->connector_name); 3256 pcie_device_put(pcie_device); 3257 } 3258 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 3259 3260 } else { 3261 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3262 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target); 3263 if (sas_device) { 3264 if (priv_target->flags & 3265 MPT_TARGET_FLAGS_RAID_COMPONENT) { 3266 
starget_printk(KERN_INFO, starget, 3267 "volume handle(0x%04x), " 3268 "volume wwid(0x%016llx)\n", 3269 sas_device->volume_handle, 3270 (unsigned long long)sas_device->volume_wwid); 3271 } 3272 starget_printk(KERN_INFO, starget, 3273 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", 3274 sas_device->handle, 3275 (unsigned long long)sas_device->sas_address, 3276 sas_device->phy); 3277 3278 _scsih_display_enclosure_chassis_info(NULL, sas_device, 3279 NULL, starget); 3280 3281 sas_device_put(sas_device); 3282 } 3283 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3284 } 3285 } 3286 3287 /** 3288 * scsih_abort - eh threads main abort routine 3289 * @scmd: pointer to scsi command object 3290 * 3291 * Return: SUCCESS if command aborted else FAILED 3292 */ 3293 static int 3294 scsih_abort(struct scsi_cmnd *scmd) 3295 { 3296 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3297 struct MPT3SAS_DEVICE *sas_device_priv_data; 3298 struct scsiio_tracker *st = scsi_cmd_priv(scmd); 3299 u16 handle; 3300 int r; 3301 3302 u8 timeout = 30; 3303 struct _pcie_device *pcie_device = NULL; 3304 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" 3305 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", 3306 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), 3307 (scmd->request->timeout / HZ) * 1000); 3308 _scsih_tm_display_info(ioc, scmd); 3309 3310 sas_device_priv_data = scmd->device->hostdata; 3311 if (!sas_device_priv_data || !sas_device_priv_data->sas_target || 3312 ioc->remove_host) { 3313 sdev_printk(KERN_INFO, scmd->device, 3314 "device been deleted! 
scmd(0x%p)\n", scmd); 3315 scmd->result = DID_NO_CONNECT << 16; 3316 scmd->scsi_done(scmd); 3317 r = SUCCESS; 3318 goto out; 3319 } 3320 3321 /* check for completed command */ 3322 if (st == NULL || st->cb_idx == 0xFF) { 3323 sdev_printk(KERN_INFO, scmd->device, "No reference found at " 3324 "driver, assuming scmd(0x%p) might have completed\n", scmd); 3325 scmd->result = DID_RESET << 16; 3326 r = SUCCESS; 3327 goto out; 3328 } 3329 3330 /* for hidden raid components and volumes this is not supported */ 3331 if (sas_device_priv_data->sas_target->flags & 3332 MPT_TARGET_FLAGS_RAID_COMPONENT || 3333 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { 3334 scmd->result = DID_RESET << 16; 3335 r = FAILED; 3336 goto out; 3337 } 3338 3339 mpt3sas_halt_firmware(ioc); 3340 3341 handle = sas_device_priv_data->sas_target->handle; 3342 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); 3343 if (pcie_device && (!ioc->tm_custom_handling) && 3344 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) 3345 timeout = ioc->nvme_abort_timeout; 3346 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, 3347 scmd->device->id, scmd->device->lun, 3348 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 3349 st->smid, st->msix_io, timeout, 0); 3350 /* Command must be cleared after abort */ 3351 if (r == SUCCESS && st->cb_idx != 0xFF) 3352 r = FAILED; 3353 out: 3354 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", 3355 ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); 3356 if (pcie_device) 3357 pcie_device_put(pcie_device); 3358 return r; 3359 } 3360 3361 /** 3362 * scsih_dev_reset - eh threads main device reset routine 3363 * @scmd: pointer to scsi command object 3364 * 3365 * Return: SUCCESS if command aborted else FAILED 3366 */ 3367 static int 3368 scsih_dev_reset(struct scsi_cmnd *scmd) 3369 { 3370 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3371 struct MPT3SAS_DEVICE *sas_device_priv_data; 3372 struct _sas_device *sas_device = NULL; 3373 struct _pcie_device *pcie_device = NULL; 3374 u16 handle; 3375 u8 tr_method = 0; 3376 u8 tr_timeout = 30; 3377 int r; 3378 3379 struct scsi_target *starget = scmd->device->sdev_target; 3380 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; 3381 3382 sdev_printk(KERN_INFO, scmd->device, 3383 "attempting device reset! scmd(0x%p)\n", scmd); 3384 _scsih_tm_display_info(ioc, scmd); 3385 3386 sas_device_priv_data = scmd->device->hostdata; 3387 if (!sas_device_priv_data || !sas_device_priv_data->sas_target || 3388 ioc->remove_host) { 3389 sdev_printk(KERN_INFO, scmd->device, 3390 "device been deleted! 
scmd(0x%p)\n", scmd); 3391 scmd->result = DID_NO_CONNECT << 16; 3392 scmd->scsi_done(scmd); 3393 r = SUCCESS; 3394 goto out; 3395 } 3396 3397 /* for hidden raid components obtain the volume_handle */ 3398 handle = 0; 3399 if (sas_device_priv_data->sas_target->flags & 3400 MPT_TARGET_FLAGS_RAID_COMPONENT) { 3401 sas_device = mpt3sas_get_sdev_from_target(ioc, 3402 target_priv_data); 3403 if (sas_device) 3404 handle = sas_device->volume_handle; 3405 } else 3406 handle = sas_device_priv_data->sas_target->handle; 3407 3408 if (!handle) { 3409 scmd->result = DID_RESET << 16; 3410 r = FAILED; 3411 goto out; 3412 } 3413 3414 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); 3415 3416 if (pcie_device && (!ioc->tm_custom_handling) && 3417 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { 3418 tr_timeout = pcie_device->reset_timeout; 3419 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3420 } else 3421 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 3422 3423 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, 3424 scmd->device->id, scmd->device->lun, 3425 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 3426 tr_timeout, tr_method); 3427 /* Check for busy commands after reset */ 3428 if (r == SUCCESS && scsi_device_busy(scmd->device)) 3429 r = FAILED; 3430 out: 3431 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", 3432 ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); 3433 3434 if (sas_device) 3435 sas_device_put(sas_device); 3436 if (pcie_device) 3437 pcie_device_put(pcie_device); 3438 3439 return r; 3440 } 3441 3442 /** 3443 * scsih_target_reset - eh threads main target reset routine 3444 * @scmd: pointer to scsi command object 3445 * 3446 * Return: SUCCESS if command aborted else FAILED 3447 */ 3448 static int 3449 scsih_target_reset(struct scsi_cmnd *scmd) 3450 { 3451 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3452 struct MPT3SAS_DEVICE *sas_device_priv_data; 3453 struct _sas_device *sas_device = NULL; 3454 struct _pcie_device *pcie_device = NULL; 3455 u16 handle; 3456 u8 tr_method = 0; 3457 u8 tr_timeout = 30; 3458 int r; 3459 struct scsi_target *starget = scmd->device->sdev_target; 3460 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; 3461 3462 starget_printk(KERN_INFO, starget, 3463 "attempting target reset! scmd(0x%p)\n", scmd); 3464 _scsih_tm_display_info(ioc, scmd); 3465 3466 sas_device_priv_data = scmd->device->hostdata; 3467 if (!sas_device_priv_data || !sas_device_priv_data->sas_target || 3468 ioc->remove_host) { 3469 starget_printk(KERN_INFO, starget, 3470 "target been deleted! 
scmd(0x%p)\n", scmd); 3471 scmd->result = DID_NO_CONNECT << 16; 3472 scmd->scsi_done(scmd); 3473 r = SUCCESS; 3474 goto out; 3475 } 3476 3477 /* for hidden raid components obtain the volume_handle */ 3478 handle = 0; 3479 if (sas_device_priv_data->sas_target->flags & 3480 MPT_TARGET_FLAGS_RAID_COMPONENT) { 3481 sas_device = mpt3sas_get_sdev_from_target(ioc, 3482 target_priv_data); 3483 if (sas_device) 3484 handle = sas_device->volume_handle; 3485 } else 3486 handle = sas_device_priv_data->sas_target->handle; 3487 3488 if (!handle) { 3489 scmd->result = DID_RESET << 16; 3490 r = FAILED; 3491 goto out; 3492 } 3493 3494 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); 3495 3496 if (pcie_device && (!ioc->tm_custom_handling) && 3497 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { 3498 tr_timeout = pcie_device->reset_timeout; 3499 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 3500 } else 3501 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 3502 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel, 3503 scmd->device->id, 0, 3504 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 3505 tr_timeout, tr_method); 3506 /* Check for busy commands after reset */ 3507 if (r == SUCCESS && atomic_read(&starget->target_busy)) 3508 r = FAILED; 3509 out: 3510 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", 3511 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3512 3513 if (sas_device) 3514 sas_device_put(sas_device); 3515 if (pcie_device) 3516 pcie_device_put(pcie_device); 3517 return r; 3518 } 3519 3520 3521 /** 3522 * scsih_host_reset - eh threads main host reset routine 3523 * @scmd: pointer to scsi command object 3524 * 3525 * Return: SUCCESS if command aborted else FAILED 3526 */ 3527 static int 3528 scsih_host_reset(struct scsi_cmnd *scmd) 3529 { 3530 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 3531 int r, retval; 3532 3533 ioc_info(ioc, "attempting host reset! 
scmd(0x%p)\n", scmd); 3534 scsi_print_command(scmd); 3535 3536 if (ioc->is_driver_loading || ioc->remove_host) { 3537 ioc_info(ioc, "Blocking the host reset\n"); 3538 r = FAILED; 3539 goto out; 3540 } 3541 3542 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 3543 r = (retval < 0) ? FAILED : SUCCESS; 3544 out: 3545 ioc_info(ioc, "host reset: %s scmd(0x%p)\n", 3546 r == SUCCESS ? "SUCCESS" : "FAILED", scmd); 3547 3548 return r; 3549 } 3550 3551 /** 3552 * _scsih_fw_event_add - insert and queue up fw_event 3553 * @ioc: per adapter object 3554 * @fw_event: object describing the event 3555 * Context: This function will acquire ioc->fw_event_lock. 3556 * 3557 * This adds the firmware event object into link list, then queues it up to 3558 * be processed from user context. 3559 */ 3560 static void 3561 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) 3562 { 3563 unsigned long flags; 3564 3565 if (ioc->firmware_event_thread == NULL) 3566 return; 3567 3568 spin_lock_irqsave(&ioc->fw_event_lock, flags); 3569 fw_event_work_get(fw_event); 3570 INIT_LIST_HEAD(&fw_event->list); 3571 list_add_tail(&fw_event->list, &ioc->fw_event_list); 3572 INIT_WORK(&fw_event->work, _firmware_event_work); 3573 fw_event_work_get(fw_event); 3574 queue_work(ioc->firmware_event_thread, &fw_event->work); 3575 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 3576 } 3577 3578 /** 3579 * _scsih_fw_event_del_from_list - delete fw_event from the list 3580 * @ioc: per adapter object 3581 * @fw_event: object describing the event 3582 * Context: This function will acquire ioc->fw_event_lock. 3583 * 3584 * If the fw_event is on the fw_event_list, remove it and do a put. 
3585 */ 3586 static void 3587 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work 3588 *fw_event) 3589 { 3590 unsigned long flags; 3591 3592 spin_lock_irqsave(&ioc->fw_event_lock, flags); 3593 if (!list_empty(&fw_event->list)) { 3594 list_del_init(&fw_event->list); 3595 fw_event_work_put(fw_event); 3596 } 3597 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 3598 } 3599 3600 3601 /** 3602 * mpt3sas_send_trigger_data_event - send event for processing trigger data 3603 * @ioc: per adapter object 3604 * @event_data: trigger event data 3605 */ 3606 void 3607 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, 3608 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) 3609 { 3610 struct fw_event_work *fw_event; 3611 u16 sz; 3612 3613 if (ioc->is_driver_loading) 3614 return; 3615 sz = sizeof(*event_data); 3616 fw_event = alloc_fw_event_work(sz); 3617 if (!fw_event) 3618 return; 3619 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; 3620 fw_event->ioc = ioc; 3621 memcpy(fw_event->event_data, event_data, sizeof(*event_data)); 3622 _scsih_fw_event_add(ioc, fw_event); 3623 fw_event_work_put(fw_event); 3624 } 3625 3626 /** 3627 * _scsih_error_recovery_delete_devices - remove devices not responding 3628 * @ioc: per adapter object 3629 */ 3630 static void 3631 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) 3632 { 3633 struct fw_event_work *fw_event; 3634 3635 fw_event = alloc_fw_event_work(0); 3636 if (!fw_event) 3637 return; 3638 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; 3639 fw_event->ioc = ioc; 3640 _scsih_fw_event_add(ioc, fw_event); 3641 fw_event_work_put(fw_event); 3642 } 3643 3644 /** 3645 * mpt3sas_port_enable_complete - port enable completed (fake event) 3646 * @ioc: per adapter object 3647 */ 3648 void 3649 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) 3650 { 3651 struct fw_event_work *fw_event; 3652 3653 fw_event = alloc_fw_event_work(0); 3654 if (!fw_event) 3655 return; 3656 fw_event->event 
= MPT3SAS_PORT_ENABLE_COMPLETE; 3657 fw_event->ioc = ioc; 3658 _scsih_fw_event_add(ioc, fw_event); 3659 fw_event_work_put(fw_event); 3660 } 3661 3662 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) 3663 { 3664 unsigned long flags; 3665 struct fw_event_work *fw_event = NULL; 3666 3667 spin_lock_irqsave(&ioc->fw_event_lock, flags); 3668 if (!list_empty(&ioc->fw_event_list)) { 3669 fw_event = list_first_entry(&ioc->fw_event_list, 3670 struct fw_event_work, list); 3671 list_del_init(&fw_event->list); 3672 } 3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 3674 3675 return fw_event; 3676 } 3677 3678 /** 3679 * _scsih_fw_event_cleanup_queue - cleanup event queue 3680 * @ioc: per adapter object 3681 * 3682 * Walk the firmware event queue, either killing timers, or waiting 3683 * for outstanding events to complete 3684 * 3685 * Context: task, can sleep 3686 */ 3687 static void 3688 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) 3689 { 3690 struct fw_event_work *fw_event; 3691 3692 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || 3693 !ioc->firmware_event_thread) 3694 return; 3695 /* 3696 * Set current running event as ignore, so that 3697 * current running event will exit quickly. 3698 * As diag reset has occurred it is of no use 3699 * to process remaining stale event data entries. 3700 */ 3701 if (ioc->shost_recovery && ioc->current_event) 3702 ioc->current_event->ignore = 1; 3703 3704 ioc->fw_events_cleanup = 1; 3705 while ((fw_event = dequeue_next_fw_event(ioc)) || 3706 (fw_event = ioc->current_event)) { 3707 3708 /* 3709 * Don't call cancel_work_sync() for current_event 3710 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES; 3711 * otherwise we may observe deadlock if current 3712 * hard reset issued as part of processing the current_event. 3713 * 3714 * Orginal logic of cleaning the current_event is added 3715 * for handling the back to back host reset issued by the user. 3716 * i.e. 
during back to back host reset, driver use to process 3717 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES 3718 * event back to back and this made the drives to unregister 3719 * the devices from SML. 3720 */ 3721 3722 if (fw_event == ioc->current_event && 3723 ioc->current_event->event != 3724 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) { 3725 ioc->current_event = NULL; 3726 continue; 3727 } 3728 3729 /* 3730 * Driver has to clear ioc->start_scan flag when 3731 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE, 3732 * otherwise scsi_scan_host() API waits for the 3733 * 5 minute timer to expire. If we exit from 3734 * scsi_scan_host() early then we can issue the 3735 * new port enable request as part of current diag reset. 3736 */ 3737 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) { 3738 ioc->port_enable_cmds.status |= MPT3_CMD_RESET; 3739 ioc->start_scan = 0; 3740 } 3741 3742 /* 3743 * Wait on the fw_event to complete. If this returns 1, then 3744 * the event was never executed, and we need a put for the 3745 * reference the work had on the fw_event. 
3746 * 3747 * If it did execute, we wait for it to finish, and the put will 3748 * happen from _firmware_event_work() 3749 */ 3750 if (cancel_work_sync(&fw_event->work)) 3751 fw_event_work_put(fw_event); 3752 3753 fw_event_work_put(fw_event); 3754 } 3755 ioc->fw_events_cleanup = 0; 3756 } 3757 3758 /** 3759 * _scsih_internal_device_block - block the sdev device 3760 * @sdev: per device object 3761 * @sas_device_priv_data : per device driver private data 3762 * 3763 * make sure device is blocked without error, if not 3764 * print an error 3765 */ 3766 static void 3767 _scsih_internal_device_block(struct scsi_device *sdev, 3768 struct MPT3SAS_DEVICE *sas_device_priv_data) 3769 { 3770 int r = 0; 3771 3772 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n", 3773 sas_device_priv_data->sas_target->handle); 3774 sas_device_priv_data->block = 1; 3775 3776 r = scsi_internal_device_block_nowait(sdev); 3777 if (r == -EINVAL) 3778 sdev_printk(KERN_WARNING, sdev, 3779 "device_block failed with return(%d) for handle(0x%04x)\n", 3780 r, sas_device_priv_data->sas_target->handle); 3781 } 3782 3783 /** 3784 * _scsih_internal_device_unblock - unblock the sdev device 3785 * @sdev: per device object 3786 * @sas_device_priv_data : per device driver private data 3787 * make sure device is unblocked without error, if not retry 3788 * by blocking and then unblocking 3789 */ 3790 3791 static void 3792 _scsih_internal_device_unblock(struct scsi_device *sdev, 3793 struct MPT3SAS_DEVICE *sas_device_priv_data) 3794 { 3795 int r = 0; 3796 3797 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, " 3798 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); 3799 sas_device_priv_data->block = 0; 3800 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); 3801 if (r == -EINVAL) { 3802 /* The device has been set to SDEV_RUNNING by SD layer during 3803 * device addition but the request queue is still stopped by 3804 * our earlier block call. 
We need to perform a block again 3805 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ 3806 3807 sdev_printk(KERN_WARNING, sdev, 3808 "device_unblock failed with return(%d) for handle(0x%04x) " 3809 "performing a block followed by an unblock\n", 3810 r, sas_device_priv_data->sas_target->handle); 3811 sas_device_priv_data->block = 1; 3812 r = scsi_internal_device_block_nowait(sdev); 3813 if (r) 3814 sdev_printk(KERN_WARNING, sdev, "retried device_block " 3815 "failed with return(%d) for handle(0x%04x)\n", 3816 r, sas_device_priv_data->sas_target->handle); 3817 3818 sas_device_priv_data->block = 0; 3819 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING); 3820 if (r) 3821 sdev_printk(KERN_WARNING, sdev, "retried device_unblock" 3822 " failed with return(%d) for handle(0x%04x)\n", 3823 r, sas_device_priv_data->sas_target->handle); 3824 } 3825 } 3826 3827 /** 3828 * _scsih_ublock_io_all_device - unblock every device 3829 * @ioc: per adapter object 3830 * 3831 * change the device state from block to running 3832 */ 3833 static void 3834 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) 3835 { 3836 struct MPT3SAS_DEVICE *sas_device_priv_data; 3837 struct scsi_device *sdev; 3838 3839 shost_for_each_device(sdev, ioc->shost) { 3840 sas_device_priv_data = sdev->hostdata; 3841 if (!sas_device_priv_data) 3842 continue; 3843 if (!sas_device_priv_data->block) 3844 continue; 3845 3846 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, 3847 "device_running, handle(0x%04x)\n", 3848 sas_device_priv_data->sas_target->handle)); 3849 _scsih_internal_device_unblock(sdev, sas_device_priv_data); 3850 } 3851 } 3852 3853 3854 /** 3855 * _scsih_ublock_io_device - prepare device to be deleted 3856 * @ioc: per adapter object 3857 * @sas_address: sas address 3858 * @port: hba port entry 3859 * 3860 * unblock then put device in offline state 3861 */ 3862 static void 3863 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, 3864 u64 sas_address, struct hba_port *port) 3865 { 
3866 struct MPT3SAS_DEVICE *sas_device_priv_data; 3867 struct scsi_device *sdev; 3868 3869 shost_for_each_device(sdev, ioc->shost) { 3870 sas_device_priv_data = sdev->hostdata; 3871 if (!sas_device_priv_data) 3872 continue; 3873 if (sas_device_priv_data->sas_target->sas_address 3874 != sas_address) 3875 continue; 3876 if (sas_device_priv_data->sas_target->port != port) 3877 continue; 3878 if (sas_device_priv_data->block) 3879 _scsih_internal_device_unblock(sdev, 3880 sas_device_priv_data); 3881 } 3882 } 3883 3884 /** 3885 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK 3886 * @ioc: per adapter object 3887 * 3888 * During device pull we need to appropriately set the sdev state. 3889 */ 3890 static void 3891 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) 3892 { 3893 struct MPT3SAS_DEVICE *sas_device_priv_data; 3894 struct scsi_device *sdev; 3895 3896 shost_for_each_device(sdev, ioc->shost) { 3897 sas_device_priv_data = sdev->hostdata; 3898 if (!sas_device_priv_data) 3899 continue; 3900 if (sas_device_priv_data->block) 3901 continue; 3902 if (sas_device_priv_data->ignore_delay_remove) { 3903 sdev_printk(KERN_INFO, sdev, 3904 "%s skip device_block for SES handle(0x%04x)\n", 3905 __func__, sas_device_priv_data->sas_target->handle); 3906 continue; 3907 } 3908 _scsih_internal_device_block(sdev, sas_device_priv_data); 3909 } 3910 } 3911 3912 /** 3913 * _scsih_block_io_device - set the device state to SDEV_BLOCK 3914 * @ioc: per adapter object 3915 * @handle: device handle 3916 * 3917 * During device pull we need to appropriately set the sdev state. 
3918 */ 3919 static void 3920 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 3921 { 3922 struct MPT3SAS_DEVICE *sas_device_priv_data; 3923 struct scsi_device *sdev; 3924 struct _sas_device *sas_device; 3925 3926 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 3927 3928 shost_for_each_device(sdev, ioc->shost) { 3929 sas_device_priv_data = sdev->hostdata; 3930 if (!sas_device_priv_data) 3931 continue; 3932 if (sas_device_priv_data->sas_target->handle != handle) 3933 continue; 3934 if (sas_device_priv_data->block) 3935 continue; 3936 if (sas_device && sas_device->pend_sas_rphy_add) 3937 continue; 3938 if (sas_device_priv_data->ignore_delay_remove) { 3939 sdev_printk(KERN_INFO, sdev, 3940 "%s skip device_block for SES handle(0x%04x)\n", 3941 __func__, sas_device_priv_data->sas_target->handle); 3942 continue; 3943 } 3944 _scsih_internal_device_block(sdev, sas_device_priv_data); 3945 } 3946 3947 if (sas_device) 3948 sas_device_put(sas_device); 3949 } 3950 3951 /** 3952 * _scsih_block_io_to_children_attached_to_ex 3953 * @ioc: per adapter object 3954 * @sas_expander: the sas_device object 3955 * 3956 * This routine set sdev state to SDEV_BLOCK for all devices 3957 * attached to this expander. This function called when expander is 3958 * pulled. 
3959 */ 3960 static void 3961 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc, 3962 struct _sas_node *sas_expander) 3963 { 3964 struct _sas_port *mpt3sas_port; 3965 struct _sas_device *sas_device; 3966 struct _sas_node *expander_sibling; 3967 unsigned long flags; 3968 3969 if (!sas_expander) 3970 return; 3971 3972 list_for_each_entry(mpt3sas_port, 3973 &sas_expander->sas_port_list, port_list) { 3974 if (mpt3sas_port->remote_identify.device_type == 3975 SAS_END_DEVICE) { 3976 spin_lock_irqsave(&ioc->sas_device_lock, flags); 3977 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 3978 mpt3sas_port->remote_identify.sas_address, 3979 mpt3sas_port->hba_port); 3980 if (sas_device) { 3981 set_bit(sas_device->handle, 3982 ioc->blocking_handles); 3983 sas_device_put(sas_device); 3984 } 3985 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 3986 } 3987 } 3988 3989 list_for_each_entry(mpt3sas_port, 3990 &sas_expander->sas_port_list, port_list) { 3991 3992 if (mpt3sas_port->remote_identify.device_type == 3993 SAS_EDGE_EXPANDER_DEVICE || 3994 mpt3sas_port->remote_identify.device_type == 3995 SAS_FANOUT_EXPANDER_DEVICE) { 3996 expander_sibling = 3997 mpt3sas_scsih_expander_find_by_sas_address( 3998 ioc, mpt3sas_port->remote_identify.sas_address, 3999 mpt3sas_port->hba_port); 4000 _scsih_block_io_to_children_attached_to_ex(ioc, 4001 expander_sibling); 4002 } 4003 } 4004 } 4005 4006 /** 4007 * _scsih_block_io_to_children_attached_directly 4008 * @ioc: per adapter object 4009 * @event_data: topology change event data 4010 * 4011 * This routine set sdev state to SDEV_BLOCK for all devices 4012 * direct attached during device pull. 
4013 */ 4014 static void 4015 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, 4016 Mpi2EventDataSasTopologyChangeList_t *event_data) 4017 { 4018 int i; 4019 u16 handle; 4020 u16 reason_code; 4021 4022 for (i = 0; i < event_data->NumEntries; i++) { 4023 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 4024 if (!handle) 4025 continue; 4026 reason_code = event_data->PHY[i].PhyStatus & 4027 MPI2_EVENT_SAS_TOPO_RC_MASK; 4028 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) 4029 _scsih_block_io_device(ioc, handle); 4030 } 4031 } 4032 4033 /** 4034 * _scsih_block_io_to_pcie_children_attached_directly 4035 * @ioc: per adapter object 4036 * @event_data: topology change event data 4037 * 4038 * This routine set sdev state to SDEV_BLOCK for all devices 4039 * direct attached during device pull/reconnect. 4040 */ 4041 static void 4042 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, 4043 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 4044 { 4045 int i; 4046 u16 handle; 4047 u16 reason_code; 4048 4049 for (i = 0; i < event_data->NumEntries; i++) { 4050 handle = 4051 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 4052 if (!handle) 4053 continue; 4054 reason_code = event_data->PortEntry[i].PortStatus; 4055 if (reason_code == 4056 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) 4057 _scsih_block_io_device(ioc, handle); 4058 } 4059 } 4060 /** 4061 * _scsih_tm_tr_send - send task management request 4062 * @ioc: per adapter object 4063 * @handle: device handle 4064 * Context: interrupt time. 4065 * 4066 * This code is to initiate the device removal handshake protocol 4067 * with controller firmware. This function will issue target reset 4068 * using high priority request queue. It will send a sas iounit 4069 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion. 4070 * 4071 * This is designed to send muliple task management request at the same 4072 * time to the fifo. 
If the fifo is full, we will append the request, 4073 * and process it in a future completion. 4074 */ 4075 static void 4076 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) 4077 { 4078 Mpi2SCSITaskManagementRequest_t *mpi_request; 4079 u16 smid; 4080 struct _sas_device *sas_device = NULL; 4081 struct _pcie_device *pcie_device = NULL; 4082 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 4083 u64 sas_address = 0; 4084 unsigned long flags; 4085 struct _tr_list *delayed_tr; 4086 u32 ioc_state; 4087 u8 tr_method = 0; 4088 struct hba_port *port = NULL; 4089 4090 if (ioc->pci_error_recovery) { 4091 dewtprintk(ioc, 4092 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n", 4093 __func__, handle)); 4094 return; 4095 } 4096 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4097 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4098 dewtprintk(ioc, 4099 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n", 4100 __func__, handle)); 4101 return; 4102 } 4103 4104 /* if PD, then return */ 4105 if (test_bit(handle, ioc->pd_handles)) 4106 return; 4107 4108 clear_bit(handle, ioc->pend_os_device_add); 4109 4110 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4111 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 4112 if (sas_device && sas_device->starget && 4113 sas_device->starget->hostdata) { 4114 sas_target_priv_data = sas_device->starget->hostdata; 4115 sas_target_priv_data->deleted = 1; 4116 sas_address = sas_device->sas_address; 4117 port = sas_device->port; 4118 } 4119 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4120 if (!sas_device) { 4121 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 4122 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); 4123 if (pcie_device && pcie_device->starget && 4124 pcie_device->starget->hostdata) { 4125 sas_target_priv_data = pcie_device->starget->hostdata; 4126 sas_target_priv_data->deleted = 1; 4127 sas_address = pcie_device->wwid; 4128 } 4129 spin_unlock_irqrestore(&ioc->pcie_device_lock, 
flags); 4130 if (pcie_device && (!ioc->tm_custom_handling) && 4131 (!(mpt3sas_scsih_is_pcie_scsi_device( 4132 pcie_device->device_info)))) 4133 tr_method = 4134 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; 4135 else 4136 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 4137 } 4138 if (sas_target_priv_data) { 4139 dewtprintk(ioc, 4140 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", 4141 handle, (u64)sas_address)); 4142 if (sas_device) { 4143 if (sas_device->enclosure_handle != 0) 4144 dewtprintk(ioc, 4145 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n", 4146 (u64)sas_device->enclosure_logical_id, 4147 sas_device->slot)); 4148 if (sas_device->connector_name[0] != '\0') 4149 dewtprintk(ioc, 4150 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n", 4151 sas_device->enclosure_level, 4152 sas_device->connector_name)); 4153 } else if (pcie_device) { 4154 if (pcie_device->enclosure_handle != 0) 4155 dewtprintk(ioc, 4156 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n", 4157 (u64)pcie_device->enclosure_logical_id, 4158 pcie_device->slot)); 4159 if (pcie_device->connector_name[0] != '\0') 4160 dewtprintk(ioc, 4161 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n", 4162 pcie_device->enclosure_level, 4163 pcie_device->connector_name)); 4164 } 4165 _scsih_ublock_io_device(ioc, sas_address, port); 4166 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 4167 } 4168 4169 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); 4170 if (!smid) { 4171 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 4172 if (!delayed_tr) 4173 goto out; 4174 INIT_LIST_HEAD(&delayed_tr->list); 4175 delayed_tr->handle = handle; 4176 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); 4177 dewtprintk(ioc, 4178 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", 4179 handle)); 4180 goto out; 4181 } 4182 4183 dewtprintk(ioc, 
4184 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4185 handle, smid, ioc->tm_tr_cb_idx)); 4186 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4187 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 4188 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 4189 mpi_request->DevHandle = cpu_to_le16(handle); 4190 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 4191 mpi_request->MsgFlags = tr_method; 4192 set_bit(handle, ioc->device_remove_in_progress); 4193 ioc->put_smid_hi_priority(ioc, smid, 0); 4194 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); 4195 4196 out: 4197 if (sas_device) 4198 sas_device_put(sas_device); 4199 if (pcie_device) 4200 pcie_device_put(pcie_device); 4201 } 4202 4203 /** 4204 * _scsih_tm_tr_complete - 4205 * @ioc: per adapter object 4206 * @smid: system request message index 4207 * @msix_index: MSIX table index supplied by the OS 4208 * @reply: reply message frame(lower 32bit addr) 4209 * Context: interrupt time. 4210 * 4211 * This is the target reset completion routine. 4212 * This code is part of the code to initiate the device removal 4213 * handshake protocol with controller firmware. 4214 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) 4215 * 4216 * Return: 1 meaning mf should be freed from _base_interrupt 4217 * 0 means the mf is freed from this function. 
4218 */ 4219 static u8 4220 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 4221 u32 reply) 4222 { 4223 u16 handle; 4224 Mpi2SCSITaskManagementRequest_t *mpi_request_tm; 4225 Mpi2SCSITaskManagementReply_t *mpi_reply = 4226 mpt3sas_base_get_reply_virt_addr(ioc, reply); 4227 Mpi2SasIoUnitControlRequest_t *mpi_request; 4228 u16 smid_sas_ctrl; 4229 u32 ioc_state; 4230 struct _sc_list *delayed_sc; 4231 4232 if (ioc->pci_error_recovery) { 4233 dewtprintk(ioc, 4234 ioc_info(ioc, "%s: host in pci error recovery\n", 4235 __func__)); 4236 return 1; 4237 } 4238 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4239 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4240 dewtprintk(ioc, 4241 ioc_info(ioc, "%s: host is not operational\n", 4242 __func__)); 4243 return 1; 4244 } 4245 if (unlikely(!mpi_reply)) { 4246 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 4247 __FILE__, __LINE__, __func__); 4248 return 1; 4249 } 4250 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); 4251 handle = le16_to_cpu(mpi_request_tm->DevHandle); 4252 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 4253 dewtprintk(ioc, 4254 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", 4255 handle, 4256 le16_to_cpu(mpi_reply->DevHandle), smid)); 4257 return 0; 4258 } 4259 4260 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); 4261 dewtprintk(ioc, 4262 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", 4263 handle, smid, le16_to_cpu(mpi_reply->IOCStatus), 4264 le32_to_cpu(mpi_reply->IOCLogInfo), 4265 le32_to_cpu(mpi_reply->TerminationCount))); 4266 4267 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); 4268 if (!smid_sas_ctrl) { 4269 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC); 4270 if (!delayed_sc) 4271 return _scsih_check_for_pending_tm(ioc, smid); 4272 INIT_LIST_HEAD(&delayed_sc->list); 4273 delayed_sc->handle = 
le16_to_cpu(mpi_request_tm->DevHandle); 4274 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list); 4275 dewtprintk(ioc, 4276 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n", 4277 handle)); 4278 return _scsih_check_for_pending_tm(ioc, smid); 4279 } 4280 4281 dewtprintk(ioc, 4282 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4283 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx)); 4284 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); 4285 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 4286 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 4287 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 4288 mpi_request->DevHandle = mpi_request_tm->DevHandle; 4289 ioc->put_smid_default(ioc, smid_sas_ctrl); 4290 4291 return _scsih_check_for_pending_tm(ioc, smid); 4292 } 4293 4294 /** _scsih_allow_scmd_to_device - check whether scmd needs to 4295 * issue to IOC or not. 4296 * @ioc: per adapter object 4297 * @scmd: pointer to scsi command object 4298 * 4299 * Returns true if scmd can be issued to IOC otherwise returns false. 4300 */ 4301 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc, 4302 struct scsi_cmnd *scmd) 4303 { 4304 4305 if (ioc->pci_error_recovery) 4306 return false; 4307 4308 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) { 4309 if (ioc->remove_host) 4310 return false; 4311 4312 return true; 4313 } 4314 4315 if (ioc->remove_host) { 4316 4317 switch (scmd->cmnd[0]) { 4318 case SYNCHRONIZE_CACHE: 4319 case START_STOP: 4320 return true; 4321 default: 4322 return false; 4323 } 4324 } 4325 4326 return true; 4327 } 4328 4329 /** 4330 * _scsih_sas_control_complete - completion routine 4331 * @ioc: per adapter object 4332 * @smid: system request message index 4333 * @msix_index: MSIX table index supplied by the OS 4334 * @reply: reply message frame(lower 32bit addr) 4335 * Context: interrupt time. 4336 * 4337 * This is the sas iounit control completion routine. 
4338 * This code is part of the code to initiate the device removal 4339 * handshake protocol with controller firmware. 4340 * 4341 * Return: 1 meaning mf should be freed from _base_interrupt 4342 * 0 means the mf is freed from this function. 4343 */ 4344 static u8 4345 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4346 u8 msix_index, u32 reply) 4347 { 4348 Mpi2SasIoUnitControlReply_t *mpi_reply = 4349 mpt3sas_base_get_reply_virt_addr(ioc, reply); 4350 4351 if (likely(mpi_reply)) { 4352 dewtprintk(ioc, 4353 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 4354 le16_to_cpu(mpi_reply->DevHandle), smid, 4355 le16_to_cpu(mpi_reply->IOCStatus), 4356 le32_to_cpu(mpi_reply->IOCLogInfo))); 4357 if (le16_to_cpu(mpi_reply->IOCStatus) == 4358 MPI2_IOCSTATUS_SUCCESS) { 4359 clear_bit(le16_to_cpu(mpi_reply->DevHandle), 4360 ioc->device_remove_in_progress); 4361 } 4362 } else { 4363 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 4364 __FILE__, __LINE__, __func__); 4365 } 4366 return mpt3sas_check_for_pending_internal_cmds(ioc, smid); 4367 } 4368 4369 /** 4370 * _scsih_tm_tr_volume_send - send target reset request for volumes 4371 * @ioc: per adapter object 4372 * @handle: device handle 4373 * Context: interrupt time. 4374 * 4375 * This is designed to send muliple task management request at the same 4376 * time to the fifo. If the fifo is full, we will append the request, 4377 * and process it in a future completion. 
4378 */ 4379 static void 4380 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) 4381 { 4382 Mpi2SCSITaskManagementRequest_t *mpi_request; 4383 u16 smid; 4384 struct _tr_list *delayed_tr; 4385 4386 if (ioc->pci_error_recovery) { 4387 dewtprintk(ioc, 4388 ioc_info(ioc, "%s: host reset in progress!\n", 4389 __func__)); 4390 return; 4391 } 4392 4393 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); 4394 if (!smid) { 4395 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 4396 if (!delayed_tr) 4397 return; 4398 INIT_LIST_HEAD(&delayed_tr->list); 4399 delayed_tr->handle = handle; 4400 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); 4401 dewtprintk(ioc, 4402 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", 4403 handle)); 4404 return; 4405 } 4406 4407 dewtprintk(ioc, 4408 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4409 handle, smid, ioc->tm_tr_volume_cb_idx)); 4410 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4411 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 4412 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 4413 mpi_request->DevHandle = cpu_to_le16(handle); 4414 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 4415 ioc->put_smid_hi_priority(ioc, smid, 0); 4416 } 4417 4418 /** 4419 * _scsih_tm_volume_tr_complete - target reset completion 4420 * @ioc: per adapter object 4421 * @smid: system request message index 4422 * @msix_index: MSIX table index supplied by the OS 4423 * @reply: reply message frame(lower 32bit addr) 4424 * Context: interrupt time. 4425 * 4426 * Return: 1 meaning mf should be freed from _base_interrupt 4427 * 0 means the mf is freed from this function. 
4428 */ 4429 static u8 4430 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4431 u8 msix_index, u32 reply) 4432 { 4433 u16 handle; 4434 Mpi2SCSITaskManagementRequest_t *mpi_request_tm; 4435 Mpi2SCSITaskManagementReply_t *mpi_reply = 4436 mpt3sas_base_get_reply_virt_addr(ioc, reply); 4437 4438 if (ioc->shost_recovery || ioc->pci_error_recovery) { 4439 dewtprintk(ioc, 4440 ioc_info(ioc, "%s: host reset in progress!\n", 4441 __func__)); 4442 return 1; 4443 } 4444 if (unlikely(!mpi_reply)) { 4445 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 4446 __FILE__, __LINE__, __func__); 4447 return 1; 4448 } 4449 4450 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); 4451 handle = le16_to_cpu(mpi_request_tm->DevHandle); 4452 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 4453 dewtprintk(ioc, 4454 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", 4455 handle, le16_to_cpu(mpi_reply->DevHandle), 4456 smid)); 4457 return 0; 4458 } 4459 4460 dewtprintk(ioc, 4461 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", 4462 handle, smid, le16_to_cpu(mpi_reply->IOCStatus), 4463 le32_to_cpu(mpi_reply->IOCLogInfo), 4464 le32_to_cpu(mpi_reply->TerminationCount))); 4465 4466 return _scsih_check_for_pending_tm(ioc, smid); 4467 } 4468 4469 /** 4470 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages 4471 * @ioc: per adapter object 4472 * @smid: system request message index 4473 * @event: Event ID 4474 * @event_context: used to track events uniquely 4475 * 4476 * Context - processed in interrupt context. 
4477 */ 4478 static void 4479 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, 4480 U32 event_context) 4481 { 4482 Mpi2EventAckRequest_t *ack_request; 4483 int i = smid - ioc->internal_smid; 4484 unsigned long flags; 4485 4486 /* Without releasing the smid just update the 4487 * call back index and reuse the same smid for 4488 * processing this delayed request 4489 */ 4490 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4491 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; 4492 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4493 4494 dewtprintk(ioc, 4495 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", 4496 le16_to_cpu(event), smid, ioc->base_cb_idx)); 4497 ack_request = mpt3sas_base_get_msg_frame(ioc, smid); 4498 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); 4499 ack_request->Function = MPI2_FUNCTION_EVENT_ACK; 4500 ack_request->Event = event; 4501 ack_request->EventContext = event_context; 4502 ack_request->VF_ID = 0; /* TODO */ 4503 ack_request->VP_ID = 0; 4504 ioc->put_smid_default(ioc, smid); 4505 } 4506 4507 /** 4508 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed 4509 * sas_io_unit_ctrl messages 4510 * @ioc: per adapter object 4511 * @smid: system request message index 4512 * @handle: device handle 4513 * 4514 * Context - processed in interrupt context. 
4515 */ 4516 static void 4517 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, 4518 u16 smid, u16 handle) 4519 { 4520 Mpi2SasIoUnitControlRequest_t *mpi_request; 4521 u32 ioc_state; 4522 int i = smid - ioc->internal_smid; 4523 unsigned long flags; 4524 4525 if (ioc->remove_host) { 4526 dewtprintk(ioc, 4527 ioc_info(ioc, "%s: host has been removed\n", 4528 __func__)); 4529 return; 4530 } else if (ioc->pci_error_recovery) { 4531 dewtprintk(ioc, 4532 ioc_info(ioc, "%s: host in pci error recovery\n", 4533 __func__)); 4534 return; 4535 } 4536 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4537 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4538 dewtprintk(ioc, 4539 ioc_info(ioc, "%s: host is not operational\n", 4540 __func__)); 4541 return; 4542 } 4543 4544 /* Without releasing the smid just update the 4545 * call back index and reuse the same smid for 4546 * processing this delayed request 4547 */ 4548 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4549 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; 4550 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4551 4552 dewtprintk(ioc, 4553 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4554 handle, smid, ioc->tm_sas_control_cb_idx)); 4555 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4556 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 4557 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 4558 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 4559 mpi_request->DevHandle = cpu_to_le16(handle); 4560 ioc->put_smid_default(ioc, smid); 4561 } 4562 4563 /** 4564 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages 4565 * @ioc: per adapter object 4566 * @smid: system request message index 4567 * 4568 * Context: Executed in interrupt context 4569 * 4570 * This will check delayed internal messages list, and process the 4571 * next request. 
4572 * 4573 * Return: 1 meaning mf should be freed from _base_interrupt 4574 * 0 means the mf is freed from this function. 4575 */ 4576 u8 4577 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4578 { 4579 struct _sc_list *delayed_sc; 4580 struct _event_ack_list *delayed_event_ack; 4581 4582 if (!list_empty(&ioc->delayed_event_ack_list)) { 4583 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, 4584 struct _event_ack_list, list); 4585 _scsih_issue_delayed_event_ack(ioc, smid, 4586 delayed_event_ack->Event, delayed_event_ack->EventContext); 4587 list_del(&delayed_event_ack->list); 4588 kfree(delayed_event_ack); 4589 return 0; 4590 } 4591 4592 if (!list_empty(&ioc->delayed_sc_list)) { 4593 delayed_sc = list_entry(ioc->delayed_sc_list.next, 4594 struct _sc_list, list); 4595 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, 4596 delayed_sc->handle); 4597 list_del(&delayed_sc->list); 4598 kfree(delayed_sc); 4599 return 0; 4600 } 4601 return 1; 4602 } 4603 4604 /** 4605 * _scsih_check_for_pending_tm - check for pending task management 4606 * @ioc: per adapter object 4607 * @smid: system request message index 4608 * 4609 * This will check delayed target reset list, and feed the 4610 * next reqeust. 4611 * 4612 * Return: 1 meaning mf should be freed from _base_interrupt 4613 * 0 means the mf is freed from this function. 
4614 */ 4615 static u8 4616 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4617 { 4618 struct _tr_list *delayed_tr; 4619 4620 if (!list_empty(&ioc->delayed_tr_volume_list)) { 4621 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, 4622 struct _tr_list, list); 4623 mpt3sas_base_free_smid(ioc, smid); 4624 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); 4625 list_del(&delayed_tr->list); 4626 kfree(delayed_tr); 4627 return 0; 4628 } 4629 4630 if (!list_empty(&ioc->delayed_tr_list)) { 4631 delayed_tr = list_entry(ioc->delayed_tr_list.next, 4632 struct _tr_list, list); 4633 mpt3sas_base_free_smid(ioc, smid); 4634 _scsih_tm_tr_send(ioc, delayed_tr->handle); 4635 list_del(&delayed_tr->list); 4636 kfree(delayed_tr); 4637 return 0; 4638 } 4639 4640 return 1; 4641 } 4642 4643 /** 4644 * _scsih_check_topo_delete_events - sanity check on topo events 4645 * @ioc: per adapter object 4646 * @event_data: the event data payload 4647 * 4648 * This routine added to better handle cable breaker. 4649 * 4650 * This handles the case where driver receives multiple expander 4651 * add and delete events in a single shot. When there is a delete event 4652 * the routine will void any pending add events waiting in the event queue. 
4653 */ 4654 static void 4655 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, 4656 Mpi2EventDataSasTopologyChangeList_t *event_data) 4657 { 4658 struct fw_event_work *fw_event; 4659 Mpi2EventDataSasTopologyChangeList_t *local_event_data; 4660 u16 expander_handle; 4661 struct _sas_node *sas_expander; 4662 unsigned long flags; 4663 int i, reason_code; 4664 u16 handle; 4665 4666 for (i = 0 ; i < event_data->NumEntries; i++) { 4667 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 4668 if (!handle) 4669 continue; 4670 reason_code = event_data->PHY[i].PhyStatus & 4671 MPI2_EVENT_SAS_TOPO_RC_MASK; 4672 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) 4673 _scsih_tm_tr_send(ioc, handle); 4674 } 4675 4676 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); 4677 if (expander_handle < ioc->sas_hba.num_phys) { 4678 _scsih_block_io_to_children_attached_directly(ioc, event_data); 4679 return; 4680 } 4681 if (event_data->ExpStatus == 4682 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { 4683 /* put expander attached devices into blocking state */ 4684 spin_lock_irqsave(&ioc->sas_node_lock, flags); 4685 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, 4686 expander_handle); 4687 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); 4688 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 4689 do { 4690 handle = find_first_bit(ioc->blocking_handles, 4691 ioc->facts.MaxDevHandle); 4692 if (handle < ioc->facts.MaxDevHandle) 4693 _scsih_block_io_device(ioc, handle); 4694 } while (test_and_clear_bit(handle, ioc->blocking_handles)); 4695 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) 4696 _scsih_block_io_to_children_attached_directly(ioc, event_data); 4697 4698 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) 4699 return; 4700 4701 /* mark ignore flag for pending events */ 4702 spin_lock_irqsave(&ioc->fw_event_lock, flags); 4703 list_for_each_entry(fw_event, &ioc->fw_event_list, 
list) { 4704 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || 4705 fw_event->ignore) 4706 continue; 4707 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) 4708 fw_event->event_data; 4709 if (local_event_data->ExpStatus == 4710 MPI2_EVENT_SAS_TOPO_ES_ADDED || 4711 local_event_data->ExpStatus == 4712 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { 4713 if (le16_to_cpu(local_event_data->ExpanderDevHandle) == 4714 expander_handle) { 4715 dewtprintk(ioc, 4716 ioc_info(ioc, "setting ignoring flag\n")); 4717 fw_event->ignore = 1; 4718 } 4719 } 4720 } 4721 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 4722 } 4723 4724 /** 4725 * _scsih_check_pcie_topo_remove_events - sanity check on topo 4726 * events 4727 * @ioc: per adapter object 4728 * @event_data: the event data payload 4729 * 4730 * This handles the case where driver receives multiple switch 4731 * or device add and delete events in a single shot. When there 4732 * is a delete event the routine will void any pending add 4733 * events waiting in the event queue. 
4734 */ 4735 static void 4736 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, 4737 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 4738 { 4739 struct fw_event_work *fw_event; 4740 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data; 4741 unsigned long flags; 4742 int i, reason_code; 4743 u16 handle, switch_handle; 4744 4745 for (i = 0; i < event_data->NumEntries; i++) { 4746 handle = 4747 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 4748 if (!handle) 4749 continue; 4750 reason_code = event_data->PortEntry[i].PortStatus; 4751 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING) 4752 _scsih_tm_tr_send(ioc, handle); 4753 } 4754 4755 switch_handle = le16_to_cpu(event_data->SwitchDevHandle); 4756 if (!switch_handle) { 4757 _scsih_block_io_to_pcie_children_attached_directly( 4758 ioc, event_data); 4759 return; 4760 } 4761 /* TODO We are not supporting cascaded PCIe Switch removal yet*/ 4762 if ((event_data->SwitchStatus 4763 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) || 4764 (event_data->SwitchStatus == 4765 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) 4766 _scsih_block_io_to_pcie_children_attached_directly( 4767 ioc, event_data); 4768 4769 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) 4770 return; 4771 4772 /* mark ignore flag for pending events */ 4773 spin_lock_irqsave(&ioc->fw_event_lock, flags); 4774 list_for_each_entry(fw_event, &ioc->fw_event_list, list) { 4775 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || 4776 fw_event->ignore) 4777 continue; 4778 local_event_data = 4779 (Mpi26EventDataPCIeTopologyChangeList_t *) 4780 fw_event->event_data; 4781 if (local_event_data->SwitchStatus == 4782 MPI2_EVENT_SAS_TOPO_ES_ADDED || 4783 local_event_data->SwitchStatus == 4784 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { 4785 if (le16_to_cpu(local_event_data->SwitchDevHandle) == 4786 switch_handle) { 4787 dewtprintk(ioc, 4788 ioc_info(ioc, "setting ignoring flag for switch event\n")); 4789 
fw_event->ignore = 1; 4790 } 4791 } 4792 } 4793 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 4794 } 4795 4796 /** 4797 * _scsih_set_volume_delete_flag - setting volume delete flag 4798 * @ioc: per adapter object 4799 * @handle: device handle 4800 * 4801 * This returns nothing. 4802 */ 4803 static void 4804 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 4805 { 4806 struct _raid_device *raid_device; 4807 struct MPT3SAS_TARGET *sas_target_priv_data; 4808 unsigned long flags; 4809 4810 spin_lock_irqsave(&ioc->raid_device_lock, flags); 4811 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 4812 if (raid_device && raid_device->starget && 4813 raid_device->starget->hostdata) { 4814 sas_target_priv_data = 4815 raid_device->starget->hostdata; 4816 sas_target_priv_data->deleted = 1; 4817 dewtprintk(ioc, 4818 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", 4819 handle, (u64)raid_device->wwid)); 4820 } 4821 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 4822 } 4823 4824 /** 4825 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume 4826 * @handle: input handle 4827 * @a: handle for volume a 4828 * @b: handle for volume b 4829 * 4830 * IR firmware only supports two raid volumes. The purpose of this 4831 * routine is to set the volume handle in either a or b. When the given 4832 * input handle is non-zero, or when a and b have not been set before. 4833 */ 4834 static void 4835 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) 4836 { 4837 if (!handle || handle == *a || handle == *b) 4838 return; 4839 if (!*a) 4840 *a = handle; 4841 else if (!*b) 4842 *b = handle; 4843 } 4844 4845 /** 4846 * _scsih_check_ir_config_unhide_events - check for UNHIDE events 4847 * @ioc: per adapter object 4848 * @event_data: the event data payload 4849 * Context: interrupt time. 4850 * 4851 * This routine will send target reset to volume, followed by target 4852 * resets to the PDs. 
This is called when a PD has been removed, or 4853 * volume has been deleted or removed. When the target reset is sent 4854 * to volume, the PD target resets need to be queued to start upon 4855 * completion of the volume target reset. 4856 */ 4857 static void 4858 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, 4859 Mpi2EventDataIrConfigChangeList_t *event_data) 4860 { 4861 Mpi2EventIrConfigElement_t *element; 4862 int i; 4863 u16 handle, volume_handle, a, b; 4864 struct _tr_list *delayed_tr; 4865 4866 a = 0; 4867 b = 0; 4868 4869 if (ioc->is_warpdrive) 4870 return; 4871 4872 /* Volume Resets for Deleted or Removed */ 4873 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 4874 for (i = 0; i < event_data->NumElements; i++, element++) { 4875 if (le32_to_cpu(event_data->Flags) & 4876 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) 4877 continue; 4878 if (element->ReasonCode == 4879 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || 4880 element->ReasonCode == 4881 MPI2_EVENT_IR_CHANGE_RC_REMOVED) { 4882 volume_handle = le16_to_cpu(element->VolDevHandle); 4883 _scsih_set_volume_delete_flag(ioc, volume_handle); 4884 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); 4885 } 4886 } 4887 4888 /* Volume Resets for UNHIDE events */ 4889 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 4890 for (i = 0; i < event_data->NumElements; i++, element++) { 4891 if (le32_to_cpu(event_data->Flags) & 4892 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) 4893 continue; 4894 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { 4895 volume_handle = le16_to_cpu(element->VolDevHandle); 4896 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); 4897 } 4898 } 4899 4900 if (a) 4901 _scsih_tm_tr_volume_send(ioc, a); 4902 if (b) 4903 _scsih_tm_tr_volume_send(ioc, b); 4904 4905 /* PD target resets */ 4906 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 4907 for (i = 0; i < event_data->NumElements; i++, element++) { 
4908 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) 4909 continue; 4910 handle = le16_to_cpu(element->PhysDiskDevHandle); 4911 volume_handle = le16_to_cpu(element->VolDevHandle); 4912 clear_bit(handle, ioc->pd_handles); 4913 if (!volume_handle) 4914 _scsih_tm_tr_send(ioc, handle); 4915 else if (volume_handle == a || volume_handle == b) { 4916 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 4917 BUG_ON(!delayed_tr); 4918 INIT_LIST_HEAD(&delayed_tr->list); 4919 delayed_tr->handle = handle; 4920 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); 4921 dewtprintk(ioc, 4922 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", 4923 handle)); 4924 } else 4925 _scsih_tm_tr_send(ioc, handle); 4926 } 4927 } 4928 4929 4930 /** 4931 * _scsih_check_volume_delete_events - set delete flag for volumes 4932 * @ioc: per adapter object 4933 * @event_data: the event data payload 4934 * Context: interrupt time. 4935 * 4936 * This will handle the case when the cable connected to entire volume is 4937 * pulled. We will take care of setting the deleted flag so normal IO will 4938 * not be sent. 4939 */ 4940 static void 4941 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, 4942 Mpi2EventDataIrVolume_t *event_data) 4943 { 4944 u32 state; 4945 4946 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 4947 return; 4948 state = le32_to_cpu(event_data->NewValue); 4949 if (state == MPI2_RAID_VOL_STATE_MISSING || state == 4950 MPI2_RAID_VOL_STATE_FAILED) 4951 _scsih_set_volume_delete_flag(ioc, 4952 le16_to_cpu(event_data->VolDevHandle)); 4953 } 4954 4955 /** 4956 * _scsih_temp_threshold_events - display temperature threshold exceeded events 4957 * @ioc: per adapter object 4958 * @event_data: the temp threshold event data 4959 * Context: interrupt time. 
4960 */ 4961 static void 4962 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, 4963 Mpi2EventDataTemperature_t *event_data) 4964 { 4965 u32 doorbell; 4966 if (ioc->temp_sensors_count >= event_data->SensorNum) { 4967 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", 4968 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ", 4969 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ", 4970 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ", 4971 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ", 4972 event_data->SensorNum); 4973 ioc_err(ioc, "Current Temp In Celsius: %d\n", 4974 event_data->CurrentTemperature); 4975 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 4976 doorbell = mpt3sas_base_get_iocstate(ioc, 0); 4977 if ((doorbell & MPI2_IOC_STATE_MASK) == 4978 MPI2_IOC_STATE_FAULT) { 4979 mpt3sas_print_fault_code(ioc, 4980 doorbell & MPI2_DOORBELL_DATA_MASK); 4981 } else if ((doorbell & MPI2_IOC_STATE_MASK) == 4982 MPI2_IOC_STATE_COREDUMP) { 4983 mpt3sas_print_coredump_info(ioc, 4984 doorbell & MPI2_DOORBELL_DATA_MASK); 4985 } 4986 } 4987 } 4988 } 4989 4990 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) 4991 { 4992 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; 4993 4994 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) 4995 return 0; 4996 4997 if (pending) 4998 return test_and_set_bit(0, &priv->ata_command_pending); 4999 5000 clear_bit(0, &priv->ata_command_pending); 5001 return 0; 5002 } 5003 5004 /** 5005 * _scsih_flush_running_cmds - completing outstanding commands. 5006 * @ioc: per adapter object 5007 * 5008 * The flushing out of all pending scmd commands following host reset, 5009 * where all IO is dropped to the floor. 
5010 */ 5011 static void 5012 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) 5013 { 5014 struct scsi_cmnd *scmd; 5015 struct scsiio_tracker *st; 5016 u16 smid; 5017 int count = 0; 5018 5019 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 5020 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 5021 if (!scmd) 5022 continue; 5023 count++; 5024 _scsih_set_satl_pending(scmd, false); 5025 st = scsi_cmd_priv(scmd); 5026 mpt3sas_base_clear_st(ioc, st); 5027 scsi_dma_unmap(scmd); 5028 if (ioc->pci_error_recovery || ioc->remove_host) 5029 scmd->result = DID_NO_CONNECT << 16; 5030 else 5031 scmd->result = DID_RESET << 16; 5032 scmd->scsi_done(scmd); 5033 } 5034 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); 5035 } 5036 5037 /** 5038 * _scsih_setup_eedp - setup MPI request for EEDP transfer 5039 * @ioc: per adapter object 5040 * @scmd: pointer to scsi command object 5041 * @mpi_request: pointer to the SCSI_IO request message frame 5042 * 5043 * Supporting protection 1 and 3. 
5044 */ 5045 static void 5046 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 5047 Mpi25SCSIIORequest_t *mpi_request) 5048 { 5049 u16 eedp_flags; 5050 unsigned char prot_op = scsi_get_prot_op(scmd); 5051 unsigned char prot_type = scsi_get_prot_type(scmd); 5052 Mpi25SCSIIORequest_t *mpi_request_3v = 5053 (Mpi25SCSIIORequest_t *)mpi_request; 5054 5055 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) 5056 return; 5057 5058 if (prot_op == SCSI_PROT_READ_STRIP) 5059 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; 5060 else if (prot_op == SCSI_PROT_WRITE_INSERT) 5061 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 5062 else 5063 return; 5064 5065 switch (prot_type) { 5066 case SCSI_PROT_DIF_TYPE1: 5067 case SCSI_PROT_DIF_TYPE2: 5068 5069 /* 5070 * enable ref/guard checking 5071 * auto increment ref tag 5072 */ 5073 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 5074 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 5075 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 5076 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 5077 cpu_to_be32(t10_pi_ref_tag(scmd->request)); 5078 break; 5079 5080 case SCSI_PROT_DIF_TYPE3: 5081 5082 /* 5083 * enable guard checking 5084 */ 5085 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 5086 5087 break; 5088 } 5089 5090 mpi_request_3v->EEDPBlockSize = 5091 cpu_to_le16(scmd->device->sector_size); 5092 5093 if (ioc->is_gen35_ioc) 5094 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; 5095 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); 5096 } 5097 5098 /** 5099 * _scsih_eedp_error_handling - return sense code for EEDP errors 5100 * @scmd: pointer to scsi command object 5101 * @ioc_status: ioc status 5102 */ 5103 static void 5104 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) 5105 { 5106 u8 ascq; 5107 5108 switch (ioc_status) { 5109 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: 5110 ascq = 0x01; 5111 break; 5112 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: 5113 ascq = 0x02; 5114 break; 5115 case 
MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: 5116 ascq = 0x03; 5117 break; 5118 default: 5119 ascq = 0x00; 5120 break; 5121 } 5122 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, 5123 ascq); 5124 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) | 5125 SAM_STAT_CHECK_CONDITION; 5126 } 5127 5128 /** 5129 * scsih_qcmd - main scsi request entry point 5130 * @shost: SCSI host pointer 5131 * @scmd: pointer to scsi command object 5132 * 5133 * The callback index is set inside `ioc->scsi_io_cb_idx`. 5134 * 5135 * Return: 0 on success. If there's a failure, return either: 5136 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or 5137 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 5138 */ 5139 static int 5140 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 5141 { 5142 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 5143 struct MPT3SAS_DEVICE *sas_device_priv_data; 5144 struct MPT3SAS_TARGET *sas_target_priv_data; 5145 struct _raid_device *raid_device; 5146 struct request *rq = scmd->request; 5147 int class; 5148 Mpi25SCSIIORequest_t *mpi_request; 5149 struct _pcie_device *pcie_device = NULL; 5150 u32 mpi_control; 5151 u16 smid; 5152 u16 handle; 5153 5154 if (ioc->logging_level & MPT_DEBUG_SCSI) 5155 scsi_print_command(scmd); 5156 5157 sas_device_priv_data = scmd->device->hostdata; 5158 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 5159 scmd->result = DID_NO_CONNECT << 16; 5160 scmd->scsi_done(scmd); 5161 return 0; 5162 } 5163 5164 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) { 5165 scmd->result = DID_NO_CONNECT << 16; 5166 scmd->scsi_done(scmd); 5167 return 0; 5168 } 5169 5170 sas_target_priv_data = sas_device_priv_data->sas_target; 5171 5172 /* invalid device handle */ 5173 handle = sas_target_priv_data->handle; 5174 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { 5175 scmd->result = DID_NO_CONNECT << 16; 5176 scmd->scsi_done(scmd); 5177 return 0; 5178 } 5179 5180 5181 if (ioc->shost_recovery || 
ioc->ioc_link_reset_in_progress) { 5182 /* host recovery or link resets sent via IOCTLs */ 5183 return SCSI_MLQUEUE_HOST_BUSY; 5184 } else if (sas_target_priv_data->deleted) { 5185 /* device has been deleted */ 5186 scmd->result = DID_NO_CONNECT << 16; 5187 scmd->scsi_done(scmd); 5188 return 0; 5189 } else if (sas_target_priv_data->tm_busy || 5190 sas_device_priv_data->block) { 5191 /* device busy with task management */ 5192 return SCSI_MLQUEUE_DEVICE_BUSY; 5193 } 5194 5195 /* 5196 * Bug work around for firmware SATL handling. The loop 5197 * is based on atomic operations and ensures consistency 5198 * since we're lockless at this point 5199 */ 5200 do { 5201 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) 5202 return SCSI_MLQUEUE_DEVICE_BUSY; 5203 } while (_scsih_set_satl_pending(scmd, true)); 5204 5205 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 5206 mpi_control = MPI2_SCSIIO_CONTROL_READ; 5207 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 5208 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 5209 else 5210 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 5211 5212 /* set tags */ 5213 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 5214 /* NCQ Prio supported, make sure control indicated high priority */ 5215 if (sas_device_priv_data->ncq_prio_enable) { 5216 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 5217 if (class == IOPRIO_CLASS_RT) 5218 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; 5219 } 5220 /* Make sure Device is not raid volume. 5221 * We do not expose raid functionality to upper layer for warpdrive. 
5222 */ 5223 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) 5224 && !scsih_is_nvme(&scmd->device->sdev_gendev)) 5225 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 5226 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 5227 5228 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 5229 if (!smid) { 5230 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 5231 _scsih_set_satl_pending(scmd, false); 5232 goto out; 5233 } 5234 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 5235 memset(mpi_request, 0, ioc->request_sz); 5236 _scsih_setup_eedp(ioc, scmd, mpi_request); 5237 5238 if (scmd->cmd_len == 32) 5239 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 5240 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 5241 if (sas_device_priv_data->sas_target->flags & 5242 MPT_TARGET_FLAGS_RAID_COMPONENT) 5243 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 5244 else 5245 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 5246 mpi_request->DevHandle = cpu_to_le16(handle); 5247 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 5248 mpi_request->Control = cpu_to_le32(mpi_control); 5249 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); 5250 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; 5251 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 5252 mpi_request->SenseBufferLowAddress = 5253 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 5254 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; 5255 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) 5256 mpi_request->LUN); 5257 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 5258 5259 if (mpi_request->DataLength) { 5260 pcie_device = sas_target_priv_data->pcie_dev; 5261 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { 5262 mpt3sas_base_free_smid(ioc, smid); 5263 _scsih_set_satl_pending(scmd, false); 5264 goto out; 5265 } 5266 } else 5267 ioc->build_zero_len_sge(ioc, 
&mpi_request->SGL); 5268 5269 raid_device = sas_target_priv_data->raid_device; 5270 if (raid_device && raid_device->direct_io_enabled) 5271 mpt3sas_setup_direct_io(ioc, scmd, 5272 raid_device, mpi_request); 5273 5274 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { 5275 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 5276 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 5277 MPI25_SCSIIO_IOFLAGS_FAST_PATH); 5278 ioc->put_smid_fast_path(ioc, smid, handle); 5279 } else 5280 ioc->put_smid_scsi_io(ioc, smid, 5281 le16_to_cpu(mpi_request->DevHandle)); 5282 } else 5283 ioc->put_smid_default(ioc, smid); 5284 return 0; 5285 5286 out: 5287 return SCSI_MLQUEUE_HOST_BUSY; 5288 } 5289 5290 /** 5291 * _scsih_normalize_sense - normalize descriptor and fixed format sense data 5292 * @sense_buffer: sense data returned by target 5293 * @data: normalized skey/asc/ascq 5294 */ 5295 static void 5296 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) 5297 { 5298 if ((sense_buffer[0] & 0x7F) >= 0x72) { 5299 /* descriptor format */ 5300 data->skey = sense_buffer[1] & 0x0F; 5301 data->asc = sense_buffer[2]; 5302 data->ascq = sense_buffer[3]; 5303 } else { 5304 /* fixed format */ 5305 data->skey = sense_buffer[2] & 0x0F; 5306 data->asc = sense_buffer[12]; 5307 data->ascq = sense_buffer[13]; 5308 } 5309 } 5310 5311 /** 5312 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request 5313 * @ioc: per adapter object 5314 * @scmd: pointer to scsi command object 5315 * @mpi_reply: reply mf payload returned from firmware 5316 * @smid: ? 
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scsi_state string is assembled in the per-adapter scratch buffer;
	 * NOTE(review): callers are presumably serialized on ioc->tmp_string
	 * use - confirm before calling from a new context.
	 */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* suppress this particular loginfo code entirely -
	 * NOTE(review): assumed to be a benign/noisy firmware loginfo;
	 * confirm against the firmware loginfo documentation.
	 */
	if (log_info == 0x31170000)
		return;

	/* translate IOC status to a human readable string */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	/* translate SCSI status byte to a human readable string */
	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* build the scsi_state description by concatenating one token per
	 * state bit into ioc->tmp_string
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the device: RAID volume, NVMe (pcie) device or SAS device */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}

/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	/* build a SEP WRITE_STATUS request addressed by device handle to set
	 * the predicted-fault slot status bit
	 */
	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	/* remember the LED state so it can be turned off on device removal */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	sas_device_put(sas_device);
}

/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to turned off
 * Context: process
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	/* unlike turn-on, this request is addressed by enclosure + slot and
	 * clears the whole SlotStatus
	 */
	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}

/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	/* the SEP request sleeps, so defer the LED write to the firmware
	 * event worker instead of doing it from interrupt context
	 */
	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	    ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: we may be in interrupt context here */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* synthesize a SAS device status change event (SMART data,
	 * ASC 0x5D = failure prediction threshold exceeded) so it shows up
	 * in the ctl event log like a firmware-generated event
	 */
	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}

/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* no scmd tracked for this smid: let the caller free the frame */
	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame means the IO completed without error status */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* resubmit the same smid to the volume handle instead of
		 * completing the command
		 */
		st->direct_io = 0;
		st->scmd = scmd;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* snoop only the first completion per device to decide whether the
	 * target rejected TLR frames
	 */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/* zero-transfer underrun with a busy-ish SCSI status is reported as
	 * success so the SCSI status byte itself drives the retry logic
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		    ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		    (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		    (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* translate IOC status into the midlayer's scmd->result */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* NOTE(review): 0x31110630 loginfo appears to identify a
		 * condition worth only a few retries before offlining the
		 * device - confirm against firmware loginfo docs
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* fake a CHECK CONDITION with ILLEGAL_REQUEST /
			 * invalid command (asc 0x20) for an empty REPORT_LUNS
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scmd->result = (DRIVER_SENSE << 24) |
			    SAM_STAT_CHECK_CONDITION;
			scmd->sense_buffer[0] = 0x70;
			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
			scmd->sense_buffer[12] = 0x20;
			scmd->sense_buffer[13] = 0;
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scmd->scsi_done(scmd);
	return 0;
}

/**
 * _scsih_update_vphys_after_reset - update the Port's
 * vphys_list after reset
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
						&ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirt.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_get_port_table_after_reset - Construct temporary port table
 * @ioc: per adapter object
 * @port_table: address where port table needs to be constructed
 *
 * return number of HBA port entries available after reset.
 */
static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table)
{
	u16 sz, ioc_status;
	int i, j;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_count = 0, port_id;

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return port_count;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		found = 0;
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		attached_handle =
		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(
		    ioc, attached_handle, &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		for (j = 0; j < port_count; j++) {
			port_id = sas_iounit_pg0->PhyData[i].Port;
			if (port_table[j].port_id == port_id &&
			    port_table[j].sas_address == attached_sas_addr) {
6188 port_table[j].phy_mask |= (1 << i); 6189 found = 1; 6190 break; 6191 } 6192 } 6193 6194 if (found) 6195 continue; 6196 6197 port_id = sas_iounit_pg0->PhyData[i].Port; 6198 port_table[port_count].port_id = port_id; 6199 port_table[port_count].phy_mask = (1 << i); 6200 port_table[port_count].sas_address = attached_sas_addr; 6201 port_count++; 6202 } 6203 out: 6204 kfree(sas_iounit_pg0); 6205 return port_count; 6206 } 6207 6208 enum hba_port_matched_codes { 6209 NOT_MATCHED = 0, 6210 MATCHED_WITH_ADDR_AND_PHYMASK, 6211 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, 6212 MATCHED_WITH_ADDR_AND_SUBPHYMASK, 6213 MATCHED_WITH_ADDR, 6214 }; 6215 6216 /** 6217 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry 6218 * from HBA port table 6219 * @ioc: per adapter object 6220 * @port_entry: hba port entry from temporary port table which needs to be 6221 * searched for matched entry in the HBA port table 6222 * @matched_port_entry: save matched hba port entry here 6223 * @count: count of matched entries 6224 * 6225 * return type of matched entry found. 
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	/*
	 * Only dirty (not yet re-claimed after reset) entries are
	 * candidates.  Matches are ranked; an exact address + phy-mask
	 * match wins immediately, weaker matches keep scanning in case
	 * a stronger one appears later in the list.
	 */
	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* Strongest match: same SAS address and identical phy mask. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* Same address, overlapping phy mask and same Port ID. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* Same address and overlapping phy mask only. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* Weakest match: SAS address only; count how many. */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code == MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}

/**
 * _scsih_del_phy_part_of_anther_port - remove phy if it
 * is a part of anther port
 *@ioc: per adapter object
 *@port_table: port table after reset
 *@index: hba port entry index
 *@port_count: number of ports available after host reset
 *@offset: HBA phy bit offset
 *
 */
static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table,
	int index, u8 port_count, int offset)
{
	struct _sas_node *sas_node = &ioc->sas_hba;
	u32 i, found = 0;

	/*
	 * If some other entry in the post-reset port table now owns this
	 * phy, detach it from its current transport port; otherwise keep
	 * it on entry @index by setting its bit back in the phy_mask.
	 */
	for (i = 0; i < port_count; i++) {
		if (i == index)
			continue;

		if (port_table[i].phy_mask & (1 << offset)) {
			mpt3sas_transport_del_phy_from_an_existing_port(
			    ioc, sas_node, &sas_node->phy[offset]);
			found = 1;
			break;
		}
	}
	if (!found)
		port_table[index].phy_mask |= (1 << offset);
}

/**
 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
 * right port
 *@ioc: per adapter object
 *@hba_port_entry: hba port table entry
 *@port_table: temporary port table
 *@index: hba port entry index
 *@port_count: number of ports available after host reset
 *
 */
static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *hba_port_entry, struct hba_port *port_table,
	int index, int port_count)
{
	u32 phy_mask, offset = 0;
	struct _sas_node *sas_node = &ioc->sas_hba;

	/* XOR yields the set of phys that changed membership. */
	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;

	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
		if (phy_mask & (1 << offset)) {
			/* Phy left this port after reset. */
			if (!(port_table[index].phy_mask & (1 << offset))) {
				_scsih_del_phy_part_of_anther_port(
				    ioc, port_table, index, port_count,
				    offset);
				continue;
			}
			/* Phy joined this port: detach from old, then add. */
			if (sas_node->phy[offset].phy_belongs_to_port)
				mpt3sas_transport_del_phy_from_an_existing_port(
				    ioc, sas_node, &sas_node->phy[offset]);
			mpt3sas_transport_add_phy_to_an_existing_port(
			    ioc, sas_node, &sas_node->phy[offset],
			    hba_port_entry->sas_address,
			    hba_port_entry);
		}
	}
}

/**
 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;
	struct virtual_phy *vphy, *vphy_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
				drsprintk(ioc, ioc_info(ioc,
				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
				    vphy, port->port_id,
				    vphy->phy_mask));
				port->vphys_mask &= ~vphy->phy_mask;
				list_del(&vphy->list);
				kfree(vphy);
			}
		}
		/*
		 * A port with no vphys and no direct-attached address left
		 * is itself now stale — mark it dirty for later deletion.
		 */
		if (!port->vphys_mask && !port->sas_address)
			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
	}
}

/**
 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
 * after host reset
 *@ioc: per adapter object
 *
 */
static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		/* Ports created during this recovery are kept. */
		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
		    port->flags & HBA_PORT_FLAG_NEW_PORT)
			continue;

		drsprintk(ioc, ioc_info(ioc,
		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
		    port, port->port_id, port->phy_mask));
		list_del(&port->list);
		kfree(port);
	}
}

/**
 * _scsih_sas_port_refresh - Update HBA port table after host reset
 * @ioc: per adapter object
 */
static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) 6420 { 6421 u32 port_count = 0; 6422 struct hba_port *port_table; 6423 struct hba_port *port_table_entry; 6424 struct hba_port *port_entry = NULL; 6425 int i, j, count = 0, lcount = 0; 6426 int ret; 6427 u64 sas_addr; 6428 6429 drsprintk(ioc, ioc_info(ioc, 6430 "updating ports for sas_host(0x%016llx)\n", 6431 (unsigned long long)ioc->sas_hba.sas_address)); 6432 6433 port_table = kcalloc(ioc->sas_hba.num_phys, 6434 sizeof(struct hba_port), GFP_KERNEL); 6435 if (!port_table) 6436 return; 6437 6438 port_count = _scsih_get_port_table_after_reset(ioc, port_table); 6439 if (!port_count) 6440 return; 6441 6442 drsprintk(ioc, ioc_info(ioc, "New Port table\n")); 6443 for (j = 0; j < port_count; j++) 6444 drsprintk(ioc, ioc_info(ioc, 6445 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6446 port_table[j].port_id, 6447 port_table[j].phy_mask, port_table[j].sas_address)); 6448 6449 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) 6450 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6451 6452 drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); 6453 port_table_entry = NULL; 6454 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6455 drsprintk(ioc, ioc_info(ioc, 6456 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6457 port_table_entry->port_id, 6458 port_table_entry->phy_mask, 6459 port_table_entry->sas_address)); 6460 } 6461 6462 for (j = 0; j < port_count; j++) { 6463 ret = _scsih_look_and_get_matched_port_entry(ioc, 6464 &port_table[j], &port_entry, &count); 6465 if (!port_entry) { 6466 drsprintk(ioc, ioc_info(ioc, 6467 "No Matched entry for sas_addr(0x%16llx), Port:%d\n", 6468 port_table[j].sas_address, 6469 port_table[j].port_id)); 6470 continue; 6471 } 6472 6473 switch (ret) { 6474 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: 6475 case MATCHED_WITH_ADDR_AND_SUBPHYMASK: 6476 _scsih_add_or_del_phys_from_existing_port(ioc, 6477 port_entry, port_table, j, port_count); 6478 
break; 6479 case MATCHED_WITH_ADDR: 6480 sas_addr = port_table[j].sas_address; 6481 for (i = 0; i < port_count; i++) { 6482 if (port_table[i].sas_address == sas_addr) 6483 lcount++; 6484 } 6485 6486 if (count > 1 || lcount > 1) 6487 port_entry = NULL; 6488 else 6489 _scsih_add_or_del_phys_from_existing_port(ioc, 6490 port_entry, port_table, j, port_count); 6491 } 6492 6493 if (!port_entry) 6494 continue; 6495 6496 if (port_entry->port_id != port_table[j].port_id) 6497 port_entry->port_id = port_table[j].port_id; 6498 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; 6499 port_entry->phy_mask = port_table[j].phy_mask; 6500 } 6501 6502 port_table_entry = NULL; 6503 } 6504 6505 /** 6506 * _scsih_alloc_vphy - allocate virtual_phy object 6507 * @ioc: per adapter object 6508 * @port_id: Port ID number 6509 * @phy_num: HBA Phy number 6510 * 6511 * Returns allocated virtual_phy object. 6512 */ 6513 static struct virtual_phy * 6514 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) 6515 { 6516 struct virtual_phy *vphy; 6517 struct hba_port *port; 6518 6519 port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6520 if (!port) 6521 return NULL; 6522 6523 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); 6524 if (!vphy) { 6525 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); 6526 if (!vphy) 6527 return NULL; 6528 6529 if (!port->vphys_mask) 6530 INIT_LIST_HEAD(&port->vphys_list); 6531 6532 /* 6533 * Enable bit corresponding to HBA phy number on its 6534 * parent hba_port object's vphys_mask field. 
		 */
		port->vphys_mask |= (1 << phy_num);
		vphy->phy_mask |= (1 << phy_num);

		list_add_tail(&vphy->list, &port->vphys_list);

		ioc_info(ioc,
		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
		    vphy, port->port_id, phy_num);
	}
	return vphy;
}

/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device. Its
 * possible that the handles may change from the previous setting, so this
 * code keeping handles updating if changed.
 */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
	    ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
	    (u64)ioc->sas_hba.sas_address));

	/* Page size includes one PhyData element per HBA phy. */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * NOTE(review): a mid-loop 'goto out' below leaves the remaining
	 * phys un-refreshed; presumably a later topology event recovers
	 * them — confirm.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		/* Host controller handle is published in PhyData[0]. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		port_id = sas_iounit_pg0->PhyData[i].Port;
		/* Create an hba_port entry the first time a Port ID shows up. */
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* A device with no negotiated rate is reported at 1.5G. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creating host side data object, stored in ioc->sas_hba
 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	ioc->sas_hba.phy = kcalloc(num_phys,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		/* Bare return is safe here: both page buffers are NULL. */
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/*
	 * Device-missing delay: the UNIT_16 flag selects 16-second units
	 * for the masked timeout value.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}

		/* Host controller handle is published in PhyData[0]. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
	    ioc->sas_hba.handle,
	    (u64)ioc->sas_hba.sas_address,
	    ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		    ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_expander_add - creating expander object
 * @ioc: per adapter object
 * @handle: expander handle
 *
 * Creating expander object, stored in ioc->sas_expander_list.
 *
 * Return: 0 for success, else error.
 */
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_node *sas_expander;
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ExpanderPage1_t expander_pg1;
	u32 ioc_status;
	u16 parent_handle;
	u64 sas_address, sas_address_parent = 0;
	int i;
	unsigned long flags;
	struct _sas_port *mpt3sas_port = NULL;
	u8 port_id;

	int rc = 0;

	if (!handle)
		return -1;

	if (ioc->shost_recovery || ioc->pci_error_recovery)
		return -1;

	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	/* handle out of order topology events */
	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
	    != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	port_id = expander_pg0.PhysicalPort;
	if (sas_address_parent != ioc->sas_hba.sas_address) {
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6886 sas_address_parent, 6887 mpt3sas_get_port_by_id(ioc, port_id, 0)); 6888 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6889 if (!sas_expander) { 6890 rc = _scsih_expander_add(ioc, parent_handle); 6891 if (rc != 0) 6892 return rc; 6893 } 6894 } 6895 6896 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6897 sas_address = le64_to_cpu(expander_pg0.SASAddress); 6898 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6899 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 6900 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6901 6902 if (sas_expander) 6903 return 0; 6904 6905 sas_expander = kzalloc(sizeof(struct _sas_node), 6906 GFP_KERNEL); 6907 if (!sas_expander) { 6908 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6909 __FILE__, __LINE__, __func__); 6910 return -1; 6911 } 6912 6913 sas_expander->handle = handle; 6914 sas_expander->num_phys = expander_pg0.NumPhys; 6915 sas_expander->sas_address_parent = sas_address_parent; 6916 sas_expander->sas_address = sas_address; 6917 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6918 if (!sas_expander->port) { 6919 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6920 __FILE__, __LINE__, __func__); 6921 rc = -1; 6922 goto out_fail; 6923 } 6924 6925 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 6926 handle, parent_handle, 6927 (u64)sas_expander->sas_address, sas_expander->num_phys); 6928 6929 if (!sas_expander->num_phys) 6930 goto out_fail; 6931 sas_expander->phy = kcalloc(sas_expander->num_phys, 6932 sizeof(struct _sas_phy), GFP_KERNEL); 6933 if (!sas_expander->phy) { 6934 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6935 __FILE__, __LINE__, __func__); 6936 rc = -1; 6937 goto out_fail; 6938 } 6939 6940 INIT_LIST_HEAD(&sas_expander->sas_port_list); 6941 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, 6942 sas_address_parent, sas_expander->port); 6943 if (!mpt3sas_port) { 6944 
ioc_err(ioc, "failure at %s:%d/%s()!\n", 6945 __FILE__, __LINE__, __func__); 6946 rc = -1; 6947 goto out_fail; 6948 } 6949 sas_expander->parent_dev = &mpt3sas_port->rphy->dev; 6950 sas_expander->rphy = mpt3sas_port->rphy; 6951 6952 for (i = 0 ; i < sas_expander->num_phys ; i++) { 6953 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 6954 &expander_pg1, i, handle))) { 6955 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6956 __FILE__, __LINE__, __func__); 6957 rc = -1; 6958 goto out_fail; 6959 } 6960 sas_expander->phy[i].handle = handle; 6961 sas_expander->phy[i].phy_id = i; 6962 sas_expander->phy[i].port = 6963 mpt3sas_get_port_by_id(ioc, port_id, 0); 6964 6965 if ((mpt3sas_transport_add_expander_phy(ioc, 6966 &sas_expander->phy[i], expander_pg1, 6967 sas_expander->parent_dev))) { 6968 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6969 __FILE__, __LINE__, __func__); 6970 rc = -1; 6971 goto out_fail; 6972 } 6973 } 6974 6975 if (sas_expander->enclosure_handle) { 6976 enclosure_dev = 6977 mpt3sas_scsih_enclosure_find_by_handle(ioc, 6978 sas_expander->enclosure_handle); 6979 if (enclosure_dev) 6980 sas_expander->enclosure_logical_id = 6981 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 6982 } 6983 6984 _scsih_expander_node_add(ioc, sas_expander); 6985 return 0; 6986 6987 out_fail: 6988 6989 if (mpt3sas_port) 6990 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 6991 sas_address_parent, sas_expander->port); 6992 kfree(sas_expander); 6993 return rc; 6994 } 6995 6996 /** 6997 * mpt3sas_expander_remove - removing expander object 6998 * @ioc: per adapter object 6999 * @sas_address: expander sas_address 7000 * @port: hba port entry 7001 */ 7002 void 7003 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 7004 struct hba_port *port) 7005 { 7006 struct _sas_node *sas_expander; 7007 unsigned long flags; 7008 7009 if (ioc->shost_recovery) 7010 return; 7011 7012 if (!port) 7013 return; 7014 7015 spin_lock_irqsave(&ioc->sas_node_lock, flags); 7016 
	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	if (sas_expander)
		_scsih_expander_node_remove(ioc, sas_expander);
}

/**
 * _scsih_done - internal SCSI_IO callback handler.
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when sending internal generated SCSI_IO.
 * The callback index passed is `ioc->scsih_cb_idx`
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* mpi_reply may be NULL; the completion is still signalled below. */
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	/* Ignore stale/unexpected completions. */
	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->scsih_cmds.smid != smid)
		return 1;
	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence the *4. */
		memcpy(ioc->scsih_cmds.reply, mpi_reply,
		    mpi_reply->MsgLength*4);
		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->scsih_cmds.done);
	return 1;
}




#define MPT3_MAX_LUNS (255)


/**
 * _scsih_check_access_status - check access flags
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
		desc = "sata capability failed";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
		desc = "sata affiliation conflict";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
		desc = "route not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
		desc = "smp error not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
		desc = "device blocked";
		break;
	/* All SIF_* codes are SATA-initialization sub-failures; they share
	 * one description.
	 */
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
		desc = "sata initialization failed";
		break;
	default:
		desc = "unknown";
		break;
	}

	if (!rc)
		return 0;

	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
		desc, (u64)sas_address, handle);
	return rc;
}

/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	/* sas_device_lock is held across the lookup and the handle/enclosure
	 * refresh below; dropped before _scsih_ublock_io_device.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	if (unlikely(sas_device->handle != handle)) {
		/* Firmware re-assigned the device handle (e.g. after reset);
		 * resync cached handle and enclosure information.
		 */
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
			"handle changed from(0x%04x) to (0x%04x)!!!\n",
			sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
				sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
			sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
			handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* Device is responsive: release the lock, then resume I/O. */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_add_device - creating sas device object
 * @ioc: per adapter object
 * @handle: sas device handle
 * @phy_num: phy number end device attached to
 * @is_pd: is this hidden raid component
 *
 * Creating end device object, stored in ioc->sas_device_list.
 *
 * Return: 0 for success, non-zero for failure.
7253 */ 7254 static int 7255 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 7256 u8 is_pd) 7257 { 7258 Mpi2ConfigReply_t mpi_reply; 7259 Mpi2SasDevicePage0_t sas_device_pg0; 7260 struct _sas_device *sas_device; 7261 struct _enclosure_node *enclosure_dev = NULL; 7262 u32 ioc_status; 7263 u64 sas_address; 7264 u32 device_info; 7265 u8 port_id; 7266 7267 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7268 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 7269 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7270 __FILE__, __LINE__, __func__); 7271 return -1; 7272 } 7273 7274 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7275 MPI2_IOCSTATUS_MASK; 7276 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7277 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7278 __FILE__, __LINE__, __func__); 7279 return -1; 7280 } 7281 7282 /* check if this is end device */ 7283 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 7284 if (!(_scsih_is_end_device(device_info))) 7285 return -1; 7286 set_bit(handle, ioc->pend_os_device_add); 7287 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 7288 7289 /* check if device is present */ 7290 if (!(le16_to_cpu(sas_device_pg0.Flags) & 7291 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 7292 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 7293 handle); 7294 return -1; 7295 } 7296 7297 /* check if there were any issues with discovery */ 7298 if (_scsih_check_access_status(ioc, sas_address, handle, 7299 sas_device_pg0.AccessStatus)) 7300 return -1; 7301 7302 port_id = sas_device_pg0.PhysicalPort; 7303 sas_device = mpt3sas_get_sdev_by_addr(ioc, 7304 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 7305 if (sas_device) { 7306 clear_bit(handle, ioc->pend_os_device_add); 7307 sas_device_put(sas_device); 7308 return -1; 7309 } 7310 7311 if (sas_device_pg0.EnclosureHandle) { 7312 enclosure_dev = 7313 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7314 le16_to_cpu(sas_device_pg0.EnclosureHandle)); 7315 if 
(enclosure_dev == NULL) 7316 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 7317 sas_device_pg0.EnclosureHandle); 7318 } 7319 7320 sas_device = kzalloc(sizeof(struct _sas_device), 7321 GFP_KERNEL); 7322 if (!sas_device) { 7323 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7324 __FILE__, __LINE__, __func__); 7325 return 0; 7326 } 7327 7328 kref_init(&sas_device->refcount); 7329 sas_device->handle = handle; 7330 if (_scsih_get_sas_address(ioc, 7331 le16_to_cpu(sas_device_pg0.ParentDevHandle), 7332 &sas_device->sas_address_parent) != 0) 7333 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7334 __FILE__, __LINE__, __func__); 7335 sas_device->enclosure_handle = 7336 le16_to_cpu(sas_device_pg0.EnclosureHandle); 7337 if (sas_device->enclosure_handle != 0) 7338 sas_device->slot = 7339 le16_to_cpu(sas_device_pg0.Slot); 7340 sas_device->device_info = device_info; 7341 sas_device->sas_address = sas_address; 7342 sas_device->phy = sas_device_pg0.PhyNum; 7343 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 7344 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 7345 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 7346 if (!sas_device->port) { 7347 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7348 __FILE__, __LINE__, __func__); 7349 goto out; 7350 } 7351 7352 if (le16_to_cpu(sas_device_pg0.Flags) 7353 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7354 sas_device->enclosure_level = 7355 sas_device_pg0.EnclosureLevel; 7356 memcpy(sas_device->connector_name, 7357 sas_device_pg0.ConnectorName, 4); 7358 sas_device->connector_name[4] = '\0'; 7359 } else { 7360 sas_device->enclosure_level = 0; 7361 sas_device->connector_name[0] = '\0'; 7362 } 7363 /* get enclosure_logical_id & chassis_slot*/ 7364 sas_device->is_chassis_slot_valid = 0; 7365 if (enclosure_dev) { 7366 sas_device->enclosure_logical_id = 7367 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7368 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 7369 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 7370 sas_device->is_chassis_slot_valid = 1; 7371 sas_device->chassis_slot = 7372 enclosure_dev->pg0.ChassisSlot; 7373 } 7374 } 7375 7376 /* get device name */ 7377 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 7378 7379 if (ioc->wait_for_discovery_to_complete) 7380 _scsih_sas_device_init_add(ioc, sas_device); 7381 else 7382 _scsih_sas_device_add(ioc, sas_device); 7383 7384 out: 7385 sas_device_put(sas_device); 7386 return 0; 7387 } 7388 7389 /** 7390 * _scsih_remove_device - removing sas device object 7391 * @ioc: per adapter object 7392 * @sas_device: the sas_device object 7393 */ 7394 static void 7395 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 7396 struct _sas_device *sas_device) 7397 { 7398 struct MPT3SAS_TARGET *sas_target_priv_data; 7399 7400 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && 7401 (sas_device->pfa_led_on)) { 7402 _scsih_turn_off_pfa_led(ioc, sas_device); 7403 sas_device->pfa_led_on = 0; 7404 } 7405 7406 dewtprintk(ioc, 7407 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", 7408 __func__, 7409 
sas_device->handle, (u64)sas_device->sas_address)); 7410 7411 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 7412 NULL, NULL)); 7413 7414 if (sas_device->starget && sas_device->starget->hostdata) { 7415 sas_target_priv_data = sas_device->starget->hostdata; 7416 sas_target_priv_data->deleted = 1; 7417 _scsih_ublock_io_device(ioc, sas_device->sas_address, 7418 sas_device->port); 7419 sas_target_priv_data->handle = 7420 MPT3SAS_INVALID_DEVICE_HANDLE; 7421 } 7422 7423 if (!ioc->hide_drives) 7424 mpt3sas_transport_port_remove(ioc, 7425 sas_device->sas_address, 7426 sas_device->sas_address_parent, 7427 sas_device->port); 7428 7429 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", 7430 sas_device->handle, (u64)sas_device->sas_address); 7431 7432 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); 7433 7434 dewtprintk(ioc, 7435 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", 7436 __func__, 7437 sas_device->handle, (u64)sas_device->sas_address)); 7438 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 7439 NULL, NULL)); 7440 } 7441 7442 /** 7443 * _scsih_sas_topology_change_event_debug - debug for topology event 7444 * @ioc: per adapter object 7445 * @event_data: event data payload 7446 * Context: user. 
7447 */ 7448 static void 7449 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 7450 Mpi2EventDataSasTopologyChangeList_t *event_data) 7451 { 7452 int i; 7453 u16 handle; 7454 u16 reason_code; 7455 u8 phy_number; 7456 char *status_str = NULL; 7457 u8 link_rate, prev_link_rate; 7458 7459 switch (event_data->ExpStatus) { 7460 case MPI2_EVENT_SAS_TOPO_ES_ADDED: 7461 status_str = "add"; 7462 break; 7463 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: 7464 status_str = "remove"; 7465 break; 7466 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: 7467 case 0: 7468 status_str = "responding"; 7469 break; 7470 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: 7471 status_str = "remove delay"; 7472 break; 7473 default: 7474 status_str = "unknown status"; 7475 break; 7476 } 7477 ioc_info(ioc, "sas topology change: (%s)\n", status_str); 7478 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ 7479 "start_phy(%02d), count(%d)\n", 7480 le16_to_cpu(event_data->ExpanderDevHandle), 7481 le16_to_cpu(event_data->EnclosureHandle), 7482 event_data->StartPhyNum, event_data->NumEntries); 7483 for (i = 0; i < event_data->NumEntries; i++) { 7484 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 7485 if (!handle) 7486 continue; 7487 phy_number = event_data->StartPhyNum + i; 7488 reason_code = event_data->PHY[i].PhyStatus & 7489 MPI2_EVENT_SAS_TOPO_RC_MASK; 7490 switch (reason_code) { 7491 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 7492 status_str = "target add"; 7493 break; 7494 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 7495 status_str = "target remove"; 7496 break; 7497 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: 7498 status_str = "delay target remove"; 7499 break; 7500 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 7501 status_str = "link rate change"; 7502 break; 7503 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: 7504 status_str = "target responding"; 7505 break; 7506 default: 7507 status_str = "unknown"; 7508 break; 7509 } 7510 link_rate = event_data->PHY[i].LinkRate >> 4; 
7511 prev_link_rate = event_data->PHY[i].LinkRate & 0xF; 7512 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ 7513 " link rate: new(0x%02x), old(0x%02x)\n", phy_number, 7514 handle, status_str, link_rate, prev_link_rate); 7515 7516 } 7517 } 7518 7519 /** 7520 * _scsih_sas_topology_change_event - handle topology changes 7521 * @ioc: per adapter object 7522 * @fw_event: The fw_event_work object 7523 * Context: user. 7524 * 7525 */ 7526 static int 7527 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, 7528 struct fw_event_work *fw_event) 7529 { 7530 int i; 7531 u16 parent_handle, handle; 7532 u16 reason_code; 7533 u8 phy_number, max_phys; 7534 struct _sas_node *sas_expander; 7535 u64 sas_address; 7536 unsigned long flags; 7537 u8 link_rate, prev_link_rate; 7538 struct hba_port *port; 7539 Mpi2EventDataSasTopologyChangeList_t *event_data = 7540 (Mpi2EventDataSasTopologyChangeList_t *) 7541 fw_event->event_data; 7542 7543 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 7544 _scsih_sas_topology_change_event_debug(ioc, event_data); 7545 7546 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) 7547 return 0; 7548 7549 if (!ioc->sas_hba.num_phys) 7550 _scsih_sas_host_add(ioc); 7551 else 7552 _scsih_sas_host_refresh(ioc); 7553 7554 if (fw_event->ignore) { 7555 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n")); 7556 return 0; 7557 } 7558 7559 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); 7560 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0); 7561 7562 /* handle expander add */ 7563 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) 7564 if (_scsih_expander_add(ioc, parent_handle) != 0) 7565 return 0; 7566 7567 spin_lock_irqsave(&ioc->sas_node_lock, flags); 7568 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, 7569 parent_handle); 7570 if (sas_expander) { 7571 sas_address = sas_expander->sas_address; 7572 max_phys = sas_expander->num_phys; 7573 port = sas_expander->port; 7574 } 
else if (parent_handle < ioc->sas_hba.num_phys) { 7575 sas_address = ioc->sas_hba.sas_address; 7576 max_phys = ioc->sas_hba.num_phys; 7577 } else { 7578 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7579 return 0; 7580 } 7581 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7582 7583 /* handle siblings events */ 7584 for (i = 0; i < event_data->NumEntries; i++) { 7585 if (fw_event->ignore) { 7586 dewtprintk(ioc, 7587 ioc_info(ioc, "ignoring expander event\n")); 7588 return 0; 7589 } 7590 if (ioc->remove_host || ioc->pci_error_recovery) 7591 return 0; 7592 phy_number = event_data->StartPhyNum + i; 7593 if (phy_number >= max_phys) 7594 continue; 7595 reason_code = event_data->PHY[i].PhyStatus & 7596 MPI2_EVENT_SAS_TOPO_RC_MASK; 7597 if ((event_data->PHY[i].PhyStatus & 7598 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != 7599 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) 7600 continue; 7601 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 7602 if (!handle) 7603 continue; 7604 link_rate = event_data->PHY[i].LinkRate >> 4; 7605 prev_link_rate = event_data->PHY[i].LinkRate & 0xF; 7606 switch (reason_code) { 7607 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 7608 7609 if (ioc->shost_recovery) 7610 break; 7611 7612 if (link_rate == prev_link_rate) 7613 break; 7614 7615 mpt3sas_transport_update_links(ioc, sas_address, 7616 handle, phy_number, link_rate, port); 7617 7618 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) 7619 break; 7620 7621 _scsih_check_device(ioc, sas_address, handle, 7622 phy_number, link_rate); 7623 7624 if (!test_bit(handle, ioc->pend_os_device_add)) 7625 break; 7626 7627 fallthrough; 7628 7629 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 7630 7631 if (ioc->shost_recovery) 7632 break; 7633 7634 mpt3sas_transport_update_links(ioc, sas_address, 7635 handle, phy_number, link_rate, port); 7636 7637 _scsih_add_device(ioc, handle, phy_number, 0); 7638 7639 break; 7640 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 7641 7642 
_scsih_device_remove_by_handle(ioc, handle); 7643 break; 7644 } 7645 } 7646 7647 /* handle expander removal */ 7648 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && 7649 sas_expander) 7650 mpt3sas_expander_remove(ioc, sas_address, port); 7651 7652 return 0; 7653 } 7654 7655 /** 7656 * _scsih_sas_device_status_change_event_debug - debug for device event 7657 * @ioc: ? 7658 * @event_data: event data payload 7659 * Context: user. 7660 */ 7661 static void 7662 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 7663 Mpi2EventDataSasDeviceStatusChange_t *event_data) 7664 { 7665 char *reason_str = NULL; 7666 7667 switch (event_data->ReasonCode) { 7668 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 7669 reason_str = "smart data"; 7670 break; 7671 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 7672 reason_str = "unsupported device discovered"; 7673 break; 7674 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 7675 reason_str = "internal device reset"; 7676 break; 7677 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 7678 reason_str = "internal task abort"; 7679 break; 7680 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: 7681 reason_str = "internal task abort set"; 7682 break; 7683 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: 7684 reason_str = "internal clear task set"; 7685 break; 7686 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: 7687 reason_str = "internal query task"; 7688 break; 7689 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: 7690 reason_str = "sata init failure"; 7691 break; 7692 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: 7693 reason_str = "internal device reset complete"; 7694 break; 7695 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: 7696 reason_str = "internal task abort complete"; 7697 break; 7698 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: 7699 reason_str = "internal async notification"; 7700 break; 7701 case 
MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: 7702 reason_str = "expander reduced functionality"; 7703 break; 7704 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: 7705 reason_str = "expander reduced functionality complete"; 7706 break; 7707 default: 7708 reason_str = "unknown reason"; 7709 break; 7710 } 7711 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)", 7712 reason_str, le16_to_cpu(event_data->DevHandle), 7713 (u64)le64_to_cpu(event_data->SASAddress), 7714 le16_to_cpu(event_data->TaskTag)); 7715 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) 7716 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", 7717 event_data->ASC, event_data->ASCQ); 7718 pr_cont("\n"); 7719 } 7720 7721 /** 7722 * _scsih_sas_device_status_change_event - handle device status change 7723 * @ioc: per adapter object 7724 * @event_data: The fw event 7725 * Context: user. 7726 */ 7727 static void 7728 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, 7729 Mpi2EventDataSasDeviceStatusChange_t *event_data) 7730 { 7731 struct MPT3SAS_TARGET *target_priv_data; 7732 struct _sas_device *sas_device; 7733 u64 sas_address; 7734 unsigned long flags; 7735 7736 /* In MPI Revision K (0xC), the internal device reset complete was 7737 * implemented, so avoid setting tm_busy flag for older firmware. 
7738 */ 7739 if ((ioc->facts.HeaderVersion >> 8) < 0xC) 7740 return; 7741 7742 if (event_data->ReasonCode != 7743 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && 7744 event_data->ReasonCode != 7745 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) 7746 return; 7747 7748 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7749 sas_address = le64_to_cpu(event_data->SASAddress); 7750 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 7751 sas_address, 7752 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0)); 7753 7754 if (!sas_device || !sas_device->starget) 7755 goto out; 7756 7757 target_priv_data = sas_device->starget->hostdata; 7758 if (!target_priv_data) 7759 goto out; 7760 7761 if (event_data->ReasonCode == 7762 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) 7763 target_priv_data->tm_busy = 1; 7764 else 7765 target_priv_data->tm_busy = 0; 7766 7767 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 7768 ioc_info(ioc, 7769 "%s tm_busy flag for handle(0x%04x)\n", 7770 (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", 7771 target_priv_data->handle); 7772 7773 out: 7774 if (sas_device) 7775 sas_device_put(sas_device); 7776 7777 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7778 } 7779 7780 7781 /** 7782 * _scsih_check_pcie_access_status - check access flags 7783 * @ioc: per adapter object 7784 * @wwid: wwid 7785 * @handle: sas device handle 7786 * @access_status: errors returned during discovery of the device 7787 * 7788 * Return: 0 for success, else failure 7789 */ 7790 static u8 7791 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, 7792 u16 handle, u8 access_status) 7793 { 7794 u8 rc = 1; 7795 char *desc = NULL; 7796 7797 switch (access_status) { 7798 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS: 7799 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION: 7800 rc = 0; 7801 break; 7802 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED: 7803 desc = "PCIe device capability failed"; 7804 break; 7805 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: 7806 desc = "PCIe device blocked"; 7807 ioc_info(ioc, 7808 "Device with Access Status (%s): wwid(0x%016llx), " 7809 "handle(0x%04x)\n ll only be added to the internal list", 7810 desc, (u64)wwid, handle); 7811 rc = 0; 7812 break; 7813 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: 7814 desc = "PCIe device mem space access failed"; 7815 break; 7816 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE: 7817 desc = "PCIe device unsupported"; 7818 break; 7819 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED: 7820 desc = "PCIe device MSIx Required"; 7821 break; 7822 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX: 7823 desc = "PCIe device init fail max"; 7824 break; 7825 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN: 7826 desc = "PCIe device status unknown"; 7827 break; 7828 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT: 7829 desc = "nvme ready timeout"; 7830 break; 7831 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED: 7832 desc = "nvme device configuration unsupported"; 7833 break; 7834 case 
MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED: 7835 desc = "nvme identify failed"; 7836 break; 7837 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED: 7838 desc = "nvme qconfig failed"; 7839 break; 7840 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED: 7841 desc = "nvme qcreation failed"; 7842 break; 7843 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED: 7844 desc = "nvme eventcfg failed"; 7845 break; 7846 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED: 7847 desc = "nvme get feature stat failed"; 7848 break; 7849 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT: 7850 desc = "nvme idle timeout"; 7851 break; 7852 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS: 7853 desc = "nvme failure status"; 7854 break; 7855 default: 7856 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n", 7857 access_status, (u64)wwid, handle); 7858 return rc; 7859 } 7860 7861 if (!rc) 7862 return rc; 7863 7864 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n", 7865 desc, (u64)wwid, handle); 7866 return rc; 7867 } 7868 7869 /** 7870 * _scsih_pcie_device_remove_from_sml - removing pcie device 7871 * from SML and free up associated memory 7872 * @ioc: per adapter object 7873 * @pcie_device: the pcie_device object 7874 */ 7875 static void 7876 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, 7877 struct _pcie_device *pcie_device) 7878 { 7879 struct MPT3SAS_TARGET *sas_target_priv_data; 7880 7881 dewtprintk(ioc, 7882 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", 7883 __func__, 7884 pcie_device->handle, (u64)pcie_device->wwid)); 7885 if (pcie_device->enclosure_handle != 0) 7886 dewtprintk(ioc, 7887 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", 7888 __func__, 7889 (u64)pcie_device->enclosure_logical_id, 7890 pcie_device->slot)); 7891 if (pcie_device->connector_name[0] != '\0') 7892 dewtprintk(ioc, 7893 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n", 7894 
__func__, 7895 pcie_device->enclosure_level, 7896 pcie_device->connector_name)); 7897 7898 if (pcie_device->starget && pcie_device->starget->hostdata) { 7899 sas_target_priv_data = pcie_device->starget->hostdata; 7900 sas_target_priv_data->deleted = 1; 7901 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL); 7902 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 7903 } 7904 7905 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 7906 pcie_device->handle, (u64)pcie_device->wwid); 7907 if (pcie_device->enclosure_handle != 0) 7908 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n", 7909 (u64)pcie_device->enclosure_logical_id, 7910 pcie_device->slot); 7911 if (pcie_device->connector_name[0] != '\0') 7912 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n", 7913 pcie_device->enclosure_level, 7914 pcie_device->connector_name); 7915 7916 if (pcie_device->starget && (pcie_device->access_status != 7917 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) 7918 scsi_remove_target(&pcie_device->starget->dev); 7919 dewtprintk(ioc, 7920 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", 7921 __func__, 7922 pcie_device->handle, (u64)pcie_device->wwid)); 7923 if (pcie_device->enclosure_handle != 0) 7924 dewtprintk(ioc, 7925 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", 7926 __func__, 7927 (u64)pcie_device->enclosure_logical_id, 7928 pcie_device->slot)); 7929 if (pcie_device->connector_name[0] != '\0') 7930 dewtprintk(ioc, 7931 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n", 7932 __func__, 7933 pcie_device->enclosure_level, 7934 pcie_device->connector_name)); 7935 7936 kfree(pcie_device->serial_number); 7937 } 7938 7939 7940 /** 7941 * _scsih_pcie_check_device - checking device responsiveness 7942 * @ioc: per adapter object 7943 * @handle: attached device handle 7944 */ 7945 static void 7946 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 7947 { 7948 
Mpi2ConfigReply_t mpi_reply; 7949 Mpi26PCIeDevicePage0_t pcie_device_pg0; 7950 u32 ioc_status; 7951 struct _pcie_device *pcie_device; 7952 u64 wwid; 7953 unsigned long flags; 7954 struct scsi_target *starget; 7955 struct MPT3SAS_TARGET *sas_target_priv_data; 7956 u32 device_info; 7957 7958 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 7959 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) 7960 return; 7961 7962 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 7963 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 7964 return; 7965 7966 /* check if this is end device */ 7967 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 7968 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 7969 return; 7970 7971 wwid = le64_to_cpu(pcie_device_pg0.WWID); 7972 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 7973 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 7974 7975 if (!pcie_device) { 7976 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7977 return; 7978 } 7979 7980 if (unlikely(pcie_device->handle != handle)) { 7981 starget = pcie_device->starget; 7982 sas_target_priv_data = starget->hostdata; 7983 pcie_device->access_status = pcie_device_pg0.AccessStatus; 7984 starget_printk(KERN_INFO, starget, 7985 "handle changed from(0x%04x) to (0x%04x)!!!\n", 7986 pcie_device->handle, handle); 7987 sas_target_priv_data->handle = handle; 7988 pcie_device->handle = handle; 7989 7990 if (le32_to_cpu(pcie_device_pg0.Flags) & 7991 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 7992 pcie_device->enclosure_level = 7993 pcie_device_pg0.EnclosureLevel; 7994 memcpy(&pcie_device->connector_name[0], 7995 &pcie_device_pg0.ConnectorName[0], 4); 7996 } else { 7997 pcie_device->enclosure_level = 0; 7998 pcie_device->connector_name[0] = '\0'; 7999 } 8000 } 8001 8002 /* check if device is present */ 8003 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8004 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8005 ioc_info(ioc, "device is not present handle(0x%04x), 
flags!!!\n", 8006 handle); 8007 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8008 pcie_device_put(pcie_device); 8009 return; 8010 } 8011 8012 /* check if there were any issues with discovery */ 8013 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8014 pcie_device_pg0.AccessStatus)) { 8015 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8016 pcie_device_put(pcie_device); 8017 return; 8018 } 8019 8020 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8021 pcie_device_put(pcie_device); 8022 8023 _scsih_ublock_io_device(ioc, wwid, NULL); 8024 8025 return; 8026 } 8027 8028 /** 8029 * _scsih_pcie_add_device - creating pcie device object 8030 * @ioc: per adapter object 8031 * @handle: pcie device handle 8032 * 8033 * Creating end device object, stored in ioc->pcie_device_list. 8034 * 8035 * Return: 1 means queue the event later, 0 means complete the event 8036 */ 8037 static int 8038 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 8039 { 8040 Mpi26PCIeDevicePage0_t pcie_device_pg0; 8041 Mpi26PCIeDevicePage2_t pcie_device_pg2; 8042 Mpi2ConfigReply_t mpi_reply; 8043 struct _pcie_device *pcie_device; 8044 struct _enclosure_node *enclosure_dev; 8045 u32 ioc_status; 8046 u64 wwid; 8047 8048 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 8049 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { 8050 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8051 __FILE__, __LINE__, __func__); 8052 return 0; 8053 } 8054 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8055 MPI2_IOCSTATUS_MASK; 8056 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8057 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8058 __FILE__, __LINE__, __func__); 8059 return 0; 8060 } 8061 8062 set_bit(handle, ioc->pend_os_device_add); 8063 wwid = le64_to_cpu(pcie_device_pg0.WWID); 8064 8065 /* check if device is present */ 8066 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8067 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8068 ioc_err(ioc, "device is not present 
handle(0x04%x)!!!\n", 8069 handle); 8070 return 0; 8071 } 8072 8073 /* check if there were any issues with discovery */ 8074 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8075 pcie_device_pg0.AccessStatus)) 8076 return 0; 8077 8078 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu 8079 (pcie_device_pg0.DeviceInfo)))) 8080 return 0; 8081 8082 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); 8083 if (pcie_device) { 8084 clear_bit(handle, ioc->pend_os_device_add); 8085 pcie_device_put(pcie_device); 8086 return 0; 8087 } 8088 8089 /* PCIe Device Page 2 contains read-only information about a 8090 * specific NVMe device; therefore, this page is only 8091 * valid for NVMe devices and skip for pcie devices of type scsi. 8092 */ 8093 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8094 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8095 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 8096 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 8097 handle)) { 8098 ioc_err(ioc, 8099 "failure at %s:%d/%s()!\n", __FILE__, 8100 __LINE__, __func__); 8101 return 0; 8102 } 8103 8104 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8105 MPI2_IOCSTATUS_MASK; 8106 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8107 ioc_err(ioc, 8108 "failure at %s:%d/%s()!\n", __FILE__, 8109 __LINE__, __func__); 8110 return 0; 8111 } 8112 } 8113 8114 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); 8115 if (!pcie_device) { 8116 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8117 __FILE__, __LINE__, __func__); 8118 return 0; 8119 } 8120 8121 kref_init(&pcie_device->refcount); 8122 pcie_device->id = ioc->pcie_target_id++; 8123 pcie_device->channel = PCIE_CHANNEL; 8124 pcie_device->handle = handle; 8125 pcie_device->access_status = pcie_device_pg0.AccessStatus; 8126 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8127 pcie_device->wwid = wwid; 8128 pcie_device->port_num = pcie_device_pg0.PortNum; 8129 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & 8130 
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 8131 8132 pcie_device->enclosure_handle = 8133 le16_to_cpu(pcie_device_pg0.EnclosureHandle); 8134 if (pcie_device->enclosure_handle != 0) 8135 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); 8136 8137 if (le32_to_cpu(pcie_device_pg0.Flags) & 8138 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 8139 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; 8140 memcpy(&pcie_device->connector_name[0], 8141 &pcie_device_pg0.ConnectorName[0], 4); 8142 } else { 8143 pcie_device->enclosure_level = 0; 8144 pcie_device->connector_name[0] = '\0'; 8145 } 8146 8147 /* get enclosure_logical_id */ 8148 if (pcie_device->enclosure_handle) { 8149 enclosure_dev = 8150 mpt3sas_scsih_enclosure_find_by_handle(ioc, 8151 pcie_device->enclosure_handle); 8152 if (enclosure_dev) 8153 pcie_device->enclosure_logical_id = 8154 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 8155 } 8156 /* TODO -- Add device name once FW supports it */ 8157 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8158 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8159 pcie_device->nvme_mdts = 8160 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); 8161 pcie_device->shutdown_latency = 8162 le16_to_cpu(pcie_device_pg2.ShutdownLatency); 8163 /* 8164 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency 8165 * if drive's RTD3 Entry Latency is greater then IOC's 8166 * max_shutdown_latency. 
8167 */ 8168 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) 8169 ioc->max_shutdown_latency = 8170 pcie_device->shutdown_latency; 8171 if (pcie_device_pg2.ControllerResetTO) 8172 pcie_device->reset_timeout = 8173 pcie_device_pg2.ControllerResetTO; 8174 else 8175 pcie_device->reset_timeout = 30; 8176 } else 8177 pcie_device->reset_timeout = 30; 8178 8179 if (ioc->wait_for_discovery_to_complete) 8180 _scsih_pcie_device_init_add(ioc, pcie_device); 8181 else 8182 _scsih_pcie_device_add(ioc, pcie_device); 8183 8184 pcie_device_put(pcie_device); 8185 return 0; 8186 } 8187 8188 /** 8189 * _scsih_pcie_topology_change_event_debug - debug for topology 8190 * event 8191 * @ioc: per adapter object 8192 * @event_data: event data payload 8193 * Context: user. 8194 */ 8195 static void 8196 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 8197 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 8198 { 8199 int i; 8200 u16 handle; 8201 u16 reason_code; 8202 u8 port_number; 8203 char *status_str = NULL; 8204 u8 link_rate, prev_link_rate; 8205 8206 switch (event_data->SwitchStatus) { 8207 case MPI26_EVENT_PCIE_TOPO_SS_ADDED: 8208 status_str = "add"; 8209 break; 8210 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 8211 status_str = "remove"; 8212 break; 8213 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING: 8214 case 0: 8215 status_str = "responding"; 8216 break; 8217 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 8218 status_str = "remove delay"; 8219 break; 8220 default: 8221 status_str = "unknown status"; 8222 break; 8223 } 8224 ioc_info(ioc, "pcie topology change: (%s)\n", status_str); 8225 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)" 8226 "start_port(%02d), count(%d)\n", 8227 le16_to_cpu(event_data->SwitchDevHandle), 8228 le16_to_cpu(event_data->EnclosureHandle), 8229 event_data->StartPortNum, event_data->NumEntries); 8230 for (i = 0; i < event_data->NumEntries; i++) { 8231 handle = 8232 
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 8233 if (!handle) 8234 continue; 8235 port_number = event_data->StartPortNum + i; 8236 reason_code = event_data->PortEntry[i].PortStatus; 8237 switch (reason_code) { 8238 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: 8239 status_str = "target add"; 8240 break; 8241 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 8242 status_str = "target remove"; 8243 break; 8244 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 8245 status_str = "delay target remove"; 8246 break; 8247 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 8248 status_str = "link rate change"; 8249 break; 8250 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE: 8251 status_str = "target responding"; 8252 break; 8253 default: 8254 status_str = "unknown"; 8255 break; 8256 } 8257 link_rate = event_data->PortEntry[i].CurrentPortInfo & 8258 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8259 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & 8260 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8261 pr_info("\tport(%02d), attached_handle(0x%04x): %s:" 8262 " link rate: new(0x%02x), old(0x%02x)\n", port_number, 8263 handle, status_str, link_rate, prev_link_rate); 8264 } 8265 } 8266 8267 /** 8268 * _scsih_pcie_topology_change_event - handle PCIe topology 8269 * changes 8270 * @ioc: per adapter object 8271 * @fw_event: The fw_event_work object 8272 * Context: user. 
 *
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
	    (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* Bail out while the host is resetting, being removed, or in PCI
	 * error recovery - the topology will be rediscovered afterwards.
	 */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* ignore/abort conditions are re-checked every iteration
		 * because they can be set asynchronously while we loop.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
		    & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
		    & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however it's
			 * returning BUSY for sometime. Then before the Device
			 * Missing Delay expires and the device becomes READY,
			 * the device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* Device vanished while an OS add was still pending:
			 * rewrite this entry's status in-place so the
			 * DEV_ADDED case below re-adds it.
			 */
			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
			    MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we don't have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
				    MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}

/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	/* Map the firmware reason code to a human-readable string. */
	switch (event_data->ReasonCode) {
	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
		reason_str = "device init failure";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
		reason_str = "pcie hot reset failed";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "PCIE device status change: (%s)\n"
		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->WWID),
		 le16_to_cpu(event_data->TaskTag));
	/* SMART-data events carry additional sense qualifiers. */
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_pcie_device_status_change_event - handle device status
 * change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
	    (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
							     event_data);

	/* Only internal-device-reset start/complete affect driver state;
	 * all other reason codes are informational.
	 */
	if (event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* tm_busy is toggled under pcie_device_lock while the firmware's
	 * internal device reset is in flight.
	 */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
 out:
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		reason_str = "enclosure add";
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		reason_str = "enclosure remove";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "enclosure status change: (%s)\n"
		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->EnclosureHandle),
		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
		 le16_to_cpu(event_data->StartSlot));
}

/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
		     (Mpi2EventDataSasEnclDevStatusChange_t *)
		     fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
						enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		/* Allocate and cache the enclosure page 0 data the first
		 * time this enclosure handle is seen.
		 */
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
				   MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
				      &ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}

/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n",
					 __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* Walk every outstanding SCSI IO and query/abort as needed.
	 * NOTE: scsi_lookup_lock is deliberately dropped around each TM
	 * request (which sleeps) and re-taken before the next iteration.
	 */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
			    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
			    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* Another broadcast AEN arrived while we were processing: rescan. */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventDataSasDiscovery_t *event_data =
		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
		ioc_info(ioc, "discovery event: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont("discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
	}

	/* First discovery-started event with no host phys yet: register
	 * the SAS host.
	 */
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
	    !ioc->sas_hba.num_phys) {
		if (disable_discovery > 0 && ioc->shost_recovery) {
			/* Wait for the reset to complete */
			/* NOTE: busy-sleeps in 1s steps; runs in user
			 * (workqueue) context so blocking is allowed.
			 */
			while (ioc->shost_recovery)
				ssleep(1);
		}
		_scsih_sas_host_add(ioc);
	}
}

/**
 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
 * events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;

	/* Log-only handler: reports SMP failures/timeouts during expander
	 * discovery; no driver state is changed.
	 */
	switch (event_data->ReasonCode) {
	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi26EventDataPCIeEnumeration_t *event_data =
		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;

	/* Debug-logging only; suppressed unless event-work logging is on. */
	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
		return;

	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		 "started" : "completed",
		 event_data->Flags);
	if (event_data->EnumerationStatus)
		pr_cont("enumeration_status(0x%08x)",
			le32_to_cpu(event_data->EnumerationStatus));
	pr_cont("\n");
}

/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* Not applicable on MPI 2.0 (SAS2) generation controllers. */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* scsih_cmds is a single-slot internal command channel; the mutex
	 * plus status field serialize its use.
	 */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): issue_reset is passed by value here yet
		 * tested below — presumably mpt3sas_check_cmd_timeout is a
		 * macro that writes it; confirm in mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 **/
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	/* Toggle upper-level-driver attach and force the mid-layer to
	 * re-evaluate the device (used when hiding/exposing RAID members).
	 */
	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
		    sdev->no_uld_attach ? "hiding" : "exposing");
	WARN_ON(scsi_device_reprobe(sdev));
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Already tracked: nothing to do. */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	/* During initial discovery only record the volume (and consider it
	 * as boot device); otherwise register it with the SCSI mid-layer
	 * right away, rolling back on failure.
	 */
	if (!ioc->wait_for_discovery_to_complete) {
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
				     raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}

/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			/* Flag the target deleted so in-flight IO paths stop
			 * using it before the mid-layer teardown below.
			 */
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* scsi_remove_target() may sleep, so it runs after the lock drop. */
	if (starget)
		scsi_remove_target(&starget->dev);
}

/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* Detach the device from its former volume and drop the
		 * RAID-component flag so the ULD can bind to it again.
		 */
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* Resolve the owning volume before taking the device lock. */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
					       &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	/* (void *)1 => set no_uld_attach in _scsih_reprobe_lun. */
	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	_scsih_device_remove_by_handle(ioc, handle);
}

/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
9152 */ 9153 static void 9154 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, 9155 Mpi2EventIrConfigElement_t *element) 9156 { 9157 struct _sas_device *sas_device; 9158 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 9159 Mpi2ConfigReply_t mpi_reply; 9160 Mpi2SasDevicePage0_t sas_device_pg0; 9161 u32 ioc_status; 9162 u64 sas_address; 9163 u16 parent_handle; 9164 9165 set_bit(handle, ioc->pd_handles); 9166 9167 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 9168 if (sas_device) { 9169 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 9170 sas_device_put(sas_device); 9171 return; 9172 } 9173 9174 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 9175 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 9176 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9177 __FILE__, __LINE__, __func__); 9178 return; 9179 } 9180 9181 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9182 MPI2_IOCSTATUS_MASK; 9183 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9184 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9185 __FILE__, __LINE__, __func__); 9186 return; 9187 } 9188 9189 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9190 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 9191 mpt3sas_transport_update_links(ioc, sas_address, handle, 9192 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 9193 mpt3sas_get_port_by_id(ioc, 9194 sas_device_pg0.PhysicalPort, 0)); 9195 9196 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 9197 _scsih_add_device(ioc, handle, 0, 1); 9198 } 9199 9200 /** 9201 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events 9202 * @ioc: per adapter object 9203 * @event_data: event data payload 9204 * Context: user. 
9205 */ 9206 static void 9207 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 9208 Mpi2EventDataIrConfigChangeList_t *event_data) 9209 { 9210 Mpi2EventIrConfigElement_t *element; 9211 u8 element_type; 9212 int i; 9213 char *reason_str = NULL, *element_str = NULL; 9214 9215 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 9216 9217 ioc_info(ioc, "raid config change: (%s), elements(%d)\n", 9218 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? 9219 "foreign" : "native", 9220 event_data->NumElements); 9221 for (i = 0; i < event_data->NumElements; i++, element++) { 9222 switch (element->ReasonCode) { 9223 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 9224 reason_str = "add"; 9225 break; 9226 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 9227 reason_str = "remove"; 9228 break; 9229 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: 9230 reason_str = "no change"; 9231 break; 9232 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 9233 reason_str = "hide"; 9234 break; 9235 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 9236 reason_str = "unhide"; 9237 break; 9238 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 9239 reason_str = "volume_created"; 9240 break; 9241 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 9242 reason_str = "volume_deleted"; 9243 break; 9244 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 9245 reason_str = "pd_created"; 9246 break; 9247 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 9248 reason_str = "pd_deleted"; 9249 break; 9250 default: 9251 reason_str = "unknown reason"; 9252 break; 9253 } 9254 element_type = le16_to_cpu(element->ElementFlags) & 9255 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; 9256 switch (element_type) { 9257 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: 9258 element_str = "volume"; 9259 break; 9260 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: 9261 element_str = "phys disk"; 9262 break; 9263 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: 9264 element_str = "hot spare"; 9265 break; 9266 default: 9267 element_str = 
"unknown element"; 9268 break; 9269 } 9270 pr_info("\t(%s:%s), vol handle(0x%04x), " \ 9271 "pd handle(0x%04x), pd num(0x%02x)\n", element_str, 9272 reason_str, le16_to_cpu(element->VolDevHandle), 9273 le16_to_cpu(element->PhysDiskDevHandle), 9274 element->PhysDiskNum); 9275 } 9276 } 9277 9278 /** 9279 * _scsih_sas_ir_config_change_event - handle ir configuration change events 9280 * @ioc: per adapter object 9281 * @fw_event: The fw_event_work object 9282 * Context: user. 9283 */ 9284 static void 9285 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, 9286 struct fw_event_work *fw_event) 9287 { 9288 Mpi2EventIrConfigElement_t *element; 9289 int i; 9290 u8 foreign_config; 9291 Mpi2EventDataIrConfigChangeList_t *event_data = 9292 (Mpi2EventDataIrConfigChangeList_t *) 9293 fw_event->event_data; 9294 9295 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9296 (!ioc->hide_ir_msg)) 9297 _scsih_sas_ir_config_change_event_debug(ioc, event_data); 9298 9299 foreign_config = (le32_to_cpu(event_data->Flags) & 9300 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; 9301 9302 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 9303 if (ioc->shost_recovery && 9304 ioc->hba_mpi_version_belonged != MPI2_VERSION) { 9305 for (i = 0; i < event_data->NumElements; i++, element++) { 9306 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) 9307 _scsih_ir_fastpath(ioc, 9308 le16_to_cpu(element->PhysDiskDevHandle), 9309 element->PhysDiskNum); 9310 } 9311 return; 9312 } 9313 9314 for (i = 0; i < event_data->NumElements; i++, element++) { 9315 9316 switch (element->ReasonCode) { 9317 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 9318 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 9319 if (!foreign_config) 9320 _scsih_sas_volume_add(ioc, element); 9321 break; 9322 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 9323 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 9324 if (!foreign_config) 9325 _scsih_sas_volume_delete(ioc, 9326 le16_to_cpu(element->VolDevHandle)); 9327 break; 9328 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 9329 if (!ioc->is_warpdrive) 9330 _scsih_sas_pd_hide(ioc, element); 9331 break; 9332 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 9333 if (!ioc->is_warpdrive) 9334 _scsih_sas_pd_expose(ioc, element); 9335 break; 9336 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 9337 if (!ioc->is_warpdrive) 9338 _scsih_sas_pd_add(ioc, element); 9339 break; 9340 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 9341 if (!ioc->is_warpdrive) 9342 _scsih_sas_pd_delete(ioc, element); 9343 break; 9344 } 9345 } 9346 } 9347 9348 /** 9349 * _scsih_sas_ir_volume_event - IR volume event 9350 * @ioc: per adapter object 9351 * @fw_event: The fw_event_work object 9352 * Context: user. 
9353 */ 9354 static void 9355 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, 9356 struct fw_event_work *fw_event) 9357 { 9358 u64 wwid; 9359 unsigned long flags; 9360 struct _raid_device *raid_device; 9361 u16 handle; 9362 u32 state; 9363 int rc; 9364 Mpi2EventDataIrVolume_t *event_data = 9365 (Mpi2EventDataIrVolume_t *) fw_event->event_data; 9366 9367 if (ioc->shost_recovery) 9368 return; 9369 9370 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 9371 return; 9372 9373 handle = le16_to_cpu(event_data->VolDevHandle); 9374 state = le32_to_cpu(event_data->NewValue); 9375 if (!ioc->hide_ir_msg) 9376 dewtprintk(ioc, 9377 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9378 __func__, handle, 9379 le32_to_cpu(event_data->PreviousValue), 9380 state)); 9381 switch (state) { 9382 case MPI2_RAID_VOL_STATE_MISSING: 9383 case MPI2_RAID_VOL_STATE_FAILED: 9384 _scsih_sas_volume_delete(ioc, handle); 9385 break; 9386 9387 case MPI2_RAID_VOL_STATE_ONLINE: 9388 case MPI2_RAID_VOL_STATE_DEGRADED: 9389 case MPI2_RAID_VOL_STATE_OPTIMAL: 9390 9391 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9392 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9393 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9394 9395 if (raid_device) 9396 break; 9397 9398 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); 9399 if (!wwid) { 9400 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9401 __FILE__, __LINE__, __func__); 9402 break; 9403 } 9404 9405 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); 9406 if (!raid_device) { 9407 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9408 __FILE__, __LINE__, __func__); 9409 break; 9410 } 9411 9412 raid_device->id = ioc->sas_id++; 9413 raid_device->channel = RAID_CHANNEL; 9414 raid_device->handle = handle; 9415 raid_device->wwid = wwid; 9416 _scsih_raid_device_add(ioc, raid_device); 9417 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 9418 raid_device->id, 0); 9419 if (rc) 9420 
_scsih_raid_device_remove(ioc, raid_device); 9421 break; 9422 9423 case MPI2_RAID_VOL_STATE_INITIALIZING: 9424 default: 9425 break; 9426 } 9427 } 9428 9429 /** 9430 * _scsih_sas_ir_physical_disk_event - PD event 9431 * @ioc: per adapter object 9432 * @fw_event: The fw_event_work object 9433 * Context: user. 9434 */ 9435 static void 9436 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, 9437 struct fw_event_work *fw_event) 9438 { 9439 u16 handle, parent_handle; 9440 u32 state; 9441 struct _sas_device *sas_device; 9442 Mpi2ConfigReply_t mpi_reply; 9443 Mpi2SasDevicePage0_t sas_device_pg0; 9444 u32 ioc_status; 9445 Mpi2EventDataIrPhysicalDisk_t *event_data = 9446 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; 9447 u64 sas_address; 9448 9449 if (ioc->shost_recovery) 9450 return; 9451 9452 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 9453 return; 9454 9455 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 9456 state = le32_to_cpu(event_data->NewValue); 9457 9458 if (!ioc->hide_ir_msg) 9459 dewtprintk(ioc, 9460 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9461 __func__, handle, 9462 le32_to_cpu(event_data->PreviousValue), 9463 state)); 9464 9465 switch (state) { 9466 case MPI2_RAID_PD_STATE_ONLINE: 9467 case MPI2_RAID_PD_STATE_DEGRADED: 9468 case MPI2_RAID_PD_STATE_REBUILDING: 9469 case MPI2_RAID_PD_STATE_OPTIMAL: 9470 case MPI2_RAID_PD_STATE_HOT_SPARE: 9471 9472 if (!ioc->is_warpdrive) 9473 set_bit(handle, ioc->pd_handles); 9474 9475 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 9476 if (sas_device) { 9477 sas_device_put(sas_device); 9478 return; 9479 } 9480 9481 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9482 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 9483 handle))) { 9484 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9485 __FILE__, __LINE__, __func__); 9486 return; 9487 } 9488 9489 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9490 MPI2_IOCSTATUS_MASK; 9491 if (ioc_status 
!= MPI2_IOCSTATUS_SUCCESS) { 9492 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9493 __FILE__, __LINE__, __func__); 9494 return; 9495 } 9496 9497 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9498 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 9499 mpt3sas_transport_update_links(ioc, sas_address, handle, 9500 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 9501 mpt3sas_get_port_by_id(ioc, 9502 sas_device_pg0.PhysicalPort, 0)); 9503 9504 _scsih_add_device(ioc, handle, 0, 1); 9505 9506 break; 9507 9508 case MPI2_RAID_PD_STATE_OFFLINE: 9509 case MPI2_RAID_PD_STATE_NOT_CONFIGURED: 9510 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: 9511 default: 9512 break; 9513 } 9514 } 9515 9516 /** 9517 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event 9518 * @ioc: per adapter object 9519 * @event_data: event data payload 9520 * Context: user. 9521 */ 9522 static void 9523 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, 9524 Mpi2EventDataIrOperationStatus_t *event_data) 9525 { 9526 char *reason_str = NULL; 9527 9528 switch (event_data->RAIDOperation) { 9529 case MPI2_EVENT_IR_RAIDOP_RESYNC: 9530 reason_str = "resync"; 9531 break; 9532 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: 9533 reason_str = "online capacity expansion"; 9534 break; 9535 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: 9536 reason_str = "consistency check"; 9537 break; 9538 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: 9539 reason_str = "background init"; 9540 break; 9541 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: 9542 reason_str = "make data consistent"; 9543 break; 9544 } 9545 9546 if (!reason_str) 9547 return; 9548 9549 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", 9550 reason_str, 9551 le16_to_cpu(event_data->VolDevHandle), 9552 event_data->PercentComplete); 9553 } 9554 9555 /** 9556 * _scsih_sas_ir_operation_status_event - handle RAID operation events 9557 * @ioc: per adapter object 9558 * @fw_event: 
The fw_event_work object 9559 * Context: user. 9560 */ 9561 static void 9562 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, 9563 struct fw_event_work *fw_event) 9564 { 9565 Mpi2EventDataIrOperationStatus_t *event_data = 9566 (Mpi2EventDataIrOperationStatus_t *) 9567 fw_event->event_data; 9568 static struct _raid_device *raid_device; 9569 unsigned long flags; 9570 u16 handle; 9571 9572 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9573 (!ioc->hide_ir_msg)) 9574 _scsih_sas_ir_operation_status_event_debug(ioc, 9575 event_data); 9576 9577 /* code added for raid transport support */ 9578 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 9579 9580 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9581 handle = le16_to_cpu(event_data->VolDevHandle); 9582 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9583 if (raid_device) 9584 raid_device->percent_complete = 9585 event_data->PercentComplete; 9586 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9587 } 9588 } 9589 9590 /** 9591 * _scsih_prep_device_scan - initialize parameters prior to device scan 9592 * @ioc: per adapter object 9593 * 9594 * Set the deleted flag prior to device scan. If the device is found during 9595 * the scan, then we clear the deleted flag. 9596 */ 9597 static void 9598 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) 9599 { 9600 struct MPT3SAS_DEVICE *sas_device_priv_data; 9601 struct scsi_device *sdev; 9602 9603 shost_for_each_device(sdev, ioc->shost) { 9604 sas_device_priv_data = sdev->hostdata; 9605 if (sas_device_priv_data && sas_device_priv_data->sas_target) 9606 sas_device_priv_data->sas_target->deleted = 1; 9607 } 9608 } 9609 9610 /** 9611 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 9612 * @ioc: per adapter object 9613 * @sas_device_pg0: SAS Device page 0 9614 * 9615 * After host reset, find out whether devices are still responding. 9616 * Used in _scsih_remove_unresponsive_sas_devices. 
9617 */ 9618 static void 9619 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, 9620 Mpi2SasDevicePage0_t *sas_device_pg0) 9621 { 9622 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9623 struct scsi_target *starget; 9624 struct _sas_device *sas_device = NULL; 9625 struct _enclosure_node *enclosure_dev = NULL; 9626 unsigned long flags; 9627 struct hba_port *port = mpt3sas_get_port_by_id( 9628 ioc, sas_device_pg0->PhysicalPort, 0); 9629 9630 if (sas_device_pg0->EnclosureHandle) { 9631 enclosure_dev = 9632 mpt3sas_scsih_enclosure_find_by_handle(ioc, 9633 le16_to_cpu(sas_device_pg0->EnclosureHandle)); 9634 if (enclosure_dev == NULL) 9635 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 9636 sas_device_pg0->EnclosureHandle); 9637 } 9638 spin_lock_irqsave(&ioc->sas_device_lock, flags); 9639 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 9640 if (sas_device->sas_address != le64_to_cpu( 9641 sas_device_pg0->SASAddress)) 9642 continue; 9643 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot)) 9644 continue; 9645 if (sas_device->port != port) 9646 continue; 9647 sas_device->responding = 1; 9648 starget = sas_device->starget; 9649 if (starget && starget->hostdata) { 9650 sas_target_priv_data = starget->hostdata; 9651 sas_target_priv_data->tm_busy = 0; 9652 sas_target_priv_data->deleted = 0; 9653 } else 9654 sas_target_priv_data = NULL; 9655 if (starget) { 9656 starget_printk(KERN_INFO, starget, 9657 "handle(0x%04x), sas_addr(0x%016llx)\n", 9658 le16_to_cpu(sas_device_pg0->DevHandle), 9659 (unsigned long long) 9660 sas_device->sas_address); 9661 9662 if (sas_device->enclosure_handle != 0) 9663 starget_printk(KERN_INFO, starget, 9664 "enclosure logical id(0x%016llx), slot(%d)\n", 9665 (unsigned long long) 9666 sas_device->enclosure_logical_id, 9667 sas_device->slot); 9668 } 9669 if (le16_to_cpu(sas_device_pg0->Flags) & 9670 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 9671 sas_device->enclosure_level = 9672 
sas_device_pg0->EnclosureLevel; 9673 memcpy(&sas_device->connector_name[0], 9674 &sas_device_pg0->ConnectorName[0], 4); 9675 } else { 9676 sas_device->enclosure_level = 0; 9677 sas_device->connector_name[0] = '\0'; 9678 } 9679 9680 sas_device->enclosure_handle = 9681 le16_to_cpu(sas_device_pg0->EnclosureHandle); 9682 sas_device->is_chassis_slot_valid = 0; 9683 if (enclosure_dev) { 9684 sas_device->enclosure_logical_id = le64_to_cpu( 9685 enclosure_dev->pg0.EnclosureLogicalID); 9686 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 9687 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 9688 sas_device->is_chassis_slot_valid = 1; 9689 sas_device->chassis_slot = 9690 enclosure_dev->pg0.ChassisSlot; 9691 } 9692 } 9693 9694 if (sas_device->handle == le16_to_cpu( 9695 sas_device_pg0->DevHandle)) 9696 goto out; 9697 pr_info("\thandle changed from(0x%04x)!!!\n", 9698 sas_device->handle); 9699 sas_device->handle = le16_to_cpu( 9700 sas_device_pg0->DevHandle); 9701 if (sas_target_priv_data) 9702 sas_target_priv_data->handle = 9703 le16_to_cpu(sas_device_pg0->DevHandle); 9704 goto out; 9705 } 9706 out: 9707 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 9708 } 9709 9710 /** 9711 * _scsih_create_enclosure_list_after_reset - Free Existing list, 9712 * And create enclosure list by scanning all Enclosure Page(0)s 9713 * @ioc: per adapter object 9714 */ 9715 static void 9716 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) 9717 { 9718 struct _enclosure_node *enclosure_dev; 9719 Mpi2ConfigReply_t mpi_reply; 9720 u16 enclosure_handle; 9721 int rc; 9722 9723 /* Free existing enclosure list */ 9724 mpt3sas_free_enclosure_list(ioc); 9725 9726 /* Re constructing enclosure list after reset*/ 9727 enclosure_handle = 0xFFFF; 9728 do { 9729 enclosure_dev = 9730 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL); 9731 if (!enclosure_dev) { 9732 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9733 __FILE__, __LINE__, __func__); 9734 return; 9735 } 9736 rc = 
mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 9737 &enclosure_dev->pg0, 9738 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE, 9739 enclosure_handle); 9740 9741 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & 9742 MPI2_IOCSTATUS_MASK)) { 9743 kfree(enclosure_dev); 9744 return; 9745 } 9746 list_add_tail(&enclosure_dev->list, 9747 &ioc->enclosure_list); 9748 enclosure_handle = 9749 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle); 9750 } while (1); 9751 } 9752 9753 /** 9754 * _scsih_search_responding_sas_devices - 9755 * @ioc: per adapter object 9756 * 9757 * After host reset, find out whether devices are still responding. 9758 * If not remove. 9759 */ 9760 static void 9761 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) 9762 { 9763 Mpi2SasDevicePage0_t sas_device_pg0; 9764 Mpi2ConfigReply_t mpi_reply; 9765 u16 ioc_status; 9766 u16 handle; 9767 u32 device_info; 9768 9769 ioc_info(ioc, "search for end-devices: start\n"); 9770 9771 if (list_empty(&ioc->sas_device_list)) 9772 goto out; 9773 9774 handle = 0xFFFF; 9775 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9776 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9777 handle))) { 9778 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9779 MPI2_IOCSTATUS_MASK; 9780 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 9781 break; 9782 handle = le16_to_cpu(sas_device_pg0.DevHandle); 9783 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 9784 if (!(_scsih_is_end_device(device_info))) 9785 continue; 9786 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); 9787 } 9788 9789 out: 9790 ioc_info(ioc, "search for end-devices: complete\n"); 9791 } 9792 9793 /** 9794 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding 9795 * @ioc: per adapter object 9796 * @pcie_device_pg0: PCIe Device page 0 9797 * 9798 * After host reset, find out whether devices are still responding. 9799 * Used in _scsih_remove_unresponding_devices. 
9800 */ 9801 static void 9802 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, 9803 Mpi26PCIeDevicePage0_t *pcie_device_pg0) 9804 { 9805 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9806 struct scsi_target *starget; 9807 struct _pcie_device *pcie_device; 9808 unsigned long flags; 9809 9810 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9811 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 9812 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 9813 && (pcie_device->slot == le16_to_cpu( 9814 pcie_device_pg0->Slot))) { 9815 pcie_device->access_status = 9816 pcie_device_pg0->AccessStatus; 9817 pcie_device->responding = 1; 9818 starget = pcie_device->starget; 9819 if (starget && starget->hostdata) { 9820 sas_target_priv_data = starget->hostdata; 9821 sas_target_priv_data->tm_busy = 0; 9822 sas_target_priv_data->deleted = 0; 9823 } else 9824 sas_target_priv_data = NULL; 9825 if (starget) { 9826 starget_printk(KERN_INFO, starget, 9827 "handle(0x%04x), wwid(0x%016llx) ", 9828 pcie_device->handle, 9829 (unsigned long long)pcie_device->wwid); 9830 if (pcie_device->enclosure_handle != 0) 9831 starget_printk(KERN_INFO, starget, 9832 "enclosure logical id(0x%016llx), " 9833 "slot(%d)\n", 9834 (unsigned long long) 9835 pcie_device->enclosure_logical_id, 9836 pcie_device->slot); 9837 } 9838 9839 if (((le32_to_cpu(pcie_device_pg0->Flags)) & 9840 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && 9841 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 9842 pcie_device->enclosure_level = 9843 pcie_device_pg0->EnclosureLevel; 9844 memcpy(&pcie_device->connector_name[0], 9845 &pcie_device_pg0->ConnectorName[0], 4); 9846 } else { 9847 pcie_device->enclosure_level = 0; 9848 pcie_device->connector_name[0] = '\0'; 9849 } 9850 9851 if (pcie_device->handle == le16_to_cpu( 9852 pcie_device_pg0->DevHandle)) 9853 goto out; 9854 pr_info("\thandle changed from(0x%04x)!!!\n", 9855 pcie_device->handle); 9856 pcie_device->handle = le16_to_cpu( 9857 
pcie_device_pg0->DevHandle); 9858 if (sas_target_priv_data) 9859 sas_target_priv_data->handle = 9860 le16_to_cpu(pcie_device_pg0->DevHandle); 9861 goto out; 9862 } 9863 } 9864 9865 out: 9866 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9867 } 9868 9869 /** 9870 * _scsih_search_responding_pcie_devices - 9871 * @ioc: per adapter object 9872 * 9873 * After host reset, find out whether devices are still responding. 9874 * If not remove. 9875 */ 9876 static void 9877 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) 9878 { 9879 Mpi26PCIeDevicePage0_t pcie_device_pg0; 9880 Mpi2ConfigReply_t mpi_reply; 9881 u16 ioc_status; 9882 u16 handle; 9883 u32 device_info; 9884 9885 ioc_info(ioc, "search for end-devices: start\n"); 9886 9887 if (list_empty(&ioc->pcie_device_list)) 9888 goto out; 9889 9890 handle = 0xFFFF; 9891 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 9892 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9893 handle))) { 9894 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9895 MPI2_IOCSTATUS_MASK; 9896 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9897 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", 9898 __func__, ioc_status, 9899 le32_to_cpu(mpi_reply.IOCLogInfo)); 9900 break; 9901 } 9902 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9903 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 9904 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 9905 continue; 9906 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 9907 } 9908 out: 9909 ioc_info(ioc, "search for PCIe end-devices: complete\n"); 9910 } 9911 9912 /** 9913 * _scsih_mark_responding_raid_device - mark a raid_device as responding 9914 * @ioc: per adapter object 9915 * @wwid: world wide identifier for raid volume 9916 * @handle: device handle 9917 * 9918 * After host reset, find out whether devices are still responding. 9919 * Used in _scsih_remove_unresponsive_raid_devices. 
9920 */ 9921 static void 9922 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, 9923 u16 handle) 9924 { 9925 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9926 struct scsi_target *starget; 9927 struct _raid_device *raid_device; 9928 unsigned long flags; 9929 9930 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9931 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 9932 if (raid_device->wwid == wwid && raid_device->starget) { 9933 starget = raid_device->starget; 9934 if (starget && starget->hostdata) { 9935 sas_target_priv_data = starget->hostdata; 9936 sas_target_priv_data->deleted = 0; 9937 } else 9938 sas_target_priv_data = NULL; 9939 raid_device->responding = 1; 9940 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9941 starget_printk(KERN_INFO, raid_device->starget, 9942 "handle(0x%04x), wwid(0x%016llx)\n", handle, 9943 (unsigned long long)raid_device->wwid); 9944 9945 /* 9946 * WARPDRIVE: The handles of the PDs might have changed 9947 * across the host reset so re-initialize the 9948 * required data for Direct IO 9949 */ 9950 mpt3sas_init_warpdrive_properties(ioc, raid_device); 9951 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9952 if (raid_device->handle == handle) { 9953 spin_unlock_irqrestore(&ioc->raid_device_lock, 9954 flags); 9955 return; 9956 } 9957 pr_info("\thandle changed from(0x%04x)!!!\n", 9958 raid_device->handle); 9959 raid_device->handle = handle; 9960 if (sas_target_priv_data) 9961 sas_target_priv_data->handle = handle; 9962 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9963 return; 9964 } 9965 } 9966 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9967 } 9968 9969 /** 9970 * _scsih_search_responding_raid_devices - 9971 * @ioc: per adapter object 9972 * 9973 * After host reset, find out whether devices are still responding. 9974 * If not remove. 
9975 */ 9976 static void 9977 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) 9978 { 9979 Mpi2RaidVolPage1_t volume_pg1; 9980 Mpi2RaidVolPage0_t volume_pg0; 9981 Mpi2RaidPhysDiskPage0_t pd_pg0; 9982 Mpi2ConfigReply_t mpi_reply; 9983 u16 ioc_status; 9984 u16 handle; 9985 u8 phys_disk_num; 9986 9987 if (!ioc->ir_firmware) 9988 return; 9989 9990 ioc_info(ioc, "search for raid volumes: start\n"); 9991 9992 if (list_empty(&ioc->raid_device_list)) 9993 goto out; 9994 9995 handle = 0xFFFF; 9996 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, 9997 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 9998 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9999 MPI2_IOCSTATUS_MASK; 10000 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 10001 break; 10002 handle = le16_to_cpu(volume_pg1.DevHandle); 10003 10004 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, 10005 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 10006 sizeof(Mpi2RaidVolPage0_t))) 10007 continue; 10008 10009 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || 10010 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || 10011 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) 10012 _scsih_mark_responding_raid_device(ioc, 10013 le64_to_cpu(volume_pg1.WWID), handle); 10014 } 10015 10016 /* refresh the pd_handles */ 10017 if (!ioc->is_warpdrive) { 10018 phys_disk_num = 0xFF; 10019 memset(ioc->pd_handles, 0, ioc->pd_handles_sz); 10020 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 10021 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 10022 phys_disk_num))) { 10023 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10024 MPI2_IOCSTATUS_MASK; 10025 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 10026 break; 10027 phys_disk_num = pd_pg0.PhysDiskNum; 10028 handle = le16_to_cpu(pd_pg0.DevHandle); 10029 set_bit(handle, ioc->pd_handles); 10030 } 10031 } 10032 out: 10033 ioc_info(ioc, "search for responding raid volumes: complete\n"); 10034 } 
10035 10036 /** 10037 * _scsih_mark_responding_expander - mark a expander as responding 10038 * @ioc: per adapter object 10039 * @expander_pg0:SAS Expander Config Page0 10040 * 10041 * After host reset, find out whether devices are still responding. 10042 * Used in _scsih_remove_unresponsive_expanders. 10043 */ 10044 static void 10045 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, 10046 Mpi2ExpanderPage0_t *expander_pg0) 10047 { 10048 struct _sas_node *sas_expander = NULL; 10049 unsigned long flags; 10050 int i; 10051 struct _enclosure_node *enclosure_dev = NULL; 10052 u16 handle = le16_to_cpu(expander_pg0->DevHandle); 10053 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); 10054 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); 10055 struct hba_port *port = mpt3sas_get_port_by_id( 10056 ioc, expander_pg0->PhysicalPort, 0); 10057 10058 if (enclosure_handle) 10059 enclosure_dev = 10060 mpt3sas_scsih_enclosure_find_by_handle(ioc, 10061 enclosure_handle); 10062 10063 spin_lock_irqsave(&ioc->sas_node_lock, flags); 10064 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 10065 if (sas_expander->sas_address != sas_address) 10066 continue; 10067 if (sas_expander->port != port) 10068 continue; 10069 sas_expander->responding = 1; 10070 10071 if (enclosure_dev) { 10072 sas_expander->enclosure_logical_id = 10073 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 10074 sas_expander->enclosure_handle = 10075 le16_to_cpu(expander_pg0->EnclosureHandle); 10076 } 10077 10078 if (sas_expander->handle == handle) 10079 goto out; 10080 pr_info("\texpander(0x%016llx): handle changed" \ 10081 " from(0x%04x) to (0x%04x)!!!\n", 10082 (unsigned long long)sas_expander->sas_address, 10083 sas_expander->handle, handle); 10084 sas_expander->handle = handle; 10085 for (i = 0 ; i < sas_expander->num_phys ; i++) 10086 sas_expander->phy[i].handle = handle; 10087 goto out; 10088 } 10089 out: 10090 spin_unlock_irqrestore(&ioc->sas_node_lock, 
flags);
}

/**
 * _scsih_search_responding_expanders - mark responding expanders
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u64 sas_address;
	u16 handle;
	u8 port;

	ioc_info(ioc, "search for expanders: start\n");

	/* nothing to do if no expanders were known before the reset */
	if (list_empty(&ioc->sas_expander_list))
		goto out;

	/* 0xFFFF starts the GET_NEXT_HNDL walk at the first handle */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;

		handle = le16_to_cpu(expander_pg0.DevHandle);
		sas_address = le64_to_cpu(expander_pg0.SASAddress);
		port = expander_pg0.PhysicalPort;
		pr_info(
		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
		    handle, (unsigned long long)sas_address,
		    (ioc->multipath_on_hba ?
		    port : MULTIPATH_DISABLED_PORT_ID));
		_scsih_mark_responding_expander(ioc, &expander_pg0);
	}

 out:
	ioc_info(ioc, "search for expanders: complete\n");
}

/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 *
 * Prunes every SAS end device, NVMe/PCIe device, RAID volume and
 * expander whose "responding" flag was not set by the preceding
 * _scsih_search_responding_* passes, then unblocks the survivors.
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* move non-responders to the private "head" list under the lock */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	/* reuse "head" for the PCIe pass; it was drained above */
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}

/**
 * _scsih_refresh_expander_links - refresh the phy links of an expander
 * @ioc: per adapter object
 * @sas_expander: the expander sas node object
 * @handle: expander device handle
 *
 * Reads expander page 1 for every phy and pushes the attached device
 * handle and negotiated link rate into the transport layer.
 */
static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander, u16 handle)
{
	Mpi2ExpanderPage1_t expander_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int i;

	for (i = 0 ; i < sas_expander->num_phys ; i++) {
		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
		    &expander_pg1, i, handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* NegotiatedLinkRate: logical rate lives in the high nibble */
		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
		    expander_pg1.NegotiatedLinkRate >> 4,
		    sas_expander->port);
	}
}

/**
 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
 * @ioc: per adapter object
 */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t *volume_pg1;
	Mpi2RaidVolPage0_t *volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	/*
	 * NOTE(review): raid_device is declared static here; presumably
	 * harmless because this scan runs once per reset — confirm.
	 */
	static struct _raid_device
*raid_device;
	u8 retry_count;
	unsigned long flags;

	/* RAID volume pages are heap-allocated scratch buffers */
	volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
	if (!volume_pg0)
		return;

	volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
	if (!volume_pg1) {
		kfree(volume_pg0);
		return;
	}

	ioc_info(ioc, "scan devices: start\n");

	_scsih_sas_host_refresh(ioc);

	ioc_info(ioc, "\tscan devices: expanders start\n");

	/* expanders */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(expander_pg0.DevHandle);
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		port_id = expander_pg0.PhysicalPort;
		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
		    ioc, le64_to_cpu(expander_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* known expander: just refresh links; new one: add it */
		if (expander_device)
			_scsih_refresh_expander_links(ioc, expander_device,
			    handle);
		else {
			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(expander_pg0.SASAddress));
			_scsih_expander_add(ioc, handle);
			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(expander_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: expanders complete\n");

	/* without IR firmware there are no phys disks or volumes */
	if (!ioc->ir_firmware)
		goto skip_to_sas;

	ioc_info(ioc, "\tscan devices: phys disk start\n");

	/* phys disk */
	phys_disk_num = 0xFF;
	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
	    phys_disk_num))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		phys_disk_num = pd_pg0.PhysDiskNum;
		handle = le16_to_cpu(pd_pg0.DevHandle);
		/* already known to the driver — skip */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle,
		    &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			port_id = sas_device_pg0.PhysicalPort;
			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, sas_device_pg0.PhyNum,
			    MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: phys disk complete\n");

	ioc_info(ioc, "\tscan devices: volumes start\n");

	/* volumes */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1->DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1->WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		/* volume already registered — skip */
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		/* only surface volumes that are in a usable state */
		if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1->DevHandle;
			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
			    volume_pg1->DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
			    volume_pg1->DevHandle);
		}
	}

	ioc_info(ioc, "\tscan devices: volumes complete\n");

 skip_to_sas:

	ioc_info(ioc, "\tscan devices: end devices start\n");

	/* sas devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		port_id = sas_device_pg0.PhysicalPort;
		sas_device = mpt3sas_get_sdev_by_addr(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	ioc_info(ioc, "\tscan devices: end devices complete\n");
	ioc_info(ioc, "\tscan devices: pcie end devices start\n");

	/* pcie devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
		if (!(_scsih_is_nvme_pciescsi_device(
		    le32_to_cpu(pcie_device_pg0.DeviceInfo))))
			continue;
		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
		    le64_to_cpu(pcie_device_pg0.WWID));
		if (pcie_device) {
			pcie_device_put(pcie_device);
			continue;
		}
		/*
		 * NOTE(review): retry_count and parent_handle are assigned
		 * but not used in this loop — presumably leftovers from the
		 * SAS path above; confirm before removing.
		 */
		retry_count = 0;
		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
		_scsih_pcie_add_device(ioc, handle);

		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
		    handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
	}

	kfree(volume_pg0);
	kfree(volume_pg1);

	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
	ioc_info(ioc, "scan devices: complete\n");
}

/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler
for doing any required cleanup or initialization. 10561 */ 10562 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) 10563 { 10564 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__)); 10565 } 10566 10567 /** 10568 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding 10569 * scsi & tm cmds. 10570 * @ioc: per adapter object 10571 * 10572 * The handler for doing any required cleanup or initialization. 10573 */ 10574 void 10575 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) 10576 { 10577 dtmprintk(ioc, 10578 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); 10579 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { 10580 ioc->scsih_cmds.status |= MPT3_CMD_RESET; 10581 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); 10582 complete(&ioc->scsih_cmds.done); 10583 } 10584 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { 10585 ioc->tm_cmds.status |= MPT3_CMD_RESET; 10586 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); 10587 complete(&ioc->tm_cmds.done); 10588 } 10589 10590 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); 10591 memset(ioc->device_remove_in_progress, 0, 10592 ioc->device_remove_in_progress_sz); 10593 _scsih_fw_event_cleanup_queue(ioc); 10594 _scsih_flush_running_cmds(ioc); 10595 } 10596 10597 /** 10598 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih) 10599 * @ioc: per adapter object 10600 * 10601 * The handler for doing any required cleanup or initialization. 
10602 */ 10603 void 10604 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) 10605 { 10606 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); 10607 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { 10608 if (ioc->multipath_on_hba) { 10609 _scsih_sas_port_refresh(ioc); 10610 _scsih_update_vphys_after_reset(ioc); 10611 } 10612 _scsih_prep_device_scan(ioc); 10613 _scsih_create_enclosure_list_after_reset(ioc); 10614 _scsih_search_responding_sas_devices(ioc); 10615 _scsih_search_responding_pcie_devices(ioc); 10616 _scsih_search_responding_raid_devices(ioc); 10617 _scsih_search_responding_expanders(ioc); 10618 _scsih_error_recovery_delete_devices(ioc); 10619 } 10620 } 10621 10622 /** 10623 * _mpt3sas_fw_work - delayed task for processing firmware events 10624 * @ioc: per adapter object 10625 * @fw_event: The fw_event_work object 10626 * Context: user. 10627 */ 10628 static void 10629 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) 10630 { 10631 ioc->current_event = fw_event; 10632 _scsih_fw_event_del_from_list(ioc, fw_event); 10633 10634 /* the queue is being flushed so ignore this event */ 10635 if (ioc->remove_host || ioc->pci_error_recovery) { 10636 fw_event_work_put(fw_event); 10637 ioc->current_event = NULL; 10638 return; 10639 } 10640 10641 switch (fw_event->event) { 10642 case MPT3SAS_PROCESS_TRIGGER_DIAG: 10643 mpt3sas_process_trigger_data(ioc, 10644 (struct SL_WH_TRIGGERS_EVENT_DATA_T *) 10645 fw_event->event_data); 10646 break; 10647 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: 10648 while (scsi_host_in_recovery(ioc->shost) || 10649 ioc->shost_recovery) { 10650 /* 10651 * If we're unloading or cancelling the work, bail. 10652 * Otherwise, this can become an infinite loop. 
10653 */ 10654 if (ioc->remove_host || ioc->fw_events_cleanup) 10655 goto out; 10656 ssleep(1); 10657 } 10658 _scsih_remove_unresponding_devices(ioc); 10659 _scsih_del_dirty_vphy(ioc); 10660 _scsih_del_dirty_port_entries(ioc); 10661 _scsih_scan_for_devices_after_reset(ioc); 10662 /* 10663 * If diag reset has occurred during the driver load 10664 * then driver has to complete the driver load operation 10665 * by executing the following items: 10666 *- Register the devices from sas_device_init_list to SML 10667 *- clear is_driver_loading flag, 10668 *- start the watchdog thread. 10669 * In happy driver load path, above things are taken care of when 10670 * driver executes scsih_scan_finished(). 10671 */ 10672 if (ioc->is_driver_loading) 10673 _scsih_complete_devices_scanning(ioc); 10674 _scsih_set_nvme_max_shutdown_latency(ioc); 10675 break; 10676 case MPT3SAS_PORT_ENABLE_COMPLETE: 10677 ioc->start_scan = 0; 10678 if (missing_delay[0] != -1 && missing_delay[1] != -1) 10679 mpt3sas_base_update_missing_delay(ioc, missing_delay[0], 10680 missing_delay[1]); 10681 dewtprintk(ioc, 10682 ioc_info(ioc, "port enable: complete from worker thread\n")); 10683 break; 10684 case MPT3SAS_TURN_ON_PFA_LED: 10685 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); 10686 break; 10687 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 10688 _scsih_sas_topology_change_event(ioc, fw_event); 10689 break; 10690 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 10691 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 10692 _scsih_sas_device_status_change_event_debug(ioc, 10693 (Mpi2EventDataSasDeviceStatusChange_t *) 10694 fw_event->event_data); 10695 break; 10696 case MPI2_EVENT_SAS_DISCOVERY: 10697 _scsih_sas_discovery_event(ioc, fw_event); 10698 break; 10699 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 10700 _scsih_sas_device_discovery_error_event(ioc, fw_event); 10701 break; 10702 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 10703 _scsih_sas_broadcast_primitive_event(ioc, fw_event); 10704 break; 10705 case 
MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 10706 _scsih_sas_enclosure_dev_status_change_event(ioc, 10707 fw_event); 10708 break; 10709 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 10710 _scsih_sas_ir_config_change_event(ioc, fw_event); 10711 break; 10712 case MPI2_EVENT_IR_VOLUME: 10713 _scsih_sas_ir_volume_event(ioc, fw_event); 10714 break; 10715 case MPI2_EVENT_IR_PHYSICAL_DISK: 10716 _scsih_sas_ir_physical_disk_event(ioc, fw_event); 10717 break; 10718 case MPI2_EVENT_IR_OPERATION_STATUS: 10719 _scsih_sas_ir_operation_status_event(ioc, fw_event); 10720 break; 10721 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 10722 _scsih_pcie_device_status_change_event(ioc, fw_event); 10723 break; 10724 case MPI2_EVENT_PCIE_ENUMERATION: 10725 _scsih_pcie_enumeration_event(ioc, fw_event); 10726 break; 10727 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10728 _scsih_pcie_topology_change_event(ioc, fw_event); 10729 ioc->current_event = NULL; 10730 return; 10731 break; 10732 } 10733 out: 10734 fw_event_work_put(fw_event); 10735 ioc->current_event = NULL; 10736 } 10737 10738 /** 10739 * _firmware_event_work 10740 * @work: The fw_event_work object 10741 * Context: user. 10742 * 10743 * wrappers for the work thread handling firmware events 10744 */ 10745 10746 static void 10747 _firmware_event_work(struct work_struct *work) 10748 { 10749 struct fw_event_work *fw_event = container_of(work, 10750 struct fw_event_work, work); 10751 10752 _mpt3sas_fw_work(fw_event->ioc, fw_event); 10753 } 10754 10755 /** 10756 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) 10757 * @ioc: per adapter object 10758 * @msix_index: MSIX table index supplied by the OS 10759 * @reply: reply message frame(lower 32bit addr) 10760 * Context: interrupt. 10761 * 10762 * This function merely adds a new work task into ioc->firmware_event_thread. 10763 * The tasks are worked from _firmware_event_work in user context. 
*
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	/*
	 * Pre-filter at interrupt time; events that fall through the
	 * switch are queued to the firmware event work thread below.
	 */
	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* only one broadcast AEN is processed at a time; count
		 * the ones that arrive while busy
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 *log_code;

		/* WarpDrive-specific log entries only */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = (u32 *)log_entry->LogData;

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (le32_to_cpu(*log_code)) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
		    (Mpi2EventDataTemperature_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    le32_to_cpu(
			    ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in dwords — convert to bytes for the copy */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	/* drop our reference; the work queue holds its own */
	fw_event_work_put(fw_event);
	return 1;
}

/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	    &sas_expander->sas_port_list, port_list) {
		/* bail out if a host reset started mid-removal */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* recurses into child expanders first */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    sas_expander->port->port_id);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}

/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sending IoUnitControl request with shutdown operation code to alert IOC that
 * the host system is shutting down so that IOC can issue NVMe shutdown to
 * NVMe drives attached to it.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	/* scsih_cmds is a single shared command slot; the mutex serializes
	 * users and the status field tracks slot ownership.
	 */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
	    "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
	    ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		/* NOTE(review): split string literal prints
		 * "(complete):ioc_status" with no space between the halves.
		 */
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
out:
	/* Always release the shared command slot, even on timeout. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}


/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	/* Same shared scsih_cmds slot discipline as _scsih_nvme_shutdown(). */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg suppresses IR chatter (e.g. on WarpDrive). */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* fixed 10 second wait; no retry on timeout */
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				le16_to_cpu(mpi_reply->IOCStatus),
				le32_to_cpu(mpi_reply->IOCLogInfo));
	}

out:
	/* release the shared command slot on every exit path */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _scsih_get_shost_and_ioc - get shost and ioc
 * and verify whether they are NULL or not
 * @pdev: PCI device struct
 * @shost: address of scsi host pointer
 * @ioc: address of HBA adapter pointer
 *
 * Return zero if *shost and *ioc are not NULL otherwise return error number.
 */
static int
_scsih_get_shost_and_ioc(struct pci_dev *pdev,
	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
{
	/* drvdata may already be cleared if the device is being torn down */
	*shost = pci_get_drvdata(pdev);
	if (*shost == NULL) {
		dev_err(&pdev->dev, "pdev's driver data is null\n");
		return -ENXIO;
	}

	*ioc = shost_priv(*shost);
	if (*ioc == NULL) {
		dev_err(&pdev->dev, "shost's private data is null\n");
		return -ENXIO;
	}

	return 0;
}

/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* surprise removal: the device is gone, fail outstanding I/O now */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* claim the event workqueue under fw_event_lock so no new work can
	 * be queued, then destroy it outside the lock
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* mark deleted so the I/O paths stop using it */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
		list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		/* drop the reference held by pcie_device_list */
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	mpt3sas_base_detach(ioc);
	/* unlink from the global adapter list last */
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}

/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* surprise removal: fail outstanding I/O instead of waiting */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* claim and destroy the event workqueue (same pattern as remove) */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	/* notify IR volumes and NVMe drives before detaching from the IOC */
	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}


/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes.
 * Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* pick the boot device in priority order: requested, requested
	 * alternate, then current
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device = ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device = ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device = ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* channel identifies the device flavor the opaque pointer holds */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* promote from the init list to the active list under lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* snapshot the fields needed after the lock is dropped */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		/* WarpDrive hides member disks from the transport layer */
		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}

/**
 * _scsih_probe_raid - reporting raid volumes to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
11420 */ 11421 static void 11422 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) 11423 { 11424 struct _raid_device *raid_device, *raid_next; 11425 int rc; 11426 11427 list_for_each_entry_safe(raid_device, raid_next, 11428 &ioc->raid_device_list, list) { 11429 if (raid_device->starget) 11430 continue; 11431 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 11432 raid_device->id, 0); 11433 if (rc) 11434 _scsih_raid_device_remove(ioc, raid_device); 11435 } 11436 } 11437 11438 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) 11439 { 11440 struct _sas_device *sas_device = NULL; 11441 unsigned long flags; 11442 11443 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11444 if (!list_empty(&ioc->sas_device_init_list)) { 11445 sas_device = list_first_entry(&ioc->sas_device_init_list, 11446 struct _sas_device, list); 11447 sas_device_get(sas_device); 11448 } 11449 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11450 11451 return sas_device; 11452 } 11453 11454 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, 11455 struct _sas_device *sas_device) 11456 { 11457 unsigned long flags; 11458 11459 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11460 11461 /* 11462 * Since we dropped the lock during the call to port_add(), we need to 11463 * be careful here that somebody else didn't move or delete this item 11464 * while we were busy with other things. 11465 * 11466 * If it was on the list, we need a put() for the reference the list 11467 * had. Either way, we need a get() for the destination list. 
11468 */ 11469 if (!list_empty(&sas_device->list)) { 11470 list_del_init(&sas_device->list); 11471 sas_device_put(sas_device); 11472 } 11473 11474 sas_device_get(sas_device); 11475 list_add_tail(&sas_device->list, &ioc->sas_device_list); 11476 11477 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11478 } 11479 11480 /** 11481 * _scsih_probe_sas - reporting sas devices to sas transport 11482 * @ioc: per adapter object 11483 * 11484 * Called during initial loading of the driver. 11485 */ 11486 static void 11487 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) 11488 { 11489 struct _sas_device *sas_device; 11490 11491 if (ioc->hide_drives) 11492 return; 11493 11494 while ((sas_device = get_next_sas_device(ioc))) { 11495 if (!mpt3sas_transport_port_add(ioc, sas_device->handle, 11496 sas_device->sas_address_parent, sas_device->port)) { 11497 _scsih_sas_device_remove(ioc, sas_device); 11498 sas_device_put(sas_device); 11499 continue; 11500 } else if (!sas_device->starget) { 11501 /* 11502 * When asyn scanning is enabled, its not possible to 11503 * remove devices while scanning is turned on due to an 11504 * oops in scsi_sysfs_add_sdev()->add_device()-> 11505 * sysfs_addrm_start() 11506 */ 11507 if (!ioc->is_driver_loading) { 11508 mpt3sas_transport_port_remove(ioc, 11509 sas_device->sas_address, 11510 sas_device->sas_address_parent, 11511 sas_device->port); 11512 _scsih_sas_device_remove(ioc, sas_device); 11513 sas_device_put(sas_device); 11514 continue; 11515 } 11516 } 11517 sas_device_make_active(ioc, sas_device); 11518 sas_device_put(sas_device); 11519 } 11520 } 11521 11522 /** 11523 * get_next_pcie_device - Get the next pcie device 11524 * @ioc: per adapter object 11525 * 11526 * Get the next pcie device from pcie_device_init_list list. 
 *
 * Return: pcie device structure if pcie_device_init_list list is not empty
 * otherwise returns NULL
 */
static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;

	/* takes a reference; the device stays on the init list */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&ioc->pcie_device_init_list)) {
		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
				struct _pcie_device, list);
		pcie_device_get(pcie_device);
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * pcie_device_make_active - Add pcie device to pcie_device_list list
 * @ioc: per adapter object
 * @pcie_device: pcie device object
 *
 * Add the pcie device which has registered with SCSI Transport Later to
 * pcie_device_list list
 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/* if still on a list, drop that list's reference; then take a
	 * reference for the destination list
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	int rc;

	/* PCIe Device List */
	while ((pcie_device = get_next_pcie_device(ioc))) {
		/* already registered with scsi-ml */
		if (pcie_device->starget) {
			pcie_device_put(pcie_device);
			continue;
		}
		/* blocked devices are tracked but not exposed to scsi-ml */
		if (pcie_device->access_status ==
		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
			pcie_device_make_active(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		}
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
			pcie_device->id, 0);
		if (rc) {
			_scsih_pcie_device_remove(ioc, pcie_device);
			pcie_device_put(pcie_device);
			continue;
		} else if (!pcie_device->starget) {
			/*
			 * When async scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				/* TODO-- Need to find out whether this condition will
				 * occur or not
				 */
				_scsih_pcie_device_remove(ioc, pcie_device);
				pcie_device_put(pcie_device);
				continue;
			}
		}
		pcie_device_make_active(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}
}

/**
 * _scsih_probe_devices - probing for devices
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
11628 */ 11629 static void 11630 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) 11631 { 11632 u16 volume_mapping_flags; 11633 11634 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) 11635 return; /* return when IOC doesn't support initiator mode */ 11636 11637 _scsih_probe_boot_devices(ioc); 11638 11639 if (ioc->ir_firmware) { 11640 volume_mapping_flags = 11641 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & 11642 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; 11643 if (volume_mapping_flags == 11644 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { 11645 _scsih_probe_raid(ioc); 11646 _scsih_probe_sas(ioc); 11647 } else { 11648 _scsih_probe_sas(ioc); 11649 _scsih_probe_raid(ioc); 11650 } 11651 } else { 11652 _scsih_probe_sas(ioc); 11653 _scsih_probe_pcie(ioc); 11654 } 11655 } 11656 11657 /** 11658 * scsih_scan_start - scsi lld callback for .scan_start 11659 * @shost: SCSI host pointer 11660 * 11661 * The shost has the ability to discover targets on its own instead 11662 * of scanning the entire bus. In our implemention, we will kick off 11663 * firmware discovery. 11664 */ 11665 static void 11666 scsih_scan_start(struct Scsi_Host *shost) 11667 { 11668 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 11669 int rc; 11670 if (diag_buffer_enable != -1 && diag_buffer_enable != 0) 11671 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); 11672 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) 11673 mpt3sas_enable_diag_buffer(ioc, 1); 11674 11675 if (disable_discovery > 0) 11676 return; 11677 11678 ioc->start_scan = 1; 11679 rc = mpt3sas_port_enable(ioc); 11680 11681 if (rc != 0) 11682 ioc_info(ioc, "port enable: FAILED\n"); 11683 } 11684 11685 /** 11686 * _scsih_complete_devices_scanning - add the devices to sml and 11687 * complete ioc initialization. 11688 * @ioc: per adapter object 11689 * 11690 * Return nothing. 
 */
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
{

	/* report discovered devices only if port enable completed while
	 * we were waiting for discovery
	 */
	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}

	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
}

/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
 * we wait for firmware discovery to complete, then return 1.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give up after 300 seconds of polling */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* port enable still outstanding: check for fault/coredump states */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		/* keep polling */
		return 0;
	}

	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}

/**
 * scsih_map_queues - map reply queues with request queues
 * @shost: SCSI host pointer
 */
static int scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;

	/* single hw queue: nothing to map */
	if (ioc->shost->nr_hw_queues == 1)
		return 0;

	/* skip the high-iops queues when spreading over PCI irq vectors */
	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
	    ioc->pdev, ioc->high_iops_queues);
}

/* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue is updated at attach time from IOC facts */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};

/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* can_queue is updated at attach time from IOC facts */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 7,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	.map_queues			= scsih_map_queues,
};

/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 *							this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices;
 *	0 when the device id is not recognized.
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	/* invalid/tampered ids are classified here and rejected in probe */
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* unknown device: caller treats 0 as "not ours" */
	return 0;
}

/**
 * _scsih_probe - attach and add scsi host
 * @pdev: PCI device struct
 * @id: pci device id
 *
 * Return: 0 success, anything else error.
11951 */ 11952 static int 11953 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 11954 { 11955 struct MPT3SAS_ADAPTER *ioc; 11956 struct Scsi_Host *shost = NULL; 11957 int rv; 11958 u16 hba_mpi_version; 11959 11960 /* Determine in which MPI version class this pci device belongs */ 11961 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); 11962 if (hba_mpi_version == 0) 11963 return -ENODEV; 11964 11965 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, 11966 * for other generation HBA's return with -ENODEV 11967 */ 11968 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) 11969 return -ENODEV; 11970 11971 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, 11972 * for other generation HBA's return with -ENODEV 11973 */ 11974 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION 11975 || hba_mpi_version == MPI26_VERSION))) 11976 return -ENODEV; 11977 11978 switch (hba_mpi_version) { 11979 case MPI2_VERSION: 11980 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 11981 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 11982 /* Use mpt2sas driver host template for SAS 2.0 HBA's */ 11983 shost = scsi_host_alloc(&mpt2sas_driver_template, 11984 sizeof(struct MPT3SAS_ADAPTER)); 11985 if (!shost) 11986 return -ENODEV; 11987 ioc = shost_priv(shost); 11988 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 11989 ioc->hba_mpi_version_belonged = hba_mpi_version; 11990 ioc->id = mpt2_ids++; 11991 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); 11992 switch (pdev->device) { 11993 case MPI2_MFGPAGE_DEVID_SSS6200: 11994 ioc->is_warpdrive = 1; 11995 ioc->hide_ir_msg = 1; 11996 break; 11997 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 11998 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 11999 ioc->is_mcpu_endpoint = 1; 12000 break; 12001 default: 12002 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; 12003 break; 12004 } 12005 12006 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12007 ioc->multipath_on_hba = 0; 
12008 else 12009 ioc->multipath_on_hba = 1; 12010 12011 break; 12012 case MPI25_VERSION: 12013 case MPI26_VERSION: 12014 /* Use mpt3sas driver host template for SAS 3.0 HBA's */ 12015 shost = scsi_host_alloc(&mpt3sas_driver_template, 12016 sizeof(struct MPT3SAS_ADAPTER)); 12017 if (!shost) 12018 return -ENODEV; 12019 ioc = shost_priv(shost); 12020 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 12021 ioc->hba_mpi_version_belonged = hba_mpi_version; 12022 ioc->id = mpt3_ids++; 12023 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); 12024 switch (pdev->device) { 12025 case MPI26_MFGPAGE_DEVID_SAS3508: 12026 case MPI26_MFGPAGE_DEVID_SAS3508_1: 12027 case MPI26_MFGPAGE_DEVID_SAS3408: 12028 case MPI26_MFGPAGE_DEVID_SAS3516: 12029 case MPI26_MFGPAGE_DEVID_SAS3516_1: 12030 case MPI26_MFGPAGE_DEVID_SAS3416: 12031 case MPI26_MFGPAGE_DEVID_SAS3616: 12032 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 12033 ioc->is_gen35_ioc = 1; 12034 break; 12035 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 12036 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 12037 dev_err(&pdev->dev, 12038 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", 12039 pdev->device, pdev->subsystem_vendor, 12040 pdev->subsystem_device); 12041 return 1; 12042 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 12043 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 12044 dev_err(&pdev->dev, 12045 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", 12046 pdev->device, pdev->subsystem_vendor, 12047 pdev->subsystem_device); 12048 return 1; 12049 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 12050 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 12051 dev_info(&pdev->dev, 12052 "HBA is in Configurable Secure mode\n"); 12053 fallthrough; 12054 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 12055 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 12056 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; 12057 break; 12058 default: 12059 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; 12060 } 12061 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 
12062 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 12063 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { 12064 ioc->combined_reply_queue = 1; 12065 if (ioc->is_gen35_ioc) 12066 ioc->combined_reply_index_count = 12067 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; 12068 else 12069 ioc->combined_reply_index_count = 12070 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; 12071 } 12072 12073 switch (ioc->is_gen35_ioc) { 12074 case 0: 12075 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12076 ioc->multipath_on_hba = 0; 12077 else 12078 ioc->multipath_on_hba = 1; 12079 break; 12080 case 1: 12081 if (multipath_on_hba == -1 || multipath_on_hba > 0) 12082 ioc->multipath_on_hba = 1; 12083 else 12084 ioc->multipath_on_hba = 0; 12085 break; 12086 default: 12087 break; 12088 } 12089 12090 break; 12091 default: 12092 return -ENODEV; 12093 } 12094 12095 INIT_LIST_HEAD(&ioc->list); 12096 spin_lock(&gioc_lock); 12097 list_add_tail(&ioc->list, &mpt3sas_ioc_list); 12098 spin_unlock(&gioc_lock); 12099 ioc->shost = shost; 12100 ioc->pdev = pdev; 12101 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 12102 ioc->tm_cb_idx = tm_cb_idx; 12103 ioc->ctl_cb_idx = ctl_cb_idx; 12104 ioc->base_cb_idx = base_cb_idx; 12105 ioc->port_enable_cb_idx = port_enable_cb_idx; 12106 ioc->transport_cb_idx = transport_cb_idx; 12107 ioc->scsih_cb_idx = scsih_cb_idx; 12108 ioc->config_cb_idx = config_cb_idx; 12109 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 12110 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; 12111 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 12112 ioc->logging_level = logging_level; 12113 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 12114 /* Host waits for minimum of six seconds */ 12115 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; 12116 /* 12117 * Enable MEMORY MOVE support flag. 12118 */ 12119 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; 12120 /* Enable ADDITIONAL QUERY support flag. 
*/ 12121 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; 12122 12123 ioc->enable_sdev_max_qd = enable_sdev_max_qd; 12124 12125 /* misc semaphores and spin locks */ 12126 mutex_init(&ioc->reset_in_progress_mutex); 12127 /* initializing pci_access_mutex lock */ 12128 mutex_init(&ioc->pci_access_mutex); 12129 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 12130 spin_lock_init(&ioc->scsi_lookup_lock); 12131 spin_lock_init(&ioc->sas_device_lock); 12132 spin_lock_init(&ioc->sas_node_lock); 12133 spin_lock_init(&ioc->fw_event_lock); 12134 spin_lock_init(&ioc->raid_device_lock); 12135 spin_lock_init(&ioc->pcie_device_lock); 12136 spin_lock_init(&ioc->diag_trigger_lock); 12137 12138 INIT_LIST_HEAD(&ioc->sas_device_list); 12139 INIT_LIST_HEAD(&ioc->sas_device_init_list); 12140 INIT_LIST_HEAD(&ioc->sas_expander_list); 12141 INIT_LIST_HEAD(&ioc->enclosure_list); 12142 INIT_LIST_HEAD(&ioc->pcie_device_list); 12143 INIT_LIST_HEAD(&ioc->pcie_device_init_list); 12144 INIT_LIST_HEAD(&ioc->fw_event_list); 12145 INIT_LIST_HEAD(&ioc->raid_device_list); 12146 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 12147 INIT_LIST_HEAD(&ioc->delayed_tr_list); 12148 INIT_LIST_HEAD(&ioc->delayed_sc_list); 12149 INIT_LIST_HEAD(&ioc->delayed_event_ack_list); 12150 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 12151 INIT_LIST_HEAD(&ioc->reply_queue_list); 12152 INIT_LIST_HEAD(&ioc->port_table_list); 12153 12154 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); 12155 12156 /* init shost parameters */ 12157 shost->max_cmd_len = 32; 12158 shost->max_lun = max_lun; 12159 shost->transportt = mpt3sas_transport_template; 12160 shost->unique_id = ioc->id; 12161 12162 if (ioc->is_mcpu_endpoint) { 12163 /* mCPU MPI support 64K max IO */ 12164 shost->max_sectors = 128; 12165 ioc_info(ioc, "The max_sectors value is set to %d\n", 12166 shost->max_sectors); 12167 } else { 12168 if (max_sectors != 0xFFFF) { 12169 if (max_sectors < 64) { 12170 shost->max_sectors = 64; 12171 ioc_warn(ioc, 
"Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", 12172 max_sectors); 12173 } else if (max_sectors > 32767) { 12174 shost->max_sectors = 32767; 12175 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", 12176 max_sectors); 12177 } else { 12178 shost->max_sectors = max_sectors & 0xFFFE; 12179 ioc_info(ioc, "The max_sectors value is set to %d\n", 12180 shost->max_sectors); 12181 } 12182 } 12183 } 12184 /* register EEDP capabilities with SCSI layer */ 12185 if (prot_mask >= 0) 12186 scsi_host_set_prot(shost, (prot_mask & 0x07)); 12187 else 12188 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 12189 | SHOST_DIF_TYPE2_PROTECTION 12190 | SHOST_DIF_TYPE3_PROTECTION); 12191 12192 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 12193 12194 /* event thread */ 12195 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 12196 "fw_event_%s%d", ioc->driver_name, ioc->id); 12197 ioc->firmware_event_thread = alloc_ordered_workqueue( 12198 ioc->firmware_event_name, 0); 12199 if (!ioc->firmware_event_thread) { 12200 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12201 __FILE__, __LINE__, __func__); 12202 rv = -ENODEV; 12203 goto out_thread_fail; 12204 } 12205 12206 ioc->is_driver_loading = 1; 12207 if ((mpt3sas_base_attach(ioc))) { 12208 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12209 __FILE__, __LINE__, __func__); 12210 rv = -ENODEV; 12211 goto out_attach_fail; 12212 } 12213 12214 if (ioc->is_warpdrive) { 12215 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) 12216 ioc->hide_drives = 0; 12217 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) 12218 ioc->hide_drives = 1; 12219 else { 12220 if (mpt3sas_get_num_volumes(ioc)) 12221 ioc->hide_drives = 1; 12222 else 12223 ioc->hide_drives = 0; 12224 } 12225 } else 12226 ioc->hide_drives = 0; 12227 12228 shost->host_tagset = 0; 12229 shost->nr_hw_queues = 1; 12230 12231 if (ioc->is_gen35_ioc && 
	    ioc->reply_queue_count > 1 &&
	    host_tagset_enable && ioc->smp_affinity_enable) {

		shost->host_tagset = 1;
		shost->nr_hw_queues =
		    ioc->reply_queue_count - ioc->high_iops_queues;

		dev_info(&ioc->pdev->dev,
		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
		    shost->can_queue, shost->nr_hw_queues);
	}

	/* Register the host with the SCSI mid-layer; scanning below will
	 * attach devices discovered by firmware events.
	 */
	rv = scsi_add_host(shost, &pdev->dev);
	if (rv) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out_add_shost_fail;
	}

	scsi_scan_host(shost);
	mpt3sas_setup_debugfs(ioc);
	return 0;
	/* Error unwind: tear down in reverse order of the setup above. */
out_add_shost_fail:
	mpt3sas_base_detach(ioc);
out_attach_fail:
	destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
	return rv;
}

/**
 * scsih_suspend - power management suspend main entry point
 * @dev: Device struct
 *
 * Quiesces the host (watchdog stopped, requests blocked, NVMe devices
 * shut down via _scsih_nvme_shutdown()) and releases the adapter's
 * PCI resources ahead of the system suspend transition.
 *
 * Return: 0 success, anything else error.
 */
static int __maybe_unused
scsih_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (rc)
		return rc;

	/* Stop the fault-polling watchdog before tearing down resources. */
	mpt3sas_base_stop_watchdog(ioc);
	/* NOTE(review): flushes the system workqueue, not this driver's
	 * private fw_event workqueue — presumably a leftover; later kernels
	 * dropped this call. Confirm before relying on it.
	 */
	flush_scheduled_work();
	scsi_block_requests(shost);
	_scsih_nvme_shutdown(ioc);
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
		 pdev, pci_name(pdev));

	mpt3sas_base_free_resources(ioc);
	return 0;
}

/**
 * scsih_resume - power management resume main entry point
 * @dev: Device struct
 *
 * Return: 0 success, anything else error.
 */
static int __maybe_unused
scsih_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (r)
		return r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	/* Re-map the PCI resources released in scsih_suspend(). */
	ioc->pdev = pdev;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;
	/* Re-initialize the IOC; SOFT_RESET avoids the diag reset path. */
	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}

/**
 * scsih_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal: MMIO still works; let mmio_enabled() decide. */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* Unknown channel state: request a slot reset as the safe default. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * scsih_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
12374 */ 12375 static pci_ers_result_t 12376 scsih_pci_slot_reset(struct pci_dev *pdev) 12377 { 12378 struct Scsi_Host *shost; 12379 struct MPT3SAS_ADAPTER *ioc; 12380 int rc; 12381 12382 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12383 return PCI_ERS_RESULT_DISCONNECT; 12384 12385 ioc_info(ioc, "PCI error: slot reset callback!!\n"); 12386 12387 ioc->pci_error_recovery = 0; 12388 ioc->pdev = pdev; 12389 pci_restore_state(pdev); 12390 rc = mpt3sas_base_map_resources(ioc); 12391 if (rc) 12392 return PCI_ERS_RESULT_DISCONNECT; 12393 12394 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); 12395 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 12396 12397 ioc_warn(ioc, "hard reset: %s\n", 12398 (rc == 0) ? "success" : "failed"); 12399 12400 if (!rc) 12401 return PCI_ERS_RESULT_RECOVERED; 12402 else 12403 return PCI_ERS_RESULT_DISCONNECT; 12404 } 12405 12406 /** 12407 * scsih_pci_resume() - resume normal ops after PCI reset 12408 * @pdev: pointer to PCI device 12409 * 12410 * Called when the error recovery driver tells us that its 12411 * OK to resume normal operation. Use completion to allow 12412 * halted scsi ops to resume. 
12413 */ 12414 static void 12415 scsih_pci_resume(struct pci_dev *pdev) 12416 { 12417 struct Scsi_Host *shost; 12418 struct MPT3SAS_ADAPTER *ioc; 12419 12420 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12421 return; 12422 12423 ioc_info(ioc, "PCI error: resume callback!!\n"); 12424 12425 mpt3sas_base_start_watchdog(ioc); 12426 scsi_unblock_requests(ioc->shost); 12427 } 12428 12429 /** 12430 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers 12431 * @pdev: pointer to PCI device 12432 */ 12433 static pci_ers_result_t 12434 scsih_pci_mmio_enabled(struct pci_dev *pdev) 12435 { 12436 struct Scsi_Host *shost; 12437 struct MPT3SAS_ADAPTER *ioc; 12438 12439 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12440 return PCI_ERS_RESULT_DISCONNECT; 12441 12442 ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); 12443 12444 /* TODO - dump whatever for debugging purposes */ 12445 12446 /* This called only if scsih_pci_error_detected returns 12447 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still 12448 * works, no need to reset slot. 12449 */ 12450 return PCI_ERS_RESULT_RECOVERED; 12451 } 12452 12453 /** 12454 * scsih_ncq_prio_supp - Check for NCQ command priority support 12455 * @sdev: scsi device struct 12456 * 12457 * This is called when a user indicates they would like to enable 12458 * ncq command priorities. This works only on SATA devices. 12459 */ 12460 bool scsih_ncq_prio_supp(struct scsi_device *sdev) 12461 { 12462 unsigned char *buf; 12463 bool ncq_prio_supp = false; 12464 12465 if (!scsi_device_supports_vpd(sdev)) 12466 return ncq_prio_supp; 12467 12468 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL); 12469 if (!buf) 12470 return ncq_prio_supp; 12471 12472 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN)) 12473 ncq_prio_supp = (buf[213] >> 4) & 1; 12474 12475 kfree(buf); 12476 return ncq_prio_supp; 12477 } 12478 /* 12479 * The pci device ids are defined in mpi/mpi2_cnfg.h. 
 */
/* All entries wildcard the PCI subsystem vendor/device IDs
 * (PCI_ANY_ID), so any board built around these controller chips
 * matches this driver.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);

/* PCI error-recovery callbacks (see Documentation/PCI/pci-error-recovery). */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected = scsih_pci_error_detected,
	.mmio_enabled = scsih_pci_mmio_enabled,
	.slot_reset = scsih_pci_slot_reset,
	.resume = scsih_pci_resume,
};

static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

static struct pci_driver mpt3sas_driver = {
	.name = MPT3SAS_DRIVER_NAME,
	.id_table = mpt3sas_pci_table,
	.probe = _scsih_probe,
	.remove = scsih_remove,
	.shutdown = scsih_shutdown,
	.err_handler = &_mpt3sas_err_handler,
	.driver.pm = &scsih_pm_ops,
};

/**
 * scsih_init - main entry point for this driver.
 *
 * Registers all of this module's MPT reply callback handlers and
 * records the returned callback indices in the module-level *_cb_idx
 * variables. Always succeeds.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* device/volume target-reset and SAS IO-unit-control completions */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}

/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback handler registered by scsih_init(), the raid
 * class templates attached in _mpt3sas_init() (guarded by the same
 * hbas_to_enumerate conditions), the SAS transport template, and the
 * debugfs entries.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}

/**
 * _mpt3sas_init - main entry point for this driver.
 *
 * Return: 0 success, anything else error.
12725 */ 12726 static int __init 12727 _mpt3sas_init(void) 12728 { 12729 int error; 12730 12731 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, 12732 MPT3SAS_DRIVER_VERSION); 12733 12734 mpt3sas_transport_template = 12735 sas_attach_transport(&mpt3sas_transport_functions); 12736 if (!mpt3sas_transport_template) 12737 return -ENODEV; 12738 12739 /* No need attach mpt3sas raid functions template 12740 * if hbas_to_enumarate value is one. 12741 */ 12742 if (hbas_to_enumerate != 1) { 12743 mpt3sas_raid_template = 12744 raid_class_attach(&mpt3sas_raid_functions); 12745 if (!mpt3sas_raid_template) { 12746 sas_release_transport(mpt3sas_transport_template); 12747 return -ENODEV; 12748 } 12749 } 12750 12751 /* No need to attach mpt2sas raid functions template 12752 * if hbas_to_enumarate value is two 12753 */ 12754 if (hbas_to_enumerate != 2) { 12755 mpt2sas_raid_template = 12756 raid_class_attach(&mpt2sas_raid_functions); 12757 if (!mpt2sas_raid_template) { 12758 sas_release_transport(mpt3sas_transport_template); 12759 return -ENODEV; 12760 } 12761 } 12762 12763 error = scsih_init(); 12764 if (error) { 12765 scsih_exit(); 12766 return error; 12767 } 12768 12769 mpt3sas_ctl_init(hbas_to_enumerate); 12770 12771 error = pci_register_driver(&mpt3sas_driver); 12772 if (error) 12773 scsih_exit(); 12774 12775 return error; 12776 } 12777 12778 /** 12779 * _mpt3sas_exit - exit point for this driver (when it is a module). 12780 * 12781 */ 12782 static void __exit 12783 _mpt3sas_exit(void) 12784 { 12785 pr_info("mpt3sas version %s unloading\n", 12786 MPT3SAS_DRIVER_VERSION); 12787 12788 mpt3sas_ctl_exit(hbas_to_enumerate); 12789 12790 pci_unregister_driver(&mpt3sas_driver); 12791 12792 scsih_exit(); 12793 } 12794 12795 module_init(_mpt3sas_init); 12796 module_exit(_mpt3sas_exit); 12797