1 /* 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 3 * 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c 5 * Copyright (C) 2012-2014 LSI Corporation 6 * Copyright (C) 2013-2014 Avago Technologies 7 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
29 30 * DISCLAIMER OF LIABILITY 31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 38 39 * You should have received a copy of the GNU General Public License 40 * along with this program; if not, write to the Free Software 41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 42 * USA. 43 */ 44 45 #include <linux/module.h> 46 #include <linux/kernel.h> 47 #include <linux/init.h> 48 #include <linux/errno.h> 49 #include <linux/blkdev.h> 50 #include <linux/sched.h> 51 #include <linux/workqueue.h> 52 #include <linux/delay.h> 53 #include <linux/pci.h> 54 #include <linux/interrupt.h> 55 #include <linux/aer.h> 56 #include <linux/raid_class.h> 57 #include <linux/blk-mq-pci.h> 58 #include <asm/unaligned.h> 59 60 #include "mpt3sas_base.h" 61 62 #define RAID_CHANNEL 1 63 64 #define PCIE_CHANNEL 2 65 66 /* forward proto's */ 67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, 68 struct _sas_node *sas_expander); 69 static void _firmware_event_work(struct work_struct *work); 70 71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 72 struct _sas_device *sas_device); 73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, 74 u8 retry_count, u8 is_pd); 75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); 76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, 77 struct _pcie_device *pcie_device); 78 static void 79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle); 80 static u8 
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); 81 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc); 82 83 /* global parameters */ 84 LIST_HEAD(mpt3sas_ioc_list); 85 /* global ioc lock for list operations */ 86 DEFINE_SPINLOCK(gioc_lock); 87 88 MODULE_AUTHOR(MPT3SAS_AUTHOR); 89 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION); 90 MODULE_LICENSE("GPL"); 91 MODULE_VERSION(MPT3SAS_DRIVER_VERSION); 92 MODULE_ALIAS("mpt2sas"); 93 94 /* local parameters */ 95 static u8 scsi_io_cb_idx = -1; 96 static u8 tm_cb_idx = -1; 97 static u8 ctl_cb_idx = -1; 98 static u8 base_cb_idx = -1; 99 static u8 port_enable_cb_idx = -1; 100 static u8 transport_cb_idx = -1; 101 static u8 scsih_cb_idx = -1; 102 static u8 config_cb_idx = -1; 103 static int mpt2_ids; 104 static int mpt3_ids; 105 106 static u8 tm_tr_cb_idx = -1 ; 107 static u8 tm_tr_volume_cb_idx = -1 ; 108 static u8 tm_sas_control_cb_idx = -1; 109 110 /* command line options */ 111 static u32 logging_level; 112 MODULE_PARM_DESC(logging_level, 113 " bits for enabling additional logging info (default=0)"); 114 115 116 static ushort max_sectors = 0xFFFF; 117 module_param(max_sectors, ushort, 0444); 118 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); 119 120 121 static int missing_delay[2] = {-1, -1}; 122 module_param_array(missing_delay, int, NULL, 0444); 123 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); 124 125 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */ 126 #define MPT3SAS_MAX_LUN (16895) 127 static u64 max_lun = MPT3SAS_MAX_LUN; 128 module_param(max_lun, ullong, 0444); 129 MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); 130 131 static ushort hbas_to_enumerate; 132 module_param(hbas_to_enumerate, ushort, 0444); 133 MODULE_PARM_DESC(hbas_to_enumerate, 134 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \ 135 1 - enumerates only SAS 2.0 generation HBAs\n \ 136 2 - enumerates only SAS 3.0 
generation HBAs (default=0)"); 137 138 /* diag_buffer_enable is bitwise 139 * bit 0 set = TRACE 140 * bit 1 set = SNAPSHOT 141 * bit 2 set = EXTENDED 142 * 143 * Either bit can be set, or both 144 */ 145 static int diag_buffer_enable = -1; 146 module_param(diag_buffer_enable, int, 0444); 147 MODULE_PARM_DESC(diag_buffer_enable, 148 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); 149 static int disable_discovery = -1; 150 module_param(disable_discovery, int, 0444); 151 MODULE_PARM_DESC(disable_discovery, " disable discovery "); 152 153 154 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ 155 static int prot_mask = -1; 156 module_param(prot_mask, int, 0444); 157 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); 158 159 static bool enable_sdev_max_qd; 160 module_param(enable_sdev_max_qd, bool, 0444); 161 MODULE_PARM_DESC(enable_sdev_max_qd, 162 "Enable sdev max qd as can_queue, def=disabled(0)"); 163 164 static int multipath_on_hba = -1; 165 module_param(multipath_on_hba, int, 0); 166 MODULE_PARM_DESC(multipath_on_hba, 167 "Multipath support to add same target device\n\t\t" 168 "as many times as it is visible to HBA from various paths\n\t\t" 169 "(by default:\n\t\t" 170 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t" 171 "\t SAS 3.5 HBA - This will be enabled)"); 172 173 static int host_tagset_enable = 1; 174 module_param(host_tagset_enable, int, 0444); 175 MODULE_PARM_DESC(host_tagset_enable, 176 "Shared host tagset enable/disable Default: enable(1)"); 177 178 /* raid transport support */ 179 static struct raid_template *mpt3sas_raid_template; 180 static struct raid_template *mpt2sas_raid_template; 181 182 183 /** 184 * struct sense_info - common structure for obtaining sense keys 185 * @skey: sense key 186 * @asc: additional sense code 187 * @ascq: additional sense code qualifier 188 */ 189 struct sense_info { 190 u8 skey; 191 u8 asc; 192 u8 ascq; 193 }; 194 195 #define 
MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) 196 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC) 197 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) 198 #define MPT3SAS_ABRT_TASK_SET (0xFFFE) 199 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) 200 /** 201 * struct fw_event_work - firmware event struct 202 * @list: link list framework 203 * @work: work object (ioc->fault_reset_work_q) 204 * @ioc: per adapter object 205 * @device_handle: device handle 206 * @VF_ID: virtual function id 207 * @VP_ID: virtual port id 208 * @ignore: flag meaning this event has been marked to ignore 209 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h 210 * @refcount: kref for this event 211 * @event_data: reply event data payload follows 212 * 213 * This object stored on ioc->fw_event_list. 214 */ 215 struct fw_event_work { 216 struct list_head list; 217 struct work_struct work; 218 219 struct MPT3SAS_ADAPTER *ioc; 220 u16 device_handle; 221 u8 VF_ID; 222 u8 VP_ID; 223 u8 ignore; 224 u16 event; 225 struct kref refcount; 226 char event_data[] __aligned(4); 227 }; 228 229 static void fw_event_work_free(struct kref *r) 230 { 231 kfree(container_of(r, struct fw_event_work, refcount)); 232 } 233 234 static void fw_event_work_get(struct fw_event_work *fw_work) 235 { 236 kref_get(&fw_work->refcount); 237 } 238 239 static void fw_event_work_put(struct fw_event_work *fw_work) 240 { 241 kref_put(&fw_work->refcount, fw_event_work_free); 242 } 243 244 static struct fw_event_work *alloc_fw_event_work(int len) 245 { 246 struct fw_event_work *fw_event; 247 248 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); 249 if (!fw_event) 250 return NULL; 251 252 kref_init(&fw_event->refcount); 253 return fw_event; 254 } 255 256 /** 257 * struct _scsi_io_transfer - scsi io transfer 258 * @handle: sas device handle (assigned by firmware) 259 * @is_raid: flag set for hidden raid components 260 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE, 261 * @data_length: data transfer length 262 * @data_dma: dma pointer to 
data 263 * @sense: sense data 264 * @lun: lun number 265 * @cdb_length: cdb length 266 * @cdb: cdb contents 267 * @timeout: timeout for this command 268 * @VF_ID: virtual function id 269 * @VP_ID: virtual port id 270 * @valid_reply: flag set for reply message 271 * @sense_length: sense length 272 * @ioc_status: ioc status 273 * @scsi_state: scsi state 274 * @scsi_status: scsi staus 275 * @log_info: log information 276 * @transfer_length: data length transfer when there is a reply message 277 * 278 * Used for sending internal scsi commands to devices within this module. 279 * Refer to _scsi_send_scsi_io(). 280 */ 281 struct _scsi_io_transfer { 282 u16 handle; 283 u8 is_raid; 284 enum dma_data_direction dir; 285 u32 data_length; 286 dma_addr_t data_dma; 287 u8 sense[SCSI_SENSE_BUFFERSIZE]; 288 u32 lun; 289 u8 cdb_length; 290 u8 cdb[32]; 291 u8 timeout; 292 u8 VF_ID; 293 u8 VP_ID; 294 u8 valid_reply; 295 /* the following bits are only valid when 'valid_reply = 1' */ 296 u32 sense_length; 297 u16 ioc_status; 298 u8 scsi_state; 299 u8 scsi_status; 300 u32 log_info; 301 u32 transfer_length; 302 }; 303 304 /** 305 * _scsih_set_debug_level - global setting of ioc->logging_level. 306 * @val: ? 307 * @kp: ? 308 * 309 * Note: The logging levels are defined in mpt3sas_debug.h. 
310 */ 311 static int 312 _scsih_set_debug_level(const char *val, const struct kernel_param *kp) 313 { 314 int ret = param_set_int(val, kp); 315 struct MPT3SAS_ADAPTER *ioc; 316 317 if (ret) 318 return ret; 319 320 pr_info("setting logging_level(0x%08x)\n", logging_level); 321 spin_lock(&gioc_lock); 322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) 323 ioc->logging_level = logging_level; 324 spin_unlock(&gioc_lock); 325 return 0; 326 } 327 module_param_call(logging_level, _scsih_set_debug_level, param_get_int, 328 &logging_level, 0644); 329 330 /** 331 * _scsih_srch_boot_sas_address - search based on sas_address 332 * @sas_address: sas address 333 * @boot_device: boot device object from bios page 2 334 * 335 * Return: 1 when there's a match, 0 means no match. 336 */ 337 static inline int 338 _scsih_srch_boot_sas_address(u64 sas_address, 339 Mpi2BootDeviceSasWwid_t *boot_device) 340 { 341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; 342 } 343 344 /** 345 * _scsih_srch_boot_device_name - search based on device name 346 * @device_name: device name specified in INDENTIFY fram 347 * @boot_device: boot device object from bios page 2 348 * 349 * Return: 1 when there's a match, 0 means no match. 350 */ 351 static inline int 352 _scsih_srch_boot_device_name(u64 device_name, 353 Mpi2BootDeviceDeviceName_t *boot_device) 354 { 355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; 356 } 357 358 /** 359 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot 360 * @enclosure_logical_id: enclosure logical id 361 * @slot_number: slot number 362 * @boot_device: boot device object from bios page 2 363 * 364 * Return: 1 when there's a match, 0 means no match. 
365 */ 366 static inline int 367 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, 368 Mpi2BootDeviceEnclosureSlot_t *boot_device) 369 { 370 return (enclosure_logical_id == le64_to_cpu(boot_device-> 371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> 372 SlotNumber)) ? 1 : 0; 373 } 374 375 /** 376 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided 377 * port number from port list 378 * @ioc: per adapter object 379 * @port_id: port number 380 * @bypass_dirty_port_flag: when set look the matching hba port entry even 381 * if hba port entry is marked as dirty. 382 * 383 * Search for hba port entry corresponding to provided port number, 384 * if available return port object otherwise return NULL. 385 */ 386 struct hba_port * 387 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc, 388 u8 port_id, u8 bypass_dirty_port_flag) 389 { 390 struct hba_port *port, *port_next; 391 392 /* 393 * When multipath_on_hba is disabled then 394 * search the hba_port entry using default 395 * port id i.e. 255 396 */ 397 if (!ioc->multipath_on_hba) 398 port_id = MULTIPATH_DISABLED_PORT_ID; 399 400 list_for_each_entry_safe(port, port_next, 401 &ioc->port_table_list, list) { 402 if (port->port_id != port_id) 403 continue; 404 if (bypass_dirty_port_flag) 405 return port; 406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT) 407 continue; 408 return port; 409 } 410 411 /* 412 * Allocate hba_port object for default port id (i.e. 255) 413 * when multipath_on_hba is disabled for the HBA. 414 * And add this object to port_table_list. 
415 */ 416 if (!ioc->multipath_on_hba) { 417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC); 418 if (!port) 419 return NULL; 420 421 port->port_id = port_id; 422 ioc_info(ioc, 423 "hba_port entry: %p, port: %d is added to hba_port list\n", 424 port, port->port_id); 425 list_add_tail(&port->list, 426 &ioc->port_table_list); 427 return port; 428 } 429 return NULL; 430 } 431 432 /** 433 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number 434 * @ioc: per adapter object 435 * @port: hba_port object 436 * @phy: phy number 437 * 438 * Return virtual_phy object corresponding to phy number. 439 */ 440 struct virtual_phy * 441 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc, 442 struct hba_port *port, u32 phy) 443 { 444 struct virtual_phy *vphy, *vphy_next; 445 446 if (!port->vphys_mask) 447 return NULL; 448 449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { 450 if (vphy->phy_mask & (1 << phy)) 451 return vphy; 452 } 453 return NULL; 454 } 455 456 /** 457 * _scsih_is_boot_device - search for matching boot device. 458 * @sas_address: sas address 459 * @device_name: device name specified in INDENTIFY fram 460 * @enclosure_logical_id: enclosure logical id 461 * @slot: slot number 462 * @form: specifies boot device form 463 * @boot_device: boot device object from bios page 2 464 * 465 * Return: 1 when there's a match, 0 means no match. 
466 */ 467 static int 468 _scsih_is_boot_device(u64 sas_address, u64 device_name, 469 u64 enclosure_logical_id, u16 slot, u8 form, 470 Mpi2BiosPage2BootDevice_t *boot_device) 471 { 472 int rc = 0; 473 474 switch (form) { 475 case MPI2_BIOSPAGE2_FORM_SAS_WWID: 476 if (!sas_address) 477 break; 478 rc = _scsih_srch_boot_sas_address( 479 sas_address, &boot_device->SasWwid); 480 break; 481 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT: 482 if (!enclosure_logical_id) 483 break; 484 rc = _scsih_srch_boot_encl_slot( 485 enclosure_logical_id, 486 slot, &boot_device->EnclosureSlot); 487 break; 488 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME: 489 if (!device_name) 490 break; 491 rc = _scsih_srch_boot_device_name( 492 device_name, &boot_device->DeviceName); 493 break; 494 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: 495 break; 496 } 497 498 return rc; 499 } 500 501 /** 502 * _scsih_get_sas_address - set the sas_address for given device handle 503 * @ioc: ? 504 * @handle: device handle 505 * @sas_address: sas address 506 * 507 * Return: 0 success, non-zero when failure 508 */ 509 static int 510 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, 511 u64 *sas_address) 512 { 513 Mpi2SasDevicePage0_t sas_device_pg0; 514 Mpi2ConfigReply_t mpi_reply; 515 u32 ioc_status; 516 517 *sas_address = 0; 518 519 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 520 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 521 ioc_err(ioc, "failure at %s:%d/%s()!\n", 522 __FILE__, __LINE__, __func__); 523 return -ENXIO; 524 } 525 526 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 527 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 528 /* For HBA, vSES doesn't return HBA SAS address. Instead return 529 * vSES's sas address. 
530 */ 531 if ((handle <= ioc->sas_hba.num_phys) && 532 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & 533 MPI2_SAS_DEVICE_INFO_SEP))) 534 *sas_address = ioc->sas_hba.sas_address; 535 else 536 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 537 return 0; 538 } 539 540 /* we hit this because the given parent handle doesn't exist */ 541 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 542 return -ENXIO; 543 544 /* else error case */ 545 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", 546 handle, ioc_status, __FILE__, __LINE__, __func__); 547 return -EIO; 548 } 549 550 /** 551 * _scsih_determine_boot_device - determine boot device. 552 * @ioc: per adapter object 553 * @device: sas_device or pcie_device object 554 * @channel: SAS or PCIe channel 555 * 556 * Determines whether this device should be first reported device to 557 * to scsi-ml or sas transport, this purpose is for persistent boot device. 558 * There are primary, alternate, and current entries in bios page 2. The order 559 * priority is primary, alternate, then current. This routine saves 560 * the corresponding device object. 561 * The saved data to be used later in _scsih_probe_boot_devices(). 
562 */ 563 static void 564 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device, 565 u32 channel) 566 { 567 struct _sas_device *sas_device; 568 struct _pcie_device *pcie_device; 569 struct _raid_device *raid_device; 570 u64 sas_address; 571 u64 device_name; 572 u64 enclosure_logical_id; 573 u16 slot; 574 575 /* only process this function when driver loads */ 576 if (!ioc->is_driver_loading) 577 return; 578 579 /* no Bios, return immediately */ 580 if (!ioc->bios_pg3.BiosVersion) 581 return; 582 583 if (channel == RAID_CHANNEL) { 584 raid_device = device; 585 sas_address = raid_device->wwid; 586 device_name = 0; 587 enclosure_logical_id = 0; 588 slot = 0; 589 } else if (channel == PCIE_CHANNEL) { 590 pcie_device = device; 591 sas_address = pcie_device->wwid; 592 device_name = 0; 593 enclosure_logical_id = 0; 594 slot = 0; 595 } else { 596 sas_device = device; 597 sas_address = sas_device->sas_address; 598 device_name = sas_device->device_name; 599 enclosure_logical_id = sas_device->enclosure_logical_id; 600 slot = sas_device->slot; 601 } 602 603 if (!ioc->req_boot_device.device) { 604 if (_scsih_is_boot_device(sas_address, device_name, 605 enclosure_logical_id, slot, 606 (ioc->bios_pg2.ReqBootDeviceForm & 607 MPI2_BIOSPAGE2_FORM_MASK), 608 &ioc->bios_pg2.RequestedBootDevice)) { 609 dinitprintk(ioc, 610 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n", 611 __func__, (u64)sas_address)); 612 ioc->req_boot_device.device = device; 613 ioc->req_boot_device.channel = channel; 614 } 615 } 616 617 if (!ioc->req_alt_boot_device.device) { 618 if (_scsih_is_boot_device(sas_address, device_name, 619 enclosure_logical_id, slot, 620 (ioc->bios_pg2.ReqAltBootDeviceForm & 621 MPI2_BIOSPAGE2_FORM_MASK), 622 &ioc->bios_pg2.RequestedAltBootDevice)) { 623 dinitprintk(ioc, 624 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n", 625 __func__, (u64)sas_address)); 626 ioc->req_alt_boot_device.device = device; 627 ioc->req_alt_boot_device.channel = channel; 628 } 629 } 
630 631 if (!ioc->current_boot_device.device) { 632 if (_scsih_is_boot_device(sas_address, device_name, 633 enclosure_logical_id, slot, 634 (ioc->bios_pg2.CurrentBootDeviceForm & 635 MPI2_BIOSPAGE2_FORM_MASK), 636 &ioc->bios_pg2.CurrentBootDevice)) { 637 dinitprintk(ioc, 638 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n", 639 __func__, (u64)sas_address)); 640 ioc->current_boot_device.device = device; 641 ioc->current_boot_device.channel = channel; 642 } 643 } 644 } 645 646 static struct _sas_device * 647 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, 648 struct MPT3SAS_TARGET *tgt_priv) 649 { 650 struct _sas_device *ret; 651 652 assert_spin_locked(&ioc->sas_device_lock); 653 654 ret = tgt_priv->sas_dev; 655 if (ret) 656 sas_device_get(ret); 657 658 return ret; 659 } 660 661 static struct _sas_device * 662 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, 663 struct MPT3SAS_TARGET *tgt_priv) 664 { 665 struct _sas_device *ret; 666 unsigned long flags; 667 668 spin_lock_irqsave(&ioc->sas_device_lock, flags); 669 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv); 670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 671 672 return ret; 673 } 674 675 static struct _pcie_device * 676 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, 677 struct MPT3SAS_TARGET *tgt_priv) 678 { 679 struct _pcie_device *ret; 680 681 assert_spin_locked(&ioc->pcie_device_lock); 682 683 ret = tgt_priv->pcie_dev; 684 if (ret) 685 pcie_device_get(ret); 686 687 return ret; 688 } 689 690 /** 691 * mpt3sas_get_pdev_from_target - pcie device search 692 * @ioc: per adapter object 693 * @tgt_priv: starget private object 694 * 695 * Context: This function will acquire ioc->pcie_device_lock and will release 696 * before returning the pcie_device object. 697 * 698 * This searches for pcie_device from target, then return pcie_device object. 
699 */ 700 static struct _pcie_device * 701 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, 702 struct MPT3SAS_TARGET *tgt_priv) 703 { 704 struct _pcie_device *ret; 705 unsigned long flags; 706 707 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv); 709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 710 711 return ret; 712 } 713 714 715 /** 716 * __mpt3sas_get_sdev_by_rphy - sas device search 717 * @ioc: per adapter object 718 * @rphy: sas_rphy pointer 719 * 720 * Context: This function will acquire ioc->sas_device_lock and will release 721 * before returning the sas_device object. 722 * 723 * This searches for sas_device from rphy object 724 * then return sas_device object. 725 */ 726 struct _sas_device * 727 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, 728 struct sas_rphy *rphy) 729 { 730 struct _sas_device *sas_device; 731 732 assert_spin_locked(&ioc->sas_device_lock); 733 734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 735 if (sas_device->rphy != rphy) 736 continue; 737 sas_device_get(sas_device); 738 return sas_device; 739 } 740 741 sas_device = NULL; 742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { 743 if (sas_device->rphy != rphy) 744 continue; 745 sas_device_get(sas_device); 746 return sas_device; 747 } 748 749 return NULL; 750 } 751 752 /** 753 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided 754 * sas address from sas_device_list list 755 * @ioc: per adapter object 756 * @sas_address: device sas address 757 * @port: port number 758 * 759 * Search for _sas_device object corresponding to provided sas address, 760 * if available return _sas_device object address otherwise return NULL. 
761 */ 762 struct _sas_device * 763 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, 764 u64 sas_address, struct hba_port *port) 765 { 766 struct _sas_device *sas_device; 767 768 if (!port) 769 return NULL; 770 771 assert_spin_locked(&ioc->sas_device_lock); 772 773 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 774 if (sas_device->sas_address != sas_address) 775 continue; 776 if (sas_device->port != port) 777 continue; 778 sas_device_get(sas_device); 779 return sas_device; 780 } 781 782 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) { 783 if (sas_device->sas_address != sas_address) 784 continue; 785 if (sas_device->port != port) 786 continue; 787 sas_device_get(sas_device); 788 return sas_device; 789 } 790 791 return NULL; 792 } 793 794 /** 795 * mpt3sas_get_sdev_by_addr - sas device search 796 * @ioc: per adapter object 797 * @sas_address: sas address 798 * @port: hba port entry 799 * Context: Calling function should acquire ioc->sas_device_lock 800 * 801 * This searches for sas_device based on sas_address & port number, 802 * then return sas_device object. 
803 */ 804 struct _sas_device * 805 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, 806 u64 sas_address, struct hba_port *port) 807 { 808 struct _sas_device *sas_device; 809 unsigned long flags; 810 811 spin_lock_irqsave(&ioc->sas_device_lock, flags); 812 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 813 sas_address, port); 814 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 815 816 return sas_device; 817 } 818 819 static struct _sas_device * 820 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 821 { 822 struct _sas_device *sas_device; 823 824 assert_spin_locked(&ioc->sas_device_lock); 825 826 list_for_each_entry(sas_device, &ioc->sas_device_list, list) 827 if (sas_device->handle == handle) 828 goto found_device; 829 830 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 831 if (sas_device->handle == handle) 832 goto found_device; 833 834 return NULL; 835 836 found_device: 837 sas_device_get(sas_device); 838 return sas_device; 839 } 840 841 /** 842 * mpt3sas_get_sdev_by_handle - sas device search 843 * @ioc: per adapter object 844 * @handle: sas device handle (assigned by firmware) 845 * Context: Calling function should acquire ioc->sas_device_lock 846 * 847 * This searches for sas_device based on sas_address, then return sas_device 848 * object. 
849 */ 850 struct _sas_device * 851 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 852 { 853 struct _sas_device *sas_device; 854 unsigned long flags; 855 856 spin_lock_irqsave(&ioc->sas_device_lock, flags); 857 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 858 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 859 860 return sas_device; 861 } 862 863 /** 864 * _scsih_display_enclosure_chassis_info - display device location info 865 * @ioc: per adapter object 866 * @sas_device: per sas device object 867 * @sdev: scsi device struct 868 * @starget: scsi target struct 869 */ 870 static void 871 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, 872 struct _sas_device *sas_device, struct scsi_device *sdev, 873 struct scsi_target *starget) 874 { 875 if (sdev) { 876 if (sas_device->enclosure_handle != 0) 877 sdev_printk(KERN_INFO, sdev, 878 "enclosure logical id (0x%016llx), slot(%d) \n", 879 (unsigned long long) 880 sas_device->enclosure_logical_id, 881 sas_device->slot); 882 if (sas_device->connector_name[0] != '\0') 883 sdev_printk(KERN_INFO, sdev, 884 "enclosure level(0x%04x), connector name( %s)\n", 885 sas_device->enclosure_level, 886 sas_device->connector_name); 887 if (sas_device->is_chassis_slot_valid) 888 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", 889 sas_device->chassis_slot); 890 } else if (starget) { 891 if (sas_device->enclosure_handle != 0) 892 starget_printk(KERN_INFO, starget, 893 "enclosure logical id(0x%016llx), slot(%d) \n", 894 (unsigned long long) 895 sas_device->enclosure_logical_id, 896 sas_device->slot); 897 if (sas_device->connector_name[0] != '\0') 898 starget_printk(KERN_INFO, starget, 899 "enclosure level(0x%04x), connector name( %s)\n", 900 sas_device->enclosure_level, 901 sas_device->connector_name); 902 if (sas_device->is_chassis_slot_valid) 903 starget_printk(KERN_INFO, starget, 904 "chassis slot(0x%04x)\n", 905 sas_device->chassis_slot); 906 } else { 907 if 
(sas_device->enclosure_handle != 0) 908 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n", 909 (u64)sas_device->enclosure_logical_id, 910 sas_device->slot); 911 if (sas_device->connector_name[0] != '\0') 912 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n", 913 sas_device->enclosure_level, 914 sas_device->connector_name); 915 if (sas_device->is_chassis_slot_valid) 916 ioc_info(ioc, "chassis slot(0x%04x)\n", 917 sas_device->chassis_slot); 918 } 919 } 920 921 /** 922 * _scsih_sas_device_remove - remove sas_device from list. 923 * @ioc: per adapter object 924 * @sas_device: the sas_device object 925 * Context: This function will acquire ioc->sas_device_lock. 926 * 927 * If sas_device is on the list, remove it and decrement its reference count. 928 */ 929 static void 930 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, 931 struct _sas_device *sas_device) 932 { 933 unsigned long flags; 934 935 if (!sas_device) 936 return; 937 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", 938 sas_device->handle, (u64)sas_device->sas_address); 939 940 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); 941 942 /* 943 * The lock serializes access to the list, but we still need to verify 944 * that nobody removed the entry while we were waiting on the lock. 
945 */ 946 spin_lock_irqsave(&ioc->sas_device_lock, flags); 947 if (!list_empty(&sas_device->list)) { 948 list_del_init(&sas_device->list); 949 sas_device_put(sas_device); 950 } 951 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 952 } 953 954 /** 955 * _scsih_device_remove_by_handle - removing device object by handle 956 * @ioc: per adapter object 957 * @handle: device handle 958 */ 959 static void 960 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 961 { 962 struct _sas_device *sas_device; 963 unsigned long flags; 964 965 if (ioc->shost_recovery) 966 return; 967 968 spin_lock_irqsave(&ioc->sas_device_lock, flags); 969 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 970 if (sas_device) { 971 list_del_init(&sas_device->list); 972 sas_device_put(sas_device); 973 } 974 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 975 if (sas_device) { 976 _scsih_remove_device(ioc, sas_device); 977 sas_device_put(sas_device); 978 } 979 } 980 981 /** 982 * mpt3sas_device_remove_by_sas_address - removing device object by 983 * sas address & port number 984 * @ioc: per adapter object 985 * @sas_address: device sas_address 986 * @port: hba port entry 987 * 988 * Return nothing. 989 */ 990 void 991 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 992 u64 sas_address, struct hba_port *port) 993 { 994 struct _sas_device *sas_device; 995 unsigned long flags; 996 997 if (ioc->shost_recovery) 998 return; 999 1000 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1001 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port); 1002 if (sas_device) { 1003 list_del_init(&sas_device->list); 1004 sas_device_put(sas_device); 1005 } 1006 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1007 if (sas_device) { 1008 _scsih_remove_device(ioc, sas_device); 1009 sas_device_put(sas_device); 1010 } 1011 } 1012 1013 /** 1014 * _scsih_sas_device_add - insert sas_device to the list. 
1015 * @ioc: per adapter object 1016 * @sas_device: the sas_device object 1017 * Context: This function will acquire ioc->sas_device_lock. 1018 * 1019 * Adding new object to the ioc->sas_device_list. 1020 */ 1021 static void 1022 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, 1023 struct _sas_device *sas_device) 1024 { 1025 unsigned long flags; 1026 1027 dewtprintk(ioc, 1028 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n", 1029 __func__, sas_device->handle, 1030 (u64)sas_device->sas_address)); 1031 1032 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 1033 NULL, NULL)); 1034 1035 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1036 sas_device_get(sas_device); 1037 list_add_tail(&sas_device->list, &ioc->sas_device_list); 1038 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1039 1040 if (ioc->hide_drives) { 1041 clear_bit(sas_device->handle, ioc->pend_os_device_add); 1042 return; 1043 } 1044 1045 if (!mpt3sas_transport_port_add(ioc, sas_device->handle, 1046 sas_device->sas_address_parent, sas_device->port)) { 1047 _scsih_sas_device_remove(ioc, sas_device); 1048 } else if (!sas_device->starget) { 1049 /* 1050 * When asyn scanning is enabled, its not possible to remove 1051 * devices while scanning is turned on due to an oops in 1052 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() 1053 */ 1054 if (!ioc->is_driver_loading) { 1055 mpt3sas_transport_port_remove(ioc, 1056 sas_device->sas_address, 1057 sas_device->sas_address_parent, 1058 sas_device->port); 1059 _scsih_sas_device_remove(ioc, sas_device); 1060 } 1061 } else 1062 clear_bit(sas_device->handle, ioc->pend_os_device_add); 1063 } 1064 1065 /** 1066 * _scsih_sas_device_init_add - insert sas_device to the list. 1067 * @ioc: per adapter object 1068 * @sas_device: the sas_device object 1069 * Context: This function will acquire ioc->sas_device_lock. 1070 * 1071 * Adding new object at driver load time to the ioc->sas_device_init_list. 
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* ioc->sas_device_init_list holds its own reference */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/*
 * __mpt3sas_get_pdev_by_wwid - pcie device search by wwid.
 * Caller must hold ioc->pcie_device_lock; searches both the live list
 * and the init list, and takes a reference on the returned device.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	return NULL;

 found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then return pcie_device object.
 */
static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}


/*
 * __mpt3sas_get_pdev_by_idchannel - pcie device search by id & channel.
 * Caller must hold ioc->pcie_device_lock; takes a reference on the
 * returned device.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
	int channel)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->id == id && pcie_device->channel == channel)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->id == id && pcie_device->channel == channel)
			goto found_device;

	return NULL;

 found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}

/*
 * __mpt3sas_get_pdev_by_handle - pcie device search by firmware handle.
 * Caller must hold ioc->pcie_device_lock; takes a reference on the
 * returned device.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->handle == handle)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->handle == handle)
			goto found_device;

	return NULL;

 found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_handle - pcie device search
 * @ioc: per adapter object
 * @handle: Firmware device handle
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on handle, then return pcie_device
 * object.
 */
struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
 * @ioc: per adapter object
 * Context: This function will acquire ioc->pcie_device_lock
 *
 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
 * which has reported maximum among all available NVMe drives.
 * Minimum max_shutdown_latency will be six seconds.
 */
static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	/* floor: IO unit control shutdown timeout */
	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (pcie_device->shutdown_latency) {
			if (shutdown_latency < pcie_device->shutdown_latency)
				shutdown_latency =
					pcie_device->shutdown_latency;
		}
	}
	ioc->max_shutdown_latency = shutdown_latency;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* drop the reference held by ioc->pcie_device_list */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}


/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* lookup takes its own reference on success */
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* drop the reference held by ioc->pcie_device_list */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	/* tear down outside the lock; then drop the lookup reference */
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}

/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* ioc->pcie_device_list holds its own reference */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* blocked devices are not exposed to the OS */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* ioc->pcie_device_init_list holds its own reference */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then return raid_device
 * object.
1431 */ 1432 static struct _raid_device * 1433 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) 1434 { 1435 struct _raid_device *raid_device, *r; 1436 1437 r = NULL; 1438 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1439 if (raid_device->id == id && raid_device->channel == channel) { 1440 r = raid_device; 1441 goto out; 1442 } 1443 } 1444 1445 out: 1446 return r; 1447 } 1448 1449 /** 1450 * mpt3sas_raid_device_find_by_handle - raid device search 1451 * @ioc: per adapter object 1452 * @handle: sas device handle (assigned by firmware) 1453 * Context: Calling function should acquire ioc->raid_device_lock 1454 * 1455 * This searches for raid_device based on handle, then return raid_device 1456 * object. 1457 */ 1458 struct _raid_device * 1459 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1460 { 1461 struct _raid_device *raid_device, *r; 1462 1463 r = NULL; 1464 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1465 if (raid_device->handle != handle) 1466 continue; 1467 r = raid_device; 1468 goto out; 1469 } 1470 1471 out: 1472 return r; 1473 } 1474 1475 /** 1476 * _scsih_raid_device_find_by_wwid - raid device search 1477 * @ioc: per adapter object 1478 * @wwid: ? 1479 * Context: Calling function should acquire ioc->raid_device_lock 1480 * 1481 * This searches for raid_device based on wwid, then return raid_device 1482 * object. 
1483 */ 1484 static struct _raid_device * 1485 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1486 { 1487 struct _raid_device *raid_device, *r; 1488 1489 r = NULL; 1490 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1491 if (raid_device->wwid != wwid) 1492 continue; 1493 r = raid_device; 1494 goto out; 1495 } 1496 1497 out: 1498 return r; 1499 } 1500 1501 /** 1502 * _scsih_raid_device_add - add raid_device object 1503 * @ioc: per adapter object 1504 * @raid_device: raid_device object 1505 * 1506 * This is added to the raid_device_list link list. 1507 */ 1508 static void 1509 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, 1510 struct _raid_device *raid_device) 1511 { 1512 unsigned long flags; 1513 1514 dewtprintk(ioc, 1515 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", 1516 __func__, 1517 raid_device->handle, (u64)raid_device->wwid)); 1518 1519 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1520 list_add_tail(&raid_device->list, &ioc->raid_device_list); 1521 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1522 } 1523 1524 /** 1525 * _scsih_raid_device_remove - delete raid_device object 1526 * @ioc: per adapter object 1527 * @raid_device: raid_device object 1528 * 1529 */ 1530 static void 1531 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, 1532 struct _raid_device *raid_device) 1533 { 1534 unsigned long flags; 1535 1536 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1537 list_del(&raid_device->list); 1538 kfree(raid_device); 1539 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1540 } 1541 1542 /** 1543 * mpt3sas_scsih_expander_find_by_handle - expander device search 1544 * @ioc: per adapter object 1545 * @handle: expander handle (assigned by firmware) 1546 * Context: Calling function should acquire ioc->sas_device_lock 1547 * 1548 * This searches for expander device based on handle, then returns the 1549 * sas_node object. 
1550 */ 1551 struct _sas_node * 1552 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1553 { 1554 struct _sas_node *sas_expander, *r; 1555 1556 r = NULL; 1557 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1558 if (sas_expander->handle != handle) 1559 continue; 1560 r = sas_expander; 1561 goto out; 1562 } 1563 out: 1564 return r; 1565 } 1566 1567 /** 1568 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search 1569 * @ioc: per adapter object 1570 * @handle: enclosure handle (assigned by firmware) 1571 * Context: Calling function should acquire ioc->sas_device_lock 1572 * 1573 * This searches for enclosure device based on handle, then returns the 1574 * enclosure object. 1575 */ 1576 static struct _enclosure_node * 1577 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1578 { 1579 struct _enclosure_node *enclosure_dev, *r; 1580 1581 r = NULL; 1582 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { 1583 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) 1584 continue; 1585 r = enclosure_dev; 1586 goto out; 1587 } 1588 out: 1589 return r; 1590 } 1591 /** 1592 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1593 * @ioc: per adapter object 1594 * @sas_address: sas address 1595 * @port: hba port entry 1596 * Context: Calling function should acquire ioc->sas_node_lock. 1597 * 1598 * This searches for expander device based on sas_address & port number, 1599 * then returns the sas_node object. 
1600 */ 1601 struct _sas_node * 1602 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 1603 u64 sas_address, struct hba_port *port) 1604 { 1605 struct _sas_node *sas_expander, *r = NULL; 1606 1607 if (!port) 1608 return r; 1609 1610 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1611 if (sas_expander->sas_address != sas_address) 1612 continue; 1613 if (sas_expander->port != port) 1614 continue; 1615 r = sas_expander; 1616 goto out; 1617 } 1618 out: 1619 return r; 1620 } 1621 1622 /** 1623 * _scsih_expander_node_add - insert expander device to the list. 1624 * @ioc: per adapter object 1625 * @sas_expander: the sas_device object 1626 * Context: This function will acquire ioc->sas_node_lock. 1627 * 1628 * Adding new object to the ioc->sas_expander_list. 1629 */ 1630 static void 1631 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, 1632 struct _sas_node *sas_expander) 1633 { 1634 unsigned long flags; 1635 1636 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1637 list_add_tail(&sas_expander->list, &ioc->sas_expander_list); 1638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1639 } 1640 1641 /** 1642 * _scsih_is_end_device - determines if device is an end device 1643 * @device_info: bitfield providing information about the device. 1644 * Context: none 1645 * 1646 * Return: 1 if end device. 1647 */ 1648 static int 1649 _scsih_is_end_device(u32 device_info) 1650 { 1651 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && 1652 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | 1653 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | 1654 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) 1655 return 1; 1656 else 1657 return 0; 1658 } 1659 1660 /** 1661 * _scsih_is_nvme_pciescsi_device - determines if 1662 * device is an pcie nvme/scsi device 1663 * @device_info: bitfield providing information about the device. 1664 * Context: none 1665 * 1666 * Returns 1 if device is pcie device type nvme/scsi. 
 */
static int
_scsih_is_nvme_pciescsi_device(u32 device_info)
{
	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
	    == MPI26_PCIE_DEVINFO_NVME) ||
	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
	    == MPI26_PCIE_DEVINFO_SCSI))
		return 1;
	else
		return 0;
}

/**
 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
	int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	/* smid 0 is reserved; walk every possible outstanding command */
	for (smid = 1;
	     smid <= ioc->shost->can_queue; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel)
			return 1;
	}
	return 0;
}

/**
 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
 * @ioc: per adapter object
 * @id: target id
 * @lun: lun number
 * @channel: channel
 * Context: This function will acquire ioc->scsi_lookup_lock.
 *
 * This will search for a matching channel:id:lun in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
	unsigned int lun, int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	/* smid 0 is reserved; walk every possible outstanding command */
	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel &&
		    scmd->device->lun == lun)
			return 1;
	}
	return 0;
}

/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* smids are 1-based, block tags 0-based */

	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* rebuild the blk-mq unique tag: (hwq << bits) | tag */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle field must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* tracker not active => command is not ours/free */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}

/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not SATA end devices */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		/* drop the lookup reference */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}

/**
 * mpt3sas_scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing.
 */
void
mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* when enabled, always request the full host queue depth */
	if (ioc->enable_sdev_max_qd)
		qdepth = shost->can_queue;

	scsih_change_queue_depth(sdev, qdepth);
}

/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* lookup reference is held until scsih_target_destroy() */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/* lookup reference is held until scsih_target_destroy() */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			/* first put: lookup ref; second put: target_alloc ref */
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		/* first put: lookup ref */
		sas_device_put(sas_device);

		/* second put: target_alloc ref */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * scsih_slave_alloc -
device add routine
 * @sdev: scsi device struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* hidden raid components are not bound to upper-level drivers */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* lookup takes a reference; dropped below */
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		/* lookup takes a reference; dropped below */
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}

/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* clear the starget link only when the last lun goes away */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device
= __mpt3sas_get_sdev_from_target(ioc, 2151 sas_target_priv_data); 2152 if (sas_device && !sas_target_priv_data->num_luns) 2153 sas_device->starget = NULL; 2154 2155 if (sas_device) 2156 sas_device_put(sas_device); 2157 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2158 } 2159 2160 kfree(sdev->hostdata); 2161 sdev->hostdata = NULL; 2162 } 2163 2164 /** 2165 * _scsih_display_sata_capabilities - sata capabilities 2166 * @ioc: per adapter object 2167 * @handle: device handle 2168 * @sdev: scsi device struct 2169 */ 2170 static void 2171 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, 2172 u16 handle, struct scsi_device *sdev) 2173 { 2174 Mpi2ConfigReply_t mpi_reply; 2175 Mpi2SasDevicePage0_t sas_device_pg0; 2176 u32 ioc_status; 2177 u16 flags; 2178 u32 device_info; 2179 2180 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 2181 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 2182 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2183 __FILE__, __LINE__, __func__); 2184 return; 2185 } 2186 2187 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 2188 MPI2_IOCSTATUS_MASK; 2189 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2190 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2191 __FILE__, __LINE__, __func__); 2192 return; 2193 } 2194 2195 flags = le16_to_cpu(sas_device_pg0.Flags); 2196 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 2197 2198 sdev_printk(KERN_INFO, sdev, 2199 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " 2200 "sw_preserve(%s)\n", 2201 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", 2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", 2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : 2204 "n", 2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", 2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", 2207 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? 
"y" : "n"); 2208 } 2209 2210 /* 2211 * raid transport support - 2212 * Enabled for SLES11 and newer, in older kernels the driver will panic when 2213 * unloading the driver followed by a load - I believe that the subroutine 2214 * raid_class_release() is not cleaning up properly. 2215 */ 2216 2217 /** 2218 * scsih_is_raid - return boolean indicating device is raid volume 2219 * @dev: the device struct object 2220 */ 2221 static int 2222 scsih_is_raid(struct device *dev) 2223 { 2224 struct scsi_device *sdev = to_scsi_device(dev); 2225 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2226 2227 if (ioc->is_warpdrive) 2228 return 0; 2229 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 2230 } 2231 2232 static int 2233 scsih_is_nvme(struct device *dev) 2234 { 2235 struct scsi_device *sdev = to_scsi_device(dev); 2236 2237 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; 2238 } 2239 2240 /** 2241 * scsih_get_resync - get raid volume resync percent complete 2242 * @dev: the device struct object 2243 */ 2244 static void 2245 scsih_get_resync(struct device *dev) 2246 { 2247 struct scsi_device *sdev = to_scsi_device(dev); 2248 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2249 static struct _raid_device *raid_device; 2250 unsigned long flags; 2251 Mpi2RaidVolPage0_t vol_pg0; 2252 Mpi2ConfigReply_t mpi_reply; 2253 u32 volume_status_flags; 2254 u8 percent_complete; 2255 u16 handle; 2256 2257 percent_complete = 0; 2258 handle = 0; 2259 if (ioc->is_warpdrive) 2260 goto out; 2261 2262 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2263 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2264 sdev->channel); 2265 if (raid_device) { 2266 handle = raid_device->handle; 2267 percent_complete = raid_device->percent_complete; 2268 } 2269 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2270 2271 if (!handle) 2272 goto out; 2273 2274 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2275 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2276 
sizeof(Mpi2RaidVolPage0_t))) { 2277 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2278 __FILE__, __LINE__, __func__); 2279 percent_complete = 0; 2280 goto out; 2281 } 2282 2283 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2284 if (!(volume_status_flags & 2285 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) 2286 percent_complete = 0; 2287 2288 out: 2289 2290 switch (ioc->hba_mpi_version_belonged) { 2291 case MPI2_VERSION: 2292 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 2293 break; 2294 case MPI25_VERSION: 2295 case MPI26_VERSION: 2296 raid_set_resync(mpt3sas_raid_template, dev, percent_complete); 2297 break; 2298 } 2299 } 2300 2301 /** 2302 * scsih_get_state - get raid volume level 2303 * @dev: the device struct object 2304 */ 2305 static void 2306 scsih_get_state(struct device *dev) 2307 { 2308 struct scsi_device *sdev = to_scsi_device(dev); 2309 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2310 static struct _raid_device *raid_device; 2311 unsigned long flags; 2312 Mpi2RaidVolPage0_t vol_pg0; 2313 Mpi2ConfigReply_t mpi_reply; 2314 u32 volstate; 2315 enum raid_state state = RAID_STATE_UNKNOWN; 2316 u16 handle = 0; 2317 2318 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2319 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2320 sdev->channel); 2321 if (raid_device) 2322 handle = raid_device->handle; 2323 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2324 2325 if (!raid_device) 2326 goto out; 2327 2328 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2329 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2330 sizeof(Mpi2RaidVolPage0_t))) { 2331 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2332 __FILE__, __LINE__, __func__); 2333 goto out; 2334 } 2335 2336 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2337 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { 2338 state = RAID_STATE_RESYNCING; 2339 goto out; 2340 } 2341 2342 switch (vol_pg0.VolumeState) { 2343 case 
MPI2_RAID_VOL_STATE_OPTIMAL: 2344 case MPI2_RAID_VOL_STATE_ONLINE: 2345 state = RAID_STATE_ACTIVE; 2346 break; 2347 case MPI2_RAID_VOL_STATE_DEGRADED: 2348 state = RAID_STATE_DEGRADED; 2349 break; 2350 case MPI2_RAID_VOL_STATE_FAILED: 2351 case MPI2_RAID_VOL_STATE_MISSING: 2352 state = RAID_STATE_OFFLINE; 2353 break; 2354 } 2355 out: 2356 switch (ioc->hba_mpi_version_belonged) { 2357 case MPI2_VERSION: 2358 raid_set_state(mpt2sas_raid_template, dev, state); 2359 break; 2360 case MPI25_VERSION: 2361 case MPI26_VERSION: 2362 raid_set_state(mpt3sas_raid_template, dev, state); 2363 break; 2364 } 2365 } 2366 2367 /** 2368 * _scsih_set_level - set raid level 2369 * @ioc: ? 2370 * @sdev: scsi device struct 2371 * @volume_type: volume type 2372 */ 2373 static void 2374 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, 2375 struct scsi_device *sdev, u8 volume_type) 2376 { 2377 enum raid_level level = RAID_LEVEL_UNKNOWN; 2378 2379 switch (volume_type) { 2380 case MPI2_RAID_VOL_TYPE_RAID0: 2381 level = RAID_LEVEL_0; 2382 break; 2383 case MPI2_RAID_VOL_TYPE_RAID10: 2384 level = RAID_LEVEL_10; 2385 break; 2386 case MPI2_RAID_VOL_TYPE_RAID1E: 2387 level = RAID_LEVEL_1E; 2388 break; 2389 case MPI2_RAID_VOL_TYPE_RAID1: 2390 level = RAID_LEVEL_1; 2391 break; 2392 } 2393 2394 switch (ioc->hba_mpi_version_belonged) { 2395 case MPI2_VERSION: 2396 raid_set_level(mpt2sas_raid_template, 2397 &sdev->sdev_gendev, level); 2398 break; 2399 case MPI25_VERSION: 2400 case MPI26_VERSION: 2401 raid_set_level(mpt3sas_raid_template, 2402 &sdev->sdev_gendev, level); 2403 break; 2404 } 2405 } 2406 2407 2408 /** 2409 * _scsih_get_volume_capabilities - volume capabilities 2410 * @ioc: per adapter object 2411 * @raid_device: the raid_device object 2412 * 2413 * Return: 0 for success, else 1 2414 */ 2415 static int 2416 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, 2417 struct _raid_device *raid_device) 2418 { 2419 Mpi2RaidVolPage0_t *vol_pg0; 2420 Mpi2RaidPhysDiskPage0_t pd_pg0; 2421 
Mpi2SasDevicePage0_t sas_device_pg0; 2422 Mpi2ConfigReply_t mpi_reply; 2423 u16 sz; 2424 u8 num_pds; 2425 2426 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, 2427 &num_pds)) || !num_pds) { 2428 dfailprintk(ioc, 2429 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2430 __FILE__, __LINE__, __func__)); 2431 return 1; 2432 } 2433 2434 raid_device->num_pds = num_pds; 2435 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * 2436 sizeof(Mpi2RaidVol0PhysDisk_t)); 2437 vol_pg0 = kzalloc(sz, GFP_KERNEL); 2438 if (!vol_pg0) { 2439 dfailprintk(ioc, 2440 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2441 __FILE__, __LINE__, __func__)); 2442 return 1; 2443 } 2444 2445 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, 2446 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { 2447 dfailprintk(ioc, 2448 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2449 __FILE__, __LINE__, __func__)); 2450 kfree(vol_pg0); 2451 return 1; 2452 } 2453 2454 raid_device->volume_type = vol_pg0->VolumeType; 2455 2456 /* figure out what the underlying devices are by 2457 * obtaining the device_info bits for the 1st device 2458 */ 2459 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 2460 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 2461 vol_pg0->PhysDisk[0].PhysDiskNum))) { 2462 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 2463 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 2464 le16_to_cpu(pd_pg0.DevHandle)))) { 2465 raid_device->device_info = 2466 le32_to_cpu(sas_device_pg0.DeviceInfo); 2467 } 2468 } 2469 2470 kfree(vol_pg0); 2471 return 0; 2472 } 2473 2474 /** 2475 * _scsih_enable_tlr - setting TLR flags 2476 * @ioc: per adapter object 2477 * @sdev: scsi device struct 2478 * 2479 * Enabling Transaction Layer Retries for tape devices when 2480 * vpd page 0x90 is present 2481 * 2482 */ 2483 static void 2484 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) 2485 { 2486 2487 /* only for TAPE */ 2488 if (sdev->type != TYPE_TAPE) 
2489 return; 2490 2491 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) 2492 return; 2493 2494 sas_enable_tlr(sdev); 2495 sdev_printk(KERN_INFO, sdev, "TLR %s\n", 2496 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); 2497 return; 2498 2499 } 2500 2501 /** 2502 * scsih_slave_configure - device configure routine. 2503 * @sdev: scsi device struct 2504 * 2505 * Return: 0 if ok. Any other return is assumed to be an error and 2506 * the device is ignored. 2507 */ 2508 static int 2509 scsih_slave_configure(struct scsi_device *sdev) 2510 { 2511 struct Scsi_Host *shost = sdev->host; 2512 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2513 struct MPT3SAS_DEVICE *sas_device_priv_data; 2514 struct MPT3SAS_TARGET *sas_target_priv_data; 2515 struct _sas_device *sas_device; 2516 struct _pcie_device *pcie_device; 2517 struct _raid_device *raid_device; 2518 unsigned long flags; 2519 int qdepth; 2520 u8 ssp_target = 0; 2521 char *ds = ""; 2522 char *r_level = ""; 2523 u16 handle, volume_handle = 0; 2524 u64 volume_wwid = 0; 2525 2526 qdepth = 1; 2527 sas_device_priv_data = sdev->hostdata; 2528 sas_device_priv_data->configured_lun = 1; 2529 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; 2530 sas_target_priv_data = sas_device_priv_data->sas_target; 2531 handle = sas_target_priv_data->handle; 2532 2533 /* raid volume handling */ 2534 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { 2535 2536 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2537 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 2538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2539 if (!raid_device) { 2540 dfailprintk(ioc, 2541 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2542 __FILE__, __LINE__, __func__)); 2543 return 1; 2544 } 2545 2546 if (_scsih_get_volume_capabilities(ioc, raid_device)) { 2547 dfailprintk(ioc, 2548 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2549 __FILE__, __LINE__, __func__)); 2550 return 1; 2551 } 2552 2553 /* 2554 * WARPDRIVE: 
Initialize the required data for Direct IO 2555 */ 2556 mpt3sas_init_warpdrive_properties(ioc, raid_device); 2557 2558 /* RAID Queue Depth Support 2559 * IS volume = underlying qdepth of drive type, either 2560 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH 2561 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) 2562 */ 2563 if (raid_device->device_info & 2564 MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2565 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2566 ds = "SSP"; 2567 } else { 2568 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2569 if (raid_device->device_info & 2570 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2571 ds = "SATA"; 2572 else 2573 ds = "STP"; 2574 } 2575 2576 switch (raid_device->volume_type) { 2577 case MPI2_RAID_VOL_TYPE_RAID0: 2578 r_level = "RAID0"; 2579 break; 2580 case MPI2_RAID_VOL_TYPE_RAID1E: 2581 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2582 if (ioc->manu_pg10.OEMIdentifier && 2583 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & 2584 MFG10_GF0_R10_DISPLAY) && 2585 !(raid_device->num_pds % 2)) 2586 r_level = "RAID10"; 2587 else 2588 r_level = "RAID1E"; 2589 break; 2590 case MPI2_RAID_VOL_TYPE_RAID1: 2591 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2592 r_level = "RAID1"; 2593 break; 2594 case MPI2_RAID_VOL_TYPE_RAID10: 2595 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2596 r_level = "RAID10"; 2597 break; 2598 case MPI2_RAID_VOL_TYPE_UNKNOWN: 2599 default: 2600 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2601 r_level = "RAIDX"; 2602 break; 2603 } 2604 2605 if (!ioc->hide_ir_msg) 2606 sdev_printk(KERN_INFO, sdev, 2607 "%s: handle(0x%04x), wwid(0x%016llx)," 2608 " pd_count(%d), type(%s)\n", 2609 r_level, raid_device->handle, 2610 (unsigned long long)raid_device->wwid, 2611 raid_device->num_pds, ds); 2612 2613 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { 2614 blk_queue_max_hw_sectors(sdev->request_queue, 2615 MPT3SAS_RAID_MAX_SECTORS); 2616 sdev_printk(KERN_INFO, sdev, 2617 "Set queue's max_sector to: %u\n", 2618 MPT3SAS_RAID_MAX_SECTORS); 2619 } 2620 2621 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2622 
2623 /* raid transport support */ 2624 if (!ioc->is_warpdrive) 2625 _scsih_set_level(ioc, sdev, raid_device->volume_type); 2626 return 0; 2627 } 2628 2629 /* non-raid handling */ 2630 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { 2631 if (mpt3sas_config_get_volume_handle(ioc, handle, 2632 &volume_handle)) { 2633 dfailprintk(ioc, 2634 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2635 __FILE__, __LINE__, __func__)); 2636 return 1; 2637 } 2638 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, 2639 volume_handle, &volume_wwid)) { 2640 dfailprintk(ioc, 2641 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2642 __FILE__, __LINE__, __func__)); 2643 return 1; 2644 } 2645 } 2646 2647 /* PCIe handling */ 2648 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 2649 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 2650 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, 2651 sas_device_priv_data->sas_target->sas_address); 2652 if (!pcie_device) { 2653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2654 dfailprintk(ioc, 2655 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2656 __FILE__, __LINE__, __func__)); 2657 return 1; 2658 } 2659 2660 qdepth = MPT3SAS_NVME_QUEUE_DEPTH; 2661 ds = "NVMe"; 2662 sdev_printk(KERN_INFO, sdev, 2663 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", 2664 ds, handle, (unsigned long long)pcie_device->wwid, 2665 pcie_device->port_num); 2666 if (pcie_device->enclosure_handle != 0) 2667 sdev_printk(KERN_INFO, sdev, 2668 "%s: enclosure logical id(0x%016llx), slot(%d)\n", 2669 ds, 2670 (unsigned long long)pcie_device->enclosure_logical_id, 2671 pcie_device->slot); 2672 if (pcie_device->connector_name[0] != '\0') 2673 sdev_printk(KERN_INFO, sdev, 2674 "%s: enclosure level(0x%04x)," 2675 "connector name( %s)\n", ds, 2676 pcie_device->enclosure_level, 2677 pcie_device->connector_name); 2678 2679 if (pcie_device->nvme_mdts) 2680 blk_queue_max_hw_sectors(sdev->request_queue, 2681 pcie_device->nvme_mdts/512); 2682 2683 
pcie_device_put(pcie_device); 2684 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2685 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2686 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be 2687 ** merged and can eliminate holes created during merging 2688 ** operation. 2689 **/ 2690 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, 2691 sdev->request_queue); 2692 blk_queue_virt_boundary(sdev->request_queue, 2693 ioc->page_size - 1); 2694 return 0; 2695 } 2696 2697 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2698 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 2699 sas_device_priv_data->sas_target->sas_address, 2700 sas_device_priv_data->sas_target->port); 2701 if (!sas_device) { 2702 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2703 dfailprintk(ioc, 2704 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2705 __FILE__, __LINE__, __func__)); 2706 return 1; 2707 } 2708 2709 sas_device->volume_handle = volume_handle; 2710 sas_device->volume_wwid = volume_wwid; 2711 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2712 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2713 ssp_target = 1; 2714 if (sas_device->device_info & 2715 MPI2_SAS_DEVICE_INFO_SEP) { 2716 sdev_printk(KERN_WARNING, sdev, 2717 "set ignore_delay_remove for handle(0x%04x)\n", 2718 sas_device_priv_data->sas_target->handle); 2719 sas_device_priv_data->ignore_delay_remove = 1; 2720 ds = "SES"; 2721 } else 2722 ds = "SSP"; 2723 } else { 2724 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2725 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) 2726 ds = "STP"; 2727 else if (sas_device->device_info & 2728 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2729 ds = "SATA"; 2730 } 2731 2732 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ 2733 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 2734 ds, handle, (unsigned long long)sas_device->sas_address, 2735 sas_device->phy, (unsigned long long)sas_device->device_name); 2736 2737 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); 2738 
2739 sas_device_put(sas_device); 2740 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2741 2742 if (!ssp_target) 2743 _scsih_display_sata_capabilities(ioc, handle, sdev); 2744 2745 2746 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2747 2748 if (ssp_target) { 2749 sas_read_port_mode_page(sdev); 2750 _scsih_enable_tlr(ioc, sdev); 2751 } 2752 2753 return 0; 2754 } 2755 2756 /** 2757 * scsih_bios_param - fetch head, sector, cylinder info for a disk 2758 * @sdev: scsi device struct 2759 * @bdev: pointer to block device context 2760 * @capacity: device size (in 512 byte sectors) 2761 * @params: three element array to place output: 2762 * params[0] number of heads (max 255) 2763 * params[1] number of sectors (max 63) 2764 * params[2] number of cylinders 2765 */ 2766 static int 2767 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2768 sector_t capacity, int params[]) 2769 { 2770 int heads; 2771 int sectors; 2772 sector_t cylinders; 2773 ulong dummy; 2774 2775 heads = 64; 2776 sectors = 32; 2777 2778 dummy = heads * sectors; 2779 cylinders = capacity; 2780 sector_div(cylinders, dummy); 2781 2782 /* 2783 * Handle extended translation size for logical drives 2784 * > 1Gb 2785 */ 2786 if ((ulong)capacity >= 0x200000) { 2787 heads = 255; 2788 sectors = 63; 2789 dummy = heads * sectors; 2790 cylinders = capacity; 2791 sector_div(cylinders, dummy); 2792 } 2793 2794 /* return result */ 2795 params[0] = heads; 2796 params[1] = sectors; 2797 params[2] = cylinders; 2798 2799 return 0; 2800 } 2801 2802 /** 2803 * _scsih_response_code - translation of device response code 2804 * @ioc: per adapter object 2805 * @response_code: response code returned by the device 2806 */ 2807 static void 2808 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) 2809 { 2810 char *desc; 2811 2812 switch (response_code) { 2813 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 2814 desc = "task management request completed"; 2815 break; 2816 case 
MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 2817 desc = "invalid frame"; 2818 break; 2819 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 2820 desc = "task management request not supported"; 2821 break; 2822 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 2823 desc = "task management request failed"; 2824 break; 2825 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 2826 desc = "task management request succeeded"; 2827 break; 2828 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 2829 desc = "invalid lun"; 2830 break; 2831 case 0xA: 2832 desc = "overlapped tag attempted"; 2833 break; 2834 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 2835 desc = "task queued, however not sent to target"; 2836 break; 2837 default: 2838 desc = "unknown"; 2839 break; 2840 } 2841 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); 2842 } 2843 2844 /** 2845 * _scsih_tm_done - tm completion routine 2846 * @ioc: per adapter object 2847 * @smid: system request message index 2848 * @msix_index: MSIX table index supplied by the OS 2849 * @reply: reply message frame(lower 32bit addr) 2850 * Context: none. 2851 * 2852 * The callback handler when using scsih_issue_tm. 2853 * 2854 * Return: 1 meaning mf should be freed from _base_interrupt 2855 * 0 means the mf is freed from this function. 
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore completions that don't belong to the pending TM command */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit words, hence *4 for bytes */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}

/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During a task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/* 'skip' short-circuits remaining iterations instead of breaking,
	 * so shost_for_each_device() can still drop its device reference.
	 */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}

/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During a task management request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/* mirror of mpt3sas_scsih_set_tm_flag(): 'skip' avoids breaking
	 * out of shost_for_each_device() so its refcounting stays balanced.
	 */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}

/**
 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
 * @ioc: per adapter object
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 *
 * Look whether TM has aborted the timed out SCSI command, if
 * TM has aborted the IO then return SUCCESS else return FAILED.
 */
static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
	uint id, uint lun, u8 type, u16 smid_task)
{

	/* smids <= can_queue belong to SCSI IO requests; larger smids are
	 * driver-internal (scsih/ctl) command slots handled below.
	 */
	if (smid_task <= ioc->shost->can_queue) {
		switch (type) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* no outstanding IO left on the target => aborted */
			if (!(_scsih_scsi_lookup_find_by_target(ioc,
			    id, channel)))
				return SUCCESS;
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
			    lun, channel)))
				return SUCCESS;
			break;
		default:
			return SUCCESS;
		}
	} else if (smid_task == ioc->scsih_cmds.smid) {
		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	} else if (smid_task == ioc->ctl_cmds.smid) {
		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	}

	return FAILED;
}

/**
 * scsih_tm_post_processing - post processing of target & LUN reset
 * @ioc: per adapter object
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 *
 * Post processing of target & LUN reset. Due to interrupt latency
 * issue it is possible that interrupt for aborted IO might not be
 * received yet. So before returning failure status, poll the
 * reply descriptor pools for the reply of timed out SCSI command.
 * Return FAILED status if reply for timed out is not received
 * otherwise return SUCCESS.
 */
static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, uint lun, u8 type, u16 smid_task)
{
	int rc;

	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
	if (rc == SUCCESS)
		return rc;

	ioc_info(ioc,
	    "Poll ReplyDescriptor queues for completion of"
	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
	    smid_task, type, handle);

	/*
	 * Due to interrupt latency issues, driver may receive interrupt for
	 * TM first and then for aborted SCSI IO command. So, poll all the
	 * ReplyDescriptor pools before returning the FAILED status to SML.
	 */
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_sync_reply_irqs(ioc, 1);
	mpt3sas_base_unmask_interrupts(ioc);

	/* re-check now that any straggling completions were drained */
	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
}

/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* caller must hold tm_cmds.mutex (see issue_locked_tm wrapper) */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* a wedged/faulted/coredumped IOC can't process a TM - fall back
	 * to a full hard reset and report its outcome instead.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the hi-priority request queue */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method is only meaningful for abort/query task types */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
			    FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If the DevHandle field in smid_task's entry of the request
		 * pool doesn't match the device handle on which this task
		 * abort TM was issued, then it means that TM has successfully
		 * aborted the timed out command: smid_task's entry in the
		 * request pool is memset to zero once the timed out command
		 * is returned to the SML. If the command was not aborted
		 * then smid_task's entry won't be cleared, it will still
		 * have the same DevHandle value on which this task abort TM
		 * was issued and the driver returns the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_scsih_issue_locked_tm - locking wrapper for issue_tm
 *
 * Same parameters and return as mpt3sas_scsih_issue_tm(); serializes
 * callers on ioc->tm_cmds.mutex.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}

/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* WarpDrive controllers hide IR (RAID) messages behind this flag. */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe attached device: lookup under pcie_device_lock. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			/* drop reference taken by __mpt3sas_get_pdev_from_target() */
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* Plain SAS end device: lookup under sas_device_lock. */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}

/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	/* default abort timeout in seconds; lengthened for NVMe below */
	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scmd->request->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device gone: complete the command back to the midlayer */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* NVMe devices (non SCSI-over-PCIe) use the firmware abort timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}

/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset with their own timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}

/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset with their own timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}


/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_host_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	int r, retval;

	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
	scsi_print_command(scmd);

	/* refuse to reset while the driver is still loading or unloading */
	if (ioc->is_driver_loading || ioc->remove_host) {
		ioc_info(ioc, "Blocking the host reset\n");
		r = FAILED;
		goto out;
	}

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	r = (retval < 0) ? FAILED : SUCCESS;
out:
	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);

	return r;
}

/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* first reference: the fw_event_list entry */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* second reference: the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * If the fw_event is on the fw_event_list, remove it and do a put.
 */
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
	*fw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&fw_event->list)) {
		list_del_init(&fw_event->list);
		/* drop the reference held by the list */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}


/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 */
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
	struct fw_event_work *fw_event;
	u16 sz;

	if (ioc->is_driver_loading)
		return;
	sz = sizeof(*event_data);
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
	fw_event->ioc = ioc;
	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
	_scsih_fw_event_add(ioc, fw_event);
	/* drop the allocation reference; the queue holds its own */
	fw_event_work_put(fw_event);
}

/**
 * _scsih_error_recovery_delete_devices - remove devices not responding
 * @ioc: per adapter object
 */
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * mpt3sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 */
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/*
 * dequeue_next_fw_event - pop the first fw_event off ioc->fw_event_list.
 * Returns the event (still holding the list's reference) or NULL when the
 * list is empty. Takes ioc->fw_event_lock internally.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;
	/*
	 * Set current running event as ignore, so that
	 * current running event will exit quickly.
	 * As diag reset has occurred it is of no use
	 * to process remaining stale event data entries.
	 */
	if (ioc->shost_recovery && ioc->current_event)
		ioc->current_event->ignore = 1;

	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	     (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Orginal logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver use to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the drives to unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Driver has to clear ioc->start_scan flag when
		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
		 * otherwise scsi_scan_host() API waits for the
		 * 5 minute timer to expire. If we exit from
		 * scsi_scan_host() early then we can issue the
		 * new port enable request as part of current diag reset.
		 */
		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			ioc->start_scan = 0;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

		/* drop the reference taken at dequeue (or for current_event) */
		fw_event_work_put(fw_event);
	}
	ioc->fw_events_cleanup = 0;
}

/**
 * _scsih_internal_device_block - block the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * make sure device is blocked without error, if not
 * print an error
 */
static void
_scsih_internal_device_block(struct scsi_device *sdev,
			struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
	    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 1;

	r = scsi_internal_device_block_nowait(sdev);
	if (r == -EINVAL)
		sdev_printk(KERN_WARNING, sdev,
		    "device_block failed with return(%d) for handle(0x%04x)\n",
		    r, sas_device_priv_data->sas_target->handle);
}

/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
			struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}

/**
 * _scsih_ublock_io_all_device - unblock every device
 * @ioc: per adapter object
 *
 * change the device state from block to running
 */
static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (!sas_device_priv_data->block)
			continue;

		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
			"device_running, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle));
		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
	}
}


/**
 * _scsih_ublock_io_device - prepare device to be deleted
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 *
 * unblock then put device in offline state
 */
static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		/* match on both sas_address and hba port */
		if (sas_device_priv_data->sas_target->sas_address
		    != sas_address)
			continue;
		if (sas_device_priv_data->sas_target->port != port)
			continue;
		if (sas_device_priv_data->block)
			_scsih_internal_device_unblock(sdev,
				sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* SES devices are left running so enclosure services survive */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	/* takes a reference; released at the bottom */
	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* skip devices whose sas rphy registration is still pending */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		/* SES devices are left running so enclosure services survive */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* first pass: mark every attached end device for blocking */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
						ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* second pass: recurse into downstream expanders */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull.
4013 */ 4014 static void 4015 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, 4016 Mpi2EventDataSasTopologyChangeList_t *event_data) 4017 { 4018 int i; 4019 u16 handle; 4020 u16 reason_code; 4021 4022 for (i = 0; i < event_data->NumEntries; i++) { 4023 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 4024 if (!handle) 4025 continue; 4026 reason_code = event_data->PHY[i].PhyStatus & 4027 MPI2_EVENT_SAS_TOPO_RC_MASK; 4028 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) 4029 _scsih_block_io_device(ioc, handle); 4030 } 4031 } 4032 4033 /** 4034 * _scsih_block_io_to_pcie_children_attached_directly 4035 * @ioc: per adapter object 4036 * @event_data: topology change event data 4037 * 4038 * This routine set sdev state to SDEV_BLOCK for all devices 4039 * direct attached during device pull/reconnect. 4040 */ 4041 static void 4042 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, 4043 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 4044 { 4045 int i; 4046 u16 handle; 4047 u16 reason_code; 4048 4049 for (i = 0; i < event_data->NumEntries; i++) { 4050 handle = 4051 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 4052 if (!handle) 4053 continue; 4054 reason_code = event_data->PortEntry[i].PortStatus; 4055 if (reason_code == 4056 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) 4057 _scsih_block_io_device(ioc, handle); 4058 } 4059 } 4060 /** 4061 * _scsih_tm_tr_send - send task management request 4062 * @ioc: per adapter object 4063 * @handle: device handle 4064 * Context: interrupt time. 4065 * 4066 * This code is to initiate the device removal handshake protocol 4067 * with controller firmware. This function will issue target reset 4068 * using high priority request queue. It will send a sas iounit 4069 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion. 4070 * 4071 * This is designed to send muliple task management request at the same 4072 * time to the fifo. 
 * If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	/* no handshake possible while PCI error recovery is in progress */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* look the handle up as a SAS device first */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device) {
		/* not SAS: check for an NVMe device on this handle */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		/*
		 * Real NVMe devices (not NVMe behind a SCSI translation
		 * layer) get a protocol-level reset; everything else gets
		 * a link reset.
		 */
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		/* unblock queued IO so it can be failed/flushed */
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		/* hi-priority fifo full: queue the TR for a later completion */
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

 out:
	/* drop the references taken by the __mpt3sas_get_*_by_handle lookups */
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}

/**
 * _scsih_tm_tr_complete - target reset completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *	0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* cross-check the reply against the original TR request frame */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		/* request fifo full: queue the REMOVE_DEVICE for later */
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	/* second half of the handshake: tell FW to remove the device */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_allow_scmd_to_device - check whether scmd needs to
 *	issue to IOC or not.
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * Returns true if scmd can be issued to IOC otherwise returns false.
 */
inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{

	if (ioc->pci_error_recovery)
		return false;

	/* MPI2 (SAS2) controllers: block everything during host removal */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
		if (ioc->remove_host)
			return false;

		return true;
	}

	/*
	 * MPI2.5/2.6 controllers: during host removal still allow cache
	 * flush and start/stop so devices can be shut down cleanly.
	 */
	if (ioc->remove_host) {

		switch (scmd->cmnd[0]) {
		case SYNCHRONIZE_CACHE:
		case START_STOP:
			return true;
		default:
			return false;
		}
	}

	return true;
}

/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
4338 * This code is part of the code to initiate the device removal 4339 * handshake protocol with controller firmware. 4340 * 4341 * Return: 1 meaning mf should be freed from _base_interrupt 4342 * 0 means the mf is freed from this function. 4343 */ 4344 static u8 4345 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4346 u8 msix_index, u32 reply) 4347 { 4348 Mpi2SasIoUnitControlReply_t *mpi_reply = 4349 mpt3sas_base_get_reply_virt_addr(ioc, reply); 4350 4351 if (likely(mpi_reply)) { 4352 dewtprintk(ioc, 4353 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 4354 le16_to_cpu(mpi_reply->DevHandle), smid, 4355 le16_to_cpu(mpi_reply->IOCStatus), 4356 le32_to_cpu(mpi_reply->IOCLogInfo))); 4357 if (le16_to_cpu(mpi_reply->IOCStatus) == 4358 MPI2_IOCSTATUS_SUCCESS) { 4359 clear_bit(le16_to_cpu(mpi_reply->DevHandle), 4360 ioc->device_remove_in_progress); 4361 } 4362 } else { 4363 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 4364 __FILE__, __LINE__, __func__); 4365 } 4366 return mpt3sas_check_for_pending_internal_cmds(ioc, smid); 4367 } 4368 4369 /** 4370 * _scsih_tm_tr_volume_send - send target reset request for volumes 4371 * @ioc: per adapter object 4372 * @handle: device handle 4373 * Context: interrupt time. 4374 * 4375 * This is designed to send muliple task management request at the same 4376 * time to the fifo. If the fifo is full, we will append the request, 4377 * and process it in a future completion. 
4378 */ 4379 static void 4380 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) 4381 { 4382 Mpi2SCSITaskManagementRequest_t *mpi_request; 4383 u16 smid; 4384 struct _tr_list *delayed_tr; 4385 4386 if (ioc->pci_error_recovery) { 4387 dewtprintk(ioc, 4388 ioc_info(ioc, "%s: host reset in progress!\n", 4389 __func__)); 4390 return; 4391 } 4392 4393 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); 4394 if (!smid) { 4395 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 4396 if (!delayed_tr) 4397 return; 4398 INIT_LIST_HEAD(&delayed_tr->list); 4399 delayed_tr->handle = handle; 4400 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); 4401 dewtprintk(ioc, 4402 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", 4403 handle)); 4404 return; 4405 } 4406 4407 dewtprintk(ioc, 4408 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4409 handle, smid, ioc->tm_tr_volume_cb_idx)); 4410 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4411 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 4412 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 4413 mpi_request->DevHandle = cpu_to_le16(handle); 4414 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 4415 ioc->put_smid_hi_priority(ioc, smid, 0); 4416 } 4417 4418 /** 4419 * _scsih_tm_volume_tr_complete - target reset completion 4420 * @ioc: per adapter object 4421 * @smid: system request message index 4422 * @msix_index: MSIX table index supplied by the OS 4423 * @reply: reply message frame(lower 32bit addr) 4424 * Context: interrupt time. 4425 * 4426 * Return: 1 meaning mf should be freed from _base_interrupt 4427 * 0 means the mf is freed from this function. 
4428 */ 4429 static u8 4430 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4431 u8 msix_index, u32 reply) 4432 { 4433 u16 handle; 4434 Mpi2SCSITaskManagementRequest_t *mpi_request_tm; 4435 Mpi2SCSITaskManagementReply_t *mpi_reply = 4436 mpt3sas_base_get_reply_virt_addr(ioc, reply); 4437 4438 if (ioc->shost_recovery || ioc->pci_error_recovery) { 4439 dewtprintk(ioc, 4440 ioc_info(ioc, "%s: host reset in progress!\n", 4441 __func__)); 4442 return 1; 4443 } 4444 if (unlikely(!mpi_reply)) { 4445 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 4446 __FILE__, __LINE__, __func__); 4447 return 1; 4448 } 4449 4450 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); 4451 handle = le16_to_cpu(mpi_request_tm->DevHandle); 4452 if (handle != le16_to_cpu(mpi_reply->DevHandle)) { 4453 dewtprintk(ioc, 4454 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", 4455 handle, le16_to_cpu(mpi_reply->DevHandle), 4456 smid)); 4457 return 0; 4458 } 4459 4460 dewtprintk(ioc, 4461 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n", 4462 handle, smid, le16_to_cpu(mpi_reply->IOCStatus), 4463 le32_to_cpu(mpi_reply->IOCLogInfo), 4464 le32_to_cpu(mpi_reply->TerminationCount))); 4465 4466 return _scsih_check_for_pending_tm(ioc, smid); 4467 } 4468 4469 /** 4470 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages 4471 * @ioc: per adapter object 4472 * @smid: system request message index 4473 * @event: Event ID 4474 * @event_context: used to track events uniquely 4475 * 4476 * Context - processed in interrupt context. 
4477 */ 4478 static void 4479 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, 4480 U32 event_context) 4481 { 4482 Mpi2EventAckRequest_t *ack_request; 4483 int i = smid - ioc->internal_smid; 4484 unsigned long flags; 4485 4486 /* Without releasing the smid just update the 4487 * call back index and reuse the same smid for 4488 * processing this delayed request 4489 */ 4490 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4491 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx; 4492 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4493 4494 dewtprintk(ioc, 4495 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n", 4496 le16_to_cpu(event), smid, ioc->base_cb_idx)); 4497 ack_request = mpt3sas_base_get_msg_frame(ioc, smid); 4498 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); 4499 ack_request->Function = MPI2_FUNCTION_EVENT_ACK; 4500 ack_request->Event = event; 4501 ack_request->EventContext = event_context; 4502 ack_request->VF_ID = 0; /* TODO */ 4503 ack_request->VP_ID = 0; 4504 ioc->put_smid_default(ioc, smid); 4505 } 4506 4507 /** 4508 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed 4509 * sas_io_unit_ctrl messages 4510 * @ioc: per adapter object 4511 * @smid: system request message index 4512 * @handle: device handle 4513 * 4514 * Context - processed in interrupt context. 
4515 */ 4516 static void 4517 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, 4518 u16 smid, u16 handle) 4519 { 4520 Mpi2SasIoUnitControlRequest_t *mpi_request; 4521 u32 ioc_state; 4522 int i = smid - ioc->internal_smid; 4523 unsigned long flags; 4524 4525 if (ioc->remove_host) { 4526 dewtprintk(ioc, 4527 ioc_info(ioc, "%s: host has been removed\n", 4528 __func__)); 4529 return; 4530 } else if (ioc->pci_error_recovery) { 4531 dewtprintk(ioc, 4532 ioc_info(ioc, "%s: host in pci error recovery\n", 4533 __func__)); 4534 return; 4535 } 4536 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4537 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4538 dewtprintk(ioc, 4539 ioc_info(ioc, "%s: host is not operational\n", 4540 __func__)); 4541 return; 4542 } 4543 4544 /* Without releasing the smid just update the 4545 * call back index and reuse the same smid for 4546 * processing this delayed request 4547 */ 4548 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4549 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; 4550 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4551 4552 dewtprintk(ioc, 4553 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4554 handle, smid, ioc->tm_sas_control_cb_idx)); 4555 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4556 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 4557 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 4558 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 4559 mpi_request->DevHandle = cpu_to_le16(handle); 4560 ioc->put_smid_default(ioc, smid); 4561 } 4562 4563 /** 4564 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages 4565 * @ioc: per adapter object 4566 * @smid: system request message index 4567 * 4568 * Context: Executed in interrupt context 4569 * 4570 * This will check delayed internal messages list, and process the 4571 * next request. 
4572 * 4573 * Return: 1 meaning mf should be freed from _base_interrupt 4574 * 0 means the mf is freed from this function. 4575 */ 4576 u8 4577 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4578 { 4579 struct _sc_list *delayed_sc; 4580 struct _event_ack_list *delayed_event_ack; 4581 4582 if (!list_empty(&ioc->delayed_event_ack_list)) { 4583 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, 4584 struct _event_ack_list, list); 4585 _scsih_issue_delayed_event_ack(ioc, smid, 4586 delayed_event_ack->Event, delayed_event_ack->EventContext); 4587 list_del(&delayed_event_ack->list); 4588 kfree(delayed_event_ack); 4589 return 0; 4590 } 4591 4592 if (!list_empty(&ioc->delayed_sc_list)) { 4593 delayed_sc = list_entry(ioc->delayed_sc_list.next, 4594 struct _sc_list, list); 4595 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, 4596 delayed_sc->handle); 4597 list_del(&delayed_sc->list); 4598 kfree(delayed_sc); 4599 return 0; 4600 } 4601 return 1; 4602 } 4603 4604 /** 4605 * _scsih_check_for_pending_tm - check for pending task management 4606 * @ioc: per adapter object 4607 * @smid: system request message index 4608 * 4609 * This will check delayed target reset list, and feed the 4610 * next reqeust. 4611 * 4612 * Return: 1 meaning mf should be freed from _base_interrupt 4613 * 0 means the mf is freed from this function. 
4614 */ 4615 static u8 4616 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4617 { 4618 struct _tr_list *delayed_tr; 4619 4620 if (!list_empty(&ioc->delayed_tr_volume_list)) { 4621 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, 4622 struct _tr_list, list); 4623 mpt3sas_base_free_smid(ioc, smid); 4624 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); 4625 list_del(&delayed_tr->list); 4626 kfree(delayed_tr); 4627 return 0; 4628 } 4629 4630 if (!list_empty(&ioc->delayed_tr_list)) { 4631 delayed_tr = list_entry(ioc->delayed_tr_list.next, 4632 struct _tr_list, list); 4633 mpt3sas_base_free_smid(ioc, smid); 4634 _scsih_tm_tr_send(ioc, delayed_tr->handle); 4635 list_del(&delayed_tr->list); 4636 kfree(delayed_tr); 4637 return 0; 4638 } 4639 4640 return 1; 4641 } 4642 4643 /** 4644 * _scsih_check_topo_delete_events - sanity check on topo events 4645 * @ioc: per adapter object 4646 * @event_data: the event data payload 4647 * 4648 * This routine added to better handle cable breaker. 4649 * 4650 * This handles the case where driver receives multiple expander 4651 * add and delete events in a single shot. When there is a delete event 4652 * the routine will void any pending add events waiting in the event queue. 
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* kick off device-removal handshake for vanished targets */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	/* a handle below num_phys means direct attached, not an expander */
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* drain the blocking_handles bitmap, blocking each device */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			/* queued add for the expander we just deleted: void it */
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 *	events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
4734 */ 4735 static void 4736 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, 4737 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 4738 { 4739 struct fw_event_work *fw_event; 4740 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data; 4741 unsigned long flags; 4742 int i, reason_code; 4743 u16 handle, switch_handle; 4744 4745 for (i = 0; i < event_data->NumEntries; i++) { 4746 handle = 4747 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 4748 if (!handle) 4749 continue; 4750 reason_code = event_data->PortEntry[i].PortStatus; 4751 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING) 4752 _scsih_tm_tr_send(ioc, handle); 4753 } 4754 4755 switch_handle = le16_to_cpu(event_data->SwitchDevHandle); 4756 if (!switch_handle) { 4757 _scsih_block_io_to_pcie_children_attached_directly( 4758 ioc, event_data); 4759 return; 4760 } 4761 /* TODO We are not supporting cascaded PCIe Switch removal yet*/ 4762 if ((event_data->SwitchStatus 4763 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) || 4764 (event_data->SwitchStatus == 4765 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING)) 4766 _scsih_block_io_to_pcie_children_attached_directly( 4767 ioc, event_data); 4768 4769 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) 4770 return; 4771 4772 /* mark ignore flag for pending events */ 4773 spin_lock_irqsave(&ioc->fw_event_lock, flags); 4774 list_for_each_entry(fw_event, &ioc->fw_event_list, list) { 4775 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST || 4776 fw_event->ignore) 4777 continue; 4778 local_event_data = 4779 (Mpi26EventDataPCIeTopologyChangeList_t *) 4780 fw_event->event_data; 4781 if (local_event_data->SwitchStatus == 4782 MPI2_EVENT_SAS_TOPO_ES_ADDED || 4783 local_event_data->SwitchStatus == 4784 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { 4785 if (le16_to_cpu(local_event_data->SwitchDevHandle) == 4786 switch_handle) { 4787 dewtprintk(ioc, 4788 ioc_info(ioc, "setting ignoring flag for switch event\n")); 4789 
fw_event->ignore = 1; 4790 } 4791 } 4792 } 4793 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 4794 } 4795 4796 /** 4797 * _scsih_set_volume_delete_flag - setting volume delete flag 4798 * @ioc: per adapter object 4799 * @handle: device handle 4800 * 4801 * This returns nothing. 4802 */ 4803 static void 4804 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 4805 { 4806 struct _raid_device *raid_device; 4807 struct MPT3SAS_TARGET *sas_target_priv_data; 4808 unsigned long flags; 4809 4810 spin_lock_irqsave(&ioc->raid_device_lock, flags); 4811 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 4812 if (raid_device && raid_device->starget && 4813 raid_device->starget->hostdata) { 4814 sas_target_priv_data = 4815 raid_device->starget->hostdata; 4816 sas_target_priv_data->deleted = 1; 4817 dewtprintk(ioc, 4818 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n", 4819 handle, (u64)raid_device->wwid)); 4820 } 4821 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 4822 } 4823 4824 /** 4825 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume 4826 * @handle: input handle 4827 * @a: handle for volume a 4828 * @b: handle for volume b 4829 * 4830 * IR firmware only supports two raid volumes. The purpose of this 4831 * routine is to set the volume handle in either a or b. When the given 4832 * input handle is non-zero, or when a and b have not been set before. 4833 */ 4834 static void 4835 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) 4836 { 4837 if (!handle || handle == *a || handle == *b) 4838 return; 4839 if (!*a) 4840 *a = handle; 4841 else if (!*b) 4842 *b = handle; 4843 } 4844 4845 /** 4846 * _scsih_check_ir_config_unhide_events - check for UNHIDE events 4847 * @ioc: per adapter object 4848 * @event_data: the event data payload 4849 * Context: interrupt time. 4850 * 4851 * This routine will send target reset to volume, followed by target 4852 * resets to the PDs. 
 * This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold the (at most two) volume handles getting a target reset */
	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/*
			 * PD belongs to a volume being reset above: queue its
			 * TR to run after the volume target reset completes.
			 * NOTE(review): BUG_ON on GFP_ATOMIC allocation
			 * failure crashes the kernel — consider a graceful
			 * fallback; kept as-is here.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
			    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}


/**
 * _scsih_check_volume_delete_events - set delete flag for volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This will handle the case when the cable connected to entire volume is
 * pulled. We will take care of setting the deleted flag so normal IO will
 * not be sent.
 */
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrVolume_t *event_data)
{
	u32 state;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;
	state = le32_to_cpu(event_data->NewValue);
	/* missing or failed volumes can no longer accept IO */
	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
	    MPI2_RAID_VOL_STATE_FAILED)
		_scsih_set_volume_delete_flag(ioc,
		    le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
4960 */ 4961 static void 4962 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, 4963 Mpi2EventDataTemperature_t *event_data) 4964 { 4965 u32 doorbell; 4966 if (ioc->temp_sensors_count >= event_data->SensorNum) { 4967 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", 4968 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ", 4969 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ", 4970 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ", 4971 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ", 4972 event_data->SensorNum); 4973 ioc_err(ioc, "Current Temp In Celsius: %d\n", 4974 event_data->CurrentTemperature); 4975 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { 4976 doorbell = mpt3sas_base_get_iocstate(ioc, 0); 4977 if ((doorbell & MPI2_IOC_STATE_MASK) == 4978 MPI2_IOC_STATE_FAULT) { 4979 mpt3sas_print_fault_code(ioc, 4980 doorbell & MPI2_DOORBELL_DATA_MASK); 4981 } else if ((doorbell & MPI2_IOC_STATE_MASK) == 4982 MPI2_IOC_STATE_COREDUMP) { 4983 mpt3sas_print_coredump_info(ioc, 4984 doorbell & MPI2_DOORBELL_DATA_MASK); 4985 } 4986 } 4987 } 4988 } 4989 4990 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) 4991 { 4992 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; 4993 4994 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) 4995 return 0; 4996 4997 if (pending) 4998 return test_and_set_bit(0, &priv->ata_command_pending); 4999 5000 clear_bit(0, &priv->ata_command_pending); 5001 return 0; 5002 } 5003 5004 /** 5005 * _scsih_flush_running_cmds - completing outstanding commands. 5006 * @ioc: per adapter object 5007 * 5008 * The flushing out of all pending scmd commands following host reset, 5009 * where all IO is dropped to the floor. 
5010 */ 5011 static void 5012 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) 5013 { 5014 struct scsi_cmnd *scmd; 5015 struct scsiio_tracker *st; 5016 u16 smid; 5017 int count = 0; 5018 5019 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 5020 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 5021 if (!scmd) 5022 continue; 5023 count++; 5024 _scsih_set_satl_pending(scmd, false); 5025 st = scsi_cmd_priv(scmd); 5026 mpt3sas_base_clear_st(ioc, st); 5027 scsi_dma_unmap(scmd); 5028 if (ioc->pci_error_recovery || ioc->remove_host) 5029 scmd->result = DID_NO_CONNECT << 16; 5030 else 5031 scmd->result = DID_RESET << 16; 5032 scmd->scsi_done(scmd); 5033 } 5034 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); 5035 } 5036 5037 /** 5038 * _scsih_setup_eedp - setup MPI request for EEDP transfer 5039 * @ioc: per adapter object 5040 * @scmd: pointer to scsi command object 5041 * @mpi_request: pointer to the SCSI_IO request message frame 5042 * 5043 * Supporting protection 1 and 3. 
5044 */ 5045 static void 5046 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 5047 Mpi25SCSIIORequest_t *mpi_request) 5048 { 5049 u16 eedp_flags; 5050 unsigned char prot_op = scsi_get_prot_op(scmd); 5051 unsigned char prot_type = scsi_get_prot_type(scmd); 5052 Mpi25SCSIIORequest_t *mpi_request_3v = 5053 (Mpi25SCSIIORequest_t *)mpi_request; 5054 5055 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) 5056 return; 5057 5058 if (prot_op == SCSI_PROT_READ_STRIP) 5059 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; 5060 else if (prot_op == SCSI_PROT_WRITE_INSERT) 5061 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 5062 else 5063 return; 5064 5065 switch (prot_type) { 5066 case SCSI_PROT_DIF_TYPE1: 5067 case SCSI_PROT_DIF_TYPE2: 5068 5069 /* 5070 * enable ref/guard checking 5071 * auto increment ref tag 5072 */ 5073 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 5074 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 5075 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 5076 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 5077 cpu_to_be32(t10_pi_ref_tag(scmd->request)); 5078 break; 5079 5080 case SCSI_PROT_DIF_TYPE3: 5081 5082 /* 5083 * enable guard checking 5084 */ 5085 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 5086 5087 break; 5088 } 5089 5090 mpi_request_3v->EEDPBlockSize = 5091 cpu_to_le16(scmd->device->sector_size); 5092 5093 if (ioc->is_gen35_ioc) 5094 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; 5095 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); 5096 } 5097 5098 /** 5099 * _scsih_eedp_error_handling - return sense code for EEDP errors 5100 * @scmd: pointer to scsi command object 5101 * @ioc_status: ioc status 5102 */ 5103 static void 5104 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) 5105 { 5106 u8 ascq; 5107 5108 switch (ioc_status) { 5109 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: 5110 ascq = 0x01; 5111 break; 5112 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: 5113 ascq = 0x02; 5114 break; 5115 case 
	    MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}
	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
	set_host_byte(scmd, DID_ABORT);
}

/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success. If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
 */
static int
scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _raid_device *raid_device;
	struct request *rq = scmd->request;
	int class;
	Mpi25SCSIIORequest_t *mpi_request;
	struct _pcie_device *pcie_device = NULL;
	u32 mpi_control;
	u16 smid;
	u16 handle;

	if (ioc->logging_level & MPT_DEBUG_SCSI)
		scsi_print_command(scmd);

	/* device was never set up (or already torn down) - fail fast */
	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* invalid device handle */
	handle = sas_target_priv_data->handle;
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}


	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
		/* host recovery or link resets sent via IOCTLs */
		return SCSI_MLQUEUE_HOST_BUSY;
	} else if (sas_target_priv_data->deleted) {
		/* device has been deleted */
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	} else if (sas_target_priv_data->tm_busy ||
		   sas_device_priv_data->block) {
		/* device busy with task management */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * Bug work around for firmware SATL handling.  The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point
	 */
	do {
		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
			return SCSI_MLQUEUE_DEVICE_BUSY;
	} while (_scsih_set_satl_pending(scmd, true));

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;

	/* set tags */
	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* NCQ Prio supported, make sure control indicated high priority */
	if (sas_device_priv_data->ncq_prio_enable) {
		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (class == IOPRIO_CLASS_RT)
			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
	}
	/* Make sure Device is not raid volume.
	 * We do not expose raid functionality to upper layer for warpdrive.
	 */
	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
	    && !scsih_is_nvme(&scmd->device->sdev_gendev))
	    && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		/* undo the SATL-pending flag taken above before bailing out */
		_scsih_set_satl_pending(scmd, false);
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	_scsih_setup_eedp(ioc, scmd, mpi_request);

	if (scmd->cmd_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * NOTE(review): this assignment is immediately overwritten by the
	 * if/else below on both paths - it looks redundant; kept as-is.
	 */
	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
	mpi_request->Control = cpu_to_le32(mpi_control);
	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

	if (mpi_request->DataLength) {
		pcie_device = sas_target_priv_data->pcie_dev;
		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
			/* SG build failed: release the frame and the flag */
			mpt3sas_base_free_smid(ioc, smid);
			_scsih_set_satl_pending(scmd, false);
			goto out;
		}
	} else
		ioc->build_zero_len_sge(ioc,
		    &mpi_request->SGL);

	raid_device = sas_target_priv_data->raid_device;
	if (raid_device && raid_device->direct_io_enabled)
		mpt3sas_setup_direct_io(ioc, scmd,
		    raid_device, mpi_request);

	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
			ioc->put_smid_fast_path(ioc, smid, handle);
		} else
			ioc->put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->DevHandle));
	} else
		ioc->put_smid_default(ioc, smid);
	return 0;

 out:
	return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
 * @sense_buffer: sense data returned by target
 * @data: normalized skey/asc/ascq
 *
 * Response codes >= 0x72 are descriptor format; anything lower is
 * treated as fixed format, where skey/asc/ascq live at different offsets.
 */
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
{
	if ((sense_buffer[0] & 0x7F) >= 0x72) {
		/* descriptor format */
		data->skey = sense_buffer[1] & 0x0F;
		data->asc = sense_buffer[2];
		data->ascq = sense_buffer[3];
	} else {
		/* fixed format */
		data->skey = sense_buffer[2] & 0x0F;
		data->asc = sense_buffer[12];
		data->ascq = sense_buffer[13];
	}
}

/**
 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 *
 * Logs a human-readable decode of a failed SCSI_IO reply.
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* NOTE(review): log_info 0x31170000 is deliberately not logged -
	 * presumably a benign/noisy firmware loginfo; TODO confirm meaning */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/*
	 * Build the scsi_state description string in ioc->tmp_string;
	 * when no state bits are set, point at a literal instead.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* identify the device: RAID volume, NVMe (PCIe) device, or SAS */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}

/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 *
 * Sends a SCSI Enclosure Processor request to set the PREDICTED_FAULT
 * slot status (addressed by device handle) for the given device.
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	/* remember the LED state so it can be turned off on removal */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	sas_device_put(sas_device);
}

/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to be turned off
 * Context: process
 *
 * Clears the slot status via a SCSI Enclosure Processor request,
 * addressed by enclosure/slot (the device handle may already be gone).
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}

/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Queues a firmware-event work item so the LED is lit from process
 * context (the SEP request cannot be issued from interrupt context).
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: running in interrupt context */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc,
			"failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	/* asc 0x5D: failure prediction threshold exceeded */
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}

/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame means the firmware reported plain success */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	    != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		st->direct_io = 0;
		st->scmd = scmd;
		/* resubmit the same smid, this time targeted at the volume */
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* only snoop the first completion per device for the TLR check */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * A zero-byte underrun with BUSY/RESERVATION CONFLICT/TASK SET FULL
	 * is really just that SCSI status - treat the IOC status as success
	 * so the status code below is what the mid-layer sees.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		    ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		    (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		    (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* translate IOC status into a SCSI mid-layer result */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/*
		 * NOTE(review): log_info 0x31110630 appears to identify a
		 * specific firmware termination reason - after 2 retries the
		 * device is taken offline; TODO confirm against loginfo docs.
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		/* valid sense data already explains the short transfer */
		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/*
			 * Zero-byte REPORT LUNS: synthesize an
			 * ILLEGAL_REQUEST/invalid-opcode (asc 0x20) check
			 * condition so the mid-layer gets a definite answer.
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
			    0x20, 0);
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scmd->scsi_done(scmd);
	return 0;
}

/**
 * _scsih_update_vphys_after_reset - update the Port's
 * vphys_list after reset
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is confirmed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equal
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
					    &ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirty.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
6117 */ 6118 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY; 6119 found = 1; 6120 break; 6121 } 6122 if (found) 6123 break; 6124 } 6125 } 6126 out: 6127 kfree(sas_iounit_pg0); 6128 } 6129 6130 /** 6131 * _scsih_get_port_table_after_reset - Construct temporary port table 6132 * @ioc: per adapter object 6133 * @port_table: address where port table needs to be constructed 6134 * 6135 * return number of HBA port entries available after reset. 6136 */ 6137 static int 6138 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc, 6139 struct hba_port *port_table) 6140 { 6141 u16 sz, ioc_status; 6142 int i, j; 6143 Mpi2ConfigReply_t mpi_reply; 6144 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 6145 u16 attached_handle; 6146 u64 attached_sas_addr; 6147 u8 found = 0, port_count = 0, port_id; 6148 6149 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys 6150 * sizeof(Mpi2SasIOUnit0PhyData_t)); 6151 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 6152 if (!sas_iounit_pg0) { 6153 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6154 __FILE__, __LINE__, __func__); 6155 return port_count; 6156 } 6157 6158 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 6159 sas_iounit_pg0, sz)) != 0) 6160 goto out; 6161 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 6162 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 6163 goto out; 6164 for (i = 0; i < ioc->sas_hba.num_phys; i++) { 6165 found = 0; 6166 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < 6167 MPI2_SAS_NEG_LINK_RATE_1_5) 6168 continue; 6169 attached_handle = 6170 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); 6171 if (_scsih_get_sas_address( 6172 ioc, attached_handle, &attached_sas_addr) != 0) { 6173 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6174 __FILE__, __LINE__, __func__); 6175 continue; 6176 } 6177 6178 for (j = 0; j < port_count; j++) { 6179 port_id = sas_iounit_pg0->PhyData[i].Port; 6180 if (port_table[j].port_id == port_id && 6181 port_table[j].sas_address == attached_sas_addr) { 
6182 port_table[j].phy_mask |= (1 << i); 6183 found = 1; 6184 break; 6185 } 6186 } 6187 6188 if (found) 6189 continue; 6190 6191 port_id = sas_iounit_pg0->PhyData[i].Port; 6192 port_table[port_count].port_id = port_id; 6193 port_table[port_count].phy_mask = (1 << i); 6194 port_table[port_count].sas_address = attached_sas_addr; 6195 port_count++; 6196 } 6197 out: 6198 kfree(sas_iounit_pg0); 6199 return port_count; 6200 } 6201 6202 enum hba_port_matched_codes { 6203 NOT_MATCHED = 0, 6204 MATCHED_WITH_ADDR_AND_PHYMASK, 6205 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, 6206 MATCHED_WITH_ADDR_AND_SUBPHYMASK, 6207 MATCHED_WITH_ADDR, 6208 }; 6209 6210 /** 6211 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry 6212 * from HBA port table 6213 * @ioc: per adapter object 6214 * @port_entry: hba port entry from temporary port table which needs to be 6215 * searched for matched entry in the HBA port table 6216 * @matched_port_entry: save matched hba port entry here 6217 * @count: count of matched entries 6218 * 6219 * return type of matched entry found. 
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	/* Only dirty (not-yet-reclaimed) entries are candidates. */
	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* Exact match on address and phy mask wins outright. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* Address + overlapping phy mask + same Port ID. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* Address + overlapping phy mask (weaker; do not demote). */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* Address only (weakest; count ambiguity in lcount). */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code == MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}

/**
 * _scsih_del_phy_part_of_anther_port - remove phy if it
 *	is a part of another port
 *@ioc: per adapter object
 *@port_table: port table after reset
 *@index: hba port entry index
 *@port_count: number of ports available after host reset
 *@offset: HBA phy bit offset
 *
 */
static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table,
	int index, u8 port_count, int offset)
{
	struct _sas_node *sas_node = &ioc->sas_hba;
	u32 i, found = 0;

	for (i = 0; i < port_count; i++) {
		if (i == index)
			continue;

		/* Phy now belongs to some other port; detach it here. */
		if (port_table[i].phy_mask & (1 << offset)) {
			mpt3sas_transport_del_phy_from_an_existing_port(
			    ioc, sas_node, &sas_node->phy[offset]);
			found = 1;
			break;
		}
	}
	/* Not claimed elsewhere: keep the phy with this entry. */
	if (!found)
		port_table[index].phy_mask |= (1 << offset);
}

/**
 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
 *						right port
 *@ioc: per adapter object
 *@hba_port_entry: hba port table entry
 *@port_table: temporary port table
 *@index: hba port entry index
 *@port_count: number of ports available after host reset
 *
 */
static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *hba_port_entry, struct hba_port *port_table,
	int index, int port_count)
{
	u32 phy_mask, offset = 0;
	struct _sas_node *sas_node = &ioc->sas_hba;

	/* XOR isolates phys that changed membership after the reset. */
	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;

	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
		if (phy_mask & (1 << offset)) {
			if (!(port_table[index].phy_mask & (1 << offset))) {
				/* Phy left this port after the reset. */
				_scsih_del_phy_part_of_anther_port(
				    ioc, port_table, index, port_count,
				    offset);
				continue;
			}
			/* Phy joined this port; detach from any old port
			 * before re-attaching.
			 */
			if (sas_node->phy[offset].phy_belongs_to_port)
				mpt3sas_transport_del_phy_from_an_existing_port(
				    ioc, sas_node, &sas_node->phy[offset]);
			mpt3sas_transport_add_phy_to_an_existing_port(
			    ioc, sas_node, &sas_node->phy[offset],
			    hba_port_entry->sas_address,
			    hba_port_entry);
		}
	}
}

/**
 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;
	struct virtual_phy *vphy, *vphy_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
				drsprintk(ioc, ioc_info(ioc,
				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
				    vphy, port->port_id,
				    vphy->phy_mask));
				port->vphys_mask &= ~vphy->phy_mask;
				list_del(&vphy->list);
				kfree(vphy);
			}
		}
		/* A port with neither vphys nor an address is stale. */
		if (!port->vphys_mask && !port->sas_address)
			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
	}
}

/**
 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
 *					after host reset
 *@ioc: per adapter object
 *
 */
static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		/* Keep entries reclaimed after reset and freshly added ones. */
		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
		    port->flags & HBA_PORT_FLAG_NEW_PORT)
			continue;

		drsprintk(ioc, ioc_info(ioc,
		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
		    port, port->port_id, port->phy_mask));
		list_del(&port->list);
		kfree(port);
	}
}

/**
 * _scsih_sas_port_refresh - Update HBA port table after host reset
 * @ioc: per adapter object
 */
static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) 6414 { 6415 u32 port_count = 0; 6416 struct hba_port *port_table; 6417 struct hba_port *port_table_entry; 6418 struct hba_port *port_entry = NULL; 6419 int i, j, count = 0, lcount = 0; 6420 int ret; 6421 u64 sas_addr; 6422 6423 drsprintk(ioc, ioc_info(ioc, 6424 "updating ports for sas_host(0x%016llx)\n", 6425 (unsigned long long)ioc->sas_hba.sas_address)); 6426 6427 port_table = kcalloc(ioc->sas_hba.num_phys, 6428 sizeof(struct hba_port), GFP_KERNEL); 6429 if (!port_table) 6430 return; 6431 6432 port_count = _scsih_get_port_table_after_reset(ioc, port_table); 6433 if (!port_count) 6434 return; 6435 6436 drsprintk(ioc, ioc_info(ioc, "New Port table\n")); 6437 for (j = 0; j < port_count; j++) 6438 drsprintk(ioc, ioc_info(ioc, 6439 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6440 port_table[j].port_id, 6441 port_table[j].phy_mask, port_table[j].sas_address)); 6442 6443 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) 6444 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6445 6446 drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); 6447 port_table_entry = NULL; 6448 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6449 drsprintk(ioc, ioc_info(ioc, 6450 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6451 port_table_entry->port_id, 6452 port_table_entry->phy_mask, 6453 port_table_entry->sas_address)); 6454 } 6455 6456 for (j = 0; j < port_count; j++) { 6457 ret = _scsih_look_and_get_matched_port_entry(ioc, 6458 &port_table[j], &port_entry, &count); 6459 if (!port_entry) { 6460 drsprintk(ioc, ioc_info(ioc, 6461 "No Matched entry for sas_addr(0x%16llx), Port:%d\n", 6462 port_table[j].sas_address, 6463 port_table[j].port_id)); 6464 continue; 6465 } 6466 6467 switch (ret) { 6468 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: 6469 case MATCHED_WITH_ADDR_AND_SUBPHYMASK: 6470 _scsih_add_or_del_phys_from_existing_port(ioc, 6471 port_entry, port_table, j, port_count); 6472 
break; 6473 case MATCHED_WITH_ADDR: 6474 sas_addr = port_table[j].sas_address; 6475 for (i = 0; i < port_count; i++) { 6476 if (port_table[i].sas_address == sas_addr) 6477 lcount++; 6478 } 6479 6480 if (count > 1 || lcount > 1) 6481 port_entry = NULL; 6482 else 6483 _scsih_add_or_del_phys_from_existing_port(ioc, 6484 port_entry, port_table, j, port_count); 6485 } 6486 6487 if (!port_entry) 6488 continue; 6489 6490 if (port_entry->port_id != port_table[j].port_id) 6491 port_entry->port_id = port_table[j].port_id; 6492 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; 6493 port_entry->phy_mask = port_table[j].phy_mask; 6494 } 6495 6496 port_table_entry = NULL; 6497 } 6498 6499 /** 6500 * _scsih_alloc_vphy - allocate virtual_phy object 6501 * @ioc: per adapter object 6502 * @port_id: Port ID number 6503 * @phy_num: HBA Phy number 6504 * 6505 * Returns allocated virtual_phy object. 6506 */ 6507 static struct virtual_phy * 6508 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) 6509 { 6510 struct virtual_phy *vphy; 6511 struct hba_port *port; 6512 6513 port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6514 if (!port) 6515 return NULL; 6516 6517 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); 6518 if (!vphy) { 6519 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); 6520 if (!vphy) 6521 return NULL; 6522 6523 if (!port->vphys_mask) 6524 INIT_LIST_HEAD(&port->vphys_list); 6525 6526 /* 6527 * Enable bit corresponding to HBA phy number on its 6528 * parent hba_port object's vphys_mask field. 
6529 */ 6530 port->vphys_mask |= (1 << phy_num); 6531 vphy->phy_mask |= (1 << phy_num); 6532 6533 list_add_tail(&vphy->list, &port->vphys_list); 6534 6535 ioc_info(ioc, 6536 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", 6537 vphy, port->port_id, phy_num); 6538 } 6539 return vphy; 6540 } 6541 6542 /** 6543 * _scsih_sas_host_refresh - refreshing sas host object contents 6544 * @ioc: per adapter object 6545 * Context: user 6546 * 6547 * During port enable, fw will send topology events for every device. Its 6548 * possible that the handles may change from the previous setting, so this 6549 * code keeping handles updating if changed. 6550 */ 6551 static void 6552 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) 6553 { 6554 u16 sz; 6555 u16 ioc_status; 6556 int i; 6557 Mpi2ConfigReply_t mpi_reply; 6558 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 6559 u16 attached_handle; 6560 u8 link_rate, port_id; 6561 struct hba_port *port; 6562 Mpi2SasPhyPage0_t phy_pg0; 6563 6564 dtmprintk(ioc, 6565 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", 6566 (u64)ioc->sas_hba.sas_address)); 6567 6568 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys 6569 * sizeof(Mpi2SasIOUnit0PhyData_t)); 6570 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 6571 if (!sas_iounit_pg0) { 6572 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6573 __FILE__, __LINE__, __func__); 6574 return; 6575 } 6576 6577 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 6578 sas_iounit_pg0, sz)) != 0) 6579 goto out; 6580 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 6581 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 6582 goto out; 6583 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 6584 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; 6585 if (i == 0) 6586 ioc->sas_hba.handle = le16_to_cpu( 6587 sas_iounit_pg0->PhyData[0].ControllerDevHandle); 6588 port_id = sas_iounit_pg0->PhyData[i].Port; 6589 if (!(mpt3sas_get_port_by_id(ioc, port_id, 
0))) { 6590 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); 6591 if (!port) 6592 goto out; 6593 6594 port->port_id = port_id; 6595 ioc_info(ioc, 6596 "hba_port entry: %p, port: %d is added to hba_port list\n", 6597 port, port->port_id); 6598 if (ioc->shost_recovery) 6599 port->flags = HBA_PORT_FLAG_NEW_PORT; 6600 list_add_tail(&port->list, &ioc->port_table_list); 6601 } 6602 /* 6603 * Check whether current Phy belongs to HBA vSES device or not. 6604 */ 6605 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) & 6606 MPI2_SAS_DEVICE_INFO_SEP && 6607 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { 6608 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, 6609 &phy_pg0, i))) { 6610 ioc_err(ioc, 6611 "failure at %s:%d/%s()!\n", 6612 __FILE__, __LINE__, __func__); 6613 goto out; 6614 } 6615 if (!(le32_to_cpu(phy_pg0.PhyInfo) & 6616 MPI2_SAS_PHYINFO_VIRTUAL_PHY)) 6617 continue; 6618 /* 6619 * Allocate a virtual_phy object for vSES device, if 6620 * this vSES device is hot added. 6621 */ 6622 if (!_scsih_alloc_vphy(ioc, port_id, i)) 6623 goto out; 6624 ioc->sas_hba.phy[i].hba_vphy = 1; 6625 } 6626 6627 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; 6628 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. 
6629 AttachedDevHandle); 6630 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) 6631 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; 6632 ioc->sas_hba.phy[i].port = 6633 mpt3sas_get_port_by_id(ioc, port_id, 0); 6634 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, 6635 attached_handle, i, link_rate, 6636 ioc->sas_hba.phy[i].port); 6637 } 6638 out: 6639 kfree(sas_iounit_pg0); 6640 } 6641 6642 /** 6643 * _scsih_sas_host_add - create sas host object 6644 * @ioc: per adapter object 6645 * 6646 * Creating host side data object, stored in ioc->sas_hba 6647 */ 6648 static void 6649 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) 6650 { 6651 int i; 6652 Mpi2ConfigReply_t mpi_reply; 6653 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 6654 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 6655 Mpi2SasPhyPage0_t phy_pg0; 6656 Mpi2SasDevicePage0_t sas_device_pg0; 6657 Mpi2SasEnclosurePage0_t enclosure_pg0; 6658 u16 ioc_status; 6659 u16 sz; 6660 u8 device_missing_delay; 6661 u8 num_phys, port_id; 6662 struct hba_port *port; 6663 6664 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 6665 if (!num_phys) { 6666 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6667 __FILE__, __LINE__, __func__); 6668 return; 6669 } 6670 ioc->sas_hba.phy = kcalloc(num_phys, 6671 sizeof(struct _sas_phy), GFP_KERNEL); 6672 if (!ioc->sas_hba.phy) { 6673 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6674 __FILE__, __LINE__, __func__); 6675 goto out; 6676 } 6677 ioc->sas_hba.num_phys = num_phys; 6678 6679 /* sas_iounit page 0 */ 6680 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * 6681 sizeof(Mpi2SasIOUnit0PhyData_t)); 6682 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 6683 if (!sas_iounit_pg0) { 6684 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6685 __FILE__, __LINE__, __func__); 6686 return; 6687 } 6688 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 6689 sas_iounit_pg0, sz))) { 6690 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6691 __FILE__, __LINE__, __func__); 6692 goto out; 
6693 } 6694 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6695 MPI2_IOCSTATUS_MASK; 6696 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6697 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6698 __FILE__, __LINE__, __func__); 6699 goto out; 6700 } 6701 6702 /* sas_iounit page 1 */ 6703 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * 6704 sizeof(Mpi2SasIOUnit1PhyData_t)); 6705 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 6706 if (!sas_iounit_pg1) { 6707 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6708 __FILE__, __LINE__, __func__); 6709 goto out; 6710 } 6711 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 6712 sas_iounit_pg1, sz))) { 6713 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6714 __FILE__, __LINE__, __func__); 6715 goto out; 6716 } 6717 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6718 MPI2_IOCSTATUS_MASK; 6719 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6720 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6721 __FILE__, __LINE__, __func__); 6722 goto out; 6723 } 6724 6725 ioc->io_missing_delay = 6726 sas_iounit_pg1->IODeviceMissingDelay; 6727 device_missing_delay = 6728 sas_iounit_pg1->ReportDeviceMissingDelay; 6729 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 6730 ioc->device_missing_delay = (device_missing_delay & 6731 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 6732 else 6733 ioc->device_missing_delay = device_missing_delay & 6734 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 6735 6736 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; 6737 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 6738 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, 6739 i))) { 6740 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6741 __FILE__, __LINE__, __func__); 6742 goto out; 6743 } 6744 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6745 MPI2_IOCSTATUS_MASK; 6746 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6747 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6748 __FILE__, __LINE__, __func__); 6749 goto out; 6750 } 6751 6752 if (i == 0) 
6753 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> 6754 PhyData[0].ControllerDevHandle); 6755 6756 port_id = sas_iounit_pg0->PhyData[i].Port; 6757 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) { 6758 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL); 6759 if (!port) 6760 goto out; 6761 6762 port->port_id = port_id; 6763 ioc_info(ioc, 6764 "hba_port entry: %p, port: %d is added to hba_port list\n", 6765 port, port->port_id); 6766 list_add_tail(&port->list, 6767 &ioc->port_table_list); 6768 } 6769 6770 /* 6771 * Check whether current Phy belongs to HBA vSES device or not. 6772 */ 6773 if ((le32_to_cpu(phy_pg0.PhyInfo) & 6774 MPI2_SAS_PHYINFO_VIRTUAL_PHY) && 6775 (phy_pg0.NegotiatedLinkRate >> 4) >= 6776 MPI2_SAS_NEG_LINK_RATE_1_5) { 6777 /* 6778 * Allocate a virtual_phy object for vSES device. 6779 */ 6780 if (!_scsih_alloc_vphy(ioc, port_id, i)) 6781 goto out; 6782 ioc->sas_hba.phy[i].hba_vphy = 1; 6783 } 6784 6785 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; 6786 ioc->sas_hba.phy[i].phy_id = i; 6787 ioc->sas_hba.phy[i].port = 6788 mpt3sas_get_port_by_id(ioc, port_id, 0); 6789 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], 6790 phy_pg0, ioc->sas_hba.parent_dev); 6791 } 6792 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 6793 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { 6794 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6795 __FILE__, __LINE__, __func__); 6796 goto out; 6797 } 6798 ioc->sas_hba.enclosure_handle = 6799 le16_to_cpu(sas_device_pg0.EnclosureHandle); 6800 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 6801 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 6802 ioc->sas_hba.handle, 6803 (u64)ioc->sas_hba.sas_address, 6804 ioc->sas_hba.num_phys); 6805 6806 if (ioc->sas_hba.enclosure_handle) { 6807 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 6808 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 6809 ioc->sas_hba.enclosure_handle))) 6810 
ioc->sas_hba.enclosure_logical_id = 6811 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 6812 } 6813 6814 out: 6815 kfree(sas_iounit_pg1); 6816 kfree(sas_iounit_pg0); 6817 } 6818 6819 /** 6820 * _scsih_expander_add - creating expander object 6821 * @ioc: per adapter object 6822 * @handle: expander handle 6823 * 6824 * Creating expander object, stored in ioc->sas_expander_list. 6825 * 6826 * Return: 0 for success, else error. 6827 */ 6828 static int 6829 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) 6830 { 6831 struct _sas_node *sas_expander; 6832 struct _enclosure_node *enclosure_dev; 6833 Mpi2ConfigReply_t mpi_reply; 6834 Mpi2ExpanderPage0_t expander_pg0; 6835 Mpi2ExpanderPage1_t expander_pg1; 6836 u32 ioc_status; 6837 u16 parent_handle; 6838 u64 sas_address, sas_address_parent = 0; 6839 int i; 6840 unsigned long flags; 6841 struct _sas_port *mpt3sas_port = NULL; 6842 u8 port_id; 6843 6844 int rc = 0; 6845 6846 if (!handle) 6847 return -1; 6848 6849 if (ioc->shost_recovery || ioc->pci_error_recovery) 6850 return -1; 6851 6852 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 6853 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { 6854 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6855 __FILE__, __LINE__, __func__); 6856 return -1; 6857 } 6858 6859 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6860 MPI2_IOCSTATUS_MASK; 6861 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6862 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6863 __FILE__, __LINE__, __func__); 6864 return -1; 6865 } 6866 6867 /* handle out of order topology events */ 6868 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); 6869 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) 6870 != 0) { 6871 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6872 __FILE__, __LINE__, __func__); 6873 return -1; 6874 } 6875 6876 port_id = expander_pg0.PhysicalPort; 6877 if (sas_address_parent != ioc->sas_hba.sas_address) { 6878 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6879 
sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6880 sas_address_parent, 6881 mpt3sas_get_port_by_id(ioc, port_id, 0)); 6882 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6883 if (!sas_expander) { 6884 rc = _scsih_expander_add(ioc, parent_handle); 6885 if (rc != 0) 6886 return rc; 6887 } 6888 } 6889 6890 spin_lock_irqsave(&ioc->sas_node_lock, flags); 6891 sas_address = le64_to_cpu(expander_pg0.SASAddress); 6892 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 6893 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 6894 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 6895 6896 if (sas_expander) 6897 return 0; 6898 6899 sas_expander = kzalloc(sizeof(struct _sas_node), 6900 GFP_KERNEL); 6901 if (!sas_expander) { 6902 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6903 __FILE__, __LINE__, __func__); 6904 return -1; 6905 } 6906 6907 sas_expander->handle = handle; 6908 sas_expander->num_phys = expander_pg0.NumPhys; 6909 sas_expander->sas_address_parent = sas_address_parent; 6910 sas_expander->sas_address = sas_address; 6911 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6912 if (!sas_expander->port) { 6913 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6914 __FILE__, __LINE__, __func__); 6915 rc = -1; 6916 goto out_fail; 6917 } 6918 6919 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 6920 handle, parent_handle, 6921 (u64)sas_expander->sas_address, sas_expander->num_phys); 6922 6923 if (!sas_expander->num_phys) { 6924 rc = -1; 6925 goto out_fail; 6926 } 6927 sas_expander->phy = kcalloc(sas_expander->num_phys, 6928 sizeof(struct _sas_phy), GFP_KERNEL); 6929 if (!sas_expander->phy) { 6930 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6931 __FILE__, __LINE__, __func__); 6932 rc = -1; 6933 goto out_fail; 6934 } 6935 6936 INIT_LIST_HEAD(&sas_expander->sas_port_list); 6937 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, 6938 sas_address_parent, sas_expander->port); 6939 if 
(!mpt3sas_port) { 6940 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6941 __FILE__, __LINE__, __func__); 6942 rc = -1; 6943 goto out_fail; 6944 } 6945 sas_expander->parent_dev = &mpt3sas_port->rphy->dev; 6946 sas_expander->rphy = mpt3sas_port->rphy; 6947 6948 for (i = 0 ; i < sas_expander->num_phys ; i++) { 6949 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 6950 &expander_pg1, i, handle))) { 6951 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6952 __FILE__, __LINE__, __func__); 6953 rc = -1; 6954 goto out_fail; 6955 } 6956 sas_expander->phy[i].handle = handle; 6957 sas_expander->phy[i].phy_id = i; 6958 sas_expander->phy[i].port = 6959 mpt3sas_get_port_by_id(ioc, port_id, 0); 6960 6961 if ((mpt3sas_transport_add_expander_phy(ioc, 6962 &sas_expander->phy[i], expander_pg1, 6963 sas_expander->parent_dev))) { 6964 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6965 __FILE__, __LINE__, __func__); 6966 rc = -1; 6967 goto out_fail; 6968 } 6969 } 6970 6971 if (sas_expander->enclosure_handle) { 6972 enclosure_dev = 6973 mpt3sas_scsih_enclosure_find_by_handle(ioc, 6974 sas_expander->enclosure_handle); 6975 if (enclosure_dev) 6976 sas_expander->enclosure_logical_id = 6977 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 6978 } 6979 6980 _scsih_expander_node_add(ioc, sas_expander); 6981 return 0; 6982 6983 out_fail: 6984 6985 if (mpt3sas_port) 6986 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 6987 sas_address_parent, sas_expander->port); 6988 kfree(sas_expander); 6989 return rc; 6990 } 6991 6992 /** 6993 * mpt3sas_expander_remove - removing expander object 6994 * @ioc: per adapter object 6995 * @sas_address: expander sas_address 6996 * @port: hba port entry 6997 */ 6998 void 6999 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 7000 struct hba_port *port) 7001 { 7002 struct _sas_node *sas_expander; 7003 unsigned long flags; 7004 7005 if (ioc->shost_recovery) 7006 return; 7007 7008 if (!port) 7009 return; 7010 7011 
spin_lock_irqsave(&ioc->sas_node_lock, flags); 7012 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 7013 sas_address, port); 7014 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7015 if (sas_expander) 7016 _scsih_expander_node_remove(ioc, sas_expander); 7017 } 7018 7019 /** 7020 * _scsih_done - internal SCSI_IO callback handler. 7021 * @ioc: per adapter object 7022 * @smid: system request message index 7023 * @msix_index: MSIX table index supplied by the OS 7024 * @reply: reply message frame(lower 32bit addr) 7025 * 7026 * Callback handler when sending internal generated SCSI_IO. 7027 * The callback index passed is `ioc->scsih_cb_idx` 7028 * 7029 * Return: 1 meaning mf should be freed from _base_interrupt 7030 * 0 means the mf is freed from this function. 7031 */ 7032 static u8 7033 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) 7034 { 7035 MPI2DefaultReply_t *mpi_reply; 7036 7037 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 7038 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED) 7039 return 1; 7040 if (ioc->scsih_cmds.smid != smid) 7041 return 1; 7042 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE; 7043 if (mpi_reply) { 7044 memcpy(ioc->scsih_cmds.reply, mpi_reply, 7045 mpi_reply->MsgLength*4); 7046 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID; 7047 } 7048 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING; 7049 complete(&ioc->scsih_cmds.done); 7050 return 1; 7051 } 7052 7053 7054 7055 7056 #define MPT3_MAX_LUNS (255) 7057 7058 7059 /** 7060 * _scsih_check_access_status - check access flags 7061 * @ioc: per adapter object 7062 * @sas_address: sas address 7063 * @handle: sas device handle 7064 * @access_status: errors returned during discovery of the device 7065 * 7066 * Return: 0 for success, else failure 7067 */ 7068 static u8 7069 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 7070 u16 handle, u8 access_status) 7071 { 7072 u8 rc = 1; 7073 char *desc = NULL; 7074 7075 switch 
(access_status) { 7076 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS: 7077 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: 7078 rc = 0; 7079 break; 7080 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: 7081 desc = "sata capability failed"; 7082 break; 7083 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: 7084 desc = "sata affiliation conflict"; 7085 break; 7086 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: 7087 desc = "route not addressable"; 7088 break; 7089 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: 7090 desc = "smp error not addressable"; 7091 break; 7092 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: 7093 desc = "device blocked"; 7094 break; 7095 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: 7096 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: 7097 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: 7098 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG: 7099 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: 7100 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: 7101 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: 7102 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: 7103 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: 7104 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: 7105 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: 7106 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX: 7107 desc = "sata initialization failed"; 7108 break; 7109 default: 7110 desc = "unknown"; 7111 break; 7112 } 7113 7114 if (!rc) 7115 return 0; 7116 7117 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n", 7118 desc, (u64)sas_address, handle); 7119 return rc; 7120 } 7121 7122 /** 7123 * _scsih_check_device - checking device responsiveness 7124 * @ioc: per adapter object 7125 * @parent_sas_address: sas address of parent expander or sas host 7126 * @handle: attached device handle 7127 * @phy_number: phy number 7128 * @link_rate: new link rate 7129 */ 7130 static void 7131 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, 7132 u64 parent_sas_address, u16 handle, 
u8 phy_number, u8 link_rate) 7133 { 7134 Mpi2ConfigReply_t mpi_reply; 7135 Mpi2SasDevicePage0_t sas_device_pg0; 7136 struct _sas_device *sas_device = NULL; 7137 struct _enclosure_node *enclosure_dev = NULL; 7138 u32 ioc_status; 7139 unsigned long flags; 7140 u64 sas_address; 7141 struct scsi_target *starget; 7142 struct MPT3SAS_TARGET *sas_target_priv_data; 7143 u32 device_info; 7144 struct hba_port *port; 7145 7146 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7147 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) 7148 return; 7149 7150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 7151 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 7152 return; 7153 7154 /* wide port handling ~ we need only handle device once for the phy that 7155 * is matched in sas device page zero 7156 */ 7157 if (phy_number != sas_device_pg0.PhyNum) 7158 return; 7159 7160 /* check if this is end device */ 7161 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 7162 if (!(_scsih_is_end_device(device_info))) 7163 return; 7164 7165 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7166 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 7167 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); 7168 if (!port) 7169 goto out_unlock; 7170 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 7171 sas_address, port); 7172 7173 if (!sas_device) 7174 goto out_unlock; 7175 7176 if (unlikely(sas_device->handle != handle)) { 7177 starget = sas_device->starget; 7178 sas_target_priv_data = starget->hostdata; 7179 starget_printk(KERN_INFO, starget, 7180 "handle changed from(0x%04x) to (0x%04x)!!!\n", 7181 sas_device->handle, handle); 7182 sas_target_priv_data->handle = handle; 7183 sas_device->handle = handle; 7184 if (le16_to_cpu(sas_device_pg0.Flags) & 7185 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7186 sas_device->enclosure_level = 7187 sas_device_pg0.EnclosureLevel; 7188 memcpy(sas_device->connector_name, 7189 sas_device_pg0.ConnectorName, 4); 7190 
sas_device->connector_name[4] = '\0'; 7191 } else { 7192 sas_device->enclosure_level = 0; 7193 sas_device->connector_name[0] = '\0'; 7194 } 7195 7196 sas_device->enclosure_handle = 7197 le16_to_cpu(sas_device_pg0.EnclosureHandle); 7198 sas_device->is_chassis_slot_valid = 0; 7199 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, 7200 sas_device->enclosure_handle); 7201 if (enclosure_dev) { 7202 sas_device->enclosure_logical_id = 7203 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7204 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 7205 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 7206 sas_device->is_chassis_slot_valid = 1; 7207 sas_device->chassis_slot = 7208 enclosure_dev->pg0.ChassisSlot; 7209 } 7210 } 7211 } 7212 7213 /* check if device is present */ 7214 if (!(le16_to_cpu(sas_device_pg0.Flags) & 7215 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 7216 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n", 7217 handle); 7218 goto out_unlock; 7219 } 7220 7221 /* check if there were any issues with discovery */ 7222 if (_scsih_check_access_status(ioc, sas_address, handle, 7223 sas_device_pg0.AccessStatus)) 7224 goto out_unlock; 7225 7226 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7227 _scsih_ublock_io_device(ioc, sas_address, port); 7228 7229 if (sas_device) 7230 sas_device_put(sas_device); 7231 return; 7232 7233 out_unlock: 7234 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7235 if (sas_device) 7236 sas_device_put(sas_device); 7237 } 7238 7239 /** 7240 * _scsih_add_device - creating sas device object 7241 * @ioc: per adapter object 7242 * @handle: sas device handle 7243 * @phy_num: phy number end device attached to 7244 * @is_pd: is this hidden raid component 7245 * 7246 * Creating end device object, stored in ioc->sas_device_list. 7247 * 7248 * Return: 0 for success, non-zero for failure. 
7249 */ 7250 static int 7251 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 7252 u8 is_pd) 7253 { 7254 Mpi2ConfigReply_t mpi_reply; 7255 Mpi2SasDevicePage0_t sas_device_pg0; 7256 struct _sas_device *sas_device; 7257 struct _enclosure_node *enclosure_dev = NULL; 7258 u32 ioc_status; 7259 u64 sas_address; 7260 u32 device_info; 7261 u8 port_id; 7262 7263 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7264 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 7265 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7266 __FILE__, __LINE__, __func__); 7267 return -1; 7268 } 7269 7270 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7271 MPI2_IOCSTATUS_MASK; 7272 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7273 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7274 __FILE__, __LINE__, __func__); 7275 return -1; 7276 } 7277 7278 /* check if this is end device */ 7279 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 7280 if (!(_scsih_is_end_device(device_info))) 7281 return -1; 7282 set_bit(handle, ioc->pend_os_device_add); 7283 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 7284 7285 /* check if device is present */ 7286 if (!(le16_to_cpu(sas_device_pg0.Flags) & 7287 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 7288 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 7289 handle); 7290 return -1; 7291 } 7292 7293 /* check if there were any issues with discovery */ 7294 if (_scsih_check_access_status(ioc, sas_address, handle, 7295 sas_device_pg0.AccessStatus)) 7296 return -1; 7297 7298 port_id = sas_device_pg0.PhysicalPort; 7299 sas_device = mpt3sas_get_sdev_by_addr(ioc, 7300 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 7301 if (sas_device) { 7302 clear_bit(handle, ioc->pend_os_device_add); 7303 sas_device_put(sas_device); 7304 return -1; 7305 } 7306 7307 if (sas_device_pg0.EnclosureHandle) { 7308 enclosure_dev = 7309 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7310 le16_to_cpu(sas_device_pg0.EnclosureHandle)); 7311 if 
(enclosure_dev == NULL) 7312 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 7313 sas_device_pg0.EnclosureHandle); 7314 } 7315 7316 sas_device = kzalloc(sizeof(struct _sas_device), 7317 GFP_KERNEL); 7318 if (!sas_device) { 7319 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7320 __FILE__, __LINE__, __func__); 7321 return 0; 7322 } 7323 7324 kref_init(&sas_device->refcount); 7325 sas_device->handle = handle; 7326 if (_scsih_get_sas_address(ioc, 7327 le16_to_cpu(sas_device_pg0.ParentDevHandle), 7328 &sas_device->sas_address_parent) != 0) 7329 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7330 __FILE__, __LINE__, __func__); 7331 sas_device->enclosure_handle = 7332 le16_to_cpu(sas_device_pg0.EnclosureHandle); 7333 if (sas_device->enclosure_handle != 0) 7334 sas_device->slot = 7335 le16_to_cpu(sas_device_pg0.Slot); 7336 sas_device->device_info = device_info; 7337 sas_device->sas_address = sas_address; 7338 sas_device->phy = sas_device_pg0.PhyNum; 7339 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 7340 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 7341 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 7342 if (!sas_device->port) { 7343 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7344 __FILE__, __LINE__, __func__); 7345 goto out; 7346 } 7347 7348 if (le16_to_cpu(sas_device_pg0.Flags) 7349 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7350 sas_device->enclosure_level = 7351 sas_device_pg0.EnclosureLevel; 7352 memcpy(sas_device->connector_name, 7353 sas_device_pg0.ConnectorName, 4); 7354 sas_device->connector_name[4] = '\0'; 7355 } else { 7356 sas_device->enclosure_level = 0; 7357 sas_device->connector_name[0] = '\0'; 7358 } 7359 /* get enclosure_logical_id & chassis_slot*/ 7360 sas_device->is_chassis_slot_valid = 0; 7361 if (enclosure_dev) { 7362 sas_device->enclosure_logical_id = 7363 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7364 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 7365 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 7366 sas_device->is_chassis_slot_valid = 1; 7367 sas_device->chassis_slot = 7368 enclosure_dev->pg0.ChassisSlot; 7369 } 7370 } 7371 7372 /* get device name */ 7373 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 7374 7375 if (ioc->wait_for_discovery_to_complete) 7376 _scsih_sas_device_init_add(ioc, sas_device); 7377 else 7378 _scsih_sas_device_add(ioc, sas_device); 7379 7380 out: 7381 sas_device_put(sas_device); 7382 return 0; 7383 } 7384 7385 /** 7386 * _scsih_remove_device - removing sas device object 7387 * @ioc: per adapter object 7388 * @sas_device: the sas_device object 7389 */ 7390 static void 7391 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 7392 struct _sas_device *sas_device) 7393 { 7394 struct MPT3SAS_TARGET *sas_target_priv_data; 7395 7396 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && 7397 (sas_device->pfa_led_on)) { 7398 _scsih_turn_off_pfa_led(ioc, sas_device); 7399 sas_device->pfa_led_on = 0; 7400 } 7401 7402 dewtprintk(ioc, 7403 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", 7404 __func__, 7405 
sas_device->handle, (u64)sas_device->sas_address)); 7406 7407 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 7408 NULL, NULL)); 7409 7410 if (sas_device->starget && sas_device->starget->hostdata) { 7411 sas_target_priv_data = sas_device->starget->hostdata; 7412 sas_target_priv_data->deleted = 1; 7413 _scsih_ublock_io_device(ioc, sas_device->sas_address, 7414 sas_device->port); 7415 sas_target_priv_data->handle = 7416 MPT3SAS_INVALID_DEVICE_HANDLE; 7417 } 7418 7419 if (!ioc->hide_drives) 7420 mpt3sas_transport_port_remove(ioc, 7421 sas_device->sas_address, 7422 sas_device->sas_address_parent, 7423 sas_device->port); 7424 7425 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", 7426 sas_device->handle, (u64)sas_device->sas_address); 7427 7428 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); 7429 7430 dewtprintk(ioc, 7431 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", 7432 __func__, 7433 sas_device->handle, (u64)sas_device->sas_address)); 7434 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 7435 NULL, NULL)); 7436 } 7437 7438 /** 7439 * _scsih_sas_topology_change_event_debug - debug for topology event 7440 * @ioc: per adapter object 7441 * @event_data: event data payload 7442 * Context: user. 
7443 */ 7444 static void 7445 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 7446 Mpi2EventDataSasTopologyChangeList_t *event_data) 7447 { 7448 int i; 7449 u16 handle; 7450 u16 reason_code; 7451 u8 phy_number; 7452 char *status_str = NULL; 7453 u8 link_rate, prev_link_rate; 7454 7455 switch (event_data->ExpStatus) { 7456 case MPI2_EVENT_SAS_TOPO_ES_ADDED: 7457 status_str = "add"; 7458 break; 7459 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: 7460 status_str = "remove"; 7461 break; 7462 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: 7463 case 0: 7464 status_str = "responding"; 7465 break; 7466 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: 7467 status_str = "remove delay"; 7468 break; 7469 default: 7470 status_str = "unknown status"; 7471 break; 7472 } 7473 ioc_info(ioc, "sas topology change: (%s)\n", status_str); 7474 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ 7475 "start_phy(%02d), count(%d)\n", 7476 le16_to_cpu(event_data->ExpanderDevHandle), 7477 le16_to_cpu(event_data->EnclosureHandle), 7478 event_data->StartPhyNum, event_data->NumEntries); 7479 for (i = 0; i < event_data->NumEntries; i++) { 7480 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 7481 if (!handle) 7482 continue; 7483 phy_number = event_data->StartPhyNum + i; 7484 reason_code = event_data->PHY[i].PhyStatus & 7485 MPI2_EVENT_SAS_TOPO_RC_MASK; 7486 switch (reason_code) { 7487 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 7488 status_str = "target add"; 7489 break; 7490 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 7491 status_str = "target remove"; 7492 break; 7493 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: 7494 status_str = "delay target remove"; 7495 break; 7496 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 7497 status_str = "link rate change"; 7498 break; 7499 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: 7500 status_str = "target responding"; 7501 break; 7502 default: 7503 status_str = "unknown"; 7504 break; 7505 } 7506 link_rate = event_data->PHY[i].LinkRate >> 4; 
7507 prev_link_rate = event_data->PHY[i].LinkRate & 0xF; 7508 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ 7509 " link rate: new(0x%02x), old(0x%02x)\n", phy_number, 7510 handle, status_str, link_rate, prev_link_rate); 7511 7512 } 7513 } 7514 7515 /** 7516 * _scsih_sas_topology_change_event - handle topology changes 7517 * @ioc: per adapter object 7518 * @fw_event: The fw_event_work object 7519 * Context: user. 7520 * 7521 */ 7522 static int 7523 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, 7524 struct fw_event_work *fw_event) 7525 { 7526 int i; 7527 u16 parent_handle, handle; 7528 u16 reason_code; 7529 u8 phy_number, max_phys; 7530 struct _sas_node *sas_expander; 7531 u64 sas_address; 7532 unsigned long flags; 7533 u8 link_rate, prev_link_rate; 7534 struct hba_port *port; 7535 Mpi2EventDataSasTopologyChangeList_t *event_data = 7536 (Mpi2EventDataSasTopologyChangeList_t *) 7537 fw_event->event_data; 7538 7539 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 7540 _scsih_sas_topology_change_event_debug(ioc, event_data); 7541 7542 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) 7543 return 0; 7544 7545 if (!ioc->sas_hba.num_phys) 7546 _scsih_sas_host_add(ioc); 7547 else 7548 _scsih_sas_host_refresh(ioc); 7549 7550 if (fw_event->ignore) { 7551 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n")); 7552 return 0; 7553 } 7554 7555 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); 7556 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0); 7557 7558 /* handle expander add */ 7559 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) 7560 if (_scsih_expander_add(ioc, parent_handle) != 0) 7561 return 0; 7562 7563 spin_lock_irqsave(&ioc->sas_node_lock, flags); 7564 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, 7565 parent_handle); 7566 if (sas_expander) { 7567 sas_address = sas_expander->sas_address; 7568 max_phys = sas_expander->num_phys; 7569 port = sas_expander->port; 7570 } 
else if (parent_handle < ioc->sas_hba.num_phys) { 7571 sas_address = ioc->sas_hba.sas_address; 7572 max_phys = ioc->sas_hba.num_phys; 7573 } else { 7574 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7575 return 0; 7576 } 7577 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 7578 7579 /* handle siblings events */ 7580 for (i = 0; i < event_data->NumEntries; i++) { 7581 if (fw_event->ignore) { 7582 dewtprintk(ioc, 7583 ioc_info(ioc, "ignoring expander event\n")); 7584 return 0; 7585 } 7586 if (ioc->remove_host || ioc->pci_error_recovery) 7587 return 0; 7588 phy_number = event_data->StartPhyNum + i; 7589 if (phy_number >= max_phys) 7590 continue; 7591 reason_code = event_data->PHY[i].PhyStatus & 7592 MPI2_EVENT_SAS_TOPO_RC_MASK; 7593 if ((event_data->PHY[i].PhyStatus & 7594 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != 7595 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) 7596 continue; 7597 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 7598 if (!handle) 7599 continue; 7600 link_rate = event_data->PHY[i].LinkRate >> 4; 7601 prev_link_rate = event_data->PHY[i].LinkRate & 0xF; 7602 switch (reason_code) { 7603 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: 7604 7605 if (ioc->shost_recovery) 7606 break; 7607 7608 if (link_rate == prev_link_rate) 7609 break; 7610 7611 mpt3sas_transport_update_links(ioc, sas_address, 7612 handle, phy_number, link_rate, port); 7613 7614 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) 7615 break; 7616 7617 _scsih_check_device(ioc, sas_address, handle, 7618 phy_number, link_rate); 7619 7620 if (!test_bit(handle, ioc->pend_os_device_add)) 7621 break; 7622 7623 fallthrough; 7624 7625 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: 7626 7627 if (ioc->shost_recovery) 7628 break; 7629 7630 mpt3sas_transport_update_links(ioc, sas_address, 7631 handle, phy_number, link_rate, port); 7632 7633 _scsih_add_device(ioc, handle, phy_number, 0); 7634 7635 break; 7636 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: 7637 7638 
_scsih_device_remove_by_handle(ioc, handle); 7639 break; 7640 } 7641 } 7642 7643 /* handle expander removal */ 7644 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && 7645 sas_expander) 7646 mpt3sas_expander_remove(ioc, sas_address, port); 7647 7648 return 0; 7649 } 7650 7651 /** 7652 * _scsih_sas_device_status_change_event_debug - debug for device event 7653 * @ioc: ? 7654 * @event_data: event data payload 7655 * Context: user. 7656 */ 7657 static void 7658 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 7659 Mpi2EventDataSasDeviceStatusChange_t *event_data) 7660 { 7661 char *reason_str = NULL; 7662 7663 switch (event_data->ReasonCode) { 7664 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: 7665 reason_str = "smart data"; 7666 break; 7667 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: 7668 reason_str = "unsupported device discovered"; 7669 break; 7670 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 7671 reason_str = "internal device reset"; 7672 break; 7673 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: 7674 reason_str = "internal task abort"; 7675 break; 7676 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: 7677 reason_str = "internal task abort set"; 7678 break; 7679 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: 7680 reason_str = "internal clear task set"; 7681 break; 7682 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: 7683 reason_str = "internal query task"; 7684 break; 7685 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: 7686 reason_str = "sata init failure"; 7687 break; 7688 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: 7689 reason_str = "internal device reset complete"; 7690 break; 7691 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: 7692 reason_str = "internal task abort complete"; 7693 break; 7694 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: 7695 reason_str = "internal async notification"; 7696 break; 7697 case 
MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: 7698 reason_str = "expander reduced functionality"; 7699 break; 7700 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: 7701 reason_str = "expander reduced functionality complete"; 7702 break; 7703 default: 7704 reason_str = "unknown reason"; 7705 break; 7706 } 7707 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)", 7708 reason_str, le16_to_cpu(event_data->DevHandle), 7709 (u64)le64_to_cpu(event_data->SASAddress), 7710 le16_to_cpu(event_data->TaskTag)); 7711 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) 7712 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", 7713 event_data->ASC, event_data->ASCQ); 7714 pr_cont("\n"); 7715 } 7716 7717 /** 7718 * _scsih_sas_device_status_change_event - handle device status change 7719 * @ioc: per adapter object 7720 * @event_data: The fw event 7721 * Context: user. 7722 */ 7723 static void 7724 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, 7725 Mpi2EventDataSasDeviceStatusChange_t *event_data) 7726 { 7727 struct MPT3SAS_TARGET *target_priv_data; 7728 struct _sas_device *sas_device; 7729 u64 sas_address; 7730 unsigned long flags; 7731 7732 /* In MPI Revision K (0xC), the internal device reset complete was 7733 * implemented, so avoid setting tm_busy flag for older firmware. 
7734 */ 7735 if ((ioc->facts.HeaderVersion >> 8) < 0xC) 7736 return; 7737 7738 if (event_data->ReasonCode != 7739 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && 7740 event_data->ReasonCode != 7741 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) 7742 return; 7743 7744 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7745 sas_address = le64_to_cpu(event_data->SASAddress); 7746 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 7747 sas_address, 7748 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0)); 7749 7750 if (!sas_device || !sas_device->starget) 7751 goto out; 7752 7753 target_priv_data = sas_device->starget->hostdata; 7754 if (!target_priv_data) 7755 goto out; 7756 7757 if (event_data->ReasonCode == 7758 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) 7759 target_priv_data->tm_busy = 1; 7760 else 7761 target_priv_data->tm_busy = 0; 7762 7763 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 7764 ioc_info(ioc, 7765 "%s tm_busy flag for handle(0x%04x)\n", 7766 (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", 7767 target_priv_data->handle); 7768 7769 out: 7770 if (sas_device) 7771 sas_device_put(sas_device); 7772 7773 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7774 } 7775 7776 7777 /** 7778 * _scsih_check_pcie_access_status - check access flags 7779 * @ioc: per adapter object 7780 * @wwid: wwid 7781 * @handle: sas device handle 7782 * @access_status: errors returned during discovery of the device 7783 * 7784 * Return: 0 for success, else failure 7785 */ 7786 static u8 7787 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, 7788 u16 handle, u8 access_status) 7789 { 7790 u8 rc = 1; 7791 char *desc = NULL; 7792 7793 switch (access_status) { 7794 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS: 7795 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION: 7796 rc = 0; 7797 break; 7798 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED: 7799 desc = "PCIe device capability failed"; 7800 break; 7801 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: 7802 desc = "PCIe device blocked"; 7803 ioc_info(ioc, 7804 "Device with Access Status (%s): wwid(0x%016llx), " 7805 "handle(0x%04x)\n ll only be added to the internal list", 7806 desc, (u64)wwid, handle); 7807 rc = 0; 7808 break; 7809 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: 7810 desc = "PCIe device mem space access failed"; 7811 break; 7812 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE: 7813 desc = "PCIe device unsupported"; 7814 break; 7815 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED: 7816 desc = "PCIe device MSIx Required"; 7817 break; 7818 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX: 7819 desc = "PCIe device init fail max"; 7820 break; 7821 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN: 7822 desc = "PCIe device status unknown"; 7823 break; 7824 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT: 7825 desc = "nvme ready timeout"; 7826 break; 7827 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED: 7828 desc = "nvme device configuration unsupported"; 7829 break; 7830 case 
MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED: 7831 desc = "nvme identify failed"; 7832 break; 7833 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED: 7834 desc = "nvme qconfig failed"; 7835 break; 7836 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED: 7837 desc = "nvme qcreation failed"; 7838 break; 7839 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED: 7840 desc = "nvme eventcfg failed"; 7841 break; 7842 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED: 7843 desc = "nvme get feature stat failed"; 7844 break; 7845 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT: 7846 desc = "nvme idle timeout"; 7847 break; 7848 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS: 7849 desc = "nvme failure status"; 7850 break; 7851 default: 7852 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n", 7853 access_status, (u64)wwid, handle); 7854 return rc; 7855 } 7856 7857 if (!rc) 7858 return rc; 7859 7860 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n", 7861 desc, (u64)wwid, handle); 7862 return rc; 7863 } 7864 7865 /** 7866 * _scsih_pcie_device_remove_from_sml - removing pcie device 7867 * from SML and free up associated memory 7868 * @ioc: per adapter object 7869 * @pcie_device: the pcie_device object 7870 */ 7871 static void 7872 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, 7873 struct _pcie_device *pcie_device) 7874 { 7875 struct MPT3SAS_TARGET *sas_target_priv_data; 7876 7877 dewtprintk(ioc, 7878 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", 7879 __func__, 7880 pcie_device->handle, (u64)pcie_device->wwid)); 7881 if (pcie_device->enclosure_handle != 0) 7882 dewtprintk(ioc, 7883 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", 7884 __func__, 7885 (u64)pcie_device->enclosure_logical_id, 7886 pcie_device->slot)); 7887 if (pcie_device->connector_name[0] != '\0') 7888 dewtprintk(ioc, 7889 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n", 7890 
__func__, 7891 pcie_device->enclosure_level, 7892 pcie_device->connector_name)); 7893 7894 if (pcie_device->starget && pcie_device->starget->hostdata) { 7895 sas_target_priv_data = pcie_device->starget->hostdata; 7896 sas_target_priv_data->deleted = 1; 7897 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL); 7898 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 7899 } 7900 7901 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 7902 pcie_device->handle, (u64)pcie_device->wwid); 7903 if (pcie_device->enclosure_handle != 0) 7904 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n", 7905 (u64)pcie_device->enclosure_logical_id, 7906 pcie_device->slot); 7907 if (pcie_device->connector_name[0] != '\0') 7908 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n", 7909 pcie_device->enclosure_level, 7910 pcie_device->connector_name); 7911 7912 if (pcie_device->starget && (pcie_device->access_status != 7913 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) 7914 scsi_remove_target(&pcie_device->starget->dev); 7915 dewtprintk(ioc, 7916 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", 7917 __func__, 7918 pcie_device->handle, (u64)pcie_device->wwid)); 7919 if (pcie_device->enclosure_handle != 0) 7920 dewtprintk(ioc, 7921 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", 7922 __func__, 7923 (u64)pcie_device->enclosure_logical_id, 7924 pcie_device->slot)); 7925 if (pcie_device->connector_name[0] != '\0') 7926 dewtprintk(ioc, 7927 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n", 7928 __func__, 7929 pcie_device->enclosure_level, 7930 pcie_device->connector_name)); 7931 7932 kfree(pcie_device->serial_number); 7933 } 7934 7935 7936 /** 7937 * _scsih_pcie_check_device - checking device responsiveness 7938 * @ioc: per adapter object 7939 * @handle: attached device handle 7940 */ 7941 static void 7942 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 7943 { 7944 
Mpi2ConfigReply_t mpi_reply; 7945 Mpi26PCIeDevicePage0_t pcie_device_pg0; 7946 u32 ioc_status; 7947 struct _pcie_device *pcie_device; 7948 u64 wwid; 7949 unsigned long flags; 7950 struct scsi_target *starget; 7951 struct MPT3SAS_TARGET *sas_target_priv_data; 7952 u32 device_info; 7953 7954 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 7955 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) 7956 return; 7957 7958 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 7959 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 7960 return; 7961 7962 /* check if this is end device */ 7963 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 7964 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 7965 return; 7966 7967 wwid = le64_to_cpu(pcie_device_pg0.WWID); 7968 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 7969 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 7970 7971 if (!pcie_device) { 7972 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7973 return; 7974 } 7975 7976 if (unlikely(pcie_device->handle != handle)) { 7977 starget = pcie_device->starget; 7978 sas_target_priv_data = starget->hostdata; 7979 pcie_device->access_status = pcie_device_pg0.AccessStatus; 7980 starget_printk(KERN_INFO, starget, 7981 "handle changed from(0x%04x) to (0x%04x)!!!\n", 7982 pcie_device->handle, handle); 7983 sas_target_priv_data->handle = handle; 7984 pcie_device->handle = handle; 7985 7986 if (le32_to_cpu(pcie_device_pg0.Flags) & 7987 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 7988 pcie_device->enclosure_level = 7989 pcie_device_pg0.EnclosureLevel; 7990 memcpy(&pcie_device->connector_name[0], 7991 &pcie_device_pg0.ConnectorName[0], 4); 7992 } else { 7993 pcie_device->enclosure_level = 0; 7994 pcie_device->connector_name[0] = '\0'; 7995 } 7996 } 7997 7998 /* check if device is present */ 7999 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8000 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8001 ioc_info(ioc, "device is not present handle(0x%04x), 
flags!!!\n", 8002 handle); 8003 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8004 pcie_device_put(pcie_device); 8005 return; 8006 } 8007 8008 /* check if there were any issues with discovery */ 8009 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8010 pcie_device_pg0.AccessStatus)) { 8011 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8012 pcie_device_put(pcie_device); 8013 return; 8014 } 8015 8016 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8017 pcie_device_put(pcie_device); 8018 8019 _scsih_ublock_io_device(ioc, wwid, NULL); 8020 8021 return; 8022 } 8023 8024 /** 8025 * _scsih_pcie_add_device - creating pcie device object 8026 * @ioc: per adapter object 8027 * @handle: pcie device handle 8028 * 8029 * Creating end device object, stored in ioc->pcie_device_list. 8030 * 8031 * Return: 1 means queue the event later, 0 means complete the event 8032 */ 8033 static int 8034 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 8035 { 8036 Mpi26PCIeDevicePage0_t pcie_device_pg0; 8037 Mpi26PCIeDevicePage2_t pcie_device_pg2; 8038 Mpi2ConfigReply_t mpi_reply; 8039 struct _pcie_device *pcie_device; 8040 struct _enclosure_node *enclosure_dev; 8041 u32 ioc_status; 8042 u64 wwid; 8043 8044 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 8045 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { 8046 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8047 __FILE__, __LINE__, __func__); 8048 return 0; 8049 } 8050 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8051 MPI2_IOCSTATUS_MASK; 8052 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8053 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8054 __FILE__, __LINE__, __func__); 8055 return 0; 8056 } 8057 8058 set_bit(handle, ioc->pend_os_device_add); 8059 wwid = le64_to_cpu(pcie_device_pg0.WWID); 8060 8061 /* check if device is present */ 8062 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8063 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8064 ioc_err(ioc, "device is not present 
handle(0x04%x)!!!\n", 8065 handle); 8066 return 0; 8067 } 8068 8069 /* check if there were any issues with discovery */ 8070 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8071 pcie_device_pg0.AccessStatus)) 8072 return 0; 8073 8074 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu 8075 (pcie_device_pg0.DeviceInfo)))) 8076 return 0; 8077 8078 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); 8079 if (pcie_device) { 8080 clear_bit(handle, ioc->pend_os_device_add); 8081 pcie_device_put(pcie_device); 8082 return 0; 8083 } 8084 8085 /* PCIe Device Page 2 contains read-only information about a 8086 * specific NVMe device; therefore, this page is only 8087 * valid for NVMe devices and skip for pcie devices of type scsi. 8088 */ 8089 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8090 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8091 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 8092 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 8093 handle)) { 8094 ioc_err(ioc, 8095 "failure at %s:%d/%s()!\n", __FILE__, 8096 __LINE__, __func__); 8097 return 0; 8098 } 8099 8100 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8101 MPI2_IOCSTATUS_MASK; 8102 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8103 ioc_err(ioc, 8104 "failure at %s:%d/%s()!\n", __FILE__, 8105 __LINE__, __func__); 8106 return 0; 8107 } 8108 } 8109 8110 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); 8111 if (!pcie_device) { 8112 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8113 __FILE__, __LINE__, __func__); 8114 return 0; 8115 } 8116 8117 kref_init(&pcie_device->refcount); 8118 pcie_device->id = ioc->pcie_target_id++; 8119 pcie_device->channel = PCIE_CHANNEL; 8120 pcie_device->handle = handle; 8121 pcie_device->access_status = pcie_device_pg0.AccessStatus; 8122 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8123 pcie_device->wwid = wwid; 8124 pcie_device->port_num = pcie_device_pg0.PortNum; 8125 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & 8126 
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 8127 8128 pcie_device->enclosure_handle = 8129 le16_to_cpu(pcie_device_pg0.EnclosureHandle); 8130 if (pcie_device->enclosure_handle != 0) 8131 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); 8132 8133 if (le32_to_cpu(pcie_device_pg0.Flags) & 8134 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 8135 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; 8136 memcpy(&pcie_device->connector_name[0], 8137 &pcie_device_pg0.ConnectorName[0], 4); 8138 } else { 8139 pcie_device->enclosure_level = 0; 8140 pcie_device->connector_name[0] = '\0'; 8141 } 8142 8143 /* get enclosure_logical_id */ 8144 if (pcie_device->enclosure_handle) { 8145 enclosure_dev = 8146 mpt3sas_scsih_enclosure_find_by_handle(ioc, 8147 pcie_device->enclosure_handle); 8148 if (enclosure_dev) 8149 pcie_device->enclosure_logical_id = 8150 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 8151 } 8152 /* TODO -- Add device name once FW supports it */ 8153 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8154 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8155 pcie_device->nvme_mdts = 8156 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); 8157 pcie_device->shutdown_latency = 8158 le16_to_cpu(pcie_device_pg2.ShutdownLatency); 8159 /* 8160 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency 8161 * if drive's RTD3 Entry Latency is greater then IOC's 8162 * max_shutdown_latency. 
8163 */ 8164 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) 8165 ioc->max_shutdown_latency = 8166 pcie_device->shutdown_latency; 8167 if (pcie_device_pg2.ControllerResetTO) 8168 pcie_device->reset_timeout = 8169 pcie_device_pg2.ControllerResetTO; 8170 else 8171 pcie_device->reset_timeout = 30; 8172 } else 8173 pcie_device->reset_timeout = 30; 8174 8175 if (ioc->wait_for_discovery_to_complete) 8176 _scsih_pcie_device_init_add(ioc, pcie_device); 8177 else 8178 _scsih_pcie_device_add(ioc, pcie_device); 8179 8180 pcie_device_put(pcie_device); 8181 return 0; 8182 } 8183 8184 /** 8185 * _scsih_pcie_topology_change_event_debug - debug for topology 8186 * event 8187 * @ioc: per adapter object 8188 * @event_data: event data payload 8189 * Context: user. 8190 */ 8191 static void 8192 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 8193 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 8194 { 8195 int i; 8196 u16 handle; 8197 u16 reason_code; 8198 u8 port_number; 8199 char *status_str = NULL; 8200 u8 link_rate, prev_link_rate; 8201 8202 switch (event_data->SwitchStatus) { 8203 case MPI26_EVENT_PCIE_TOPO_SS_ADDED: 8204 status_str = "add"; 8205 break; 8206 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 8207 status_str = "remove"; 8208 break; 8209 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING: 8210 case 0: 8211 status_str = "responding"; 8212 break; 8213 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 8214 status_str = "remove delay"; 8215 break; 8216 default: 8217 status_str = "unknown status"; 8218 break; 8219 } 8220 ioc_info(ioc, "pcie topology change: (%s)\n", status_str); 8221 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)" 8222 "start_port(%02d), count(%d)\n", 8223 le16_to_cpu(event_data->SwitchDevHandle), 8224 le16_to_cpu(event_data->EnclosureHandle), 8225 event_data->StartPortNum, event_data->NumEntries); 8226 for (i = 0; i < event_data->NumEntries; i++) { 8227 handle = 8228 
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 8229 if (!handle) 8230 continue; 8231 port_number = event_data->StartPortNum + i; 8232 reason_code = event_data->PortEntry[i].PortStatus; 8233 switch (reason_code) { 8234 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: 8235 status_str = "target add"; 8236 break; 8237 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 8238 status_str = "target remove"; 8239 break; 8240 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 8241 status_str = "delay target remove"; 8242 break; 8243 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 8244 status_str = "link rate change"; 8245 break; 8246 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE: 8247 status_str = "target responding"; 8248 break; 8249 default: 8250 status_str = "unknown"; 8251 break; 8252 } 8253 link_rate = event_data->PortEntry[i].CurrentPortInfo & 8254 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8255 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo & 8256 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK; 8257 pr_info("\tport(%02d), attached_handle(0x%04x): %s:" 8258 " link rate: new(0x%02x), old(0x%02x)\n", port_number, 8259 handle, status_str, link_rate, prev_link_rate); 8260 } 8261 } 8262 8263 /** 8264 * _scsih_pcie_topology_change_event - handle PCIe topology 8265 * changes 8266 * @ioc: per adapter object 8267 * @fw_event: The fw_event_work object 8268 * Context: user. 
 *
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* bail out while the adapter is resetting or being torn down */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* fw_event->ignore can be set asynchronously, so re-check
		 * on every iteration, not just once before the loop.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* rewrite this entry's PortStatus in place so the
			 * DEV_ADDED path below (and any later pass over the
			 * event) sees it as a device-add.
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}

/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
		reason_str = "device init failure";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
		reason_str = "pcie hot reset failed";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "PCIE device status change: (%s)\n"
		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->WWID),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_pcie_device_status_change_event - handle device status
 * change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
							     event_data);

	/* only internal-device-reset start/complete are acted on here;
	 * every other reason code is informational.
	 */
	if (event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* tm_busy is set while the firmware performs an internal device
	 * reset and cleared when the reset completes.
	 */
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
out:
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		reason_str = "enclosure add";
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		reason_str = "enclosure remove";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "enclosure status change: (%s)\n"
		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->EnclosureHandle),
		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
		 le16_to_cpu(event_data->StartSlot));
}

/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
			(Mpi2EventDataSasEnclDevStatusChange_t *)
			fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
						enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		/* allocate and cache the enclosure page only if we do not
		 * already track this enclosure handle.
		 */
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
				   MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
				      &ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}

/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n",
					 __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* scsi_lookup_lock is dropped while the (sleeping) TM
		 * request is outstanding and re-taken before the next
		 * lookup; on any TM failure the whole scan restarts.
		 */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
				"mpt3sas_scsih_issue_tm: FAILED when sending "
				"QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
			& MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		    MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		    mpi_reply->ResponseCode ==
		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
				"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
				"scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
				"mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
				" scmd(%p)\n",
				task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventDataSasDiscovery_t *event_data =
		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
		ioc_info(ioc, "discovery event: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont("discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
	}

	/* first discovery-start with no phys yet: register the SAS host */
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
	    !ioc->sas_hba.num_phys) {
		if (disable_discovery > 0 && ioc->shost_recovery) {
			/* Wait for the reset to complete */
			while (ioc->shost_recovery)
				ssleep(1);
		}
		_scsih_sas_host_add(ioc);
	}
}

/**
 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
 *						events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;

	switch (event_data->ReasonCode) {
	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi26EventDataPCIeEnumeration_t *event_data =
		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;

	/* debug-only event: nothing to do unless event logging is enabled */
	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
		return;

	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		 "started" : "completed",
		 event_data->Flags);
	if (event_data->EnumerationStatus)
		pr_cont("enumeration_status(0x%08x)",
			le32_to_cpu(event_data->EnumerationStatus));
	pr_cont("\n");
}

/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* not applicable to MPI 2.0 (SAS 2.0) generation controllers */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
			ioc->scsih_cmds.status, mpi_request,
			sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 **/
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
		    sdev->no_uld_attach ? "hiding" : "exposing");
	WARN_ON(scsi_device_reprobe(sdev));
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	/* volume already known - nothing to do */
	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		/* on scsi_add_device() failure the freshly added raid_device
		 * is removed again so the list never holds a dead entry.
		 */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
				     raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}

/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* scsi_remove_target() can sleep, so it is called after the
	 * raid_device_lock has been released.
	 */
	if (starget)
		scsi_remove_target(&starget->dev);
}

/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* device leaves the volume: clear its volume linkage and
		 * drop it from the physical-disk handle bitmap.
		 */
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
				~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* resolve the owning volume before taking the device lock */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
					       &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	_scsih_device_remove_by_handle(ioc, handle);
}

/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	set_bit(handle, ioc->pd_handles);

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* device already known: only enable the IR fastpath */
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	/* not known yet: fetch SAS Device Page 0 to discover it */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	/* is_pd = 1: add as a hidden RAID component */
	_scsih_add_device(ioc, handle, 0, 1);
}

/**
 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	u8 element_type;
	int i;
	char *reason_str = NULL, *element_str = NULL;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];

	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
		 "foreign" : "native",
		 event_data->NumElements);
	/* print one line per config element: reason + element type */
	for (i = 0; i < event_data->NumElements; i++, element++) {
		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			reason_str = "add";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			reason_str = "remove";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
			reason_str = "no change";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			reason_str = "hide";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			reason_str = "unhide";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			reason_str = "volume_created";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			reason_str = "volume_deleted";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			reason_str = "pd_created";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			reason_str = "pd_deleted";
			break;
		default:
			reason_str = "unknown reason";
			break;
		}
		element_type = le16_to_cpu(element->ElementFlags) &
		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
		switch (element_type) {
		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
			element_str = "volume";
			break;
		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
			element_str = "phys disk";
			break;
		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
			element_str = "hot spare";
			break;
		default:
			element_str = "unknown element";
			break;
		}
		pr_info("\t(%s:%s), vol handle(0x%04x), " \
		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
		    reason_str, le16_to_cpu(element->VolDevHandle),
		    le16_to_cpu(element->PhysDiskDevHandle),
		    element->PhysDiskNum);
	}
}

/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	    (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		/*
		 * While the host is recovering, only honour the HIDE
		 * fastpath hints; full topology changes are picked up by
		 * the post-reset rescan.
		 */
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
				    le16_to_cpu(element->PhysDiskDevHandle),
				    element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			/* foreign (imported) configs are not auto-added */
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}

/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
9349 */ 9350 static void 9351 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, 9352 struct fw_event_work *fw_event) 9353 { 9354 u64 wwid; 9355 unsigned long flags; 9356 struct _raid_device *raid_device; 9357 u16 handle; 9358 u32 state; 9359 int rc; 9360 Mpi2EventDataIrVolume_t *event_data = 9361 (Mpi2EventDataIrVolume_t *) fw_event->event_data; 9362 9363 if (ioc->shost_recovery) 9364 return; 9365 9366 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 9367 return; 9368 9369 handle = le16_to_cpu(event_data->VolDevHandle); 9370 state = le32_to_cpu(event_data->NewValue); 9371 if (!ioc->hide_ir_msg) 9372 dewtprintk(ioc, 9373 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9374 __func__, handle, 9375 le32_to_cpu(event_data->PreviousValue), 9376 state)); 9377 switch (state) { 9378 case MPI2_RAID_VOL_STATE_MISSING: 9379 case MPI2_RAID_VOL_STATE_FAILED: 9380 _scsih_sas_volume_delete(ioc, handle); 9381 break; 9382 9383 case MPI2_RAID_VOL_STATE_ONLINE: 9384 case MPI2_RAID_VOL_STATE_DEGRADED: 9385 case MPI2_RAID_VOL_STATE_OPTIMAL: 9386 9387 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9388 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9389 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9390 9391 if (raid_device) 9392 break; 9393 9394 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); 9395 if (!wwid) { 9396 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9397 __FILE__, __LINE__, __func__); 9398 break; 9399 } 9400 9401 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); 9402 if (!raid_device) { 9403 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9404 __FILE__, __LINE__, __func__); 9405 break; 9406 } 9407 9408 raid_device->id = ioc->sas_id++; 9409 raid_device->channel = RAID_CHANNEL; 9410 raid_device->handle = handle; 9411 raid_device->wwid = wwid; 9412 _scsih_raid_device_add(ioc, raid_device); 9413 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 9414 raid_device->id, 0); 9415 if (rc) 9416 
_scsih_raid_device_remove(ioc, raid_device); 9417 break; 9418 9419 case MPI2_RAID_VOL_STATE_INITIALIZING: 9420 default: 9421 break; 9422 } 9423 } 9424 9425 /** 9426 * _scsih_sas_ir_physical_disk_event - PD event 9427 * @ioc: per adapter object 9428 * @fw_event: The fw_event_work object 9429 * Context: user. 9430 */ 9431 static void 9432 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, 9433 struct fw_event_work *fw_event) 9434 { 9435 u16 handle, parent_handle; 9436 u32 state; 9437 struct _sas_device *sas_device; 9438 Mpi2ConfigReply_t mpi_reply; 9439 Mpi2SasDevicePage0_t sas_device_pg0; 9440 u32 ioc_status; 9441 Mpi2EventDataIrPhysicalDisk_t *event_data = 9442 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; 9443 u64 sas_address; 9444 9445 if (ioc->shost_recovery) 9446 return; 9447 9448 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 9449 return; 9450 9451 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 9452 state = le32_to_cpu(event_data->NewValue); 9453 9454 if (!ioc->hide_ir_msg) 9455 dewtprintk(ioc, 9456 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9457 __func__, handle, 9458 le32_to_cpu(event_data->PreviousValue), 9459 state)); 9460 9461 switch (state) { 9462 case MPI2_RAID_PD_STATE_ONLINE: 9463 case MPI2_RAID_PD_STATE_DEGRADED: 9464 case MPI2_RAID_PD_STATE_REBUILDING: 9465 case MPI2_RAID_PD_STATE_OPTIMAL: 9466 case MPI2_RAID_PD_STATE_HOT_SPARE: 9467 9468 if (!ioc->is_warpdrive) 9469 set_bit(handle, ioc->pd_handles); 9470 9471 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 9472 if (sas_device) { 9473 sas_device_put(sas_device); 9474 return; 9475 } 9476 9477 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9478 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 9479 handle))) { 9480 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9481 __FILE__, __LINE__, __func__); 9482 return; 9483 } 9484 9485 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9486 MPI2_IOCSTATUS_MASK; 9487 if (ioc_status 
!= MPI2_IOCSTATUS_SUCCESS) { 9488 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9489 __FILE__, __LINE__, __func__); 9490 return; 9491 } 9492 9493 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9494 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 9495 mpt3sas_transport_update_links(ioc, sas_address, handle, 9496 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 9497 mpt3sas_get_port_by_id(ioc, 9498 sas_device_pg0.PhysicalPort, 0)); 9499 9500 _scsih_add_device(ioc, handle, 0, 1); 9501 9502 break; 9503 9504 case MPI2_RAID_PD_STATE_OFFLINE: 9505 case MPI2_RAID_PD_STATE_NOT_CONFIGURED: 9506 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: 9507 default: 9508 break; 9509 } 9510 } 9511 9512 /** 9513 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event 9514 * @ioc: per adapter object 9515 * @event_data: event data payload 9516 * Context: user. 9517 */ 9518 static void 9519 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, 9520 Mpi2EventDataIrOperationStatus_t *event_data) 9521 { 9522 char *reason_str = NULL; 9523 9524 switch (event_data->RAIDOperation) { 9525 case MPI2_EVENT_IR_RAIDOP_RESYNC: 9526 reason_str = "resync"; 9527 break; 9528 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: 9529 reason_str = "online capacity expansion"; 9530 break; 9531 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: 9532 reason_str = "consistency check"; 9533 break; 9534 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: 9535 reason_str = "background init"; 9536 break; 9537 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: 9538 reason_str = "make data consistent"; 9539 break; 9540 } 9541 9542 if (!reason_str) 9543 return; 9544 9545 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", 9546 reason_str, 9547 le16_to_cpu(event_data->VolDevHandle), 9548 event_data->PercentComplete); 9549 } 9550 9551 /** 9552 * _scsih_sas_ir_operation_status_event - handle RAID operation events 9553 * @ioc: per adapter object 9554 * @fw_event: 
The fw_event_work object 9555 * Context: user. 9556 */ 9557 static void 9558 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, 9559 struct fw_event_work *fw_event) 9560 { 9561 Mpi2EventDataIrOperationStatus_t *event_data = 9562 (Mpi2EventDataIrOperationStatus_t *) 9563 fw_event->event_data; 9564 static struct _raid_device *raid_device; 9565 unsigned long flags; 9566 u16 handle; 9567 9568 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9569 (!ioc->hide_ir_msg)) 9570 _scsih_sas_ir_operation_status_event_debug(ioc, 9571 event_data); 9572 9573 /* code added for raid transport support */ 9574 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 9575 9576 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9577 handle = le16_to_cpu(event_data->VolDevHandle); 9578 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9579 if (raid_device) 9580 raid_device->percent_complete = 9581 event_data->PercentComplete; 9582 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9583 } 9584 } 9585 9586 /** 9587 * _scsih_prep_device_scan - initialize parameters prior to device scan 9588 * @ioc: per adapter object 9589 * 9590 * Set the deleted flag prior to device scan. If the device is found during 9591 * the scan, then we clear the deleted flag. 9592 */ 9593 static void 9594 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) 9595 { 9596 struct MPT3SAS_DEVICE *sas_device_priv_data; 9597 struct scsi_device *sdev; 9598 9599 shost_for_each_device(sdev, ioc->shost) { 9600 sas_device_priv_data = sdev->hostdata; 9601 if (sas_device_priv_data && sas_device_priv_data->sas_target) 9602 sas_device_priv_data->sas_target->deleted = 1; 9603 } 9604 } 9605 9606 /** 9607 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 9608 * @ioc: per adapter object 9609 * @sas_device_pg0: SAS Device page 0 9610 * 9611 * After host reset, find out whether devices are still responding. 9612 * Used in _scsih_remove_unresponsive_sas_devices. 
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* match on SAS address + slot + port, not on handle */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
		}
		if (le16_to_cpu(sas_device_pg0->Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
			    &sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		/* refresh enclosure data; it may have changed over reset */
		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
			    enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
				    enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* firmware may hand out a new handle after reset */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_create_enclosure_list_after_reset - Free Existing list,
 *	And create enclosure list by scanning all Enclosure Page(0)s
 * @ioc: per adapter object
 */
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	u16 enclosure_handle;
	int rc;

	/* Free existing enclosure list */
	mpt3sas_free_enclosure_list(ioc);

	/* Re constructing enclosure list after reset*/
	enclosure_handle = 0xFFFF;
	do {
		enclosure_dev =
			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
		if (!enclosure_dev) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}
		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_dev->pg0,
		    MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
		    enclosure_handle);

		/* error or end of enclosure iteration: free and stop */
		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK)) {
			kfree(enclosure_dev);
			return;
		}
		list_add_tail(&enclosure_dev->list,
		    &ioc->enclosure_list);
		enclosure_handle =
		    le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
	} while (1);
}

/**
 * _scsih_search_responding_sas_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	ioc_info(ioc, "search for end-devices: start\n");

	if (list_empty(&ioc->sas_device_list))
		goto out;

	/* iterate all SAS Device Page(0)s by GET_NEXT_HANDLE */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		if (!(_scsih_is_end_device(device_info)))
			continue;
		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
	}

 out:
	ioc_info(ioc, "search for end-devices: complete\n");
}

/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
9796 */ 9797 static void 9798 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, 9799 Mpi26PCIeDevicePage0_t *pcie_device_pg0) 9800 { 9801 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9802 struct scsi_target *starget; 9803 struct _pcie_device *pcie_device; 9804 unsigned long flags; 9805 9806 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9807 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 9808 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 9809 && (pcie_device->slot == le16_to_cpu( 9810 pcie_device_pg0->Slot))) { 9811 pcie_device->access_status = 9812 pcie_device_pg0->AccessStatus; 9813 pcie_device->responding = 1; 9814 starget = pcie_device->starget; 9815 if (starget && starget->hostdata) { 9816 sas_target_priv_data = starget->hostdata; 9817 sas_target_priv_data->tm_busy = 0; 9818 sas_target_priv_data->deleted = 0; 9819 } else 9820 sas_target_priv_data = NULL; 9821 if (starget) { 9822 starget_printk(KERN_INFO, starget, 9823 "handle(0x%04x), wwid(0x%016llx) ", 9824 pcie_device->handle, 9825 (unsigned long long)pcie_device->wwid); 9826 if (pcie_device->enclosure_handle != 0) 9827 starget_printk(KERN_INFO, starget, 9828 "enclosure logical id(0x%016llx), " 9829 "slot(%d)\n", 9830 (unsigned long long) 9831 pcie_device->enclosure_logical_id, 9832 pcie_device->slot); 9833 } 9834 9835 if (((le32_to_cpu(pcie_device_pg0->Flags)) & 9836 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && 9837 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 9838 pcie_device->enclosure_level = 9839 pcie_device_pg0->EnclosureLevel; 9840 memcpy(&pcie_device->connector_name[0], 9841 &pcie_device_pg0->ConnectorName[0], 4); 9842 } else { 9843 pcie_device->enclosure_level = 0; 9844 pcie_device->connector_name[0] = '\0'; 9845 } 9846 9847 if (pcie_device->handle == le16_to_cpu( 9848 pcie_device_pg0->DevHandle)) 9849 goto out; 9850 pr_info("\thandle changed from(0x%04x)!!!\n", 9851 pcie_device->handle); 9852 pcie_device->handle = le16_to_cpu( 9853 
pcie_device_pg0->DevHandle); 9854 if (sas_target_priv_data) 9855 sas_target_priv_data->handle = 9856 le16_to_cpu(pcie_device_pg0->DevHandle); 9857 goto out; 9858 } 9859 } 9860 9861 out: 9862 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9863 } 9864 9865 /** 9866 * _scsih_search_responding_pcie_devices - 9867 * @ioc: per adapter object 9868 * 9869 * After host reset, find out whether devices are still responding. 9870 * If not remove. 9871 */ 9872 static void 9873 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) 9874 { 9875 Mpi26PCIeDevicePage0_t pcie_device_pg0; 9876 Mpi2ConfigReply_t mpi_reply; 9877 u16 ioc_status; 9878 u16 handle; 9879 u32 device_info; 9880 9881 ioc_info(ioc, "search for end-devices: start\n"); 9882 9883 if (list_empty(&ioc->pcie_device_list)) 9884 goto out; 9885 9886 handle = 0xFFFF; 9887 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 9888 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9889 handle))) { 9890 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9891 MPI2_IOCSTATUS_MASK; 9892 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9893 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", 9894 __func__, ioc_status, 9895 le32_to_cpu(mpi_reply.IOCLogInfo)); 9896 break; 9897 } 9898 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9899 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 9900 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 9901 continue; 9902 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 9903 } 9904 out: 9905 ioc_info(ioc, "search for PCIe end-devices: complete\n"); 9906 } 9907 9908 /** 9909 * _scsih_mark_responding_raid_device - mark a raid_device as responding 9910 * @ioc: per adapter object 9911 * @wwid: world wide identifier for raid volume 9912 * @handle: device handle 9913 * 9914 * After host reset, find out whether devices are still responding. 9915 * Used in _scsih_remove_unresponsive_raid_devices. 
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/*
			 * The lock is dropped here (the printk and the
			 * warpdrive re-init below must not run under a
			 * spinlock), so the list walk cannot be resumed:
			 * every path below ends in "return".
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			/* firmware handed out a new handle after reset */
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_search_responding_raid_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	/* nothing to do unless the controller runs IR (RAID) firmware */
	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	/* iterate all RAID Volume Page(1)s by GET_NEXT_HANDLE */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* only usable volume states count as responding */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	if (!ioc->is_warpdrive) {
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}
/**
 * _scsih_mark_responding_expander - mark an expander as responding
 * @ioc: per adapter object
 * @expander_pg0:SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_expanders.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, expander_pg0->PhysicalPort, 0);

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		/* match on SAS address + port, not on handle */
		if (sas_expander->sas_address != sas_address)
			continue;
		if (sas_expander->port != port)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		if (sas_expander->handle == handle)
			goto out;
		/* firmware handed out a new handle after reset */
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_search_responding_expanders -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u64 sas_address;
	u16 handle;
	u8 port;

	ioc_info(ioc, "search for expanders: start\n");

	if (list_empty(&ioc->sas_expander_list))
		goto out;

	/* iterate all Expander Page(0)s by GET_NEXT_HNDL */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;

		handle = le16_to_cpu(expander_pg0.DevHandle);
		sas_address = le64_to_cpu(expander_pg0.SASAddress);
		port = expander_pg0.PhysicalPort;
		pr_info(
		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
		    handle, (unsigned long long)sas_address,
		    (ioc->multipath_on_hba ?
		    port : MULTIPATH_DISABLED_PORT_ID));
		_scsih_mark_responding_expander(ioc, &expander_pg0);
	}

 out:
	ioc_info(ioc, "search for expanders: complete\n");
}

/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	/* removal happens outside the lock; nodes are on tmp_list now */
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}

/* Re-read Expander Page 1 for each phy and push the refreshed link
 * state to the SAS transport layer.
 */
static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander, u16 handle)
{
	Mpi2ExpanderPage1_t expander_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int i;

	for (i = 0 ; i < sas_expander->num_phys ; i++) {
		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
		    &expander_pg1, i, handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
		    expander_pg1.NegotiatedLinkRate >> 4,
		    sas_expander->port);
	}
}

/**
 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
 * @ioc: per adapter object
 */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t *volume_pg1;
	Mpi2RaidVolPage0_t *volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	static struct _raid_device
*raid_device; 10298 u8 retry_count; 10299 unsigned long flags; 10300 10301 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL); 10302 if (!volume_pg0) 10303 return; 10304 10305 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL); 10306 if (!volume_pg1) { 10307 kfree(volume_pg0); 10308 return; 10309 } 10310 10311 ioc_info(ioc, "scan devices: start\n"); 10312 10313 _scsih_sas_host_refresh(ioc); 10314 10315 ioc_info(ioc, "\tscan devices: expanders start\n"); 10316 10317 /* expanders */ 10318 handle = 0xFFFF; 10319 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 10320 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { 10321 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10322 MPI2_IOCSTATUS_MASK; 10323 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10324 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10325 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10326 break; 10327 } 10328 handle = le16_to_cpu(expander_pg0.DevHandle); 10329 spin_lock_irqsave(&ioc->sas_node_lock, flags); 10330 port_id = expander_pg0.PhysicalPort; 10331 expander_device = mpt3sas_scsih_expander_find_by_sas_address( 10332 ioc, le64_to_cpu(expander_pg0.SASAddress), 10333 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10334 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 10335 if (expander_device) 10336 _scsih_refresh_expander_links(ioc, expander_device, 10337 handle); 10338 else { 10339 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 10340 handle, 10341 (u64)le64_to_cpu(expander_pg0.SASAddress)); 10342 _scsih_expander_add(ioc, handle); 10343 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 10344 handle, 10345 (u64)le64_to_cpu(expander_pg0.SASAddress)); 10346 } 10347 } 10348 10349 ioc_info(ioc, "\tscan devices: expanders complete\n"); 10350 10351 if (!ioc->ir_firmware) 10352 goto skip_to_sas; 10353 10354 ioc_info(ioc, "\tscan devices: phys disk start\n"); 10355 10356 /* 
phys disk */ 10357 phys_disk_num = 0xFF; 10358 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 10359 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 10360 phys_disk_num))) { 10361 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10362 MPI2_IOCSTATUS_MASK; 10363 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10364 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10365 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10366 break; 10367 } 10368 phys_disk_num = pd_pg0.PhysDiskNum; 10369 handle = le16_to_cpu(pd_pg0.DevHandle); 10370 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 10371 if (sas_device) { 10372 sas_device_put(sas_device); 10373 continue; 10374 } 10375 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 10376 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 10377 handle) != 0) 10378 continue; 10379 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10380 MPI2_IOCSTATUS_MASK; 10381 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10382 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", 10383 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10384 break; 10385 } 10386 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 10387 if (!_scsih_get_sas_address(ioc, parent_handle, 10388 &sas_address)) { 10389 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", 10390 handle, 10391 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10392 port_id = sas_device_pg0.PhysicalPort; 10393 mpt3sas_transport_update_links(ioc, sas_address, 10394 handle, sas_device_pg0.PhyNum, 10395 MPI2_SAS_NEG_LINK_RATE_1_5, 10396 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10397 set_bit(handle, ioc->pd_handles); 10398 retry_count = 0; 10399 /* This will retry adding the end device. 
10400 * _scsih_add_device() will decide on retries and 10401 * return "1" when it should be retried 10402 */ 10403 while (_scsih_add_device(ioc, handle, retry_count++, 10404 1)) { 10405 ssleep(1); 10406 } 10407 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", 10408 handle, 10409 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10410 } 10411 } 10412 10413 ioc_info(ioc, "\tscan devices: phys disk complete\n"); 10414 10415 ioc_info(ioc, "\tscan devices: volumes start\n"); 10416 10417 /* volumes */ 10418 handle = 0xFFFF; 10419 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, 10420 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 10421 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10422 MPI2_IOCSTATUS_MASK; 10423 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10424 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10425 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10426 break; 10427 } 10428 handle = le16_to_cpu(volume_pg1->DevHandle); 10429 spin_lock_irqsave(&ioc->raid_device_lock, flags); 10430 raid_device = _scsih_raid_device_find_by_wwid(ioc, 10431 le64_to_cpu(volume_pg1->WWID)); 10432 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 10433 if (raid_device) 10434 continue; 10435 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, 10436 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 10437 sizeof(Mpi2RaidVolPage0_t))) 10438 continue; 10439 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10440 MPI2_IOCSTATUS_MASK; 10441 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10442 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10443 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10444 break; 10445 } 10446 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || 10447 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE || 10448 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { 10449 memset(&element, 0, 
sizeof(Mpi2EventIrConfigElement_t)); 10450 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; 10451 element.VolDevHandle = volume_pg1->DevHandle; 10452 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", 10453 volume_pg1->DevHandle); 10454 _scsih_sas_volume_add(ioc, &element); 10455 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", 10456 volume_pg1->DevHandle); 10457 } 10458 } 10459 10460 ioc_info(ioc, "\tscan devices: volumes complete\n"); 10461 10462 skip_to_sas: 10463 10464 ioc_info(ioc, "\tscan devices: end devices start\n"); 10465 10466 /* sas devices */ 10467 handle = 0xFFFF; 10468 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 10469 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 10470 handle))) { 10471 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10472 MPI2_IOCSTATUS_MASK; 10473 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10474 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10475 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10476 break; 10477 } 10478 handle = le16_to_cpu(sas_device_pg0.DevHandle); 10479 if (!(_scsih_is_end_device( 10480 le32_to_cpu(sas_device_pg0.DeviceInfo)))) 10481 continue; 10482 port_id = sas_device_pg0.PhysicalPort; 10483 sas_device = mpt3sas_get_sdev_by_addr(ioc, 10484 le64_to_cpu(sas_device_pg0.SASAddress), 10485 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10486 if (sas_device) { 10487 sas_device_put(sas_device); 10488 continue; 10489 } 10490 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 10491 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { 10492 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", 10493 handle, 10494 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10495 mpt3sas_transport_update_links(ioc, sas_address, handle, 10496 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 10497 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10498 retry_count = 0; 10499 /* This will retry adding the end 
device. 10500 * _scsih_add_device() will decide on retries and 10501 * return "1" when it should be retried 10502 */ 10503 while (_scsih_add_device(ioc, handle, retry_count++, 10504 0)) { 10505 ssleep(1); 10506 } 10507 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", 10508 handle, 10509 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 10510 } 10511 } 10512 ioc_info(ioc, "\tscan devices: end devices complete\n"); 10513 ioc_info(ioc, "\tscan devices: pcie end devices start\n"); 10514 10515 /* pcie devices */ 10516 handle = 0xFFFF; 10517 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 10518 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 10519 handle))) { 10520 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) 10521 & MPI2_IOCSTATUS_MASK; 10522 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10523 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10524 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10525 break; 10526 } 10527 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 10528 if (!(_scsih_is_nvme_pciescsi_device( 10529 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) 10530 continue; 10531 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, 10532 le64_to_cpu(pcie_device_pg0.WWID)); 10533 if (pcie_device) { 10534 pcie_device_put(pcie_device); 10535 continue; 10536 } 10537 retry_count = 0; 10538 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); 10539 _scsih_pcie_add_device(ioc, handle); 10540 10541 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", 10542 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); 10543 } 10544 10545 kfree(volume_pg0); 10546 kfree(volume_pg1); 10547 10548 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); 10549 ioc_info(ioc, "scan devices: complete\n"); 10550 } 10551 10552 /** 10553 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih) 10554 * @ioc: per adapter object 10555 * 10556 * The handler 
 * for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	/* nothing to tear down before reset; just trace the callback */
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}

/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *	scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Marks any pending scsih/tm internal command as aborted-by-reset,
 * frees its message frame, and wakes its waiter; then clears the
 * per-device bookkeeping bitmaps and flushes queued firmware events
 * and in-flight SCSI commands.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* abort the internal scsih command, if one is in flight */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* likewise for the internal task-management command */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* reset will re-enumerate; forget pending add/remove state */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	    ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}

/**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
10598 */ 10599 void 10600 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) 10601 { 10602 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); 10603 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { 10604 if (ioc->multipath_on_hba) { 10605 _scsih_sas_port_refresh(ioc); 10606 _scsih_update_vphys_after_reset(ioc); 10607 } 10608 _scsih_prep_device_scan(ioc); 10609 _scsih_create_enclosure_list_after_reset(ioc); 10610 _scsih_search_responding_sas_devices(ioc); 10611 _scsih_search_responding_pcie_devices(ioc); 10612 _scsih_search_responding_raid_devices(ioc); 10613 _scsih_search_responding_expanders(ioc); 10614 _scsih_error_recovery_delete_devices(ioc); 10615 } 10616 } 10617 10618 /** 10619 * _mpt3sas_fw_work - delayed task for processing firmware events 10620 * @ioc: per adapter object 10621 * @fw_event: The fw_event_work object 10622 * Context: user. 10623 */ 10624 static void 10625 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) 10626 { 10627 ioc->current_event = fw_event; 10628 _scsih_fw_event_del_from_list(ioc, fw_event); 10629 10630 /* the queue is being flushed so ignore this event */ 10631 if (ioc->remove_host || ioc->pci_error_recovery) { 10632 fw_event_work_put(fw_event); 10633 ioc->current_event = NULL; 10634 return; 10635 } 10636 10637 switch (fw_event->event) { 10638 case MPT3SAS_PROCESS_TRIGGER_DIAG: 10639 mpt3sas_process_trigger_data(ioc, 10640 (struct SL_WH_TRIGGERS_EVENT_DATA_T *) 10641 fw_event->event_data); 10642 break; 10643 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: 10644 while (scsi_host_in_recovery(ioc->shost) || 10645 ioc->shost_recovery) { 10646 /* 10647 * If we're unloading or cancelling the work, bail. 10648 * Otherwise, this can become an infinite loop. 
10649 */ 10650 if (ioc->remove_host || ioc->fw_events_cleanup) 10651 goto out; 10652 ssleep(1); 10653 } 10654 _scsih_remove_unresponding_devices(ioc); 10655 _scsih_del_dirty_vphy(ioc); 10656 _scsih_del_dirty_port_entries(ioc); 10657 _scsih_scan_for_devices_after_reset(ioc); 10658 /* 10659 * If diag reset has occurred during the driver load 10660 * then driver has to complete the driver load operation 10661 * by executing the following items: 10662 *- Register the devices from sas_device_init_list to SML 10663 *- clear is_driver_loading flag, 10664 *- start the watchdog thread. 10665 * In happy driver load path, above things are taken care of when 10666 * driver executes scsih_scan_finished(). 10667 */ 10668 if (ioc->is_driver_loading) 10669 _scsih_complete_devices_scanning(ioc); 10670 _scsih_set_nvme_max_shutdown_latency(ioc); 10671 break; 10672 case MPT3SAS_PORT_ENABLE_COMPLETE: 10673 ioc->start_scan = 0; 10674 if (missing_delay[0] != -1 && missing_delay[1] != -1) 10675 mpt3sas_base_update_missing_delay(ioc, missing_delay[0], 10676 missing_delay[1]); 10677 dewtprintk(ioc, 10678 ioc_info(ioc, "port enable: complete from worker thread\n")); 10679 break; 10680 case MPT3SAS_TURN_ON_PFA_LED: 10681 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); 10682 break; 10683 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 10684 _scsih_sas_topology_change_event(ioc, fw_event); 10685 break; 10686 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 10687 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 10688 _scsih_sas_device_status_change_event_debug(ioc, 10689 (Mpi2EventDataSasDeviceStatusChange_t *) 10690 fw_event->event_data); 10691 break; 10692 case MPI2_EVENT_SAS_DISCOVERY: 10693 _scsih_sas_discovery_event(ioc, fw_event); 10694 break; 10695 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 10696 _scsih_sas_device_discovery_error_event(ioc, fw_event); 10697 break; 10698 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 10699 _scsih_sas_broadcast_primitive_event(ioc, fw_event); 10700 break; 10701 case 
MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 10702 _scsih_sas_enclosure_dev_status_change_event(ioc, 10703 fw_event); 10704 break; 10705 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 10706 _scsih_sas_ir_config_change_event(ioc, fw_event); 10707 break; 10708 case MPI2_EVENT_IR_VOLUME: 10709 _scsih_sas_ir_volume_event(ioc, fw_event); 10710 break; 10711 case MPI2_EVENT_IR_PHYSICAL_DISK: 10712 _scsih_sas_ir_physical_disk_event(ioc, fw_event); 10713 break; 10714 case MPI2_EVENT_IR_OPERATION_STATUS: 10715 _scsih_sas_ir_operation_status_event(ioc, fw_event); 10716 break; 10717 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 10718 _scsih_pcie_device_status_change_event(ioc, fw_event); 10719 break; 10720 case MPI2_EVENT_PCIE_ENUMERATION: 10721 _scsih_pcie_enumeration_event(ioc, fw_event); 10722 break; 10723 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10724 _scsih_pcie_topology_change_event(ioc, fw_event); 10725 ioc->current_event = NULL; 10726 return; 10727 break; 10728 } 10729 out: 10730 fw_event_work_put(fw_event); 10731 ioc->current_event = NULL; 10732 } 10733 10734 /** 10735 * _firmware_event_work 10736 * @work: The fw_event_work object 10737 * Context: user. 10738 * 10739 * wrappers for the work thread handling firmware events 10740 */ 10741 10742 static void 10743 _firmware_event_work(struct work_struct *work) 10744 { 10745 struct fw_event_work *fw_event = container_of(work, 10746 struct fw_event_work, work); 10747 10748 _mpt3sas_fw_work(fw_event->ioc, fw_event); 10749 } 10750 10751 /** 10752 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) 10753 * @ioc: per adapter object 10754 * @msix_index: MSIX table index supplied by the OS 10755 * @reply: reply message frame(lower 32bit addr) 10756 * Context: interrupt. 10757 * 10758 * This function merely adds a new work task into ioc->firmware_event_thread. 10759 * The tasks are worked from _firmware_event_work in user context. 
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	/*
	 * Triage in interrupt context: events that are fully handled (or
	 * ignored) here "return 1" without queueing; events that need user
	 * context "break" and are queued to the fw-event worker below.
	 */
	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* coalesce: remember the AEN if one is already in flight */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 *log_code;

		/* log entries are only interpreted for WarpDrive HBAs */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = (u32 *)log_entry->LogData;

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (le32_to_cpu(*log_code)) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		/* no ISR-time work; queued for the worker below */
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
		    (Mpi2EventDataTemperature_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    le32_to_cpu(
			    ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength counts 32-bit words; convert to bytes */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	/* the event queue now holds its own reference; drop ours */
	fw_event_work_put(fw_event);
	return 1;
}

/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	    &sas_expander->sas_port_list, port_list) {
		/* bail mid-walk if a host reset started underneath us */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* recursively tears down child expanders */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    sas_expander->port->port_id);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}

/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sending IoUnitControl request with shutdown operation code to alert IOC that
 * the host system is shutting down so that IOC can issue NVMe shutdown to
 * NVMe drives attached to it.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	/* serialize against other users of the shared scsih command slot */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
	    "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
	    ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
	    ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
out:
	/* always release the command slot, even on timeout/error */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}


/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	/* serialize against other users of the shared scsih command slot */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	/* hide_ir_msg suppresses IR chatter on WarpDrive-style setups */
	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* always release the command slot, even on timeout/error */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _scsih_get_shost_and_ioc - get shost and ioc
 *	and verify whether they are NULL or not
 * @pdev: PCI device struct
 * @shost: address of scsi host pointer
 * @ioc: address of HBA adapter pointer
 *
 * Return: zero if *shost and *ioc are not NULL otherwise return error number.
 */
static int
_scsih_get_shost_and_ioc(struct pci_dev *pdev,
	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
{
	*shost = pci_get_drvdata(pdev);
	if (*shost == NULL) {
		dev_err(&pdev->dev, "pdev's driver data is null\n");
		return -ENXIO;
	}

	*ioc = shost_priv(*shost);
	if (*ioc == NULL) {
		dev_err(&pdev->dev, "shost's private data is null\n");
		return -ENXIO;
	}

	return 0;
}

/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* device already gone (e.g. surprise removal): fail outstanding I/O */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the event workqueue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			/* mark deleted so no new I/O is queued to the target */
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
	    list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		/* drop the list's reference */
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	   &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	mpt3sas_base_detach(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}

/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 *
 * Quiesces firmware activity (IR and NVMe shutdown notifications) and
 * detaches from the IOC without tearing down the SCSI midlayer objects.
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the event workqueue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}


/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes.
 * Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Pick the boot device by priority: requested boot device first,
	 * then requested alternate, then current boot device.
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* channel identifies which union member 'device' really is */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* move from init list to the active list under the lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/* snapshot addressing info and move to the active list */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		/* WarpDrive hides physical drives behind the RAID volume */
		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/*
			 * Tear down only when not in async driver-load scan;
			 * see matching comment in _scsih_probe_sas().
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}

/**
 * _scsih_probe_raid - reporting raid volumes to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
{
	struct _raid_device *raid_device, *raid_next;
	int rc;

	list_for_each_entry_safe(raid_device, raid_next,
	    &ioc->raid_device_list, list) {
		/* already exposed to scsi-ml */
		if (raid_device->starget)
			continue;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	}
}

/*
 * get_next_sas_device - pop the head of sas_device_init_list
 * @ioc: per adapter object
 *
 * Returns the first entry of sas_device_init_list with an extra reference
 * taken for the caller, or NULL when the list is empty.  The caller must
 * drop the reference with sas_device_put().
 */
static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&ioc->sas_device_init_list)) {
		sas_device = list_first_entry(&ioc->sas_device_init_list,
		    struct _sas_device, list);
		sas_device_get(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/*
 * sas_device_make_active - move a sas device onto the active list
 * @ioc: per adapter object
 * @sas_device: device to move to ioc->sas_device_list
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	/* WarpDrive hides physical drives behind the RAID volume */
	if (ioc->hide_drives)
		return;

	/* drain the init list; each iteration drops the get_next reference */
	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent, sas_device->port)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When asyn scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent,
				    sas_device->port);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * get_next_pcie_device - Get the next pcie device
 * @ioc: per adapter object
 *
 * Get the next pcie device from pcie_device_init_list list.
 *
 * Return: pcie device structure if pcie_device_init_list list is not empty
 * otherwise returns NULL
 */
static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&ioc->pcie_device_init_list)) {
		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
				struct _pcie_device, list);
		/* extra reference for the caller; drop with pcie_device_put() */
		pcie_device_get(pcie_device);
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * pcie_device_make_active - Add pcie device to pcie_device_list list
 * @ioc: per adapter object
 * @pcie_device: pcie device object
 *
 * Add the pcie device which has registered with SCSI Transport Later to
 * pcie_device_list list
 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/*
	 * If still on a list, drop that list's reference before taking one
	 * for the destination list (mirrors sas_device_make_active()).
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
11573 */ 11574 static void 11575 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) 11576 { 11577 struct _pcie_device *pcie_device; 11578 int rc; 11579 11580 /* PCIe Device List */ 11581 while ((pcie_device = get_next_pcie_device(ioc))) { 11582 if (pcie_device->starget) { 11583 pcie_device_put(pcie_device); 11584 continue; 11585 } 11586 if (pcie_device->access_status == 11587 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { 11588 pcie_device_make_active(ioc, pcie_device); 11589 pcie_device_put(pcie_device); 11590 continue; 11591 } 11592 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, 11593 pcie_device->id, 0); 11594 if (rc) { 11595 _scsih_pcie_device_remove(ioc, pcie_device); 11596 pcie_device_put(pcie_device); 11597 continue; 11598 } else if (!pcie_device->starget) { 11599 /* 11600 * When async scanning is enabled, its not possible to 11601 * remove devices while scanning is turned on due to an 11602 * oops in scsi_sysfs_add_sdev()->add_device()-> 11603 * sysfs_addrm_start() 11604 */ 11605 if (!ioc->is_driver_loading) { 11606 /* TODO-- Need to find out whether this condition will 11607 * occur or not 11608 */ 11609 _scsih_pcie_device_remove(ioc, pcie_device); 11610 pcie_device_put(pcie_device); 11611 continue; 11612 } 11613 } 11614 pcie_device_make_active(ioc, pcie_device); 11615 pcie_device_put(pcie_device); 11616 } 11617 } 11618 11619 /** 11620 * _scsih_probe_devices - probing for devices 11621 * @ioc: per adapter object 11622 * 11623 * Called during initial loading of the driver. 
 */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return;  /* return when IOC doesn't support initiator mode */

	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		/* probe order follows the IOC page 8 volume mapping mode */
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}

/**
 * scsih_scan_start - scsi lld callback for .scan_start
 * @shost: SCSI host pointer
 *
 * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus.  In our implemention, we will kick off
 * firmware discovery.
 */
static void
scsih_scan_start(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int rc;
	/* enable host trace buffer per module param or manufacturing page 11 */
	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
		mpt3sas_enable_diag_buffer(ioc, 1);

	if (disable_discovery > 0)
		return;

	ioc->start_scan = 1;
	rc = mpt3sas_port_enable(ioc);

	if (rc != 0)
		ioc_info(ioc, "port enable: FAILED\n");
}

/**
 * _scsih_complete_devices_scanning - add the devices to sml and
 * complete ioc initialization.
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
{

	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}

	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
}

/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodicallyn until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
 * we wait for firmware discovery to complete, then return 1.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give up after 300 seconds of waiting for port enable */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* port enable still in flight: check IOC health before polling again */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;
	}

	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

 out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}

/**
 * scsih_map_queues - map reply queues with request queues
 * @shost: SCSI host pointer
 */
static int scsih_map_queues(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc =
	    (struct MPT3SAS_ADAPTER *)shost->hostdata;

	/* single hardware queue: nothing to map */
	if (ioc->shost->nr_hw_queues == 1)
		return 0;

	/* skip the high-iops queues when mapping CPUs to hw queues */
	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
	    ioc->pdev, ioc->high_iops_queues);
}

/* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};

/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 7,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
	.map_queues			= scsih_map_queues,
};

/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 *							this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
 *	(0 for any PCI device id not listed below)
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	return 0;
}

/**
 * _scsih_probe - attach and add scsi host
 * @pdev: PCI device struct
 * @id: pci device id
 *
 * Return: 0 success, anything else error.
11947 */ 11948 static int 11949 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 11950 { 11951 struct MPT3SAS_ADAPTER *ioc; 11952 struct Scsi_Host *shost = NULL; 11953 int rv; 11954 u16 hba_mpi_version; 11955 11956 /* Determine in which MPI version class this pci device belongs */ 11957 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); 11958 if (hba_mpi_version == 0) 11959 return -ENODEV; 11960 11961 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, 11962 * for other generation HBA's return with -ENODEV 11963 */ 11964 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) 11965 return -ENODEV; 11966 11967 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, 11968 * for other generation HBA's return with -ENODEV 11969 */ 11970 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION 11971 || hba_mpi_version == MPI26_VERSION))) 11972 return -ENODEV; 11973 11974 switch (hba_mpi_version) { 11975 case MPI2_VERSION: 11976 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 11977 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 11978 /* Use mpt2sas driver host template for SAS 2.0 HBA's */ 11979 shost = scsi_host_alloc(&mpt2sas_driver_template, 11980 sizeof(struct MPT3SAS_ADAPTER)); 11981 if (!shost) 11982 return -ENODEV; 11983 ioc = shost_priv(shost); 11984 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 11985 ioc->hba_mpi_version_belonged = hba_mpi_version; 11986 ioc->id = mpt2_ids++; 11987 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); 11988 switch (pdev->device) { 11989 case MPI2_MFGPAGE_DEVID_SSS6200: 11990 ioc->is_warpdrive = 1; 11991 ioc->hide_ir_msg = 1; 11992 break; 11993 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 11994 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 11995 ioc->is_mcpu_endpoint = 1; 11996 break; 11997 default: 11998 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; 11999 break; 12000 } 12001 12002 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12003 ioc->multipath_on_hba = 0; 
12004 else 12005 ioc->multipath_on_hba = 1; 12006 12007 break; 12008 case MPI25_VERSION: 12009 case MPI26_VERSION: 12010 /* Use mpt3sas driver host template for SAS 3.0 HBA's */ 12011 shost = scsi_host_alloc(&mpt3sas_driver_template, 12012 sizeof(struct MPT3SAS_ADAPTER)); 12013 if (!shost) 12014 return -ENODEV; 12015 ioc = shost_priv(shost); 12016 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 12017 ioc->hba_mpi_version_belonged = hba_mpi_version; 12018 ioc->id = mpt3_ids++; 12019 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); 12020 switch (pdev->device) { 12021 case MPI26_MFGPAGE_DEVID_SAS3508: 12022 case MPI26_MFGPAGE_DEVID_SAS3508_1: 12023 case MPI26_MFGPAGE_DEVID_SAS3408: 12024 case MPI26_MFGPAGE_DEVID_SAS3516: 12025 case MPI26_MFGPAGE_DEVID_SAS3516_1: 12026 case MPI26_MFGPAGE_DEVID_SAS3416: 12027 case MPI26_MFGPAGE_DEVID_SAS3616: 12028 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 12029 ioc->is_gen35_ioc = 1; 12030 break; 12031 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 12032 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 12033 dev_err(&pdev->dev, 12034 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", 12035 pdev->device, pdev->subsystem_vendor, 12036 pdev->subsystem_device); 12037 return 1; 12038 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 12039 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 12040 dev_err(&pdev->dev, 12041 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", 12042 pdev->device, pdev->subsystem_vendor, 12043 pdev->subsystem_device); 12044 return 1; 12045 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 12046 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 12047 dev_info(&pdev->dev, 12048 "HBA is in Configurable Secure mode\n"); 12049 fallthrough; 12050 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 12051 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 12052 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; 12053 break; 12054 default: 12055 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; 12056 } 12057 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 
12058 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 12059 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { 12060 ioc->combined_reply_queue = 1; 12061 if (ioc->is_gen35_ioc) 12062 ioc->combined_reply_index_count = 12063 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; 12064 else 12065 ioc->combined_reply_index_count = 12066 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; 12067 } 12068 12069 switch (ioc->is_gen35_ioc) { 12070 case 0: 12071 if (multipath_on_hba == -1 || multipath_on_hba == 0) 12072 ioc->multipath_on_hba = 0; 12073 else 12074 ioc->multipath_on_hba = 1; 12075 break; 12076 case 1: 12077 if (multipath_on_hba == -1 || multipath_on_hba > 0) 12078 ioc->multipath_on_hba = 1; 12079 else 12080 ioc->multipath_on_hba = 0; 12081 break; 12082 default: 12083 break; 12084 } 12085 12086 break; 12087 default: 12088 return -ENODEV; 12089 } 12090 12091 INIT_LIST_HEAD(&ioc->list); 12092 spin_lock(&gioc_lock); 12093 list_add_tail(&ioc->list, &mpt3sas_ioc_list); 12094 spin_unlock(&gioc_lock); 12095 ioc->shost = shost; 12096 ioc->pdev = pdev; 12097 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 12098 ioc->tm_cb_idx = tm_cb_idx; 12099 ioc->ctl_cb_idx = ctl_cb_idx; 12100 ioc->base_cb_idx = base_cb_idx; 12101 ioc->port_enable_cb_idx = port_enable_cb_idx; 12102 ioc->transport_cb_idx = transport_cb_idx; 12103 ioc->scsih_cb_idx = scsih_cb_idx; 12104 ioc->config_cb_idx = config_cb_idx; 12105 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 12106 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; 12107 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 12108 ioc->logging_level = logging_level; 12109 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 12110 /* Host waits for minimum of six seconds */ 12111 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; 12112 /* 12113 * Enable MEMORY MOVE support flag. 12114 */ 12115 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; 12116 /* Enable ADDITIONAL QUERY support flag. 
*/ 12117 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; 12118 12119 ioc->enable_sdev_max_qd = enable_sdev_max_qd; 12120 12121 /* misc semaphores and spin locks */ 12122 mutex_init(&ioc->reset_in_progress_mutex); 12123 /* initializing pci_access_mutex lock */ 12124 mutex_init(&ioc->pci_access_mutex); 12125 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 12126 spin_lock_init(&ioc->scsi_lookup_lock); 12127 spin_lock_init(&ioc->sas_device_lock); 12128 spin_lock_init(&ioc->sas_node_lock); 12129 spin_lock_init(&ioc->fw_event_lock); 12130 spin_lock_init(&ioc->raid_device_lock); 12131 spin_lock_init(&ioc->pcie_device_lock); 12132 spin_lock_init(&ioc->diag_trigger_lock); 12133 12134 INIT_LIST_HEAD(&ioc->sas_device_list); 12135 INIT_LIST_HEAD(&ioc->sas_device_init_list); 12136 INIT_LIST_HEAD(&ioc->sas_expander_list); 12137 INIT_LIST_HEAD(&ioc->enclosure_list); 12138 INIT_LIST_HEAD(&ioc->pcie_device_list); 12139 INIT_LIST_HEAD(&ioc->pcie_device_init_list); 12140 INIT_LIST_HEAD(&ioc->fw_event_list); 12141 INIT_LIST_HEAD(&ioc->raid_device_list); 12142 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 12143 INIT_LIST_HEAD(&ioc->delayed_tr_list); 12144 INIT_LIST_HEAD(&ioc->delayed_sc_list); 12145 INIT_LIST_HEAD(&ioc->delayed_event_ack_list); 12146 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 12147 INIT_LIST_HEAD(&ioc->reply_queue_list); 12148 INIT_LIST_HEAD(&ioc->port_table_list); 12149 12150 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); 12151 12152 /* init shost parameters */ 12153 shost->max_cmd_len = 32; 12154 shost->max_lun = max_lun; 12155 shost->transportt = mpt3sas_transport_template; 12156 shost->unique_id = ioc->id; 12157 12158 if (ioc->is_mcpu_endpoint) { 12159 /* mCPU MPI support 64K max IO */ 12160 shost->max_sectors = 128; 12161 ioc_info(ioc, "The max_sectors value is set to %d\n", 12162 shost->max_sectors); 12163 } else { 12164 if (max_sectors != 0xFFFF) { 12165 if (max_sectors < 64) { 12166 shost->max_sectors = 64; 12167 ioc_warn(ioc, 
"Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", 12168 max_sectors); 12169 } else if (max_sectors > 32767) { 12170 shost->max_sectors = 32767; 12171 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", 12172 max_sectors); 12173 } else { 12174 shost->max_sectors = max_sectors & 0xFFFE; 12175 ioc_info(ioc, "The max_sectors value is set to %d\n", 12176 shost->max_sectors); 12177 } 12178 } 12179 } 12180 /* register EEDP capabilities with SCSI layer */ 12181 if (prot_mask >= 0) 12182 scsi_host_set_prot(shost, (prot_mask & 0x07)); 12183 else 12184 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 12185 | SHOST_DIF_TYPE2_PROTECTION 12186 | SHOST_DIF_TYPE3_PROTECTION); 12187 12188 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 12189 12190 /* event thread */ 12191 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 12192 "fw_event_%s%d", ioc->driver_name, ioc->id); 12193 ioc->firmware_event_thread = alloc_ordered_workqueue( 12194 ioc->firmware_event_name, 0); 12195 if (!ioc->firmware_event_thread) { 12196 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12197 __FILE__, __LINE__, __func__); 12198 rv = -ENODEV; 12199 goto out_thread_fail; 12200 } 12201 12202 ioc->is_driver_loading = 1; 12203 if ((mpt3sas_base_attach(ioc))) { 12204 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12205 __FILE__, __LINE__, __func__); 12206 rv = -ENODEV; 12207 goto out_attach_fail; 12208 } 12209 12210 if (ioc->is_warpdrive) { 12211 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) 12212 ioc->hide_drives = 0; 12213 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) 12214 ioc->hide_drives = 1; 12215 else { 12216 if (mpt3sas_get_num_volumes(ioc)) 12217 ioc->hide_drives = 1; 12218 else 12219 ioc->hide_drives = 0; 12220 } 12221 } else 12222 ioc->hide_drives = 0; 12223 12224 shost->host_tagset = 0; 12225 shost->nr_hw_queues = 1; 12226 12227 if (ioc->is_gen35_ioc && 
	    ioc->reply_queue_count > 1 &&
	    host_tagset_enable && ioc->smp_affinity_enable) {

		/* expose one hw queue per non-high-iops reply queue to blk-mq */
		shost->host_tagset = 1;
		shost->nr_hw_queues =
		    ioc->reply_queue_count - ioc->high_iops_queues;

		dev_info(&ioc->pdev->dev,
		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
		    shost->can_queue, shost->nr_hw_queues);
	}

	rv = scsi_add_host(shost, &pdev->dev);
	if (rv) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out_add_shost_fail;
	}

	scsi_scan_host(shost);
	mpt3sas_setup_debugfs(ioc);
	return 0;
	/* unwind in reverse order of acquisition */
out_add_shost_fail:
	mpt3sas_base_detach(ioc);
out_attach_fail:
	destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
	return rv;
}

/**
 * scsih_suspend - power management suspend main entry point
 * @dev: Device struct
 *
 * Quiesces the adapter before the system enters a low-power state:
 * stops the watchdog, flushes deferred work, blocks new SCSI I/O,
 * shuts down attached NVMe devices and frees the HBA's resources.
 *
 * Return: 0 success, anything else error.
 */
static int __maybe_unused
scsih_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (rc)
		return rc;

	mpt3sas_base_stop_watchdog(ioc);
	/* NOTE(review): flush_scheduled_work() flushes the global system
	 * workqueue, not just this driver's work — confirm this is intended
	 * rather than flushing ioc->firmware_event_thread.
	 */
	flush_scheduled_work();
	scsi_block_requests(shost);
	_scsih_nvme_shutdown(ioc);
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
	    pdev, pci_name(pdev));

	mpt3sas_base_free_resources(ioc);
	return 0;
}

/**
 * scsih_resume - power management resume main entry point
 * @dev: Device struct
 *
 * Return: 0 success, anything else error.
 */
static int __maybe_unused
scsih_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
	if (r)
		return r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);

	ioc->pdev = pdev;
	/* re-acquire the resources released in scsih_suspend() */
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;
	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	/* reset succeeded (or was attempted) — let I/O flow again */
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}

/**
 * scsih_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		/* non-fatal: device still accessible, try to recover in place */
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* unknown channel state: request a reset as the safe default */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * scsih_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: slot reset callback!!\n");

	/* leave error-recovery mode and re-map the freshly reset device */
	ioc->pci_error_recovery = 0;
	ioc->pdev = pdev;
	pci_restore_state(pdev);
	rc = mpt3sas_base_map_resources(ioc);
	if (rc)
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

	ioc_warn(ioc, "hard reset: %s\n",
	    (rc == 0) ? "success" : "failed");

	if (!rc)
		return PCI_ERS_RESULT_RECOVERED;
	else
		return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * scsih_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation. Use completion to allow
 * halted scsi ops to resume.
 */
static void
scsih_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc_info(ioc, "PCI error: resume callback!!\n");

	/* restart watchdog polling and let blocked SCSI I/O flow again */
	mpt3sas_base_start_watchdog(ioc);
	scsi_unblock_requests(ioc->shost);
}

/**
 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This called only if scsih_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
 * @sdev: scsi device struct
 *
 * This is called when a user indicates they would like to enable
 * ncq command priorities. This works only on SATA devices.
 *
 * Return: true if the device reports NCQ priority support, else false
 * (also false on VPD-unsupported devices and allocation failure).
 */
bool scsih_ncq_prio_supp(struct scsi_device *sdev)
{
	unsigned char *buf;
	bool ncq_prio_supp = false;

	if (!scsi_device_supports_vpd(sdev))
		return ncq_prio_supp;

	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
	if (!buf)
		return ncq_prio_supp;

	/* VPD page 0x89 (ATA Information) embeds IDENTIFY DEVICE data;
	 * byte 213 bit 4 presumably maps to IDENTIFY word 76 bit 12
	 * (NCQ priority) — NOTE(review): confirm offset against ACS spec.
	 */
	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
		ncq_prio_supp = (buf[213] >> 4) & 1;

	kfree(buf);
	return ncq_prio_supp;
}
/*
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 */
/* PCI ID table consumed by the PCI core for probe matching; entries are
 * grouped by controller family (MPI 2.0 SAS2, MPI 2.5/2.6 SAS3 and NVMe
 * capable parts). The zeroed entry terminates the table.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);

/* PCI AER recovery callbacks wired into the pci_driver below */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};

static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};

/**
 * scsih_init - main entry point for this driver.
 *
 * Registers the driver's completion callbacks with the base layer and
 * sets up debugfs. Each *_cb_idx stores the handle needed to release
 * the callback again in scsih_exit().
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback hander */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* target reset (device removal) handlers */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}

/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback handler registered in scsih_init(), drops the
 * raid class templates attached per the hbas_to_enumerate policy, and
 * tears down the transport template and debugfs.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}

/**
 * _mpt3sas_init - main entry point for this driver.
 *
 * Return: 0 success, anything else error.
12721 */ 12722 static int __init 12723 _mpt3sas_init(void) 12724 { 12725 int error; 12726 12727 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, 12728 MPT3SAS_DRIVER_VERSION); 12729 12730 mpt3sas_transport_template = 12731 sas_attach_transport(&mpt3sas_transport_functions); 12732 if (!mpt3sas_transport_template) 12733 return -ENODEV; 12734 12735 /* No need attach mpt3sas raid functions template 12736 * if hbas_to_enumarate value is one. 12737 */ 12738 if (hbas_to_enumerate != 1) { 12739 mpt3sas_raid_template = 12740 raid_class_attach(&mpt3sas_raid_functions); 12741 if (!mpt3sas_raid_template) { 12742 sas_release_transport(mpt3sas_transport_template); 12743 return -ENODEV; 12744 } 12745 } 12746 12747 /* No need to attach mpt2sas raid functions template 12748 * if hbas_to_enumarate value is two 12749 */ 12750 if (hbas_to_enumerate != 2) { 12751 mpt2sas_raid_template = 12752 raid_class_attach(&mpt2sas_raid_functions); 12753 if (!mpt2sas_raid_template) { 12754 sas_release_transport(mpt3sas_transport_template); 12755 return -ENODEV; 12756 } 12757 } 12758 12759 error = scsih_init(); 12760 if (error) { 12761 scsih_exit(); 12762 return error; 12763 } 12764 12765 mpt3sas_ctl_init(hbas_to_enumerate); 12766 12767 error = pci_register_driver(&mpt3sas_driver); 12768 if (error) 12769 scsih_exit(); 12770 12771 return error; 12772 } 12773 12774 /** 12775 * _mpt3sas_exit - exit point for this driver (when it is a module). 12776 * 12777 */ 12778 static void __exit 12779 _mpt3sas_exit(void) 12780 { 12781 pr_info("mpt3sas version %s unloading\n", 12782 MPT3SAS_DRIVER_VERSION); 12783 12784 mpt3sas_ctl_exit(hbas_to_enumerate); 12785 12786 pci_unregister_driver(&mpt3sas_driver); 12787 12788 scsih_exit(); 12789 } 12790 12791 module_init(_mpt3sas_init); 12792 module_exit(_mpt3sas_exit); 12793