1 /* 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 3 * 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c 5 * Copyright (C) 2012-2014 LSI Corporation 6 * Copyright (C) 2013-2014 Avago Technologies 7 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

/* Virtual channel numbers used to address RAID volumes and NVMe (PCIe)
 * devices on the shost; channel 0 is the plain SAS/SATA channel.
 */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/* Callback indexes handed back by the base driver at registration time.
 * -1 stored in a u8 is 0xFF — presumably "not yet registered"; the real
 * values are assigned during module init (registration not visible in this
 * chunk — TODO confirm).
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0444);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0444);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0444);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0444);
MODULE_PARM_DESC(hbas_to_enumerate,
		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
		1 - enumerates only SAS 2.0 generation HBAs\n \
		2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Any combination of bits can be set
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0444);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0444);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

static bool enable_sdev_max_qd;
module_param(enable_sdev_max_qd, bool, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd,
	"Enable sdev max qd as can_queue, def=disabled(0)");

static int multipath_on_hba = -1;
module_param(multipath_on_hba, int, 0);
MODULE_PARM_DESC(multipath_on_hba,
	"Multipath support to add same target device\n\t\t"
	"as many times as it is visible to HBA from various paths\n\t\t"
	"(by default:\n\t\t"
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");

static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable,
	"Shared host tagset enable/disable Default: enable(1)");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;


/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/* Pseudo event codes queued on the firmware-event work list; chosen from
 * the top of the u16 range so they cannot collide with real MPI2_EVENT_XXX
 * values — presumably; verify against mpi2_ioc.h.
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head list;
	struct work_struct work;

	struct MPT3SAS_ADAPTER *ioc;
	u16 device_handle;
	u8 VF_ID;
	u8 VP_ID;
	u8 ignore;
	u16 event;
	struct kref refcount;
	/* flexible array member: event payload copied in by the producer */
	char event_data[] __aligned(4);
};

/* kref release callback: frees the event (and trailing payload). */
static void fw_event_work_free(struct kref *r)
{
	kfree(container_of(r, struct fw_event_work, refcount));
}

/* Take a reference on a firmware event. */
static void fw_event_work_get(struct fw_event_work *fw_work)
{
	kref_get(&fw_work->refcount);
}

/* Drop a reference; frees the event when the last reference goes away. */
static void fw_event_work_put(struct fw_event_work *fw_work)
{
	kref_put(&fw_work->refcount, fw_event_work_free);
}

/* Allocate a firmware event with @len bytes of payload; refcount starts
 * at 1.  GFP_ATOMIC because callers may be in interrupt context —
 * presumably; confirm against callers.
 */
static struct fw_event_work *alloc_fw_event_work(int len)
{
	struct fw_event_work *fw_event;

	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
	if (!fw_event)
		return NULL;

	kref_init(&fw_event->refcount);
	return fw_event;
}

/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb:
cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	/* the following bits are only valid when 'valid_reply = 1' */
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};

/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: string written to the module parameter, parsed by param_set_int()
 * @kp: kernel_param descriptor for logging_level
 *
 * Setter for the writable logging_level module parameter: stores the new
 * value and propagates it to every IOC on mpt3sas_ioc_list under gioc_lock.
 *
 * Note: The logging levels are defined in mpt3sas_debug.h.
 */
static int
_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	pr_info("setting logging_level(0x%08x)\n", logging_level);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->logging_level = logging_level;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
	&logging_level, 0644);

/**
 * _scsih_srch_boot_sas_address - search based on sas_address
 * @sas_address: sas address
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
	Mpi2BootDeviceSasWwid_t *boot_device)
{
	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in IDENTIFY frame
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_device_name(u64 device_name,
	Mpi2BootDeviceDeviceName_t *boot_device)
{
	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
}

/**
 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
 * @enclosure_logical_id: enclosure logical id
 * @slot_number: slot number
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
	Mpi2BootDeviceEnclosureSlot_t *boot_device)
{
	return (enclosure_logical_id == le64_to_cpu(boot_device->
	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
	    SlotNumber)) ? 1 : 0;
}

/**
 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
 *			    port number from port list
 * @ioc: per adapter object
 * @port_id: port number
 * @bypass_dirty_port_flag: when set look the matching hba port entry even
 *			    if hba port entry is marked as dirty.
 *
 * Search for hba port entry corresponding to provided port number,
 * if available return port object otherwise return NULL.
 */
struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
	u8 port_id, u8 bypass_dirty_port_flag)
{
	struct hba_port *port, *port_next;

	/*
	 * When multipath_on_hba is disabled then
	 * search the hba_port entry using default
	 * port id i.e. 255
	 */
	if (!ioc->multipath_on_hba)
		port_id = MULTIPATH_DISABLED_PORT_ID;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (port->port_id != port_id)
			continue;
		if (bypass_dirty_port_flag)
			return port;
		/* skip entries marked dirty unless caller asked to bypass */
		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
			continue;
		return port;
	}

	/*
	 * Allocate hba_port object for default port id (i.e. 255)
	 * when multipath_on_hba is disabled for the HBA.
	 * And add this object to port_table_list.
	 */
	if (!ioc->multipath_on_hba) {
		port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
		if (!port)
			return NULL;

		port->port_id = port_id;
		ioc_info(ioc,
		    "hba_port entry: %p, port: %d is added to hba_port list\n",
		    port, port->port_id);
		list_add_tail(&port->list,
		    &ioc->port_table_list);
		return port;
	}
	return NULL;
}

/**
 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
 * @ioc: per adapter object
 * @port: hba_port object
 * @phy: phy number
 *
 * Return virtual_phy object corresponding to phy number.
 */
struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port, u32 phy)
{
	struct virtual_phy *vphy, *vphy_next;

	if (!port->vphys_mask)
		return NULL;

	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
		if (vphy->phy_mask & (1 << phy))
			return vphy;
	}
	return NULL;
}

/**
 * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
static int
_scsih_is_boot_device(u64 sas_address, u64 device_name,
	u64 enclosure_logical_id, u16 slot, u8 form,
	Mpi2BiosPage2BootDevice_t *boot_device)
{
	int rc = 0;

	/* dispatch on the BIOS page 2 "form" to the matching compare helper;
	 * a zero key (no address/name/enclosure) never matches
	 */
	switch (form) {
	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
		if (!sas_address)
			break;
		rc = _scsih_srch_boot_sas_address(
		    sas_address, &boot_device->SasWwid);
		break;
	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
		if (!enclosure_logical_id)
			break;
		rc = _scsih_srch_boot_encl_slot(
		    enclosure_logical_id,
		    slot, &boot_device->EnclosureSlot);
		break;
	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
		if (!device_name)
			break;
		rc = _scsih_srch_boot_device_name(
		    device_name, &boot_device->DeviceName);
		break;
	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
		break;
	}

	return rc;
}

/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address
 *
 * Return: 0 success, non-zero when failure
 */
static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u64 *sas_address)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u32 ioc_status;

	*sas_address = 0;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return -ENXIO;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		/* For HBA, vSES doesn't return HBA SAS address. Instead return
		 * vSES's sas address.
		 */
		if ((handle <= ioc->sas_hba.num_phys) &&
		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
		   MPI2_SAS_DEVICE_INFO_SEP)))
			*sas_address = ioc->sas_hba.sas_address;
		else
			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
		return 0;
	}

	/* we hit this because the given parent handle doesn't exist */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return -ENXIO;

	/* else error case */
	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
		handle, ioc_status, __FILE__, __LINE__, __func__);
	return -EIO;
}

/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* extract the lookup keys; @channel tells us which concrete type
	 * the opaque @device pointer really is
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}

/* Lock-held variant: returns the target's cached sas_device with an extra
 * reference taken, or NULL.  Caller must hold ioc->sas_device_lock and
 * eventually call sas_device_put() on the result.
 */
static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;

	assert_spin_locked(&ioc->sas_device_lock);

	ret = tgt_priv->sas_dev;
	if (ret)
		sas_device_get(ret);

	return ret;
}

/* Locking wrapper around __mpt3sas_get_sdev_from_target(). */
static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return ret;
}

/* Lock-held variant: returns the target's cached pcie_device with an extra
 * reference taken, or NULL.  Caller must hold ioc->pcie_device_lock.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;

	assert_spin_locked(&ioc->pcie_device_lock);

	ret = tgt_priv->pcie_dev;
	if (ret)
		pcie_device_get(ret);

	return ret;
}

/**
 * mpt3sas_get_pdev_from_target - pcie device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device from target, then return pcie_device object.
 */
static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return ret;
}


/**
 * __mpt3sas_get_sdev_by_rphy - sas device search
 * @ioc: per adapter object
 * @rphy: sas_rphy pointer
 *
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * This searches for sas_device from rphy object
 * then return sas_device object with an extra reference taken
 * (caller releases via sas_device_put()).
 */
struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
	struct sas_rphy *rphy)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	/* devices already exposed to the SML */
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->rphy != rphy)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	/* devices still on the driver-load-time init list */
	sas_device = NULL;
	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
		if (sas_device->rphy != rphy)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	return NULL;
}

/**
 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to
 *				provided sas address from sas_device_list list
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 *
 * Search for _sas_device object corresponding to provided sas address,
 * if available return _sas_device object address otherwise return NULL.
 */
struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;

	if (!port)
		return NULL;

	assert_spin_locked(&ioc->sas_device_lock);

	/* match on both sas address and hba port; reference is taken for
	 * the caller (release with sas_device_put())
	 */
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->sas_address != sas_address)
			continue;
		if (sas_device->port != port)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	/* also search the driver-load-time init list */
	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
		if (sas_device->sas_address != sas_address)
			continue;
		if (sas_device->port != port)
			continue;
		sas_device_get(sas_device);
		return sas_device;
	}

	return NULL;
}

/**
 * mpt3sas_get_sdev_by_addr - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 * Context: This function acquires and releases ioc->sas_device_lock
 * internally.
 *
 * This searches for sas_device based on sas_address & port number,
 * then return sas_device object.
 */
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/* Lock-held search by firmware handle over both the live list and the
 * driver-load-time init list; returns the device with an extra reference
 * taken, or NULL.  Caller must hold ioc->sas_device_lock.
 */
static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;

	assert_spin_locked(&ioc->sas_device_lock);

	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
		if (sas_device->handle == handle)
			goto found_device;

	return NULL;

 found_device:
	sas_device_get(sas_device);
	return sas_device;
}

/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: This function acquires and releases ioc->sas_device_lock
 * internally.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object.
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Prints enclosure/slot, enclosure level/connector name, and chassis slot
 * for @sas_device.  The output sink is chosen by priority: @sdev if set,
 * else @starget if set, else the ioc log.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}

/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks the device up by firmware handle, unlinks it from the list under
 * the lock, then tears it down outside the lock.  No-op while the host
 * is in recovery.
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the list's reference */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the lookup's reference */
		sas_device_put(sas_device);
	}
}

/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					  sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 *
 * Return nothing.
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
	if (sas_device) {
		list_del_init(&sas_device->list);
		/* drop the list's reference */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		/* drop the lookup's reference */
		sas_device_put(sas_device);
	}
}

/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* the list holds its own reference on the device */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	/* on transport-port add failure, undo the list insertion above */
	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	    sas_device->sas_address_parent, sas_device->port)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* init list owns a reference until the device is reported to SML */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/*
 * __mpt3sas_get_pdev_by_wwid - pcie device search by wwid, lock held.
 *
 * Caller must hold ioc->pcie_device_lock.  Searches both the live list
 * and the init list; on a match takes a reference and returns the
 * device, otherwise returns NULL.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	return NULL;

found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then return pcie_device object.
1125 */ 1126 static struct _pcie_device * 1127 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1128 { 1129 struct _pcie_device *pcie_device; 1130 unsigned long flags; 1131 1132 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1133 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 1134 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1135 1136 return pcie_device; 1137 } 1138 1139 1140 static struct _pcie_device * 1141 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, 1142 int channel) 1143 { 1144 struct _pcie_device *pcie_device; 1145 1146 assert_spin_locked(&ioc->pcie_device_lock); 1147 1148 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1149 if (pcie_device->id == id && pcie_device->channel == channel) 1150 goto found_device; 1151 1152 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1153 if (pcie_device->id == id && pcie_device->channel == channel) 1154 goto found_device; 1155 1156 return NULL; 1157 1158 found_device: 1159 pcie_device_get(pcie_device); 1160 return pcie_device; 1161 } 1162 1163 static struct _pcie_device * 1164 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1165 { 1166 struct _pcie_device *pcie_device; 1167 1168 assert_spin_locked(&ioc->pcie_device_lock); 1169 1170 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1171 if (pcie_device->handle == handle) 1172 goto found_device; 1173 1174 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1175 if (pcie_device->handle == handle) 1176 goto found_device; 1177 1178 return NULL; 1179 1180 found_device: 1181 pcie_device_get(pcie_device); 1182 return pcie_device; 1183 } 1184 1185 1186 /** 1187 * mpt3sas_get_pdev_by_handle - pcie device search 1188 * @ioc: per adapter object 1189 * @handle: Firmware device handle 1190 * 1191 * Context: This function will acquire ioc->pcie_device_lock and will release 1192 * before returning the pcie_device object. 
 *
 * This searches for pcie_device based on handle, then return pcie_device
 * object.
 */
struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	return pcie_device;
}

/**
 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
 * @ioc: per adapter object
 * Context: This function will acquire ioc->pcie_device_lock
 *
 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
 * which has reported maximum among all available NVMe drives.
 * Minimum max_shutdown_latency will be six seconds.
 */
static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	/* floor value used when no drive reports a larger latency */
	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (pcie_device->shutdown_latency) {
			if (shutdown_latency < pcie_device->shutdown_latency)
				shutdown_latency =
					pcie_device->shutdown_latency;
		}
	}
	ioc->max_shutdown_latency = shutdown_latency;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		pcie_device_put(pcie_device);	/* drop the list's reference */
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}


/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Looks the device up under pcie_device_lock, unlinks it, then removes
 * it from the SCSI mid layer outside the lock.  No-op during host
 * recovery.
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* drop the list's reference; lookup ref still held */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);	/* drop the lookup reference */
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}

/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.
 */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* the list owns a reference while the device is linked */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		/* blocked devices are kept on the list but not exposed */
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
			/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adding new object at driver load time to the ioc->pcie_device_init_list.
 */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/* init list owns a reference until the device is reported to SML */
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	/* blocked devices must not be considered as boot candidates */
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 * Context: Calling function should acquire ioc->raid_device_lock
 *
 * This searches for raid_device based on target id, then return raid_device
 * object.
1429 */ 1430 static struct _raid_device * 1431 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) 1432 { 1433 struct _raid_device *raid_device, *r; 1434 1435 r = NULL; 1436 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1437 if (raid_device->id == id && raid_device->channel == channel) { 1438 r = raid_device; 1439 goto out; 1440 } 1441 } 1442 1443 out: 1444 return r; 1445 } 1446 1447 /** 1448 * mpt3sas_raid_device_find_by_handle - raid device search 1449 * @ioc: per adapter object 1450 * @handle: sas device handle (assigned by firmware) 1451 * Context: Calling function should acquire ioc->raid_device_lock 1452 * 1453 * This searches for raid_device based on handle, then return raid_device 1454 * object. 1455 */ 1456 struct _raid_device * 1457 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1458 { 1459 struct _raid_device *raid_device, *r; 1460 1461 r = NULL; 1462 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1463 if (raid_device->handle != handle) 1464 continue; 1465 r = raid_device; 1466 goto out; 1467 } 1468 1469 out: 1470 return r; 1471 } 1472 1473 /** 1474 * _scsih_raid_device_find_by_wwid - raid device search 1475 * @ioc: per adapter object 1476 * @wwid: ? 1477 * Context: Calling function should acquire ioc->raid_device_lock 1478 * 1479 * This searches for raid_device based on wwid, then return raid_device 1480 * object. 
 */
static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _raid_device *raid_device, *r;

	r = NULL;
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid != wwid)
			continue;
		r = raid_device;
		goto out;
	}

 out:
	return r;
}

/**
 * _scsih_raid_device_add - add raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is added to the raid_device_list link list.
 */
static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    raid_device->handle, (u64)raid_device->wwid));

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_raid_device_remove - delete raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * Unlinks and frees the object; raid devices are not reference counted,
 * so the kfree() happens here, under raid_device_lock.
 */
static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_del(&raid_device->list);
	kfree(raid_device);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * mpt3sas_scsih_expander_find_by_handle - expander device search
 * @ioc: per adapter object
 * @handle: expander handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_device_lock
 *
 * This searches for expander device based on handle, then returns the
 * sas_node object.
1548 */ 1549 struct _sas_node * 1550 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1551 { 1552 struct _sas_node *sas_expander, *r; 1553 1554 r = NULL; 1555 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1556 if (sas_expander->handle != handle) 1557 continue; 1558 r = sas_expander; 1559 goto out; 1560 } 1561 out: 1562 return r; 1563 } 1564 1565 /** 1566 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search 1567 * @ioc: per adapter object 1568 * @handle: enclosure handle (assigned by firmware) 1569 * Context: Calling function should acquire ioc->sas_device_lock 1570 * 1571 * This searches for enclosure device based on handle, then returns the 1572 * enclosure object. 1573 */ 1574 static struct _enclosure_node * 1575 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1576 { 1577 struct _enclosure_node *enclosure_dev, *r; 1578 1579 r = NULL; 1580 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { 1581 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) 1582 continue; 1583 r = enclosure_dev; 1584 goto out; 1585 } 1586 out: 1587 return r; 1588 } 1589 /** 1590 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1591 * @ioc: per adapter object 1592 * @sas_address: sas address 1593 * @port: hba port entry 1594 * Context: Calling function should acquire ioc->sas_node_lock. 1595 * 1596 * This searches for expander device based on sas_address & port number, 1597 * then returns the sas_node object. 
1598 */ 1599 struct _sas_node * 1600 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 1601 u64 sas_address, struct hba_port *port) 1602 { 1603 struct _sas_node *sas_expander, *r = NULL; 1604 1605 if (!port) 1606 return r; 1607 1608 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1609 if (sas_expander->sas_address != sas_address) 1610 continue; 1611 if (sas_expander->port != port) 1612 continue; 1613 r = sas_expander; 1614 goto out; 1615 } 1616 out: 1617 return r; 1618 } 1619 1620 /** 1621 * _scsih_expander_node_add - insert expander device to the list. 1622 * @ioc: per adapter object 1623 * @sas_expander: the sas_device object 1624 * Context: This function will acquire ioc->sas_node_lock. 1625 * 1626 * Adding new object to the ioc->sas_expander_list. 1627 */ 1628 static void 1629 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, 1630 struct _sas_node *sas_expander) 1631 { 1632 unsigned long flags; 1633 1634 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1635 list_add_tail(&sas_expander->list, &ioc->sas_expander_list); 1636 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1637 } 1638 1639 /** 1640 * _scsih_is_end_device - determines if device is an end device 1641 * @device_info: bitfield providing information about the device. 1642 * Context: none 1643 * 1644 * Return: 1 if end device. 1645 */ 1646 static int 1647 _scsih_is_end_device(u32 device_info) 1648 { 1649 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && 1650 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | 1651 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | 1652 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) 1653 return 1; 1654 else 1655 return 0; 1656 } 1657 1658 /** 1659 * _scsih_is_nvme_pciescsi_device - determines if 1660 * device is an pcie nvme/scsi device 1661 * @device_info: bitfield providing information about the device. 1662 * Context: none 1663 * 1664 * Returns 1 if device is pcie device type nvme/scsi. 
 */
static int
_scsih_is_nvme_pciescsi_device(u32 device_info)
{
	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
	    == MPI26_PCIE_DEVINFO_NVME) ||
	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
	    == MPI26_PCIE_DEVINFO_SCSI))
		return 1;
	else
		return 0;
}

/**
 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel
 * Context: none.  NOTE(review): despite the historical claim that this
 * acquires ioc->scsi_lookup_lock, no lock is taken here; the smids are
 * walked lockless — confirm callers tolerate concurrent completion.
 *
 * This will search for a matching channel:id in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
	int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	/* smid 0 is reserved; walk every possible outstanding command */
	for (smid = 1;
	     smid <= ioc->shost->can_queue; smid++) {
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel)
			return 1;
	}
	return 0;
}

/**
 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
 * @ioc: per adapter object
 * @id: target id
 * @lun: lun number
 * @channel: channel
 * Context: none (no lock is taken; see note on the _by_target variant).
 *
 * This will search for a matching channel:id:lun in the scsi_lookup array,
 * returning 1 if found.
 */
static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
	unsigned int lun, int channel)
{
	int smid;
	struct scsi_cmnd *scmd;

	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (scmd->device->id == id &&
		    scmd->device->channel == channel &&
		    scmd->device->lun == lun)
			return 1;
	}
	return 0;
}

/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the smid stored scmd pointer.
 * Then will dereference the stored scmd pointer.
 */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* smids are 1-based, block-layer tags 0-based */

	/* only SCSI IO smids map to block tags; internal smids are excluded */
	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* rebuild the blk-mq unique tag from hw queue number + tag */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle filed must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* tracker not claimed by the driver -> not ours */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}

/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Return: queue depth.
 */
static int
scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	unsigned long flags;

	max_depth = shost->can_queue;

	/*
	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
	 * is disabled.
	 */
	if (ioc->enable_sdev_max_qd)
		goto not_sata;

	sas_device_priv_data = sdev->hostdata;
	if (!sas_device_priv_data)
		goto not_sata;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	if (!sas_target_priv_data)
		goto not_sata;
	/* RAID volumes are not subject to the SATA cap */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
		goto not_sata;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device) {
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;

		sas_device_put(sas_device);	/* drop the lookup reference */
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 not_sata:

	if (!sdev->tagged_supported)
		max_depth = 1;
	if (qdepth > max_depth)
		qdepth = max_depth;
	scsi_change_queue_depth(sdev, qdepth);
	sdev_printk(KERN_INFO, sdev,
	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
	    sdev->queue_depth, sdev->tagged_supported,
	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
	return sdev->queue_depth;
}

/**
 * mpt3sas_scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing.
 */
void
mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* when sdev max qd is enabled, always ask for the host maximum */
	if (ioc->enable_sdev_max_qd)
		qdepth = shost->can_queue;

	scsih_change_queue_depth(sdev, qdepth);
}

/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
	    GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* takes a reference, kept in pcie_dev until target_destroy */
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
				    MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	/* takes a reference, kept in sas_dev until target_destroy */
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
			    MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}

/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
		    (pcie_device->id == starget->id) &&
		    (pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			/* first put: lookup ref taken just above;
			 * second put: ref stored by target_alloc()
			 */
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		/* first put: lookup ref taken just above */
		sas_device_put(sas_device);

		/* second put: ref stored by target_alloc() */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}

/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
	    GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* hide RAID member disks from upper-level drivers */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		/* sas_address holds the wwid for pcie targets (set in
		 * scsih_target_alloc())
		 */
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : sas_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			sas_device->starget = starget;
		}

		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}

/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
		    sas_target_priv_data);
		/* clear the back-pointer only when the last lun goes away */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device
= __mpt3sas_get_sdev_from_target(ioc, 2149 sas_target_priv_data); 2150 if (sas_device && !sas_target_priv_data->num_luns) 2151 sas_device->starget = NULL; 2152 2153 if (sas_device) 2154 sas_device_put(sas_device); 2155 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2156 } 2157 2158 kfree(sdev->hostdata); 2159 sdev->hostdata = NULL; 2160 } 2161 2162 /** 2163 * _scsih_display_sata_capabilities - sata capabilities 2164 * @ioc: per adapter object 2165 * @handle: device handle 2166 * @sdev: scsi device struct 2167 */ 2168 static void 2169 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, 2170 u16 handle, struct scsi_device *sdev) 2171 { 2172 Mpi2ConfigReply_t mpi_reply; 2173 Mpi2SasDevicePage0_t sas_device_pg0; 2174 u32 ioc_status; 2175 u16 flags; 2176 u32 device_info; 2177 2178 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 2179 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 2180 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2181 __FILE__, __LINE__, __func__); 2182 return; 2183 } 2184 2185 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 2186 MPI2_IOCSTATUS_MASK; 2187 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2188 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2189 __FILE__, __LINE__, __func__); 2190 return; 2191 } 2192 2193 flags = le16_to_cpu(sas_device_pg0.Flags); 2194 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 2195 2196 sdev_printk(KERN_INFO, sdev, 2197 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " 2198 "sw_preserve(%s)\n", 2199 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", 2200 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", 2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : 2202 "n", 2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", 2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", 2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? 
"y" : "n"); 2206 } 2207 2208 /* 2209 * raid transport support - 2210 * Enabled for SLES11 and newer, in older kernels the driver will panic when 2211 * unloading the driver followed by a load - I believe that the subroutine 2212 * raid_class_release() is not cleaning up properly. 2213 */ 2214 2215 /** 2216 * scsih_is_raid - return boolean indicating device is raid volume 2217 * @dev: the device struct object 2218 */ 2219 static int 2220 scsih_is_raid(struct device *dev) 2221 { 2222 struct scsi_device *sdev = to_scsi_device(dev); 2223 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2224 2225 if (ioc->is_warpdrive) 2226 return 0; 2227 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 2228 } 2229 2230 static int 2231 scsih_is_nvme(struct device *dev) 2232 { 2233 struct scsi_device *sdev = to_scsi_device(dev); 2234 2235 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; 2236 } 2237 2238 /** 2239 * scsih_get_resync - get raid volume resync percent complete 2240 * @dev: the device struct object 2241 */ 2242 static void 2243 scsih_get_resync(struct device *dev) 2244 { 2245 struct scsi_device *sdev = to_scsi_device(dev); 2246 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2247 static struct _raid_device *raid_device; 2248 unsigned long flags; 2249 Mpi2RaidVolPage0_t vol_pg0; 2250 Mpi2ConfigReply_t mpi_reply; 2251 u32 volume_status_flags; 2252 u8 percent_complete; 2253 u16 handle; 2254 2255 percent_complete = 0; 2256 handle = 0; 2257 if (ioc->is_warpdrive) 2258 goto out; 2259 2260 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2261 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2262 sdev->channel); 2263 if (raid_device) { 2264 handle = raid_device->handle; 2265 percent_complete = raid_device->percent_complete; 2266 } 2267 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2268 2269 if (!handle) 2270 goto out; 2271 2272 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2273 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2274 
sizeof(Mpi2RaidVolPage0_t))) { 2275 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2276 __FILE__, __LINE__, __func__); 2277 percent_complete = 0; 2278 goto out; 2279 } 2280 2281 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2282 if (!(volume_status_flags & 2283 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) 2284 percent_complete = 0; 2285 2286 out: 2287 2288 switch (ioc->hba_mpi_version_belonged) { 2289 case MPI2_VERSION: 2290 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 2291 break; 2292 case MPI25_VERSION: 2293 case MPI26_VERSION: 2294 raid_set_resync(mpt3sas_raid_template, dev, percent_complete); 2295 break; 2296 } 2297 } 2298 2299 /** 2300 * scsih_get_state - get raid volume level 2301 * @dev: the device struct object 2302 */ 2303 static void 2304 scsih_get_state(struct device *dev) 2305 { 2306 struct scsi_device *sdev = to_scsi_device(dev); 2307 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 2308 static struct _raid_device *raid_device; 2309 unsigned long flags; 2310 Mpi2RaidVolPage0_t vol_pg0; 2311 Mpi2ConfigReply_t mpi_reply; 2312 u32 volstate; 2313 enum raid_state state = RAID_STATE_UNKNOWN; 2314 u16 handle = 0; 2315 2316 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2317 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 2318 sdev->channel); 2319 if (raid_device) 2320 handle = raid_device->handle; 2321 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2322 2323 if (!raid_device) 2324 goto out; 2325 2326 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 2327 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 2328 sizeof(Mpi2RaidVolPage0_t))) { 2329 ioc_err(ioc, "failure at %s:%d/%s()!\n", 2330 __FILE__, __LINE__, __func__); 2331 goto out; 2332 } 2333 2334 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2335 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { 2336 state = RAID_STATE_RESYNCING; 2337 goto out; 2338 } 2339 2340 switch (vol_pg0.VolumeState) { 2341 case 
MPI2_RAID_VOL_STATE_OPTIMAL: 2342 case MPI2_RAID_VOL_STATE_ONLINE: 2343 state = RAID_STATE_ACTIVE; 2344 break; 2345 case MPI2_RAID_VOL_STATE_DEGRADED: 2346 state = RAID_STATE_DEGRADED; 2347 break; 2348 case MPI2_RAID_VOL_STATE_FAILED: 2349 case MPI2_RAID_VOL_STATE_MISSING: 2350 state = RAID_STATE_OFFLINE; 2351 break; 2352 } 2353 out: 2354 switch (ioc->hba_mpi_version_belonged) { 2355 case MPI2_VERSION: 2356 raid_set_state(mpt2sas_raid_template, dev, state); 2357 break; 2358 case MPI25_VERSION: 2359 case MPI26_VERSION: 2360 raid_set_state(mpt3sas_raid_template, dev, state); 2361 break; 2362 } 2363 } 2364 2365 /** 2366 * _scsih_set_level - set raid level 2367 * @ioc: ? 2368 * @sdev: scsi device struct 2369 * @volume_type: volume type 2370 */ 2371 static void 2372 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, 2373 struct scsi_device *sdev, u8 volume_type) 2374 { 2375 enum raid_level level = RAID_LEVEL_UNKNOWN; 2376 2377 switch (volume_type) { 2378 case MPI2_RAID_VOL_TYPE_RAID0: 2379 level = RAID_LEVEL_0; 2380 break; 2381 case MPI2_RAID_VOL_TYPE_RAID10: 2382 level = RAID_LEVEL_10; 2383 break; 2384 case MPI2_RAID_VOL_TYPE_RAID1E: 2385 level = RAID_LEVEL_1E; 2386 break; 2387 case MPI2_RAID_VOL_TYPE_RAID1: 2388 level = RAID_LEVEL_1; 2389 break; 2390 } 2391 2392 switch (ioc->hba_mpi_version_belonged) { 2393 case MPI2_VERSION: 2394 raid_set_level(mpt2sas_raid_template, 2395 &sdev->sdev_gendev, level); 2396 break; 2397 case MPI25_VERSION: 2398 case MPI26_VERSION: 2399 raid_set_level(mpt3sas_raid_template, 2400 &sdev->sdev_gendev, level); 2401 break; 2402 } 2403 } 2404 2405 2406 /** 2407 * _scsih_get_volume_capabilities - volume capabilities 2408 * @ioc: per adapter object 2409 * @raid_device: the raid_device object 2410 * 2411 * Return: 0 for success, else 1 2412 */ 2413 static int 2414 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, 2415 struct _raid_device *raid_device) 2416 { 2417 Mpi2RaidVolPage0_t *vol_pg0; 2418 Mpi2RaidPhysDiskPage0_t pd_pg0; 2419 
Mpi2SasDevicePage0_t sas_device_pg0; 2420 Mpi2ConfigReply_t mpi_reply; 2421 u16 sz; 2422 u8 num_pds; 2423 2424 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, 2425 &num_pds)) || !num_pds) { 2426 dfailprintk(ioc, 2427 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2428 __FILE__, __LINE__, __func__)); 2429 return 1; 2430 } 2431 2432 raid_device->num_pds = num_pds; 2433 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * 2434 sizeof(Mpi2RaidVol0PhysDisk_t)); 2435 vol_pg0 = kzalloc(sz, GFP_KERNEL); 2436 if (!vol_pg0) { 2437 dfailprintk(ioc, 2438 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2439 __FILE__, __LINE__, __func__)); 2440 return 1; 2441 } 2442 2443 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, 2444 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { 2445 dfailprintk(ioc, 2446 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2447 __FILE__, __LINE__, __func__)); 2448 kfree(vol_pg0); 2449 return 1; 2450 } 2451 2452 raid_device->volume_type = vol_pg0->VolumeType; 2453 2454 /* figure out what the underlying devices are by 2455 * obtaining the device_info bits for the 1st device 2456 */ 2457 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 2458 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 2459 vol_pg0->PhysDisk[0].PhysDiskNum))) { 2460 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 2461 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 2462 le16_to_cpu(pd_pg0.DevHandle)))) { 2463 raid_device->device_info = 2464 le32_to_cpu(sas_device_pg0.DeviceInfo); 2465 } 2466 } 2467 2468 kfree(vol_pg0); 2469 return 0; 2470 } 2471 2472 /** 2473 * _scsih_enable_tlr - setting TLR flags 2474 * @ioc: per adapter object 2475 * @sdev: scsi device struct 2476 * 2477 * Enabling Transaction Layer Retries for tape devices when 2478 * vpd page 0x90 is present 2479 * 2480 */ 2481 static void 2482 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) 2483 { 2484 2485 /* only for TAPE */ 2486 if (sdev->type != TYPE_TAPE) 
2487 return; 2488 2489 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) 2490 return; 2491 2492 sas_enable_tlr(sdev); 2493 sdev_printk(KERN_INFO, sdev, "TLR %s\n", 2494 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); 2495 return; 2496 2497 } 2498 2499 /** 2500 * scsih_slave_configure - device configure routine. 2501 * @sdev: scsi device struct 2502 * 2503 * Return: 0 if ok. Any other return is assumed to be an error and 2504 * the device is ignored. 2505 */ 2506 static int 2507 scsih_slave_configure(struct scsi_device *sdev) 2508 { 2509 struct Scsi_Host *shost = sdev->host; 2510 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2511 struct MPT3SAS_DEVICE *sas_device_priv_data; 2512 struct MPT3SAS_TARGET *sas_target_priv_data; 2513 struct _sas_device *sas_device; 2514 struct _pcie_device *pcie_device; 2515 struct _raid_device *raid_device; 2516 unsigned long flags; 2517 int qdepth; 2518 u8 ssp_target = 0; 2519 char *ds = ""; 2520 char *r_level = ""; 2521 u16 handle, volume_handle = 0; 2522 u64 volume_wwid = 0; 2523 2524 qdepth = 1; 2525 sas_device_priv_data = sdev->hostdata; 2526 sas_device_priv_data->configured_lun = 1; 2527 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; 2528 sas_target_priv_data = sas_device_priv_data->sas_target; 2529 handle = sas_target_priv_data->handle; 2530 2531 /* raid volume handling */ 2532 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { 2533 2534 spin_lock_irqsave(&ioc->raid_device_lock, flags); 2535 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 2536 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 2537 if (!raid_device) { 2538 dfailprintk(ioc, 2539 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2540 __FILE__, __LINE__, __func__)); 2541 return 1; 2542 } 2543 2544 if (_scsih_get_volume_capabilities(ioc, raid_device)) { 2545 dfailprintk(ioc, 2546 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2547 __FILE__, __LINE__, __func__)); 2548 return 1; 2549 } 2550 2551 /* 2552 * WARPDRIVE: 
Initialize the required data for Direct IO 2553 */ 2554 mpt3sas_init_warpdrive_properties(ioc, raid_device); 2555 2556 /* RAID Queue Depth Support 2557 * IS volume = underlying qdepth of drive type, either 2558 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH 2559 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) 2560 */ 2561 if (raid_device->device_info & 2562 MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2563 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2564 ds = "SSP"; 2565 } else { 2566 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2567 if (raid_device->device_info & 2568 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2569 ds = "SATA"; 2570 else 2571 ds = "STP"; 2572 } 2573 2574 switch (raid_device->volume_type) { 2575 case MPI2_RAID_VOL_TYPE_RAID0: 2576 r_level = "RAID0"; 2577 break; 2578 case MPI2_RAID_VOL_TYPE_RAID1E: 2579 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2580 if (ioc->manu_pg10.OEMIdentifier && 2581 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & 2582 MFG10_GF0_R10_DISPLAY) && 2583 !(raid_device->num_pds % 2)) 2584 r_level = "RAID10"; 2585 else 2586 r_level = "RAID1E"; 2587 break; 2588 case MPI2_RAID_VOL_TYPE_RAID1: 2589 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2590 r_level = "RAID1"; 2591 break; 2592 case MPI2_RAID_VOL_TYPE_RAID10: 2593 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2594 r_level = "RAID10"; 2595 break; 2596 case MPI2_RAID_VOL_TYPE_UNKNOWN: 2597 default: 2598 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 2599 r_level = "RAIDX"; 2600 break; 2601 } 2602 2603 if (!ioc->hide_ir_msg) 2604 sdev_printk(KERN_INFO, sdev, 2605 "%s: handle(0x%04x), wwid(0x%016llx)," 2606 " pd_count(%d), type(%s)\n", 2607 r_level, raid_device->handle, 2608 (unsigned long long)raid_device->wwid, 2609 raid_device->num_pds, ds); 2610 2611 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) { 2612 blk_queue_max_hw_sectors(sdev->request_queue, 2613 MPT3SAS_RAID_MAX_SECTORS); 2614 sdev_printk(KERN_INFO, sdev, 2615 "Set queue's max_sector to: %u\n", 2616 MPT3SAS_RAID_MAX_SECTORS); 2617 } 2618 2619 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2620 
2621 /* raid transport support */ 2622 if (!ioc->is_warpdrive) 2623 _scsih_set_level(ioc, sdev, raid_device->volume_type); 2624 return 0; 2625 } 2626 2627 /* non-raid handling */ 2628 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { 2629 if (mpt3sas_config_get_volume_handle(ioc, handle, 2630 &volume_handle)) { 2631 dfailprintk(ioc, 2632 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2633 __FILE__, __LINE__, __func__)); 2634 return 1; 2635 } 2636 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, 2637 volume_handle, &volume_wwid)) { 2638 dfailprintk(ioc, 2639 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2640 __FILE__, __LINE__, __func__)); 2641 return 1; 2642 } 2643 } 2644 2645 /* PCIe handling */ 2646 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 2647 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 2648 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, 2649 sas_device_priv_data->sas_target->sas_address); 2650 if (!pcie_device) { 2651 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2652 dfailprintk(ioc, 2653 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2654 __FILE__, __LINE__, __func__)); 2655 return 1; 2656 } 2657 2658 qdepth = MPT3SAS_NVME_QUEUE_DEPTH; 2659 ds = "NVMe"; 2660 sdev_printk(KERN_INFO, sdev, 2661 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", 2662 ds, handle, (unsigned long long)pcie_device->wwid, 2663 pcie_device->port_num); 2664 if (pcie_device->enclosure_handle != 0) 2665 sdev_printk(KERN_INFO, sdev, 2666 "%s: enclosure logical id(0x%016llx), slot(%d)\n", 2667 ds, 2668 (unsigned long long)pcie_device->enclosure_logical_id, 2669 pcie_device->slot); 2670 if (pcie_device->connector_name[0] != '\0') 2671 sdev_printk(KERN_INFO, sdev, 2672 "%s: enclosure level(0x%04x)," 2673 "connector name( %s)\n", ds, 2674 pcie_device->enclosure_level, 2675 pcie_device->connector_name); 2676 2677 if (pcie_device->nvme_mdts) 2678 blk_queue_max_hw_sectors(sdev->request_queue, 2679 pcie_device->nvme_mdts/512); 2680 2681 
pcie_device_put(pcie_device); 2682 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 2683 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2684 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be 2685 ** merged and can eliminate holes created during merging 2686 ** operation. 2687 **/ 2688 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, 2689 sdev->request_queue); 2690 blk_queue_virt_boundary(sdev->request_queue, 2691 ioc->page_size - 1); 2692 return 0; 2693 } 2694 2695 spin_lock_irqsave(&ioc->sas_device_lock, flags); 2696 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 2697 sas_device_priv_data->sas_target->sas_address, 2698 sas_device_priv_data->sas_target->port); 2699 if (!sas_device) { 2700 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2701 dfailprintk(ioc, 2702 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2703 __FILE__, __LINE__, __func__)); 2704 return 1; 2705 } 2706 2707 sas_device->volume_handle = volume_handle; 2708 sas_device->volume_wwid = volume_wwid; 2709 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { 2710 qdepth = MPT3SAS_SAS_QUEUE_DEPTH; 2711 ssp_target = 1; 2712 if (sas_device->device_info & 2713 MPI2_SAS_DEVICE_INFO_SEP) { 2714 sdev_printk(KERN_WARNING, sdev, 2715 "set ignore_delay_remove for handle(0x%04x)\n", 2716 sas_device_priv_data->sas_target->handle); 2717 sas_device_priv_data->ignore_delay_remove = 1; 2718 ds = "SES"; 2719 } else 2720 ds = "SSP"; 2721 } else { 2722 qdepth = MPT3SAS_SATA_QUEUE_DEPTH; 2723 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) 2724 ds = "STP"; 2725 else if (sas_device->device_info & 2726 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 2727 ds = "SATA"; 2728 } 2729 2730 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ 2731 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", 2732 ds, handle, (unsigned long long)sas_device->sas_address, 2733 sas_device->phy, (unsigned long long)sas_device->device_name); 2734 2735 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL); 2736 
2737 sas_device_put(sas_device); 2738 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 2739 2740 if (!ssp_target) 2741 _scsih_display_sata_capabilities(ioc, handle, sdev); 2742 2743 2744 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 2745 2746 if (ssp_target) { 2747 sas_read_port_mode_page(sdev); 2748 _scsih_enable_tlr(ioc, sdev); 2749 } 2750 2751 return 0; 2752 } 2753 2754 /** 2755 * scsih_bios_param - fetch head, sector, cylinder info for a disk 2756 * @sdev: scsi device struct 2757 * @bdev: pointer to block device context 2758 * @capacity: device size (in 512 byte sectors) 2759 * @params: three element array to place output: 2760 * params[0] number of heads (max 255) 2761 * params[1] number of sectors (max 63) 2762 * params[2] number of cylinders 2763 */ 2764 static int 2765 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2766 sector_t capacity, int params[]) 2767 { 2768 int heads; 2769 int sectors; 2770 sector_t cylinders; 2771 ulong dummy; 2772 2773 heads = 64; 2774 sectors = 32; 2775 2776 dummy = heads * sectors; 2777 cylinders = capacity; 2778 sector_div(cylinders, dummy); 2779 2780 /* 2781 * Handle extended translation size for logical drives 2782 * > 1Gb 2783 */ 2784 if ((ulong)capacity >= 0x200000) { 2785 heads = 255; 2786 sectors = 63; 2787 dummy = heads * sectors; 2788 cylinders = capacity; 2789 sector_div(cylinders, dummy); 2790 } 2791 2792 /* return result */ 2793 params[0] = heads; 2794 params[1] = sectors; 2795 params[2] = cylinders; 2796 2797 return 0; 2798 } 2799 2800 /** 2801 * _scsih_response_code - translation of device response code 2802 * @ioc: per adapter object 2803 * @response_code: response code returned by the device 2804 */ 2805 static void 2806 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) 2807 { 2808 char *desc; 2809 2810 switch (response_code) { 2811 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: 2812 desc = "task management request completed"; 2813 break; 2814 case 
MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: 2815 desc = "invalid frame"; 2816 break; 2817 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: 2818 desc = "task management request not supported"; 2819 break; 2820 case MPI2_SCSITASKMGMT_RSP_TM_FAILED: 2821 desc = "task management request failed"; 2822 break; 2823 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: 2824 desc = "task management request succeeded"; 2825 break; 2826 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: 2827 desc = "invalid lun"; 2828 break; 2829 case 0xA: 2830 desc = "overlapped tag attempted"; 2831 break; 2832 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: 2833 desc = "task queued, however not sent to target"; 2834 break; 2835 default: 2836 desc = "unknown"; 2837 break; 2838 } 2839 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc); 2840 } 2841 2842 /** 2843 * _scsih_tm_done - tm completion routine 2844 * @ioc: per adapter object 2845 * @smid: system request message index 2846 * @msix_index: MSIX table index supplied by the OS 2847 * @reply: reply message frame(lower 32bit addr) 2848 * Context: none. 2849 * 2850 * The callback handler when using scsih_issue_tm. 2851 * 2852 * Return: 1 meaning mf should be freed from _base_interrupt 2853 * 0 means the mf is freed from this function. 
2854 */ 2855 static u8 2856 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) 2857 { 2858 MPI2DefaultReply_t *mpi_reply; 2859 2860 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED) 2861 return 1; 2862 if (ioc->tm_cmds.smid != smid) 2863 return 1; 2864 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; 2865 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 2866 if (mpi_reply) { 2867 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 2868 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID; 2869 } 2870 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING; 2871 complete(&ioc->tm_cmds.done); 2872 return 1; 2873 } 2874 2875 /** 2876 * mpt3sas_scsih_set_tm_flag - set per target tm_busy 2877 * @ioc: per adapter object 2878 * @handle: device handle 2879 * 2880 * During taskmangement request, we need to freeze the device queue. 2881 */ 2882 void 2883 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 2884 { 2885 struct MPT3SAS_DEVICE *sas_device_priv_data; 2886 struct scsi_device *sdev; 2887 u8 skip = 0; 2888 2889 shost_for_each_device(sdev, ioc->shost) { 2890 if (skip) 2891 continue; 2892 sas_device_priv_data = sdev->hostdata; 2893 if (!sas_device_priv_data) 2894 continue; 2895 if (sas_device_priv_data->sas_target->handle == handle) { 2896 sas_device_priv_data->sas_target->tm_busy = 1; 2897 skip = 1; 2898 ioc->ignore_loginfos = 1; 2899 } 2900 } 2901 } 2902 2903 /** 2904 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy 2905 * @ioc: per adapter object 2906 * @handle: device handle 2907 * 2908 * During taskmangement request, we need to freeze the device queue. 
2909 */ 2910 void 2911 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) 2912 { 2913 struct MPT3SAS_DEVICE *sas_device_priv_data; 2914 struct scsi_device *sdev; 2915 u8 skip = 0; 2916 2917 shost_for_each_device(sdev, ioc->shost) { 2918 if (skip) 2919 continue; 2920 sas_device_priv_data = sdev->hostdata; 2921 if (!sas_device_priv_data) 2922 continue; 2923 if (sas_device_priv_data->sas_target->handle == handle) { 2924 sas_device_priv_data->sas_target->tm_busy = 0; 2925 skip = 1; 2926 ioc->ignore_loginfos = 0; 2927 } 2928 } 2929 } 2930 2931 /** 2932 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status 2933 * @ioc: per adapter object 2934 * @channel: the channel assigned by the OS 2935 * @id: the id assigned by the OS 2936 * @lun: lun number 2937 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2938 * @smid_task: smid assigned to the task 2939 * 2940 * Look whether TM has aborted the timed out SCSI command, if 2941 * TM has aborted the IO then return SUCCESS else return FAILED. 
2942 */ 2943 static int 2944 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel, 2945 uint id, uint lun, u8 type, u16 smid_task) 2946 { 2947 2948 if (smid_task <= ioc->shost->can_queue) { 2949 switch (type) { 2950 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 2951 if (!(_scsih_scsi_lookup_find_by_target(ioc, 2952 id, channel))) 2953 return SUCCESS; 2954 break; 2955 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: 2956 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 2957 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id, 2958 lun, channel))) 2959 return SUCCESS; 2960 break; 2961 default: 2962 return SUCCESS; 2963 } 2964 } else if (smid_task == ioc->scsih_cmds.smid) { 2965 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) || 2966 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED)) 2967 return SUCCESS; 2968 } else if (smid_task == ioc->ctl_cmds.smid) { 2969 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) || 2970 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED)) 2971 return SUCCESS; 2972 } 2973 2974 return FAILED; 2975 } 2976 2977 /** 2978 * scsih_tm_post_processing - post processing of target & LUN reset 2979 * @ioc: per adapter object 2980 * @handle: device handle 2981 * @channel: the channel assigned by the OS 2982 * @id: the id assigned by the OS 2983 * @lun: lun number 2984 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2985 * @smid_task: smid assigned to the task 2986 * 2987 * Post processing of target & LUN reset. Due to interrupt latency 2988 * issue it possible that interrupt for aborted IO might not be 2989 * received yet. So before returning failure status, poll the 2990 * reply descriptor pools for the reply of timed out SCSI command. 2991 * Return FAILED status if reply for timed out is not received 2992 * otherwise return SUCCESS. 
2993 */ 2994 static int 2995 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle, 2996 uint channel, uint id, uint lun, u8 type, u16 smid_task) 2997 { 2998 int rc; 2999 3000 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); 3001 if (rc == SUCCESS) 3002 return rc; 3003 3004 ioc_info(ioc, 3005 "Poll ReplyDescriptor queues for completion of" 3006 " smid(%d), task_type(0x%02x), handle(0x%04x)\n", 3007 smid_task, type, handle); 3008 3009 /* 3010 * Due to interrupt latency issues, driver may receive interrupt for 3011 * TM first and then for aborted SCSI IO command. So, poll all the 3012 * ReplyDescriptor pools before returning the FAILED status to SML. 3013 */ 3014 mpt3sas_base_mask_interrupts(ioc); 3015 mpt3sas_base_sync_reply_irqs(ioc, 1); 3016 mpt3sas_base_unmask_interrupts(ioc); 3017 3018 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); 3019 } 3020 3021 /** 3022 * mpt3sas_scsih_issue_tm - main routine for sending tm requests 3023 * @ioc: per adapter struct 3024 * @handle: device handle 3025 * @channel: the channel assigned by the OS 3026 * @id: the id assigned by the OS 3027 * @lun: lun number 3028 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 3029 * @smid_task: smid assigned to the task 3030 * @msix_task: MSIX table index supplied by the OS 3031 * @timeout: timeout in seconds 3032 * @tr_method: Target Reset Method 3033 * Context: user 3034 * 3035 * A generic API for sending task management requests to firmware. 3036 * 3037 * The callback index is set inside `ioc->tm_cb_idx`. 3038 * The caller is responsible to check for outstanding commands. 3039 * 3040 * Return: SUCCESS or FAILED. 
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	lockdep_assert_held(&ioc->tm_cmds.mutex);

	/* only one TM command may be outstanding at a time */
	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* an unhealthy IOC (doorbell in use, fault, coredump) cannot take a
	 * TM; escalate straight to a hard reset instead
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go out on a high-priority smid */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the device queue (tm_busy) for the duration of the TM */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* unfreeze the device queue and release the TM slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/* Convenience wrapper: takes tm_cmds.mutex around
 * mpt3sas_scsih_issue_tm() for callers not already holding it.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
	u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
	    smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}

/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
		    "%s handle(0x%04x), %s wwid(0x%016llx)\n",
		    device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* Lookup under pcie_device_lock; put the reference before
		 * dropping the lock.
		 */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			    pcie_device->handle,
			    (unsigned long long)pcie_device->wwid,
			    pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    pcie_device->enclosure_logical_id,
				    pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
				    "enclosure level(0x%04x), connector name( %s)\n",
				    pcie_device->enclosure_level,
				    pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				    (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}

/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scmd->request->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* NVMe (non-SCSI PCIe) devices use the longer nvme abort timeout. */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ?
	    "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}

/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the logical unit was reset else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe (non-SCSI PCIe) devices get a device-specific timeout and a
	 * protocol-level reset; everything else gets a link reset.
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ?
	    "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}

/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the target was reset else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
		    target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe (non-SCSI PCIe) devices get a device-specific timeout and a
	 * protocol-level reset; everything else gets a link reset.
	 */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
	    scmd->device->id, 0,
	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}


/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the host was reset else FAILED
 */
static int
scsih_host_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	int r, retval;

	ioc_info(ioc, "attempting host reset! 
scmd(0x%p)\n", scmd);
	scsi_print_command(scmd);

	/* Refuse a host reset while the driver is still loading or the host
	 * is being removed.
	 */
	if (ioc->is_driver_loading || ioc->remove_host) {
		ioc_info(ioc, "Blocking the host reset\n");
		r = FAILED;
		goto out;
	}

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	r = (retval < 0) ? FAILED : SUCCESS;
 out:
	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
	    r == SUCCESS ? "SUCCESS" : "FAILED", scmd);

	return r;
}

/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* Two references are taken here: one for the event's presence on
	 * fw_event_list, and one for the queued work item.
	 */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * If the fw_event is on the fw_event_list, remove it and do a put.
 */
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
	*fw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&fw_event->list)) {
		list_del_init(&fw_event->list);
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}


/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 */
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
	struct fw_event_work *fw_event;
	u16 sz;

	if (ioc->is_driver_loading)
		return;
	sz = sizeof(*event_data);
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
	fw_event->ioc = ioc;
	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
	_scsih_fw_event_add(ioc, fw_event);
	/* Drop the allocation reference; the queued event holds its own. */
	fw_event_work_put(fw_event);
}

/**
 * _scsih_error_recovery_delete_devices - remove devices not responding
 * @ioc: per adapter object
 */
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if (ioc->is_driver_loading)
		return;
	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * mpt3sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 */
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if
	    (!fw_event)
		return;
	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/* Pop the first fw_event off ioc->fw_event_list (or NULL if empty).
 * The list reference is transferred to the caller.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
		    struct fw_event_work, list);
		list_del_init(&fw_event->list);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;

	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	    (fw_event = ioc->current_event)) {
		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

		/* Put for the list (or current_event) reference. */
		fw_event_work_put(fw_event);
	}
	ioc->fw_events_cleanup = 0;
}

/**
 * _scsih_internal_device_block - block the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * make sure device is blocked without error, if not
 * print an error
 */
static void
_scsih_internal_device_block(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
	    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 1;

	r = scsi_internal_device_block_nowait(sdev);
	if (r == -EINVAL)
		sdev_printk(KERN_WARNING, sdev,
		    "device_block failed with return(%d) for handle(0x%04x)\n",
		    r, sas_device_priv_data->sas_target->handle);
}

/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
	struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call.
		 * We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}

/**
 * _scsih_ublock_io_all_device - unblock every device
 * @ioc: per adapter object
 *
 * change the device state from block to running
 */
static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		/* Only touch devices we previously blocked. */
		if (!sas_device_priv_data->block)
			continue;

		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
		    "device_running, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle));
		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
	}
}


/**
 * _scsih_ublock_io_device - prepare device to be deleted
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 *
 * unblock then put device in offline state
 */
static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		/* Match on both SAS address and HBA port. */
		if (sas_device_priv_data->sas_target->sas_address
		    != sas_address)
			continue;
		if (sas_device_priv_data->sas_target->port != port)
			continue;
		if (sas_device_priv_data->block)
			_scsih_internal_device_unblock(sdev,
			    sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* SES devices are skipped so enclosure services stay up. */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* Skip devices whose rphy add is still pending. */
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		/* SES devices are skipped so enclosure services stay up. */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
			    "%s skip device_block for SES handle(0x%04x)\n",
			    __func__, sas_device_priv_data->sas_target->handle);
			continue;
		}
		_scsih_internal_device_block(sdev, sas_device_priv_data);
	}

	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;

	/* First pass: mark every directly attached end device for blocking
	 * by setting its handle in ioc->blocking_handles.
	 */
	list_for_each_entry(mpt3sas_port,
	    &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
				    ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* Second pass: recurse into any downstream expanders. */
	list_for_each_entry(mpt3sas_port,
	    &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull.
 */
static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}

/**
 * _scsih_block_io_to_pcie_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull/reconnect.
 */
static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code ==
		    MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
			_scsih_block_io_device(ioc, handle);
	}
}
/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware. This function will issue target reset
 * using high priority request queue. It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo.
 * If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* Look up the device (SAS first, then PCIe) and mark its target as
	 * deleted so no new I/O is accepted while the removal handshake runs.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
		    pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* No free hi-priority smid: queue the handle on delayed_tr_list and
	 * retry from a future TR completion.
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

 out:
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}

/**
 * _scsih_tm_tr_complete -
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* bail out when the controller cannot accept the follow-up request */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* cross-check the reply against the original request frame */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* Second half of the removal handshake: send REMOVE_DEVICE.
	 * If no internal smid is free, park the handle on delayed_sc_list
	 * to be issued from a later internal-command completion.
	 */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	/* already little-endian in the request frame; copy verbatim */
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_allow_scmd_to_device - check whether scmd needs to
 *				 issue to IOC or not.
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 *
 * During host removal, MPI2.5+ controllers still allow cache-flush and
 * start/stop commands through so devices can be shut down cleanly;
 * MPI2 controllers reject everything once removal starts.
 *
 * Returns true if scmd can be issued to IOC otherwise returns false.
 */
inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	if (ioc->pci_error_recovery)
		return false;

	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
		if (ioc->remove_host)
			return false;

		return true;
	}

	if (ioc->remove_host) {
		switch (scmd->cmnd[0]) {
		case SYNCHRONIZE_CACHE:
		case START_STOP:
			return true;
		default:
			return false;
		}
	}

	return true;
}

/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
4295 * This code is part of the code to initiate the device removal 4296 * handshake protocol with controller firmware. 4297 * 4298 * Return: 1 meaning mf should be freed from _base_interrupt 4299 * 0 means the mf is freed from this function. 4300 */ 4301 static u8 4302 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, 4303 u8 msix_index, u32 reply) 4304 { 4305 Mpi2SasIoUnitControlReply_t *mpi_reply = 4306 mpt3sas_base_get_reply_virt_addr(ioc, reply); 4307 4308 if (likely(mpi_reply)) { 4309 dewtprintk(ioc, 4310 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 4311 le16_to_cpu(mpi_reply->DevHandle), smid, 4312 le16_to_cpu(mpi_reply->IOCStatus), 4313 le32_to_cpu(mpi_reply->IOCLogInfo))); 4314 if (le16_to_cpu(mpi_reply->IOCStatus) == 4315 MPI2_IOCSTATUS_SUCCESS) { 4316 clear_bit(le16_to_cpu(mpi_reply->DevHandle), 4317 ioc->device_remove_in_progress); 4318 } 4319 } else { 4320 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 4321 __FILE__, __LINE__, __func__); 4322 } 4323 return mpt3sas_check_for_pending_internal_cmds(ioc, smid); 4324 } 4325 4326 /** 4327 * _scsih_tm_tr_volume_send - send target reset request for volumes 4328 * @ioc: per adapter object 4329 * @handle: device handle 4330 * Context: interrupt time. 4331 * 4332 * This is designed to send muliple task management request at the same 4333 * time to the fifo. If the fifo is full, we will append the request, 4334 * and process it in a future completion. 
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return;
	}

	/* No free hi-priority smid: park the handle on
	 * delayed_tr_volume_list; it is re-issued later from
	 * _scsih_check_for_pending_tm().
	 */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		return;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}

/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* cross-check the reply against the original request frame */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* feed the next queued target reset, if any */
	return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
	U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	/* position of this smid in the internal-command lookup table */
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
			    le16_to_cpu(event), smid, ioc->base_cb_idx));
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	/* @event/@event_context are stored little-endian; copy verbatim */
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}

/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *					   sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
4472 */ 4473 static void 4474 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, 4475 u16 smid, u16 handle) 4476 { 4477 Mpi2SasIoUnitControlRequest_t *mpi_request; 4478 u32 ioc_state; 4479 int i = smid - ioc->internal_smid; 4480 unsigned long flags; 4481 4482 if (ioc->remove_host) { 4483 dewtprintk(ioc, 4484 ioc_info(ioc, "%s: host has been removed\n", 4485 __func__)); 4486 return; 4487 } else if (ioc->pci_error_recovery) { 4488 dewtprintk(ioc, 4489 ioc_info(ioc, "%s: host in pci error recovery\n", 4490 __func__)); 4491 return; 4492 } 4493 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 4494 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 4495 dewtprintk(ioc, 4496 ioc_info(ioc, "%s: host is not operational\n", 4497 __func__)); 4498 return; 4499 } 4500 4501 /* Without releasing the smid just update the 4502 * call back index and reuse the same smid for 4503 * processing this delayed request 4504 */ 4505 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4506 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx; 4507 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4508 4509 dewtprintk(ioc, 4510 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 4511 handle, smid, ioc->tm_sas_control_cb_idx)); 4512 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4513 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); 4514 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; 4515 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; 4516 mpi_request->DevHandle = cpu_to_le16(handle); 4517 ioc->put_smid_default(ioc, smid); 4518 } 4519 4520 /** 4521 * _scsih_check_for_pending_internal_cmds - check for pending internal messages 4522 * @ioc: per adapter object 4523 * @smid: system request message index 4524 * 4525 * Context: Executed in interrupt context 4526 * 4527 * This will check delayed internal messages list, and process the 4528 * next request. 
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct _sc_list *delayed_sc;
	struct _event_ack_list *delayed_event_ack;

	/* Event ACKs take priority over delayed sas io unit control
	 * requests; in either case the just-completed smid is reused
	 * for the delayed message, so return 0 (mf not freed here).
	 */
	if (!list_empty(&ioc->delayed_event_ack_list)) {
		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
		    struct _event_ack_list, list);
		_scsih_issue_delayed_event_ack(ioc, smid,
		    delayed_event_ack->Event, delayed_event_ack->EventContext);
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
		return 0;
	}

	if (!list_empty(&ioc->delayed_sc_list)) {
		delayed_sc = list_entry(ioc->delayed_sc_list.next,
		    struct _sc_list, list);
		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
		    delayed_sc->handle);
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
		return 0;
	}
	/* nothing pending - caller frees the message frame */
	return 1;
}

/**
 * _scsih_check_for_pending_tm - check for pending task management
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * This will check delayed target reset list, and feed the
 * next request.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct _tr_list *delayed_tr;

	/* Volume resets take priority over device resets.  The smid is
	 * freed first so the send routine can acquire a hi-priority smid.
	 */
	if (!list_empty(&ioc->delayed_tr_volume_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
		    struct _tr_list, list);
		mpt3sas_base_free_smid(ioc, smid);
		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}

	if (!list_empty(&ioc->delayed_tr_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_list.next,
		    struct _tr_list, list);
		mpt3sas_base_free_smid(ioc, smid);
		_scsih_tm_tr_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}

	/* nothing delayed - caller frees the message frame */
	return 1;
}

/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot. When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* issue target resets for every phy reporting a vanished device */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* handles below num_phys are direct-attached (no expander) */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* drain the blocking_handles bitmap populated above; the
		 * test_and_clear_bit in the loop condition both consumes
		 * the bit and terminates when find_first_bit finds none
		 * (handle == MaxDevHandle, whose bit is never set)
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* expander is gone: mark ignore flag for pending add/responding
	 * events queued for the same expander
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 *					  events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* issue target resets for every port reporting a vanished device */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);
		return;
	}
	/* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
	    == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
	    (event_data->SwitchStatus ==
	    MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);

	/* NOTE(review): SwitchStatus is compared against SAS topology
	 * ES_* constants below rather than PCIe SS_* ones - presumably
	 * the numeric values coincide; confirm against the MPI 2.6 spec.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* switch is gone: mark ignore flag for pending add/responding
	 * events queued for the same switch
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data =
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
			    switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Marks the raid volume's target as deleted so no further IO is
 * issued to it.  This returns nothing.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data =
		    raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
				    handle, (u64)raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
 * @handle: input handle
 * @a: handle for volume a
 * @b: handle for volume b
 *
 * IR firmware only supports two raid volumes.  A non-zero @handle that
 * is not already recorded is stored into @a first, then into @b; a zero
 * or duplicate handle is ignored.
 */
static void
_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
{
	if (!handle || handle == *a || handle == *b)
		return;
	if (!*a)
		*a = handle;
	else if (!*b)
		*b = handle;
}

/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs.
 * This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b hold the (at most two) volume handles that get a reset */
	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets: PDs belonging to a volume being reset are
	 * queued on delayed_tr_list so they start only after the volume
	 * target reset completes.
	 */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			/* NOTE(review): crashes on GFP_ATOMIC allocation
			 * failure - presumably deemed unrecoverable here;
			 * confirm before changing.
			 */
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}


/**
 * _scsih_check_volume_delete_events - set delete flag for volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This will handle the case when the cable connected to entire volume is
 * pulled. We will take care of setting the deleted flag so normal IO will
 * not be sent.
 */
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrVolume_t *event_data)
{
	u32 state;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;
	state = le32_to_cpu(event_data->NewValue);
	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
	    MPI2_RAID_VOL_STATE_FAILED)
		_scsih_set_volume_delete_flag(ioc,
		    le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
 */
static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataTemperature_t *event_data)
{
	u32 doorbell;

	/* NOTE(review): only sensors within the advertised count are
	 * reported - presumably SensorNum is 0-based here; confirm.
	 */
	if (ioc->temp_sensors_count >= event_data->SensorNum) {
		/* Status bits 0-3 flag which of the four thresholds fired */
		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
			event_data->SensorNum);
		ioc_err(ioc, "Current Temp In Celsius: %d\n",
			event_data->CurrentTemperature);
		/* on MPI2.5+ parts also dump fault/coredump state if the
		 * IOC has already tripped
		 */
		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc,
				    doorbell & MPI2_DOORBELL_DATA_MASK);
			}
		}
	}
}

/**
 * _scsih_set_satl_pending - track an in-flight ATA passthrough command
 * @scmd: pointer to scsi command object
 * @pending: true to mark busy, false to clear
 *
 * Firmware SATL work-around: only one ATA_12/ATA_16 passthrough may be
 * outstanding per device.  Returns non-zero if @pending was requested
 * but another ATA command is already in flight; 0 otherwise (including
 * for non-ATA opcodes, which are never tracked).
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;

	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
		return 0;

	if (pending)
		return test_and_set_bit(0, &priv->ata_command_pending);

	clear_bit(0, &priv->ata_command_pending);
	return 0;
}

/**
 * _scsih_flush_running_cmds - completing outstanding commands.
 * @ioc: per adapter object
 *
 * The flushing out of all pending scmd commands following host reset,
 * where all IO is dropped to the floor.
4967 */ 4968 static void 4969 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) 4970 { 4971 struct scsi_cmnd *scmd; 4972 struct scsiio_tracker *st; 4973 u16 smid; 4974 int count = 0; 4975 4976 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 4977 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 4978 if (!scmd) 4979 continue; 4980 count++; 4981 _scsih_set_satl_pending(scmd, false); 4982 st = scsi_cmd_priv(scmd); 4983 mpt3sas_base_clear_st(ioc, st); 4984 scsi_dma_unmap(scmd); 4985 if (ioc->pci_error_recovery || ioc->remove_host) 4986 scmd->result = DID_NO_CONNECT << 16; 4987 else 4988 scmd->result = DID_RESET << 16; 4989 scmd->scsi_done(scmd); 4990 } 4991 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); 4992 } 4993 4994 /** 4995 * _scsih_setup_eedp - setup MPI request for EEDP transfer 4996 * @ioc: per adapter object 4997 * @scmd: pointer to scsi command object 4998 * @mpi_request: pointer to the SCSI_IO request message frame 4999 * 5000 * Supporting protection 1 and 3. 
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	unsigned char prot_op = scsi_get_prot_op(scmd);
	unsigned char prot_type = scsi_get_prot_type(scmd);
	Mpi25SCSIIORequest_t *mpi_request_3v =
	    (Mpi25SCSIIORequest_t *)mpi_request;

	if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
		return;

	/* only strip-on-read / insert-on-write are offloaded */
	if (prot_op == SCSI_PROT_READ_STRIP)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
	else if (prot_op == SCSI_PROT_WRITE_INSERT)
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
	else
		return;

	switch (prot_type) {
	case SCSI_PROT_DIF_TYPE1:
	case SCSI_PROT_DIF_TYPE2:

		/*
		 * enable ref/guard checking
		 * auto increment ref tag
		 */
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
		    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
		/* reference tag is big-endian on the wire */
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
		    cpu_to_be32(t10_pi_ref_tag(scmd->request));
		break;

	case SCSI_PROT_DIF_TYPE3:

		/*
		 * enable guard checking
		 */
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

		break;
	}

	mpi_request_3v->EEDPBlockSize =
	    cpu_to_le16(scmd->device->sector_size);

	if (ioc->is_gen35_ioc)
		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}

/**
 * _scsih_eedp_error_handling - return sense code for EEDP errors
 * @scmd: pointer to scsi command object
 * @ioc_status: ioc status
 *
 * Builds ILLEGAL_REQUEST sense (asc 0x10) with the ascq encoding which
 * protection-information check failed, and completes the command with
 * DID_ABORT / CHECK CONDITION.
 */
static void
_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
{
	u8 ascq;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		ascq = 0x01;
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		ascq = 0x02;
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
	    ascq);
	scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
	    SAM_STAT_CHECK_CONDITION;
}

/**
 * scsih_qcmd - main scsi request entry point
 * @shost: SCSI host pointer
 * @scmd: pointer to scsi command object
 *
 * The callback index is set inside `ioc->scsi_io_cb_idx`.
 *
 * Return: 0 on success. If there's a failure, return either:
 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
 */
static int
scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _raid_device *raid_device;
	struct request *rq = scmd->request;
	int class;
	Mpi25SCSIIORequest_t *mpi_request;
	struct _pcie_device *pcie_device = NULL;
	u32 mpi_control;
	u16 smid;
	u16 handle;

	if (ioc->logging_level & MPT_DEBUG_SCSI)
		scsi_print_command(scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}

	sas_target_priv_data = sas_device_priv_data->sas_target;

	/* invalid device handle */
	handle = sas_target_priv_data->handle;
	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	}


	if (ioc->shost_recovery ||
	    ioc->ioc_link_reset_in_progress) {
		/* host recovery or link resets sent via IOCTLs */
		return SCSI_MLQUEUE_HOST_BUSY;
	} else if (sas_target_priv_data->deleted) {
		/* device has been deleted */
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		return 0;
	} else if (sas_target_priv_data->tm_busy ||
	    sas_device_priv_data->block) {
		/* device busy with task management */
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/*
	 * Bug work around for firmware SATL handling. The loop
	 * is based on atomic operations and ensures consistency
	 * since we're lockless at this point
	 */
	do {
		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
			return SCSI_MLQUEUE_DEVICE_BUSY;
	} while (_scsih_set_satl_pending(scmd, true));

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
	else
		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;

	/* set tags */
	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	/* NCQ Prio supported, make sure control indicated high priority */
	if (sas_device_priv_data->ncq_prio_enable) {
		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (class == IOPRIO_CLASS_RT)
			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
	}
	/* Make sure Device is not raid volume.
	 * We do not expose raid functionality to upper layer for warpdrive.
	 */
	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
	    && !scsih_is_nvme(&scmd->device->sdev_gendev))
	    && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;

	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		/* clear the SATL-pending bit taken above before bailing out */
		_scsih_set_satl_pending(scmd, false);
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	_scsih_setup_eedp(ioc, scmd, mpi_request);

	if (scmd->cmd_len == 32)
		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	/*
	 * NOTE(review): this assignment is dead — the if/else immediately
	 * below unconditionally overwrites ->Function.
	 */
	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
	mpi_request->Control = cpu_to_le32(mpi_control);
	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
	mpi_request->SenseBufferLowAddress =
	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
	    mpi_request->LUN);
	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);

	if (mpi_request->DataLength) {
		pcie_device = sas_target_priv_data->pcie_dev;
		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
			mpt3sas_base_free_smid(ioc, smid);
			_scsih_set_satl_pending(scmd, false);
			goto out;
		}
	} else
		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);

	raid_device = sas_target_priv_data->raid_device;
	if (raid_device && raid_device->direct_io_enabled)
		mpt3sas_setup_direct_io(ioc, scmd,
		    raid_device, mpi_request);

	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
			ioc->put_smid_fast_path(ioc, smid, handle);
		} else
			ioc->put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->DevHandle));
	} else
		ioc->put_smid_default(ioc, smid);
	return 0;

 out:
	return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
 * @sense_buffer: sense data returned by target
 * @data: normalized skey/asc/ascq
 *
 * The key/asc/ascq fields live at different offsets depending on whether
 * the target returned descriptor-format (response code >= 0x72) or
 * fixed-format sense data; copy them out of the right offsets.
 */
static void
_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
{
	if ((sense_buffer[0] & 0x7F) >= 0x72) {
		/* descriptor format */
		data->skey = sense_buffer[1] & 0x0F;
		data->asc = sense_buffer[2];
		data->ascq = sense_buffer[3];
	} else {
		/* fixed format */
		data->skey = sense_buffer[2] & 0x0F;
		data->asc = sense_buffer[12];
		data->ascq = sense_buffer[13];
	}
}

/**
 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 *
 * Logging-only helper: decodes the reply's status fields into human
 * readable strings and prints the command plus device identity.
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scratch buffer owned by the adapter; built up via strcat below */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* NOTE(review): 0x31170000 loginfo is silently suppressed here;
	 * meaning not visible in this file — confirm against firmware docs.
	 */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	desc_scsi_state[0] = '\0';
	/*
	 * When no state bits are set, point at a literal instead; none of
	 * the strcat branches below run in that case, so the literal is
	 * never written to.
	 */
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}

/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 *
 * Sends a SCSI Enclosure Processor request (addressed by device handle)
 * asking the enclosure to raise the predicted-fault indicator for the
 * device's slot.
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	/* remember the LED state so it can be cleared on device removal */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	sas_device_put(sas_device);
}

/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to turned off
 * Context: process
 *
 * Unlike _scsih_turn_on_pfa_led(), the device handle may already be
 * gone here, so the SEP request addresses the enclosure/slot instead.
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
				    le16_to_cpu(mpi_reply.IOCStatus),
				    le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}

/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * The SEP request needed to light the LED cannot be issued from
 * interrupt context, so queue it to the firmware-event worker.
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Logs the predicted-fault condition, optionally lights the PFA LED
 * (IBM subsystems only), and injects a synthetic SAS device status
 * change event into the driver's event log.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	    ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* GFP_ATOMIC: we are in interrupt context (see Context above) */
	event_reply = kzalloc(sz, GFP_ATOMIC);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	/* ASC 0x5D: failure prediction threshold exceeded */
	event_data->ASC = 0x5D;
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}

/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	/* command completed: release the SATL workaround gate from qcmd */
	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame means a successful completion */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		st->direct_io = 0;
		st->scmd = scmd;
		/* resubmit the same smid, retargeted at the volume handle */
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* one-shot per device: disable TLR if the target rejects it */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/*
	 * A zero-byte "underrun" that carries a busy-class SCSI status is
	 * really a queueing condition, not a data error — let the SCSI
	 * status drive the outcome by treating the IOC status as success.
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		    ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		    (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		    (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/*
		 * NOTE(review): loginfo 0x31110630 triggers an
		 * offline-after-retries policy; meaning of the code is not
		 * visible in this file — confirm against firmware docs.
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/*
			 * Fake a CHECK CONDITION (fixed-format sense,
			 * ILLEGAL_REQUEST / invalid command opcode) for a
			 * zero-byte REPORT LUNS so the mid-layer retries
			 * rather than accepting an empty LUN list.
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scmd->result = (DRIVER_SENSE << 24) |
			    SAM_STAT_CHECK_CONDITION;
			scmd->sense_buffer[0] = 0x70;
			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
			scmd->sense_buffer[12] = 0x20;
			scmd->sense_buffer[13] = 0;
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scmd->scsi_done(scmd);
	return 0;
}

/**
 * _scsih_update_vphys_after_reset - update the Port's
 * vphys_list after reset
 * @ioc: per adapter object
 *
 * Returns nothing.
 */
static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz, ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_id;
	Mpi2SasPhyPage0_t phy_pg0;
	struct hba_port *port, *port_next, *mport;
	struct virtual_phy *vphy, *vphy_next;
	struct _sas_device *sas_device;

	/*
	 * Mark all the vphys objects as dirty.
	 */
	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
		}
	}

	/*
	 * Read SASIOUnitPage0 to get each HBA Phy's data.
	 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
	    (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	/*
	 * Loop over each HBA Phy.
	 */
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		/*
		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
		 */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		/*
		 * Check whether Phy is connected to SEP device or not,
		 * if it is SEP device then read the Phy's SASPHYPage0 data to
		 * determine whether Phy is a virtual Phy or not. if it is
		 * virtual phy then it is conformed that the attached remote
		 * device is a HBA's vSES device.
		 */
		if (!(le32_to_cpu(
		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP))
			continue;

		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
			continue;
		/*
		 * Get the vSES device's SAS Address.
		 */
		attached_handle = le16_to_cpu(
		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(ioc, attached_handle,
		    &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		found = 0;
		port = port_next = NULL;
		/*
		 * Loop over each virtual_phy object from
		 * each port's vphys_list.
		 */
		list_for_each_entry_safe(port,
		    port_next, &ioc->port_table_list, list) {
			if (!port->vphys_mask)
				continue;
			list_for_each_entry_safe(vphy, vphy_next,
			    &port->vphys_list, list) {
				/*
				 * Continue with next virtual_phy object
				 * if the object is not marked as dirty.
				 */
				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
					continue;

				/*
				 * Continue with next virtual_phy object
				 * if the object's SAS Address is not equals
				 * to current Phy's vSES device SAS Address.
				 */
				if (vphy->sas_address != attached_sas_addr)
					continue;
				/*
				 * Enable current Phy number bit in object's
				 * phy_mask field.
				 */
				if (!(vphy->phy_mask & (1 << i)))
					vphy->phy_mask = (1 << i);
				/*
				 * Get hba_port object from hba_port table
				 * corresponding to current phy's Port ID.
				 * if there is no hba_port object corresponding
				 * to Phy's Port ID then create a new hba_port
				 * object & add to hba_port table.
				 */
				port_id = sas_iounit_pg0->PhyData[i].Port;
				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
				if (!mport) {
					mport = kzalloc(
					    sizeof(struct hba_port), GFP_KERNEL);
					if (!mport)
						break;
					mport->port_id = port_id;
					ioc_info(ioc,
					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
					    __func__, mport, mport->port_id);
					list_add_tail(&mport->list,
					    &ioc->port_table_list);
				}
				/*
				 * If mport & port pointers are not pointing to
				 * same hba_port object then it means that vSES
				 * device's Port ID got changed after reset and
				 * hence move current virtual_phy object from
				 * port's vphys_list to mport's vphys_list.
				 */
				if (port != mport) {
					if (!mport->vphys_mask)
						INIT_LIST_HEAD(
						    &mport->vphys_list);
					mport->vphys_mask |= (1 << i);
					port->vphys_mask &= ~(1 << i);
					list_move(&vphy->list,
					    &mport->vphys_list);
					sas_device = mpt3sas_get_sdev_by_addr(
					    ioc, attached_sas_addr, port);
					if (sas_device)
						sas_device->port = mport;
				}
				/*
				 * Earlier while updating the hba_port table,
				 * it is determined that there is no other
				 * direct attached device with mport's Port ID,
				 * Hence mport was marked as dirty. Only vSES
				 * device has this Port ID, so unmark the mport
				 * as dirty.
				 */
				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
					mport->sas_address = 0;
					mport->phy_mask = 0;
					mport->flags &=
					    ~HBA_PORT_FLAG_DIRTY_PORT;
				}
				/*
				 * Unmark current virtual_phy object as dirty.
				 */
				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
				found = 1;
				break;
			}
			if (found)
				break;
		}
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_get_port_table_after_reset - Construct temporary port table
 * @ioc: per adapter object
 * @port_table: address where port table needs to be constructed
 *
 * Return: number of HBA port entries available after reset.
 */
static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table)
{
	u16 sz, ioc_status;
	int i, j;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u64 attached_sas_addr;
	u8 found = 0, port_count = 0, port_id;

	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
	    * sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return port_count;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
		found = 0;
		/* skip phys without an established link */
		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
		    MPI2_SAS_NEG_LINK_RATE_1_5)
			continue;
		attached_handle =
		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
		if (_scsih_get_sas_address(
		    ioc, attached_handle, &attached_sas_addr) != 0) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			continue;
		}

		/* fold the phy into an existing entry when port id and
		 * attached address both match
		 */
		for (j = 0; j < port_count; j++) {
			port_id = sas_iounit_pg0->PhyData[i].Port;
			if (port_table[j].port_id == port_id &&
			    port_table[j].sas_address == attached_sas_addr) {
6145 port_table[j].phy_mask |= (1 << i); 6146 found = 1; 6147 break; 6148 } 6149 } 6150 6151 if (found) 6152 continue; 6153 6154 port_id = sas_iounit_pg0->PhyData[i].Port; 6155 port_table[port_count].port_id = port_id; 6156 port_table[port_count].phy_mask = (1 << i); 6157 port_table[port_count].sas_address = attached_sas_addr; 6158 port_count++; 6159 } 6160 out: 6161 kfree(sas_iounit_pg0); 6162 return port_count; 6163 } 6164 6165 enum hba_port_matched_codes { 6166 NOT_MATCHED = 0, 6167 MATCHED_WITH_ADDR_AND_PHYMASK, 6168 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, 6169 MATCHED_WITH_ADDR_AND_SUBPHYMASK, 6170 MATCHED_WITH_ADDR, 6171 }; 6172 6173 /** 6174 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry 6175 * from HBA port table 6176 * @ioc: per adapter object 6177 * @port_entry - hba port entry from temporary port table which needs to be 6178 * searched for matched entry in the HBA port table 6179 * @matched_port_entry - save matched hba port entry here 6180 * @count - count of matched entries 6181 * 6182 * return type of matched entry found. 
 */
static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_entry,
	struct hba_port **matched_port_entry, int *count)
{
	struct hba_port *port_table_entry, *matched_port = NULL;
	enum hba_port_matched_codes matched_code = NOT_MATCHED;
	int lcount = 0;
	*matched_port_entry = NULL;

	/* Only dirty (i.e. not-yet-revalidated-after-reset) entries are
	 * candidates.  Matches are ranked; an exact address+phymask match
	 * wins immediately, weaker matches keep scanning for a better one.
	 */
	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
			continue;

		/* Best: same SAS address and identical phy mask. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
			matched_port = port_table_entry;
			break;
		}

		/* Same address, overlapping phy mask and same port id. */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)
		    && (port_table_entry->port_id == port_entry->port_id)) {
			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
			matched_port = port_table_entry;
			continue;
		}

		/* Same address and overlapping phy mask only; do not
		 * downgrade a stronger match already found.
		 */
		if ((port_table_entry->sas_address == port_entry->sas_address)
		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
			matched_port = port_table_entry;
			continue;
		}

		/* Weakest: address match only; lcount tracks how many
		 * entries share the address so the caller can detect an
		 * ambiguous match.
		 */
		if (port_table_entry->sas_address == port_entry->sas_address) {
			if (matched_code ==
			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
				continue;
			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
				continue;
			matched_code = MATCHED_WITH_ADDR;
			matched_port = port_table_entry;
			lcount++;
		}
	}

	*matched_port_entry = matched_port;
	if (matched_code == MATCHED_WITH_ADDR)
		*count = lcount;
	return matched_code;
}

/**
 * _scsih_del_phy_part_of_anther_port - remove phy if
 it
 * is a part of another port.
 * @ioc: per adapter object
 * @port_table: port table after reset
 * @index: hba port entry index
 * @port_count: number of ports available after host reset
 * @offset: HBA phy bit offset
 *
 * If the phy at bit @offset now belongs to a different temporary port
 * entry than @index, detach it from the transport layer port; otherwise
 * claim the phy for entry @index.
 */
static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *port_table,
	int index, u8 port_count, int offset)
{
	struct _sas_node *sas_node = &ioc->sas_hba;
	u32 i, found = 0;

	for (i = 0; i < port_count; i++) {
		if (i == index)
			continue;

		/* Phy is claimed by another port entry: remove it from
		 * the transport-layer port it used to belong to.
		 */
		if (port_table[i].phy_mask & (1 << offset)) {
			mpt3sas_transport_del_phy_from_an_existing_port(
			    ioc, sas_node, &sas_node->phy[offset]);
			found = 1;
			break;
		}
	}
	/* Nobody else owns the phy; keep it on this entry. */
	if (!found)
		port_table[index].phy_mask |= (1 << offset);
}

/**
 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
 * right port
 * @ioc: per adapter object
 * @hba_port_entry: hba port table entry
 * @port_table: temporary port table
 * @index: hba port entry index
 * @port_count: number of ports available after host reset
 *
 * Reconcile the phy membership of @hba_port_entry with the post-reset
 * state recorded in @port_table[@index]; phys in the XOR of the two
 * masks moved, and are detached/attached accordingly.
 */
static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
	struct hba_port *hba_port_entry, struct hba_port *port_table,
	int index, int port_count)
{
	u32 phy_mask, offset = 0;
	struct _sas_node *sas_node = &ioc->sas_hba;

	/* Bits set here are phys whose port membership changed. */
	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;

	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
		if (phy_mask & (1 << offset)) {
			/* Phy left this port: hand off to whichever port
			 * owns it now (or re-claim if unowned).
			 */
			if (!(port_table[index].phy_mask & (1 << offset))) {
				_scsih_del_phy_part_of_anther_port(
				    ioc, port_table, index, port_count,
				    offset);
				continue;
			}
			/* Phy joined this port: detach from its previous
			 * transport port first, if it had one.
			 */
			if (sas_node->phy[offset].phy_belongs_to_port)
				mpt3sas_transport_del_phy_from_an_existing_port(
				    ioc, sas_node, &sas_node->phy[offset]);
			mpt3sas_transport_add_phy_to_an_existing_port(
			    ioc, sas_node, &sas_node->phy[offset],
			    hba_port_entry->sas_address,
			    hba_port_entry);
		}
	}
}

/**
 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
 * @ioc: per adapter object
 *
 * Frees every vphy still flagged MPT_VPHY_FLAG_DIRTY_PHY after a host
 * reset (i.e. not re-discovered), and marks a port dirty once it has
 * neither vphys nor a SAS address left.
 *
 * Returns nothing.
 */
static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;
	struct virtual_phy *vphy, *vphy_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!port->vphys_mask)
			continue;
		list_for_each_entry_safe(vphy, vphy_next,
		    &port->vphys_list, list) {
			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
				drsprintk(ioc, ioc_info(ioc,
				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
				    vphy, port->port_id,
				    vphy->phy_mask));
				port->vphys_mask &= ~vphy->phy_mask;
				list_del(&vphy->list);
				kfree(vphy);
			}
		}
		/* Port has lost all vphys and has no address: flag it so
		 * _scsih_del_dirty_port_entries() can reap it.
		 */
		if (!port->vphys_mask && !port->sas_address)
			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
	}
}

/**
 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
 * after host reset
 * @ioc: per adapter object
 *
 * Removes HBA port entries still flagged dirty after reset, except those
 * freshly created during recovery (HBA_PORT_FLAG_NEW_PORT).
 */
static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
{
	struct hba_port *port, *port_next;

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
		    port->flags & HBA_PORT_FLAG_NEW_PORT)
			continue;

		drsprintk(ioc, ioc_info(ioc,
		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
		    port, port->port_id, port->phy_mask));
		list_del(&port->list);
		kfree(port);
	}
}

/**
 * _scsih_sas_port_refresh - Update HBA port table after host reset
 * @ioc: per adapter object
 */
static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc) 6377 { 6378 u32 port_count = 0; 6379 struct hba_port *port_table; 6380 struct hba_port *port_table_entry; 6381 struct hba_port *port_entry = NULL; 6382 int i, j, count = 0, lcount = 0; 6383 int ret; 6384 u64 sas_addr; 6385 6386 drsprintk(ioc, ioc_info(ioc, 6387 "updating ports for sas_host(0x%016llx)\n", 6388 (unsigned long long)ioc->sas_hba.sas_address)); 6389 6390 port_table = kcalloc(ioc->sas_hba.num_phys, 6391 sizeof(struct hba_port), GFP_KERNEL); 6392 if (!port_table) 6393 return; 6394 6395 port_count = _scsih_get_port_table_after_reset(ioc, port_table); 6396 if (!port_count) 6397 return; 6398 6399 drsprintk(ioc, ioc_info(ioc, "New Port table\n")); 6400 for (j = 0; j < port_count; j++) 6401 drsprintk(ioc, ioc_info(ioc, 6402 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6403 port_table[j].port_id, 6404 port_table[j].phy_mask, port_table[j].sas_address)); 6405 6406 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) 6407 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT; 6408 6409 drsprintk(ioc, ioc_info(ioc, "Old Port table\n")); 6410 port_table_entry = NULL; 6411 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { 6412 drsprintk(ioc, ioc_info(ioc, 6413 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", 6414 port_table_entry->port_id, 6415 port_table_entry->phy_mask, 6416 port_table_entry->sas_address)); 6417 } 6418 6419 for (j = 0; j < port_count; j++) { 6420 ret = _scsih_look_and_get_matched_port_entry(ioc, 6421 &port_table[j], &port_entry, &count); 6422 if (!port_entry) { 6423 drsprintk(ioc, ioc_info(ioc, 6424 "No Matched entry for sas_addr(0x%16llx), Port:%d\n", 6425 port_table[j].sas_address, 6426 port_table[j].port_id)); 6427 continue; 6428 } 6429 6430 switch (ret) { 6431 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: 6432 case MATCHED_WITH_ADDR_AND_SUBPHYMASK: 6433 _scsih_add_or_del_phys_from_existing_port(ioc, 6434 port_entry, port_table, j, port_count); 6435 
break; 6436 case MATCHED_WITH_ADDR: 6437 sas_addr = port_table[j].sas_address; 6438 for (i = 0; i < port_count; i++) { 6439 if (port_table[i].sas_address == sas_addr) 6440 lcount++; 6441 } 6442 6443 if (count > 1 || lcount > 1) 6444 port_entry = NULL; 6445 else 6446 _scsih_add_or_del_phys_from_existing_port(ioc, 6447 port_entry, port_table, j, port_count); 6448 } 6449 6450 if (!port_entry) 6451 continue; 6452 6453 if (port_entry->port_id != port_table[j].port_id) 6454 port_entry->port_id = port_table[j].port_id; 6455 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT; 6456 port_entry->phy_mask = port_table[j].phy_mask; 6457 } 6458 6459 port_table_entry = NULL; 6460 } 6461 6462 /** 6463 * _scsih_alloc_vphy - allocate virtual_phy object 6464 * @ioc: per adapter object 6465 * @port_id: Port ID number 6466 * @phy_num: HBA Phy number 6467 * 6468 * Returns allocated virtual_phy object. 6469 */ 6470 static struct virtual_phy * 6471 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) 6472 { 6473 struct virtual_phy *vphy; 6474 struct hba_port *port; 6475 6476 port = mpt3sas_get_port_by_id(ioc, port_id, 0); 6477 if (!port) 6478 return NULL; 6479 6480 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num); 6481 if (!vphy) { 6482 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL); 6483 if (!vphy) 6484 return NULL; 6485 6486 /* 6487 * Enable bit corresponding to HBA phy number on its 6488 * parent hba_port object's vphys_mask field. 
6489 */ 6490 port->vphys_mask |= (1 << phy_num); 6491 vphy->phy_mask |= (1 << phy_num); 6492 6493 INIT_LIST_HEAD(&port->vphys_list); 6494 list_add_tail(&vphy->list, &port->vphys_list); 6495 6496 ioc_info(ioc, 6497 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", 6498 vphy, port->port_id, phy_num); 6499 } 6500 return vphy; 6501 } 6502 6503 /** 6504 * _scsih_sas_host_refresh - refreshing sas host object contents 6505 * @ioc: per adapter object 6506 * Context: user 6507 * 6508 * During port enable, fw will send topology events for every device. Its 6509 * possible that the handles may change from the previous setting, so this 6510 * code keeping handles updating if changed. 6511 */ 6512 static void 6513 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) 6514 { 6515 u16 sz; 6516 u16 ioc_status; 6517 int i; 6518 Mpi2ConfigReply_t mpi_reply; 6519 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 6520 u16 attached_handle; 6521 u8 link_rate, port_id; 6522 struct hba_port *port; 6523 Mpi2SasPhyPage0_t phy_pg0; 6524 6525 dtmprintk(ioc, 6526 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", 6527 (u64)ioc->sas_hba.sas_address)); 6528 6529 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys 6530 * sizeof(Mpi2SasIOUnit0PhyData_t)); 6531 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 6532 if (!sas_iounit_pg0) { 6533 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6534 __FILE__, __LINE__, __func__); 6535 return; 6536 } 6537 6538 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 6539 sas_iounit_pg0, sz)) != 0) 6540 goto out; 6541 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 6542 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 6543 goto out; 6544 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 6545 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; 6546 if (i == 0) 6547 ioc->sas_hba.handle = le16_to_cpu( 6548 sas_iounit_pg0->PhyData[0].ControllerDevHandle); 6549 port_id = sas_iounit_pg0->PhyData[i].Port; 6550 if 
 (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			/* First time this firmware port id is seen: create
			 * the hba_port entry for it.
			 */
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* Report at least 1.5G for phys with an attached device. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
 out:
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_sas_host_add - create sas host object
 * @ioc: per adapter object
 *
 * Creating host side data object, stored in ioc->sas_hba
 */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return;
	}
	ioc->sas_hba.phy = kcalloc(num_phys,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit0PhyData_t));
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		/* NOTE(review): bare "return" here, unlike the "goto out"
		 * used by sibling error paths; pg0/pg1 are still NULL so
		 * nothing leaks, but "goto out" would be consistent.
		 */
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}

	/* Cache the firmware's device-missing-delay settings; the UNIT_16
	 * flag means the timeout field is expressed in 16-second units.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			goto out;
		}

		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
	    ioc->sas_hba.handle,
	    (u64)ioc->sas_hba.sas_address,
	    ioc->sas_hba.num_phys);

	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		    ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}

/**
 * _scsih_expander_add - creating expander object
 * @ioc: per adapter object
 * @handle: expander handle
 *
 * Creating expander object, stored in ioc->sas_expander_list.
 *
 * Return: 0 for success, else error.
 */
static int
_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_node *sas_expander;
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ExpanderPage1_t expander_pg1;
	u32 ioc_status;
	u16 parent_handle;
	u64 sas_address, sas_address_parent = 0;
	int i;
	unsigned long flags;
	struct _sas_port *mpt3sas_port = NULL;
	u8 port_id;

	int rc = 0;

	if (!handle)
		return -1;

	if (ioc->shost_recovery || ioc->pci_error_recovery)
		return -1;

	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	/* handle out of order topology events */
	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
	    != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	port_id = expander_pg0.PhysicalPort;
	if (sas_address_parent != ioc->sas_hba.sas_address) {
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
		    sas_address_parent,
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Parent expander not known yet: add it first (recursive),
		 * handling out-of-order topology events.
		 */
		if (!sas_expander) {
			rc = _scsih_expander_add(ioc, parent_handle);
			if (rc != 0)
				return rc;
		}
	}

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_address = le64_to_cpu(expander_pg0.SASAddress);
	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* Already known: nothing to do. */
	if (sas_expander)
		return 0;

	sas_expander = kzalloc(sizeof(struct _sas_node),
	    GFP_KERNEL);
	if (!sas_expander) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -1;
	}

	sas_expander->handle = handle;
	sas_expander->num_phys = expander_pg0.NumPhys;
	sas_expander->sas_address_parent = sas_address_parent;
	sas_expander->sas_address = sas_address;
	sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
	if (!sas_expander->port) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		rc = -1;
		goto out_fail;
	}

	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
	    handle, parent_handle,
	    (u64)sas_expander->sas_address, sas_expander->num_phys);

	if (!sas_expander->num_phys)
		goto out_fail;
	sas_expander->phy = kcalloc(sas_expander->num_phys,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!sas_expander->phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		rc = -1;
		goto out_fail;
	}

	INIT_LIST_HEAD(&sas_expander->sas_port_list);
	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
	    sas_address_parent, sas_expander->port);
	if (!mpt3sas_port) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		rc = -1;
		goto out_fail;
	}
	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
	sas_expander->rphy = mpt3sas_port->rphy;

	for (i = 0 ; i < sas_expander->num_phys ; i++) {
		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
		    &expander_pg1, i, handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			rc = -1;
			goto out_fail;
		}
		sas_expander->phy[i].handle = handle;
		sas_expander->phy[i].phy_id = i;
		sas_expander->phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);

		if ((mpt3sas_transport_add_expander_phy(ioc,
		    &sas_expander->phy[i], expander_pg1,
		    sas_expander->parent_dev))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			rc = -1;
			goto out_fail;
		}
	}

	if (sas_expander->enclosure_handle) {
		enclosure_dev =
		    mpt3sas_scsih_enclosure_find_by_handle(ioc,
		    sas_expander->enclosure_handle);
		if (enclosure_dev)
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
	}

	_scsih_expander_node_add(ioc, sas_expander);
	return 0;

 out_fail:

	/* NOTE(review): sas_expander->phy is not freed on this path once
	 * allocated — looks like a small leak on the failure path; confirm
	 * against upstream before changing.
	 */
	if (mpt3sas_port)
		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
		    sas_address_parent, sas_expander->port);
	kfree(sas_expander);
	return rc;
}

/**
 * mpt3sas_expander_remove - removing expander object
 * @ioc: per adapter object
 * @sas_address: expander sas_address
 * @port: hba port entry the expander is attached through
 */
void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	struct hba_port *port)
{
	struct _sas_node *sas_expander;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	if (!port)
		return;

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander =
	    mpt3sas_scsih_expander_find_by_sas_address(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	if (sas_expander)
		_scsih_expander_node_remove(ioc, sas_expander);
}

/**
 * _scsih_done - internal SCSI_IO callback handler.
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when sending internal generated SCSI_IO.
 * The callback index passed is `ioc->scsih_cb_idx`
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	/* Ignore stale/unexpected completions. */
	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->scsih_cmds.smid != smid)
		return 1;
	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence the *4. */
		memcpy(ioc->scsih_cmds.reply, mpi_reply,
		    mpi_reply->MsgLength*4);
		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->scsih_cmds.done);
	return 1;
}




#define MPT3_MAX_LUNS (255)


/**
 * _scsih_check_access_status - check access flags
 * @ioc: per adapter object
 * @sas_address: sas address
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
	case
 MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
		desc = "sata capability failed";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
		desc = "sata affiliation conflict";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
		desc = "route not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
		desc = "smp error not addressable";
		break;
	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
		desc = "device blocked";
		break;
	/* All SATA-initialization failure sub-codes share one message. */
	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
		desc = "sata initialization failed";
		break;
	default:
		desc = "unknown";
		break;
	}

	if (!rc)
		return 0;

	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
	    desc, (u64)sas_address, handle);
	return rc;
}

/**
 * _scsih_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @parent_sas_address: sas address of parent expander or sas host
 * @handle: attached device handle
 * @phy_number: phy number
 * @link_rate: new link rate
 */
static void
_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	u32 ioc_status;
	unsigned long flags;
	u64 sas_address;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;
	struct hba_port *port;

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* wide port handling ~ we need only handle device once for the phy that
	 * is matched in sas device page zero
	 */
	if (phy_number != sas_device_pg0.PhyNum)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
	if (!(_scsih_is_end_device(device_info)))
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
	if (!port)
		goto out_unlock;
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);

	if (!sas_device)
		goto out_unlock;

	/* Firmware may have reassigned the handle after reset; update the
	 * cached handle plus the enclosure data derived from this page.
	 */
	if (unlikely(sas_device->handle != handle)) {
		starget = sas_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    sas_device->handle, handle);
		sas_target_priv_data->handle = handle;
		sas_device->handle = handle;
		if (le16_to_cpu(sas_device_pg0.Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			    sas_device_pg0.EnclosureLevel;
			memcpy(sas_device->connector_name,
			    sas_device_pg0.ConnectorName, 4);
			sas_device->connector_name[4] = '\0';
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
		    le16_to_cpu(sas_device_pg0.EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
		    sas_device->enclosure_handle);
		if (enclosure_dev) {
			sas_device->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
				    enclosure_dev->pg0.ChassisSlot;
			}
		}
	}

	/* check if device is present */
	if (!(le16_to_cpu(sas_device_pg0.Flags) &
	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
		    handle);
		goto out_unlock;
	}

	/* check if there were any issues with discovery */
	if (_scsih_check_access_status(ioc, sas_address, handle,
	    sas_device_pg0.AccessStatus))
		goto out_unlock;

	/* Device is responsive: drop the lock before unblocking I/O. */
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	_scsih_ublock_io_device(ioc, sas_address, port);

	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device)
		sas_device_put(sas_device);
}

/**
 * _scsih_add_device - creating sas device object
 * @ioc: per adapter object
 * @handle: sas device handle
 * @phy_num: phy number end device attached to
 * @is_pd: is this hidden raid component
 *
 * Creating end device object, stored in ioc->sas_device_list.
 *
 * Return: 0 for success, non-zero for failure.
7207 */ 7208 static int 7209 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 7210 u8 is_pd) 7211 { 7212 Mpi2ConfigReply_t mpi_reply; 7213 Mpi2SasDevicePage0_t sas_device_pg0; 7214 struct _sas_device *sas_device; 7215 struct _enclosure_node *enclosure_dev = NULL; 7216 u32 ioc_status; 7217 u64 sas_address; 7218 u32 device_info; 7219 u8 port_id; 7220 7221 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7222 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 7223 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7224 __FILE__, __LINE__, __func__); 7225 return -1; 7226 } 7227 7228 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7229 MPI2_IOCSTATUS_MASK; 7230 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7231 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7232 __FILE__, __LINE__, __func__); 7233 return -1; 7234 } 7235 7236 /* check if this is end device */ 7237 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 7238 if (!(_scsih_is_end_device(device_info))) 7239 return -1; 7240 set_bit(handle, ioc->pend_os_device_add); 7241 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 7242 7243 /* check if device is present */ 7244 if (!(le16_to_cpu(sas_device_pg0.Flags) & 7245 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 7246 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 7247 handle); 7248 return -1; 7249 } 7250 7251 /* check if there were any issues with discovery */ 7252 if (_scsih_check_access_status(ioc, sas_address, handle, 7253 sas_device_pg0.AccessStatus)) 7254 return -1; 7255 7256 port_id = sas_device_pg0.PhysicalPort; 7257 sas_device = mpt3sas_get_sdev_by_addr(ioc, 7258 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0)); 7259 if (sas_device) { 7260 clear_bit(handle, ioc->pend_os_device_add); 7261 sas_device_put(sas_device); 7262 return -1; 7263 } 7264 7265 if (sas_device_pg0.EnclosureHandle) { 7266 enclosure_dev = 7267 mpt3sas_scsih_enclosure_find_by_handle(ioc, 7268 le16_to_cpu(sas_device_pg0.EnclosureHandle)); 7269 if 
(enclosure_dev == NULL) 7270 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 7271 sas_device_pg0.EnclosureHandle); 7272 } 7273 7274 sas_device = kzalloc(sizeof(struct _sas_device), 7275 GFP_KERNEL); 7276 if (!sas_device) { 7277 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7278 __FILE__, __LINE__, __func__); 7279 return 0; 7280 } 7281 7282 kref_init(&sas_device->refcount); 7283 sas_device->handle = handle; 7284 if (_scsih_get_sas_address(ioc, 7285 le16_to_cpu(sas_device_pg0.ParentDevHandle), 7286 &sas_device->sas_address_parent) != 0) 7287 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7288 __FILE__, __LINE__, __func__); 7289 sas_device->enclosure_handle = 7290 le16_to_cpu(sas_device_pg0.EnclosureHandle); 7291 if (sas_device->enclosure_handle != 0) 7292 sas_device->slot = 7293 le16_to_cpu(sas_device_pg0.Slot); 7294 sas_device->device_info = device_info; 7295 sas_device->sas_address = sas_address; 7296 sas_device->phy = sas_device_pg0.PhyNum; 7297 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 7298 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 7299 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0); 7300 if (!sas_device->port) { 7301 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7302 __FILE__, __LINE__, __func__); 7303 goto out; 7304 } 7305 7306 if (le16_to_cpu(sas_device_pg0.Flags) 7307 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 7308 sas_device->enclosure_level = 7309 sas_device_pg0.EnclosureLevel; 7310 memcpy(sas_device->connector_name, 7311 sas_device_pg0.ConnectorName, 4); 7312 sas_device->connector_name[4] = '\0'; 7313 } else { 7314 sas_device->enclosure_level = 0; 7315 sas_device->connector_name[0] = '\0'; 7316 } 7317 /* get enclosure_logical_id & chassis_slot*/ 7318 sas_device->is_chassis_slot_valid = 0; 7319 if (enclosure_dev) { 7320 sas_device->enclosure_logical_id = 7321 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 7322 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 7323 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 7324 sas_device->is_chassis_slot_valid = 1; 7325 sas_device->chassis_slot = 7326 enclosure_dev->pg0.ChassisSlot; 7327 } 7328 } 7329 7330 /* get device name */ 7331 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 7332 7333 if (ioc->wait_for_discovery_to_complete) 7334 _scsih_sas_device_init_add(ioc, sas_device); 7335 else 7336 _scsih_sas_device_add(ioc, sas_device); 7337 7338 out: 7339 sas_device_put(sas_device); 7340 return 0; 7341 } 7342 7343 /** 7344 * _scsih_remove_device - removing sas device object 7345 * @ioc: per adapter object 7346 * @sas_device: the sas_device object 7347 */ 7348 static void 7349 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 7350 struct _sas_device *sas_device) 7351 { 7352 struct MPT3SAS_TARGET *sas_target_priv_data; 7353 7354 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && 7355 (sas_device->pfa_led_on)) { 7356 _scsih_turn_off_pfa_led(ioc, sas_device); 7357 sas_device->pfa_led_on = 0; 7358 } 7359 7360 dewtprintk(ioc, 7361 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", 7362 __func__, 7363 
			    sas_device->handle, (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* Mark the target deleted and unblock its I/O (presumably so queued
	 * commands complete/fail promptly) before invalidating the handle.
	 */
	if (sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, sas_device->sas_address,
		    sas_device->port);
		sas_target_priv_data->handle =
		    MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* hide_drives set: device was never exposed through the transport
	 * layer, so there is no transport port to remove.
	 */
	if (!ioc->hide_drives)
		mpt3sas_transport_port_remove(ioc,
		    sas_device->sas_address,
		    sas_device->sas_address_parent,
		    sas_device->port);

	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__,
			    sas_device->handle, (u64)sas_device->sas_address));
	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));
}

/**
 * _scsih_sas_topology_change_event_debug - debug for topology event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	/* Decode the expander-level status for the log line. */
	switch (event_data->ExpStatus) {
	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
		status_str = "add";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
	case 0:
		status_str = "responding";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
	    "start_phy(%02d), count(%d)\n",
	    le16_to_cpu(event_data->ExpanderDevHandle),
	    le16_to_cpu(event_data->EnclosureHandle),
	    event_data->StartPhyNum, event_data->NumEntries);
	/* One log line per PHY entry; entries with a zero attached
	 * device handle carry no device and are skipped.
	 */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			status_str = "target add";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
			status_str = "link rate change";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* LinkRate field: high nibble = current rate, low nibble =
		 * previous rate (see the unpacking below).
		 */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
		    handle, status_str, link_rate, prev_link_rate);

	}
}

/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	struct hba_port *port;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* Bail out while the host is recovering or being removed. */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* Resolve the parent (expander or HBA) under the node lock; the
	 * captured sas_address/max_phys/port are used after unlocking.
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
		port = sas_expander->port;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys = ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* Vacant phys are ignored unless the target stopped
		 * responding (removal must still be processed).
		 */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* OS device add still pending: treat the link-rate
			 * change as a target-add and fall through.
			 */
			fallthrough;

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate, port);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address, port);

	return 0;
}

/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	/* Map the firmware reason code to human-readable text. */
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
		reason_str = "sata init failure";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case
	    MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality complete";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}
	/* No trailing newline here: SMART events append ASC/ASCQ via
	 * pr_cont() before the final newline below.
	 */
	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->SASAddress),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @event_data: The fw event
 * Context: user.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* Only the internal-device-reset start/complete pair toggles
	 * tm_busy; all other reason codes are ignored here.
	 */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address,
	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		ioc_info(ioc,
		    "%s tm_busy flag for handle(0x%04x)\n",
		    (target_priv_data->tm_busy == 1) ?
		    "Enable" : "Disable",
		    target_priv_data->handle);

 out:
	/* Drop the reference taken by __mpt3sas_get_sdev_by_addr(). */
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/**
 * _scsih_check_pcie_access_status - check access flags
 * @ioc: per adapter object
 * @wwid: wwid
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle, u8 access_status)
{
	u8 rc = 1;
	char *desc = NULL;

	switch (access_status) {
	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
		desc = "PCIe device capability failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
		desc = "PCIe device blocked";
		/* NOTE(review): the message text below looks truncated
		 * ("...\n ll only be added to the internal list") —
		 * verify the intended wording before changing it.
		 */
		ioc_info(ioc,
		    "Device with Access Status (%s): wwid(0x%016llx), "
		    "handle(0x%04x)\n ll only be added to the internal list",
		    desc, (u64)wwid, handle);
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
		desc = "PCIe device mem space access failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
		desc = "PCIe device unsupported";
		break;
	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
		desc = "PCIe device MSIx Required";
		break;
	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
		desc = "PCIe device init fail max";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
		desc = "PCIe device status unknown";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
		desc = "nvme ready timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
		desc = "nvme device configuration unsupported";
		break;
	case
	    MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
		desc = "nvme identify failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
		desc = "nvme qconfig failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
		desc = "nvme qcreation failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
		desc = "nvme eventcfg failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
		desc = "nvme get feature stat failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
		desc = "nvme idle timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
		desc = "nvme failure status";
		break;
	default:
		/* Unrecognized status: log the raw code and report failure. */
		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
			access_status, (u64)wwid, handle);
		return rc;
	}

	if (!rc)
		return rc;

	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
		 desc, (u64)wwid, handle);
	return rc;
}

/**
 * _scsih_pcie_device_remove_from_sml - removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Mark the target deleted, unblock its I/O and invalidate the
	 * firmware handle before removal.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* Blocked devices were never exposed to the SCSI midlayer, so
	 * there is no target to remove for them.
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	kfree(pcie_device->serial_number);
}


/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
Mpi2ConfigReply_t mpi_reply; 7903 Mpi26PCIeDevicePage0_t pcie_device_pg0; 7904 u32 ioc_status; 7905 struct _pcie_device *pcie_device; 7906 u64 wwid; 7907 unsigned long flags; 7908 struct scsi_target *starget; 7909 struct MPT3SAS_TARGET *sas_target_priv_data; 7910 u32 device_info; 7911 7912 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 7913 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) 7914 return; 7915 7916 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 7917 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 7918 return; 7919 7920 /* check if this is end device */ 7921 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 7922 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 7923 return; 7924 7925 wwid = le64_to_cpu(pcie_device_pg0.WWID); 7926 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 7927 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 7928 7929 if (!pcie_device) { 7930 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7931 return; 7932 } 7933 7934 if (unlikely(pcie_device->handle != handle)) { 7935 starget = pcie_device->starget; 7936 sas_target_priv_data = starget->hostdata; 7937 pcie_device->access_status = pcie_device_pg0.AccessStatus; 7938 starget_printk(KERN_INFO, starget, 7939 "handle changed from(0x%04x) to (0x%04x)!!!\n", 7940 pcie_device->handle, handle); 7941 sas_target_priv_data->handle = handle; 7942 pcie_device->handle = handle; 7943 7944 if (le32_to_cpu(pcie_device_pg0.Flags) & 7945 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 7946 pcie_device->enclosure_level = 7947 pcie_device_pg0.EnclosureLevel; 7948 memcpy(&pcie_device->connector_name[0], 7949 &pcie_device_pg0.ConnectorName[0], 4); 7950 } else { 7951 pcie_device->enclosure_level = 0; 7952 pcie_device->connector_name[0] = '\0'; 7953 } 7954 } 7955 7956 /* check if device is present */ 7957 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 7958 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 7959 ioc_info(ioc, "device is not present handle(0x%04x), 
flags!!!\n", 7960 handle); 7961 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7962 pcie_device_put(pcie_device); 7963 return; 7964 } 7965 7966 /* check if there were any issues with discovery */ 7967 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 7968 pcie_device_pg0.AccessStatus)) { 7969 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7970 pcie_device_put(pcie_device); 7971 return; 7972 } 7973 7974 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7975 pcie_device_put(pcie_device); 7976 7977 _scsih_ublock_io_device(ioc, wwid, NULL); 7978 7979 return; 7980 } 7981 7982 /** 7983 * _scsih_pcie_add_device - creating pcie device object 7984 * @ioc: per adapter object 7985 * @handle: pcie device handle 7986 * 7987 * Creating end device object, stored in ioc->pcie_device_list. 7988 * 7989 * Return: 1 means queue the event later, 0 means complete the event 7990 */ 7991 static int 7992 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 7993 { 7994 Mpi26PCIeDevicePage0_t pcie_device_pg0; 7995 Mpi26PCIeDevicePage2_t pcie_device_pg2; 7996 Mpi2ConfigReply_t mpi_reply; 7997 struct _pcie_device *pcie_device; 7998 struct _enclosure_node *enclosure_dev; 7999 u32 ioc_status; 8000 u64 wwid; 8001 8002 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 8003 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { 8004 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8005 __FILE__, __LINE__, __func__); 8006 return 0; 8007 } 8008 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8009 MPI2_IOCSTATUS_MASK; 8010 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8011 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8012 __FILE__, __LINE__, __func__); 8013 return 0; 8014 } 8015 8016 set_bit(handle, ioc->pend_os_device_add); 8017 wwid = le64_to_cpu(pcie_device_pg0.WWID); 8018 8019 /* check if device is present */ 8020 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 8021 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 8022 ioc_err(ioc, "device is not present 
handle(0x04%x)!!!\n", 8023 handle); 8024 return 0; 8025 } 8026 8027 /* check if there were any issues with discovery */ 8028 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 8029 pcie_device_pg0.AccessStatus)) 8030 return 0; 8031 8032 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu 8033 (pcie_device_pg0.DeviceInfo)))) 8034 return 0; 8035 8036 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); 8037 if (pcie_device) { 8038 clear_bit(handle, ioc->pend_os_device_add); 8039 pcie_device_put(pcie_device); 8040 return 0; 8041 } 8042 8043 /* PCIe Device Page 2 contains read-only information about a 8044 * specific NVMe device; therefore, this page is only 8045 * valid for NVMe devices and skip for pcie devices of type scsi. 8046 */ 8047 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8048 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8049 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 8050 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 8051 handle)) { 8052 ioc_err(ioc, 8053 "failure at %s:%d/%s()!\n", __FILE__, 8054 __LINE__, __func__); 8055 return 0; 8056 } 8057 8058 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8059 MPI2_IOCSTATUS_MASK; 8060 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8061 ioc_err(ioc, 8062 "failure at %s:%d/%s()!\n", __FILE__, 8063 __LINE__, __func__); 8064 return 0; 8065 } 8066 } 8067 8068 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); 8069 if (!pcie_device) { 8070 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8071 __FILE__, __LINE__, __func__); 8072 return 0; 8073 } 8074 8075 kref_init(&pcie_device->refcount); 8076 pcie_device->id = ioc->pcie_target_id++; 8077 pcie_device->channel = PCIE_CHANNEL; 8078 pcie_device->handle = handle; 8079 pcie_device->access_status = pcie_device_pg0.AccessStatus; 8080 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8081 pcie_device->wwid = wwid; 8082 pcie_device->port_num = pcie_device_pg0.PortNum; 8083 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & 8084 
MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 8085 8086 pcie_device->enclosure_handle = 8087 le16_to_cpu(pcie_device_pg0.EnclosureHandle); 8088 if (pcie_device->enclosure_handle != 0) 8089 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); 8090 8091 if (le32_to_cpu(pcie_device_pg0.Flags) & 8092 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 8093 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; 8094 memcpy(&pcie_device->connector_name[0], 8095 &pcie_device_pg0.ConnectorName[0], 4); 8096 } else { 8097 pcie_device->enclosure_level = 0; 8098 pcie_device->connector_name[0] = '\0'; 8099 } 8100 8101 /* get enclosure_logical_id */ 8102 if (pcie_device->enclosure_handle) { 8103 enclosure_dev = 8104 mpt3sas_scsih_enclosure_find_by_handle(ioc, 8105 pcie_device->enclosure_handle); 8106 if (enclosure_dev) 8107 pcie_device->enclosure_logical_id = 8108 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 8109 } 8110 /* TODO -- Add device name once FW supports it */ 8111 if (!(mpt3sas_scsih_is_pcie_scsi_device( 8112 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { 8113 pcie_device->nvme_mdts = 8114 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); 8115 pcie_device->shutdown_latency = 8116 le16_to_cpu(pcie_device_pg2.ShutdownLatency); 8117 /* 8118 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency 8119 * if drive's RTD3 Entry Latency is greater then IOC's 8120 * max_shutdown_latency. 
		 */
		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
			ioc->max_shutdown_latency =
				pcie_device->shutdown_latency;
		/* Use the firmware-supplied controller reset timeout when
		 * present, otherwise fall back to 30 (same default as the
		 * pcie-scsi branch below).
		 */
		if (pcie_device_pg2.ControllerResetTO)
			pcie_device->reset_timeout =
				pcie_device_pg2.ControllerResetTO;
		else
			pcie_device->reset_timeout = 30;
	} else
		pcie_device->reset_timeout = 30;

	if (ioc->wait_for_discovery_to_complete)
		_scsih_pcie_device_init_add(ioc, pcie_device);
	else
		_scsih_pcie_device_add(ioc, pcie_device);

	pcie_device_put(pcie_device);
	return 0;
}

/**
 * _scsih_pcie_topology_change_event_debug - debug for topology
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	/* Decode the switch-level status for the log line. */
	switch (event_data->SwitchStatus) {
	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
		status_str = "add";
		break;
	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
	case 0:
		status_str = "responding";
		break;
	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
	/* NOTE(review): the concatenated literal lacks a space between
	 * "enclosure_handle(0x%04x)" and "start_port" — output preserved
	 * as-is; verify intended formatting.
	 */
	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
	    "start_port(%02d), count(%d)\n",
	    le16_to_cpu(event_data->SwitchDevHandle),
	    le16_to_cpu(event_data->EnclosureHandle),
	    event_data->StartPortNum, event_data->NumEntries);
	/* One log line per port entry; entries with a zero attached
	 * device handle carry no device and are skipped.
	 */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		port_number = event_data->StartPortNum + i;
		reason_code = event_data->PortEntry[i].PortStatus;
		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			status_str = "target add";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link rate change";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->PortEntry[i].CurrentPortInfo &
		    MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
		    MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
		    " link rate: new(0x%02x), old(0x%02x)\n", port_number,
		    handle, status_str, link_rate, prev_link_rate);
	}
}

/**
 * _scsih_pcie_topology_change_event - handle PCIe topology
 * changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
	    (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the host is being recovered or torn down */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check per entry: the event may be invalidated mid-loop */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
		    & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
		    & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				/* device already known: release lookup ref */
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* device vanished while pending: rewrite this entry
			 * as a DEV_ADDED event and fall into the add path
			 */
			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
			    MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
				    MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		/* NOTE(review): PS_DELAY_NOT_RESPONDING is intentionally
		 * unhandled here — presumably covered by device-missing-delay
		 * handling elsewhere; confirm before adding a case.
		 */
		}
	}
}

/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	/* translate the firmware reason code to readable text */
	switch (event_data->ReasonCode) {
	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
		reason_str = "device init failure";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
		reason_str = "pcie hot reset failed";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "PCIE device status change: (%s)\n"
		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->WWID),
		 le16_to_cpu(event_data->TaskTag));
	/* NOTE(review): the SMART_DATA branch already ends with '\n', so the
	 * unconditional pr_cont("\n") below emits an extra blank line in that
	 * case — cosmetic only.
	 */
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_pcie_device_status_change_event - handle device status
 * change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Tracks internal-device-reset start/complete notifications and toggles
 * the target's tm_busy flag accordingly; all other reason codes are
 * ignored here.
 */
static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
	    (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_device_status_change_event_debug(ioc,
		    event_data);

	/* only the internal-device-reset start/complete pair matters here */
	if (event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	wwid = le64_to_cpu(event_data->WWID);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device || !pcie_device->starget)
		goto out;

	target_priv_data = pcie_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* busy while an internal device reset is in flight */
	if (event_data->ReasonCode ==
	    MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;
out:
	if (pcie_device)
		pcie_device_put(pcie_device);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}

/**
 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		reason_str = "enclosure add";
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		reason_str = "enclosure remove";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "enclosure status change: (%s)\n"
		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->EnclosureHandle),
		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
		 le16_to_cpu(event_data->StartSlot));
}

/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Maintains ioc->enclosure_list: on ADD, fetch enclosure page 0 from
 * firmware and cache it; on NOT_RESPONDING, drop the cached entry.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
	    (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
		    (Mpi2EventDataSasEnclDevStatusChange_t *)
		    fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		/* only allocate/cache when not already tracked */
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
				    GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
			    &enclosure_dev->pg0,
			    MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
			    enclosure_handle);

			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK)) {
				/* config page read failed: discard entry */
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
			    &ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}

/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every outstanding SCSI IO, issues QUERY_TASK to learn whether the
 * IOC still owns it, and ABORT_TASK where it does not.  The scsi_lookup
 * lock is dropped around each TM because mpt3sas_scsih_issue_tm() sleeps;
 * the whole scan restarts (bounded to 5 passes) whenever a TM fails or
 * another broadcast AEN arrives mid-scan.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
	    (Mpi2EventDataSasBroadcastPrimitive_t *)
	    fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n",
					 __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* issue_tm sleeps: must not hold the lookup lock across it */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		    MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		    mpi_reply->ResponseCode ==
		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		/* bound the abort retries per command */
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while scanning: rescan once more */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventDataSasDiscovery_t *event_data =
	    (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
		ioc_info(ioc, "discovery event: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont("discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
	}

	/* first discovery-start with no phys yet: bring up the SAS host */
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
	    !ioc->sas_hba.num_phys) {
		if (disable_discovery > 0 && ioc->shost_recovery) {
			/* Wait for the reset to complete */
			while (ioc->shost_recovery)
				ssleep(1);
		}
		_scsih_sas_host_add(ioc);
	}
}

/**
 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
 * events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Pure logging: warns about SMP failures/timeouts reported by firmware
 * during discovery; no driver state is changed.
 */
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
	    (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;

	switch (event_data->ReasonCode) {
	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Debug-only logging of PCIe enumeration start/complete; no-op unless
 * MPT_DEBUG_EVENT_WORK_TASK logging is enabled.
 */
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi26EventDataPCIeEnumeration_t *event_data =
	    (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;

	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
		return;

	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		 "started" : "completed",
		 event_data->Flags);
	if (event_data->EnumerationStatus)
		pr_cont("enumeration_status(0x%08x)",
			le32_to_cpu(event_data->EnumerationStatus));
	pr_cont("\n");
}

/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Sends a PHYSDISK_HIDDEN RAID action to firmware and waits (10s) for the
 * reply.  Serialized via ioc->scsih_cmds.
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* not applicable on MPI 2.0 generation HBAs */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* NOTE(review): issue_reset is passed by name — presumably
		 * mpt3sas_check_cmd_timeout is a macro that sets it; confirm
		 * against mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 *
 * Callback for starget_for_each_device(): toggles upper-level driver
 * attachment and asks the midlayer to reprobe the device.
 **/
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
		    sdev->no_uld_attach ? "hiding" : "exposing");
	WARN_ON(scsi_device_reprobe(sdev));
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	/* volume already tracked: nothing to do */
	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			/* midlayer rejected the device: unwind tracking */
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* during initial discovery, defer SML add; just record the
		 * volume as a boot-device candidate
		 */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}

/**
 * _scsih_sas_volume_delete - delete volume
 * @ioc: per adapter object
 * @handle: volume device handle
 * Context: user.
 */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			/* flag deletion so new IO to this target is rejected */
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* scsi_remove_target() sleeps: call it outside the spinlock */
	if (starget)
		scsi_remove_target(&starget->dev);
}

/**
 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* device is no longer part of a volume */
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	/* release the reference taken by __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* resolve the owning volume before taking the device lock */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* release the reference taken by __mpt3sas_get_sdev_by_handle() */
	sas_device_put(sas_device);
}

/**
 * _scsih_sas_pd_delete - delete pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	_scsih_device_remove_by_handle(ioc, handle);
}

/**
 * _scsih_sas_pd_add - add pd component
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
 */
static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	u64 sas_address;
	u16 parent_handle;

	/* track this handle as an IR physical disk */
	set_bit(handle, ioc->pd_handles);

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		/* already known: just enable fastpath and drop the ref */
		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
		sas_device_put(sas_device);
		return;
	}

	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
		mpt3sas_transport_update_links(ioc, sas_address, handle,
		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
		    mpt3sas_get_port_by_id(ioc,
		    sas_device_pg0.PhysicalPort, 0));

	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
	_scsih_add_device(ioc, handle, 0, 1);
}

/**
 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	u8 element_type;
	int i;
	char *reason_str = NULL, *element_str = NULL;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];

	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
		 "foreign" : "native",
		 event_data->NumElements);
	/* print one decoded line per config element */
	for (i = 0; i < event_data->NumElements; i++, element++) {
		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			reason_str = "add";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			reason_str = "remove";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
			reason_str = "no change";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			reason_str = "hide";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			reason_str = "unhide";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
			reason_str = "volume_created";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
			reason_str = "volume_deleted";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			reason_str = "pd_created";
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			reason_str = "pd_deleted";
			break;
		default:
			reason_str = "unknown reason";
			break;
		}
		element_type = le16_to_cpu(element->ElementFlags) &
		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
		switch (element_type) {
		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
			element_str = "volume";
			break;
		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
			element_str = "phys disk";
			break;
		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
			element_str = "hot spare";
			break;
		default:
			element_str = "unknown element";
			break;
		}
		pr_info("\t(%s:%s), vol handle(0x%04x), " \
		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
		    reason_str, le16_to_cpu(element->VolDevHandle),
		    le16_to_cpu(element->PhysDiskDevHandle),
		    element->PhysDiskNum);
	}
}

/**
 * _scsih_sas_ir_config_change_event - handle ir configuration change events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Dispatches each config element to the volume add/delete or physical-disk
 * hide/expose/add/delete helpers.  Foreign configurations are skipped for
 * volume operations; warpdrive controllers skip all pd operations.
 */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
	    (Mpi2EventDataIrConfigChangeList_t *)
	    fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	    (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		/* during host recovery only re-apply fastpath for hidden pds */
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
				    le16_to_cpu(element->PhysDiskDevHandle),
				    element->PhysDiskNum);
		}
		return;
	}

	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}

/**
 * _scsih_sas_ir_volume_event - IR volume event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
9307 */ 9308 static void 9309 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, 9310 struct fw_event_work *fw_event) 9311 { 9312 u64 wwid; 9313 unsigned long flags; 9314 struct _raid_device *raid_device; 9315 u16 handle; 9316 u32 state; 9317 int rc; 9318 Mpi2EventDataIrVolume_t *event_data = 9319 (Mpi2EventDataIrVolume_t *) fw_event->event_data; 9320 9321 if (ioc->shost_recovery) 9322 return; 9323 9324 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) 9325 return; 9326 9327 handle = le16_to_cpu(event_data->VolDevHandle); 9328 state = le32_to_cpu(event_data->NewValue); 9329 if (!ioc->hide_ir_msg) 9330 dewtprintk(ioc, 9331 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9332 __func__, handle, 9333 le32_to_cpu(event_data->PreviousValue), 9334 state)); 9335 switch (state) { 9336 case MPI2_RAID_VOL_STATE_MISSING: 9337 case MPI2_RAID_VOL_STATE_FAILED: 9338 _scsih_sas_volume_delete(ioc, handle); 9339 break; 9340 9341 case MPI2_RAID_VOL_STATE_ONLINE: 9342 case MPI2_RAID_VOL_STATE_DEGRADED: 9343 case MPI2_RAID_VOL_STATE_OPTIMAL: 9344 9345 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9346 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9347 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9348 9349 if (raid_device) 9350 break; 9351 9352 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); 9353 if (!wwid) { 9354 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9355 __FILE__, __LINE__, __func__); 9356 break; 9357 } 9358 9359 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); 9360 if (!raid_device) { 9361 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9362 __FILE__, __LINE__, __func__); 9363 break; 9364 } 9365 9366 raid_device->id = ioc->sas_id++; 9367 raid_device->channel = RAID_CHANNEL; 9368 raid_device->handle = handle; 9369 raid_device->wwid = wwid; 9370 _scsih_raid_device_add(ioc, raid_device); 9371 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 9372 raid_device->id, 0); 9373 if (rc) 9374 
_scsih_raid_device_remove(ioc, raid_device); 9375 break; 9376 9377 case MPI2_RAID_VOL_STATE_INITIALIZING: 9378 default: 9379 break; 9380 } 9381 } 9382 9383 /** 9384 * _scsih_sas_ir_physical_disk_event - PD event 9385 * @ioc: per adapter object 9386 * @fw_event: The fw_event_work object 9387 * Context: user. 9388 */ 9389 static void 9390 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, 9391 struct fw_event_work *fw_event) 9392 { 9393 u16 handle, parent_handle; 9394 u32 state; 9395 struct _sas_device *sas_device; 9396 Mpi2ConfigReply_t mpi_reply; 9397 Mpi2SasDevicePage0_t sas_device_pg0; 9398 u32 ioc_status; 9399 Mpi2EventDataIrPhysicalDisk_t *event_data = 9400 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; 9401 u64 sas_address; 9402 9403 if (ioc->shost_recovery) 9404 return; 9405 9406 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 9407 return; 9408 9409 handle = le16_to_cpu(event_data->PhysDiskDevHandle); 9410 state = le32_to_cpu(event_data->NewValue); 9411 9412 if (!ioc->hide_ir_msg) 9413 dewtprintk(ioc, 9414 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", 9415 __func__, handle, 9416 le32_to_cpu(event_data->PreviousValue), 9417 state)); 9418 9419 switch (state) { 9420 case MPI2_RAID_PD_STATE_ONLINE: 9421 case MPI2_RAID_PD_STATE_DEGRADED: 9422 case MPI2_RAID_PD_STATE_REBUILDING: 9423 case MPI2_RAID_PD_STATE_OPTIMAL: 9424 case MPI2_RAID_PD_STATE_HOT_SPARE: 9425 9426 if (!ioc->is_warpdrive) 9427 set_bit(handle, ioc->pd_handles); 9428 9429 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 9430 if (sas_device) { 9431 sas_device_put(sas_device); 9432 return; 9433 } 9434 9435 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9436 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 9437 handle))) { 9438 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9439 __FILE__, __LINE__, __func__); 9440 return; 9441 } 9442 9443 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9444 MPI2_IOCSTATUS_MASK; 9445 if (ioc_status 
!= MPI2_IOCSTATUS_SUCCESS) { 9446 ioc_err(ioc, "failure at %s:%d/%s()!\n", 9447 __FILE__, __LINE__, __func__); 9448 return; 9449 } 9450 9451 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9452 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 9453 mpt3sas_transport_update_links(ioc, sas_address, handle, 9454 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5, 9455 mpt3sas_get_port_by_id(ioc, 9456 sas_device_pg0.PhysicalPort, 0)); 9457 9458 _scsih_add_device(ioc, handle, 0, 1); 9459 9460 break; 9461 9462 case MPI2_RAID_PD_STATE_OFFLINE: 9463 case MPI2_RAID_PD_STATE_NOT_CONFIGURED: 9464 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: 9465 default: 9466 break; 9467 } 9468 } 9469 9470 /** 9471 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event 9472 * @ioc: per adapter object 9473 * @event_data: event data payload 9474 * Context: user. 9475 */ 9476 static void 9477 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, 9478 Mpi2EventDataIrOperationStatus_t *event_data) 9479 { 9480 char *reason_str = NULL; 9481 9482 switch (event_data->RAIDOperation) { 9483 case MPI2_EVENT_IR_RAIDOP_RESYNC: 9484 reason_str = "resync"; 9485 break; 9486 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: 9487 reason_str = "online capacity expansion"; 9488 break; 9489 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: 9490 reason_str = "consistency check"; 9491 break; 9492 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: 9493 reason_str = "background init"; 9494 break; 9495 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: 9496 reason_str = "make data consistent"; 9497 break; 9498 } 9499 9500 if (!reason_str) 9501 return; 9502 9503 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", 9504 reason_str, 9505 le16_to_cpu(event_data->VolDevHandle), 9506 event_data->PercentComplete); 9507 } 9508 9509 /** 9510 * _scsih_sas_ir_operation_status_event - handle RAID operation events 9511 * @ioc: per adapter object 9512 * @fw_event: 
The fw_event_work object 9513 * Context: user. 9514 */ 9515 static void 9516 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, 9517 struct fw_event_work *fw_event) 9518 { 9519 Mpi2EventDataIrOperationStatus_t *event_data = 9520 (Mpi2EventDataIrOperationStatus_t *) 9521 fw_event->event_data; 9522 static struct _raid_device *raid_device; 9523 unsigned long flags; 9524 u16 handle; 9525 9526 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 9527 (!ioc->hide_ir_msg)) 9528 _scsih_sas_ir_operation_status_event_debug(ioc, 9529 event_data); 9530 9531 /* code added for raid transport support */ 9532 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 9533 9534 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9535 handle = le16_to_cpu(event_data->VolDevHandle); 9536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 9537 if (raid_device) 9538 raid_device->percent_complete = 9539 event_data->PercentComplete; 9540 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9541 } 9542 } 9543 9544 /** 9545 * _scsih_prep_device_scan - initialize parameters prior to device scan 9546 * @ioc: per adapter object 9547 * 9548 * Set the deleted flag prior to device scan. If the device is found during 9549 * the scan, then we clear the deleted flag. 9550 */ 9551 static void 9552 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) 9553 { 9554 struct MPT3SAS_DEVICE *sas_device_priv_data; 9555 struct scsi_device *sdev; 9556 9557 shost_for_each_device(sdev, ioc->shost) { 9558 sas_device_priv_data = sdev->hostdata; 9559 if (sas_device_priv_data && sas_device_priv_data->sas_target) 9560 sas_device_priv_data->sas_target->deleted = 1; 9561 } 9562 } 9563 9564 /** 9565 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 9566 * @ioc: per adapter object 9567 * @sas_device_pg0: SAS Device page 0 9568 * 9569 * After host reset, find out whether devices are still responding. 9570 * Used in _scsih_remove_unresponsive_sas_devices. 
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* Look up the cached enclosure entry, if the device reports one. */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* Match on SAS address + slot + hba_port, not on the
		 * firmware handle, which may have changed across the reset.
		 */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* Device survived the reset: clear stale flags. */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				    "enclosure logical id(0x%016llx), slot(%d)\n",
				    (unsigned long long)
				    sas_device->enclosure_logical_id,
				    sas_device->slot);
		}
		/* Refresh enclosure level/connector from the new page 0. */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
				sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
				&sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
				enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* Adopt the new firmware handle if it changed. */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
			sas_device->handle);
		sas_device->handle = le16_to_cpu(
			sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
				le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_create_enclosure_list_after_reset - Free Existing list,
 *	And create enclosure list by scanning all Enclosure Page(0)s
 * @ioc: per adapter object
 */
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	u16 enclosure_handle;
	int rc;

	/* Free existing enclosure list */
	mpt3sas_free_enclosure_list(ioc);

	/* Re constructing enclosure list after reset*/
	enclosure_handle = 0xFFFF;
	do {
		enclosure_dev =
			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
		if (!enclosure_dev) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}
		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
				enclosure_handle);

		/* GET_NEXT_HANDLE fails once every enclosure has been read;
		 * that terminates the loop (the spare node is freed).
		 */
		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
						MPI2_IOCSTATUS_MASK)) {
			kfree(enclosure_dev);
			return;
		}
		list_add_tail(&enclosure_dev->list,
						&ioc->enclosure_list);
		enclosure_handle =
			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
	} while (1);
}

/**
 * _scsih_search_responding_sas_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	ioc_info(ioc, "search for end-devices: start\n");

	if (list_empty(&ioc->sas_device_list))
		goto out;

	/* Walk all device handles the firmware currently reports. */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		/* Only end devices are tracked here (not expanders etc). */
		if (!(_scsih_is_end_device(device_info)))
			continue;
		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
	}

 out:
	ioc_info(ioc, "search for end-devices: complete\n");
}

/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
9754 */ 9755 static void 9756 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, 9757 Mpi26PCIeDevicePage0_t *pcie_device_pg0) 9758 { 9759 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 9760 struct scsi_target *starget; 9761 struct _pcie_device *pcie_device; 9762 unsigned long flags; 9763 9764 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9765 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 9766 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 9767 && (pcie_device->slot == le16_to_cpu( 9768 pcie_device_pg0->Slot))) { 9769 pcie_device->access_status = 9770 pcie_device_pg0->AccessStatus; 9771 pcie_device->responding = 1; 9772 starget = pcie_device->starget; 9773 if (starget && starget->hostdata) { 9774 sas_target_priv_data = starget->hostdata; 9775 sas_target_priv_data->tm_busy = 0; 9776 sas_target_priv_data->deleted = 0; 9777 } else 9778 sas_target_priv_data = NULL; 9779 if (starget) { 9780 starget_printk(KERN_INFO, starget, 9781 "handle(0x%04x), wwid(0x%016llx) ", 9782 pcie_device->handle, 9783 (unsigned long long)pcie_device->wwid); 9784 if (pcie_device->enclosure_handle != 0) 9785 starget_printk(KERN_INFO, starget, 9786 "enclosure logical id(0x%016llx), " 9787 "slot(%d)\n", 9788 (unsigned long long) 9789 pcie_device->enclosure_logical_id, 9790 pcie_device->slot); 9791 } 9792 9793 if (((le32_to_cpu(pcie_device_pg0->Flags)) & 9794 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && 9795 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 9796 pcie_device->enclosure_level = 9797 pcie_device_pg0->EnclosureLevel; 9798 memcpy(&pcie_device->connector_name[0], 9799 &pcie_device_pg0->ConnectorName[0], 4); 9800 } else { 9801 pcie_device->enclosure_level = 0; 9802 pcie_device->connector_name[0] = '\0'; 9803 } 9804 9805 if (pcie_device->handle == le16_to_cpu( 9806 pcie_device_pg0->DevHandle)) 9807 goto out; 9808 pr_info("\thandle changed from(0x%04x)!!!\n", 9809 pcie_device->handle); 9810 pcie_device->handle = le16_to_cpu( 9811 
pcie_device_pg0->DevHandle); 9812 if (sas_target_priv_data) 9813 sas_target_priv_data->handle = 9814 le16_to_cpu(pcie_device_pg0->DevHandle); 9815 goto out; 9816 } 9817 } 9818 9819 out: 9820 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9821 } 9822 9823 /** 9824 * _scsih_search_responding_pcie_devices - 9825 * @ioc: per adapter object 9826 * 9827 * After host reset, find out whether devices are still responding. 9828 * If not remove. 9829 */ 9830 static void 9831 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) 9832 { 9833 Mpi26PCIeDevicePage0_t pcie_device_pg0; 9834 Mpi2ConfigReply_t mpi_reply; 9835 u16 ioc_status; 9836 u16 handle; 9837 u32 device_info; 9838 9839 ioc_info(ioc, "search for end-devices: start\n"); 9840 9841 if (list_empty(&ioc->pcie_device_list)) 9842 goto out; 9843 9844 handle = 0xFFFF; 9845 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 9846 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9847 handle))) { 9848 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9849 MPI2_IOCSTATUS_MASK; 9850 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9851 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", 9852 __func__, ioc_status, 9853 le32_to_cpu(mpi_reply.IOCLogInfo)); 9854 break; 9855 } 9856 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9857 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 9858 if (!(_scsih_is_nvme_pciescsi_device(device_info))) 9859 continue; 9860 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 9861 } 9862 out: 9863 ioc_info(ioc, "search for PCIe end-devices: complete\n"); 9864 } 9865 9866 /** 9867 * _scsih_mark_responding_raid_device - mark a raid_device as responding 9868 * @ioc: per adapter object 9869 * @wwid: world wide identifier for raid volume 9870 * @handle: device handle 9871 * 9872 * After host reset, find out whether devices are still responding. 9873 * Used in _scsih_remove_unresponsive_raid_devices. 
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		/* Match by WWID; only volumes already exposed to the SCSI
		 * midlayer (starget set) are of interest.
		 */
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* Drop the lock around the printk and the warpdrive
			 * re-init, then re-take it for the handle update.
			 * Both exit paths below return without resuming the
			 * list walk.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			/* Firmware assigned a new handle across the reset. */
			pr_info("\thandle changed from(0x%04x)!!!\n",
				raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_search_responding_raid_devices - mark responding raid volumes
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
9929 */ 9930 static void 9931 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) 9932 { 9933 Mpi2RaidVolPage1_t volume_pg1; 9934 Mpi2RaidVolPage0_t volume_pg0; 9935 Mpi2RaidPhysDiskPage0_t pd_pg0; 9936 Mpi2ConfigReply_t mpi_reply; 9937 u16 ioc_status; 9938 u16 handle; 9939 u8 phys_disk_num; 9940 9941 if (!ioc->ir_firmware) 9942 return; 9943 9944 ioc_info(ioc, "search for raid volumes: start\n"); 9945 9946 if (list_empty(&ioc->raid_device_list)) 9947 goto out; 9948 9949 handle = 0xFFFF; 9950 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, 9951 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 9952 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9953 MPI2_IOCSTATUS_MASK; 9954 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 9955 break; 9956 handle = le16_to_cpu(volume_pg1.DevHandle); 9957 9958 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, 9959 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 9960 sizeof(Mpi2RaidVolPage0_t))) 9961 continue; 9962 9963 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || 9964 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || 9965 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) 9966 _scsih_mark_responding_raid_device(ioc, 9967 le64_to_cpu(volume_pg1.WWID), handle); 9968 } 9969 9970 /* refresh the pd_handles */ 9971 if (!ioc->is_warpdrive) { 9972 phys_disk_num = 0xFF; 9973 memset(ioc->pd_handles, 0, ioc->pd_handles_sz); 9974 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 9975 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 9976 phys_disk_num))) { 9977 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9978 MPI2_IOCSTATUS_MASK; 9979 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 9980 break; 9981 phys_disk_num = pd_pg0.PhysDiskNum; 9982 handle = le16_to_cpu(pd_pg0.DevHandle); 9983 set_bit(handle, ioc->pd_handles); 9984 } 9985 } 9986 out: 9987 ioc_info(ioc, "search for responding raid volumes: complete\n"); 9988 } 9989 9990 /** 9991 * 
_scsih_mark_responding_expander - mark a expander as responding 9992 * @ioc: per adapter object 9993 * @expander_pg0:SAS Expander Config Page0 9994 * 9995 * After host reset, find out whether devices are still responding. 9996 * Used in _scsih_remove_unresponsive_expanders. 9997 */ 9998 static void 9999 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, 10000 Mpi2ExpanderPage0_t *expander_pg0) 10001 { 10002 struct _sas_node *sas_expander = NULL; 10003 unsigned long flags; 10004 int i; 10005 struct _enclosure_node *enclosure_dev = NULL; 10006 u16 handle = le16_to_cpu(expander_pg0->DevHandle); 10007 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); 10008 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); 10009 struct hba_port *port = mpt3sas_get_port_by_id( 10010 ioc, expander_pg0->PhysicalPort, 0); 10011 10012 if (enclosure_handle) 10013 enclosure_dev = 10014 mpt3sas_scsih_enclosure_find_by_handle(ioc, 10015 enclosure_handle); 10016 10017 spin_lock_irqsave(&ioc->sas_node_lock, flags); 10018 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 10019 if (sas_expander->sas_address != sas_address) 10020 continue; 10021 if (sas_expander->port != port) 10022 continue; 10023 sas_expander->responding = 1; 10024 10025 if (enclosure_dev) { 10026 sas_expander->enclosure_logical_id = 10027 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 10028 sas_expander->enclosure_handle = 10029 le16_to_cpu(expander_pg0->EnclosureHandle); 10030 } 10031 10032 if (sas_expander->handle == handle) 10033 goto out; 10034 pr_info("\texpander(0x%016llx): handle changed" \ 10035 " from(0x%04x) to (0x%04x)!!!\n", 10036 (unsigned long long)sas_expander->sas_address, 10037 sas_expander->handle, handle); 10038 sas_expander->handle = handle; 10039 for (i = 0 ; i < sas_expander->num_phys ; i++) 10040 sas_expander->phy[i].handle = handle; 10041 goto out; 10042 } 10043 out: 10044 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 10045 } 10046 10047 /** 
 * _scsih_search_responding_expanders -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u64 sas_address;
	u16 handle;
	u8 port;

	ioc_info(ioc, "search for expanders: start\n");

	if (list_empty(&ioc->sas_expander_list))
		goto out;

	/* Walk all expander handles the firmware currently reports. */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;

		handle = le16_to_cpu(expander_pg0.DevHandle);
		sas_address = le64_to_cpu(expander_pg0.SASAddress);
		port = expander_pg0.PhysicalPort;
		pr_info(
		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
		    handle, (unsigned long long)sas_address,
		    (ioc->multipath_on_hba ?
		    port : MULTIPATH_DISABLED_PORT_ID));
		_scsih_mark_responding_expander(ioc, &expander_pg0);
	}

 out:
	ioc_info(ioc, "search for expanders: complete\n");
}

/**
 * _scsih_remove_unresponding_devices - removing unresponding devices
 * @ioc: per adapter object
 */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Same two-phase prune for PCIe end devices. */
	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}

/* Re-read expander page 1 for every phy and refresh transport links. */
static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander, u16 handle)
{
	Mpi2ExpanderPage1_t expander_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int i;

	for (i = 0 ; i < sas_expander->num_phys ; i++) {
		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
		    &expander_pg1, i, handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
		    expander_pg1.NegotiatedLinkRate >> 4,
		    sas_expander->port);
	}
}

/**
 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
 * @ioc: per adapter object
 */
static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ExpanderPage0_t expander_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2EventIrConfigElement_t element;
	Mpi2ConfigReply_t mpi_reply;
	u8 phys_disk_num, port_id;
	u16 ioc_status;
	u16 handle, parent_handle;
	u64 sas_address;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _sas_node *expander_device;
	static struct _raid_device *raid_device;
	u8 retry_count;
	unsigned long flags;

	ioc_info(ioc, "scan devices: start\n");

	_scsih_sas_host_refresh(ioc);

	ioc_info(ioc, "\tscan devices: expanders start\n");

	/* expanders */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply,
&expander_pg0, 10247 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { 10248 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10249 MPI2_IOCSTATUS_MASK; 10250 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10251 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10252 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10253 break; 10254 } 10255 handle = le16_to_cpu(expander_pg0.DevHandle); 10256 spin_lock_irqsave(&ioc->sas_node_lock, flags); 10257 port_id = expander_pg0.PhysicalPort; 10258 expander_device = mpt3sas_scsih_expander_find_by_sas_address( 10259 ioc, le64_to_cpu(expander_pg0.SASAddress), 10260 mpt3sas_get_port_by_id(ioc, port_id, 0)); 10261 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 10262 if (expander_device) 10263 _scsih_refresh_expander_links(ioc, expander_device, 10264 handle); 10265 else { 10266 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 10267 handle, 10268 (u64)le64_to_cpu(expander_pg0.SASAddress)); 10269 _scsih_expander_add(ioc, handle); 10270 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 10271 handle, 10272 (u64)le64_to_cpu(expander_pg0.SASAddress)); 10273 } 10274 } 10275 10276 ioc_info(ioc, "\tscan devices: expanders complete\n"); 10277 10278 if (!ioc->ir_firmware) 10279 goto skip_to_sas; 10280 10281 ioc_info(ioc, "\tscan devices: phys disk start\n"); 10282 10283 /* phys disk */ 10284 phys_disk_num = 0xFF; 10285 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 10286 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 10287 phys_disk_num))) { 10288 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 10289 MPI2_IOCSTATUS_MASK; 10290 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 10291 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 10292 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 10293 break; 10294 } 10295 phys_disk_num = pd_pg0.PhysDiskNum; 10296 handle = le16_to_cpu(pd_pg0.DevHandle); 10297 
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			/* already tracked: drop the lookup reference */
			sas_device_put(sas_device);
			continue;
		}
		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle) != 0)
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle,
		    &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			port_id = sas_device_pg0.PhysicalPort;
			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, sas_device_pg0.PhyNum,
			    MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			set_bit(handle, ioc->pd_handles);
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    1)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}

	ioc_info(ioc, "\tscan devices: phys disk complete\n");

	ioc_info(ioc, "\tscan devices: volumes start\n");

	/* volumes */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(volume_pg1.DevHandle);
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_wwid(ioc,
		    le64_to_cpu(volume_pg1.WWID));
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (raid_device)
			continue;
		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		/* only surface volumes in a usable state */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
			memset(&element, 0,
			    sizeof(Mpi2EventIrConfigElement_t));
			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
			element.VolDevHandle = volume_pg1.DevHandle;
			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
			    volume_pg1.DevHandle);
			_scsih_sas_volume_add(ioc, &element);
			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
			    volume_pg1.DevHandle);
		}
	}

	ioc_info(ioc, "\tscan devices: volumes complete\n");

 skip_to_sas:

	ioc_info(ioc, "\tscan devices: end devices start\n");

	/* sas devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		if (!(_scsih_is_end_device(
		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
			continue;
		port_id = sas_device_pg0.PhysicalPort;
		sas_device = mpt3sas_get_sdev_by_addr(ioc,
		    le64_to_cpu(sas_device_pg0.SASAddress),
		    mpt3sas_get_port_by_id(ioc, port_id, 0));
		if (sas_device) {
			sas_device_put(sas_device);
			continue;
		}
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc, port_id, 0));
			retry_count = 0;
			/* This will retry adding the end device.
			 * _scsih_add_device() will decide on retries and
			 * return "1" when it should be retried
			 */
			while (_scsih_add_device(ioc, handle, retry_count++,
			    0)) {
				ssleep(1);
			}
			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
			    handle,
			    (u64)le64_to_cpu(sas_device_pg0.SASAddress));
		}
	}
	ioc_info(ioc, "\tscan devices: end devices complete\n");
	ioc_info(ioc, "\tscan devices: pcie end devices start\n");

	/* pcie devices */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
			    ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
			break;
		}
		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
		if (!(_scsih_is_nvme_pciescsi_device(
		    le32_to_cpu(pcie_device_pg0.DeviceInfo))))
			continue;
		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
		    le64_to_cpu(pcie_device_pg0.WWID));
		if (pcie_device) {
			pcie_device_put(pcie_device);
			continue;
		}
		retry_count = 0;
		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
		_scsih_pcie_add_device(ioc, handle);

		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
		    handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
	}
	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
	ioc_info(ioc, "scan devices: complete\n");
}

/**
 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}

/**
 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
 *	scsi & tm cmds.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/* fail back any scsih/tm internal command that is still pending */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	    ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}

/**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
	/* skip rediscovery while the driver is still loading, or when
	 * discovery is disabled and the HBA reported no phys
	 */
	if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
	    !ioc->sas_hba.num_phys)) {
		if (ioc->multipath_on_hba) {
			_scsih_sas_port_refresh(ioc);
			_scsih_update_vphys_after_reset(ioc);
		}
		_scsih_prep_device_scan(ioc);
		_scsih_create_enclosure_list_after_reset(ioc);
		_scsih_search_responding_sas_devices(ioc);
		_scsih_search_responding_pcie_devices(ioc);
		_scsih_search_responding_raid_devices(ioc);
		_scsih_search_responding_expanders(ioc);
		_scsih_error_recovery_delete_devices(ioc);
	}
}

/**
 * _mpt3sas_fw_work - delayed task for processing firmware events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
		    (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
		    fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		while (scsi_host_in_recovery(ioc->shost) ||
		    ioc->shost_recovery) {
			/*
			 * If we're unloading or cancelling the work, bail.
			 * Otherwise, this can become an infinite loop.
			 */
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
		    ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
_scsih_sas_ir_operation_status_event(ioc, fw_event); 10628 break; 10629 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 10630 _scsih_pcie_device_status_change_event(ioc, fw_event); 10631 break; 10632 case MPI2_EVENT_PCIE_ENUMERATION: 10633 _scsih_pcie_enumeration_event(ioc, fw_event); 10634 break; 10635 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10636 _scsih_pcie_topology_change_event(ioc, fw_event); 10637 ioc->current_event = NULL; 10638 return; 10639 break; 10640 } 10641 out: 10642 fw_event_work_put(fw_event); 10643 ioc->current_event = NULL; 10644 } 10645 10646 /** 10647 * _firmware_event_work 10648 * @work: The fw_event_work object 10649 * Context: user. 10650 * 10651 * wrappers for the work thread handling firmware events 10652 */ 10653 10654 static void 10655 _firmware_event_work(struct work_struct *work) 10656 { 10657 struct fw_event_work *fw_event = container_of(work, 10658 struct fw_event_work, work); 10659 10660 _mpt3sas_fw_work(fw_event->ioc, fw_event); 10661 } 10662 10663 /** 10664 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) 10665 * @ioc: per adapter object 10666 * @msix_index: MSIX table index supplied by the OS 10667 * @reply: reply message frame(lower 32bit addr) 10668 * Context: interrupt. 10669 * 10670 * This function merely adds a new work task into ioc->firmware_event_thread. 10671 * The tasks are worked from _firmware_event_work in user context. 10672 * 10673 * Return: 1 meaning mf should be freed from _base_interrupt 10674 * 0 means the mf is freed from this function. 
10675 */ 10676 u8 10677 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, 10678 u32 reply) 10679 { 10680 struct fw_event_work *fw_event; 10681 Mpi2EventNotificationReply_t *mpi_reply; 10682 u16 event; 10683 u16 sz; 10684 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData; 10685 10686 /* events turned off due to host reset */ 10687 if (ioc->pci_error_recovery) 10688 return 1; 10689 10690 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 10691 10692 if (unlikely(!mpi_reply)) { 10693 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 10694 __FILE__, __LINE__, __func__); 10695 return 1; 10696 } 10697 10698 event = le16_to_cpu(mpi_reply->Event); 10699 10700 if (event != MPI2_EVENT_LOG_ENTRY_ADDED) 10701 mpt3sas_trigger_event(ioc, event, 0); 10702 10703 switch (event) { 10704 /* handle these */ 10705 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 10706 { 10707 Mpi2EventDataSasBroadcastPrimitive_t *baen_data = 10708 (Mpi2EventDataSasBroadcastPrimitive_t *) 10709 mpi_reply->EventData; 10710 10711 if (baen_data->Primitive != 10712 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) 10713 return 1; 10714 10715 if (ioc->broadcast_aen_busy) { 10716 ioc->broadcast_aen_pending++; 10717 return 1; 10718 } else 10719 ioc->broadcast_aen_busy = 1; 10720 break; 10721 } 10722 10723 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 10724 _scsih_check_topo_delete_events(ioc, 10725 (Mpi2EventDataSasTopologyChangeList_t *) 10726 mpi_reply->EventData); 10727 break; 10728 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10729 _scsih_check_pcie_topo_remove_events(ioc, 10730 (Mpi26EventDataPCIeTopologyChangeList_t *) 10731 mpi_reply->EventData); 10732 break; 10733 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 10734 _scsih_check_ir_config_unhide_events(ioc, 10735 (Mpi2EventDataIrConfigChangeList_t *) 10736 mpi_reply->EventData); 10737 break; 10738 case MPI2_EVENT_IR_VOLUME: 10739 _scsih_check_volume_delete_events(ioc, 10740 (Mpi2EventDataIrVolume_t *) 10741 mpi_reply->EventData); 10742 
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 *log_code;

		/* WarpDrive-specific log entries only */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = (u32 *)log_entry->LogData;

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (le32_to_cpu(*log_code)) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	/* these events are handled entirely from the deferred work task */
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
		    (Mpi2EventDataTemperature_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    ActiveCableEventData->ActiveCablePowerRequirement);
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
			    ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in 32-bit dwords; convert to bytes */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return 1;
	}
	/* queue the deferred work item carrying a copy of the event data */
	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
	return 1;
}

/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	    &sas_expander->sas_port_list, port_list) {
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* recurses into child expanders */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    sas_expander->port->port_id);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}

/**
 * _scsih_nvme_shutdown - NVMe shutdown notification
 * @ioc: per adapter object
 *
 * Sending IoUnitControl request with shutdown operation code to alert IOC that
 * the host system is shutting down so that IOC can issue NVMe shutdown to
 * NVMe drives attached to it.
 */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
	    "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
	    ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
	    ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		/* NOTE(review): adjacent string literals concatenate with no
		 * separating space before "ioc_status" — message prints as
		 * "...(complete):ioc_status(...)"; confirm intended.
		 */
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
		    "ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));
	}
out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}


/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
			    le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _scsih_get_shost_and_ioc - get shost and ioc
 *			and verify whether they are NULL or not
 * @pdev: PCI device struct
 * @shost: address of scsi host pointer
 * @ioc: address of HBA adapter pointer
 *
 * Return zero if *shost and *ioc are not NULL otherwise return error number.
 */
static int
_scsih_get_shost_and_ioc(struct pci_dev *pdev,
	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
{
	*shost = pci_get_drvdata(pdev);
	if (*shost == NULL) {
		dev_err(&pdev->dev, "pdev's driver data is null\n");
		return -ENXIO;
	}

	*ioc = shost_priv(*shost);
	if (*ioc == NULL) {
		dev_err(&pdev->dev, "shost's private data is null\n");
		return -ENXIO;
	}

	return 0;
}

/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 * Routine called when unloading the driver.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;
	struct hba_port *port, *port_next;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/* device already gone (surprise removal): fail outstanding cmds */
	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the event work queue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1. so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);
	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	mpt3sas_destroy_debugfs(ioc);
	sas_remove_host(shost);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		    raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
	    list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	    &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	list_for_each_entry_safe(port, port_next,
	    &ioc->port_table_list, list) {
		list_del(&port->list);
		kfree(port);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	mpt3sas_base_detach(ioc);
	/* unlink this adapter from the global adapter list */
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}

/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	if (!pci_device_is_present(pdev))
		_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the event work queue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
		    &ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}


/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes.  Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* pick the boot device in priority order: requested, requested
	 * alternate, then current
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device = ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device = ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device = ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	if (channel == RAID_CHANNEL) {
		raid_device = device;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = device;
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = device;
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
list_move_tail(&sas_device->list, &ioc->sas_device_list); 11267 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11268 11269 if (ioc->hide_drives) 11270 return; 11271 11272 if (!port) 11273 return; 11274 11275 if (!mpt3sas_transport_port_add(ioc, handle, 11276 sas_address_parent, port)) { 11277 _scsih_sas_device_remove(ioc, sas_device); 11278 } else if (!sas_device->starget) { 11279 if (!ioc->is_driver_loading) { 11280 mpt3sas_transport_port_remove(ioc, 11281 sas_address, 11282 sas_address_parent, port); 11283 _scsih_sas_device_remove(ioc, sas_device); 11284 } 11285 } 11286 } 11287 } 11288 11289 /** 11290 * _scsih_probe_raid - reporting raid volumes to scsi-ml 11291 * @ioc: per adapter object 11292 * 11293 * Called during initial loading of the driver. 11294 */ 11295 static void 11296 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) 11297 { 11298 struct _raid_device *raid_device, *raid_next; 11299 int rc; 11300 11301 list_for_each_entry_safe(raid_device, raid_next, 11302 &ioc->raid_device_list, list) { 11303 if (raid_device->starget) 11304 continue; 11305 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 11306 raid_device->id, 0); 11307 if (rc) 11308 _scsih_raid_device_remove(ioc, raid_device); 11309 } 11310 } 11311 11312 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) 11313 { 11314 struct _sas_device *sas_device = NULL; 11315 unsigned long flags; 11316 11317 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11318 if (!list_empty(&ioc->sas_device_init_list)) { 11319 sas_device = list_first_entry(&ioc->sas_device_init_list, 11320 struct _sas_device, list); 11321 sas_device_get(sas_device); 11322 } 11323 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11324 11325 return sas_device; 11326 } 11327 11328 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, 11329 struct _sas_device *sas_device) 11330 { 11331 unsigned long flags; 11332 11333 spin_lock_irqsave(&ioc->sas_device_lock, flags); 11334 11335 /* 11336 * Since we dropped 
the lock during the call to port_add(), we need to 11337 * be careful here that somebody else didn't move or delete this item 11338 * while we were busy with other things. 11339 * 11340 * If it was on the list, we need a put() for the reference the list 11341 * had. Either way, we need a get() for the destination list. 11342 */ 11343 if (!list_empty(&sas_device->list)) { 11344 list_del_init(&sas_device->list); 11345 sas_device_put(sas_device); 11346 } 11347 11348 sas_device_get(sas_device); 11349 list_add_tail(&sas_device->list, &ioc->sas_device_list); 11350 11351 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 11352 } 11353 11354 /** 11355 * _scsih_probe_sas - reporting sas devices to sas transport 11356 * @ioc: per adapter object 11357 * 11358 * Called during initial loading of the driver. 11359 */ 11360 static void 11361 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) 11362 { 11363 struct _sas_device *sas_device; 11364 11365 if (ioc->hide_drives) 11366 return; 11367 11368 while ((sas_device = get_next_sas_device(ioc))) { 11369 if (!mpt3sas_transport_port_add(ioc, sas_device->handle, 11370 sas_device->sas_address_parent, sas_device->port)) { 11371 _scsih_sas_device_remove(ioc, sas_device); 11372 sas_device_put(sas_device); 11373 continue; 11374 } else if (!sas_device->starget) { 11375 /* 11376 * When asyn scanning is enabled, its not possible to 11377 * remove devices while scanning is turned on due to an 11378 * oops in scsi_sysfs_add_sdev()->add_device()-> 11379 * sysfs_addrm_start() 11380 */ 11381 if (!ioc->is_driver_loading) { 11382 mpt3sas_transport_port_remove(ioc, 11383 sas_device->sas_address, 11384 sas_device->sas_address_parent, 11385 sas_device->port); 11386 _scsih_sas_device_remove(ioc, sas_device); 11387 sas_device_put(sas_device); 11388 continue; 11389 } 11390 } 11391 sas_device_make_active(ioc, sas_device); 11392 sas_device_put(sas_device); 11393 } 11394 } 11395 11396 /** 11397 * get_next_pcie_device - Get the next pcie device 11398 * @ioc: per 
adapter object 11399 * 11400 * Get the next pcie device from pcie_device_init_list list. 11401 * 11402 * Return: pcie device structure if pcie_device_init_list list is not empty 11403 * otherwise returns NULL 11404 */ 11405 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) 11406 { 11407 struct _pcie_device *pcie_device = NULL; 11408 unsigned long flags; 11409 11410 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 11411 if (!list_empty(&ioc->pcie_device_init_list)) { 11412 pcie_device = list_first_entry(&ioc->pcie_device_init_list, 11413 struct _pcie_device, list); 11414 pcie_device_get(pcie_device); 11415 } 11416 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 11417 11418 return pcie_device; 11419 } 11420 11421 /** 11422 * pcie_device_make_active - Add pcie device to pcie_device_list list 11423 * @ioc: per adapter object 11424 * @pcie_device: pcie device object 11425 * 11426 * Add the pcie device which has registered with SCSI Transport Later to 11427 * pcie_device_list list 11428 */ 11429 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, 11430 struct _pcie_device *pcie_device) 11431 { 11432 unsigned long flags; 11433 11434 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 11435 11436 if (!list_empty(&pcie_device->list)) { 11437 list_del_init(&pcie_device->list); 11438 pcie_device_put(pcie_device); 11439 } 11440 pcie_device_get(pcie_device); 11441 list_add_tail(&pcie_device->list, &ioc->pcie_device_list); 11442 11443 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 11444 } 11445 11446 /** 11447 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml 11448 * @ioc: per adapter object 11449 * 11450 * Called during initial loading of the driver. 
11451 */ 11452 static void 11453 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) 11454 { 11455 struct _pcie_device *pcie_device; 11456 int rc; 11457 11458 /* PCIe Device List */ 11459 while ((pcie_device = get_next_pcie_device(ioc))) { 11460 if (pcie_device->starget) { 11461 pcie_device_put(pcie_device); 11462 continue; 11463 } 11464 if (pcie_device->access_status == 11465 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { 11466 pcie_device_make_active(ioc, pcie_device); 11467 pcie_device_put(pcie_device); 11468 continue; 11469 } 11470 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, 11471 pcie_device->id, 0); 11472 if (rc) { 11473 _scsih_pcie_device_remove(ioc, pcie_device); 11474 pcie_device_put(pcie_device); 11475 continue; 11476 } else if (!pcie_device->starget) { 11477 /* 11478 * When async scanning is enabled, its not possible to 11479 * remove devices while scanning is turned on due to an 11480 * oops in scsi_sysfs_add_sdev()->add_device()-> 11481 * sysfs_addrm_start() 11482 */ 11483 if (!ioc->is_driver_loading) { 11484 /* TODO-- Need to find out whether this condition will 11485 * occur or not 11486 */ 11487 _scsih_pcie_device_remove(ioc, pcie_device); 11488 pcie_device_put(pcie_device); 11489 continue; 11490 } 11491 } 11492 pcie_device_make_active(ioc, pcie_device); 11493 pcie_device_put(pcie_device); 11494 } 11495 } 11496 11497 /** 11498 * _scsih_probe_devices - probing for devices 11499 * @ioc: per adapter object 11500 * 11501 * Called during initial loading of the driver. 
11502 */ 11503 static void 11504 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) 11505 { 11506 u16 volume_mapping_flags; 11507 11508 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) 11509 return; /* return when IOC doesn't support initiator mode */ 11510 11511 _scsih_probe_boot_devices(ioc); 11512 11513 if (ioc->ir_firmware) { 11514 volume_mapping_flags = 11515 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & 11516 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; 11517 if (volume_mapping_flags == 11518 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { 11519 _scsih_probe_raid(ioc); 11520 _scsih_probe_sas(ioc); 11521 } else { 11522 _scsih_probe_sas(ioc); 11523 _scsih_probe_raid(ioc); 11524 } 11525 } else { 11526 _scsih_probe_sas(ioc); 11527 _scsih_probe_pcie(ioc); 11528 } 11529 } 11530 11531 /** 11532 * scsih_scan_start - scsi lld callback for .scan_start 11533 * @shost: SCSI host pointer 11534 * 11535 * The shost has the ability to discover targets on its own instead 11536 * of scanning the entire bus. In our implemention, we will kick off 11537 * firmware discovery. 11538 */ 11539 static void 11540 scsih_scan_start(struct Scsi_Host *shost) 11541 { 11542 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 11543 int rc; 11544 if (diag_buffer_enable != -1 && diag_buffer_enable != 0) 11545 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); 11546 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) 11547 mpt3sas_enable_diag_buffer(ioc, 1); 11548 11549 if (disable_discovery > 0) 11550 return; 11551 11552 ioc->start_scan = 1; 11553 rc = mpt3sas_port_enable(ioc); 11554 11555 if (rc != 0) 11556 ioc_info(ioc, "port enable: FAILED\n"); 11557 } 11558 11559 /** 11560 * scsih_scan_finished - scsi lld callback for .scan_finished 11561 * @shost: SCSI host pointer 11562 * @time: elapsed time of the scan in jiffies 11563 * 11564 * This function will be called periodicallyn until it returns 1 with the 11565 * scsi_host and the elapsed time of the scan in jiffies. 
In our implemention, 11566 * we wait for firmware discovery to complete, then return 1. 11567 */ 11568 static int 11569 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) 11570 { 11571 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 11572 11573 if (disable_discovery > 0) { 11574 ioc->is_driver_loading = 0; 11575 ioc->wait_for_discovery_to_complete = 0; 11576 return 1; 11577 } 11578 11579 if (time >= (300 * HZ)) { 11580 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 11581 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n"); 11582 ioc->is_driver_loading = 0; 11583 return 1; 11584 } 11585 11586 if (ioc->start_scan) 11587 return 0; 11588 11589 if (ioc->start_scan_failed) { 11590 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n", 11591 ioc->start_scan_failed); 11592 ioc->is_driver_loading = 0; 11593 ioc->wait_for_discovery_to_complete = 0; 11594 ioc->remove_host = 1; 11595 return 1; 11596 } 11597 11598 ioc_info(ioc, "port enable: SUCCESS\n"); 11599 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; 11600 11601 if (ioc->wait_for_discovery_to_complete) { 11602 ioc->wait_for_discovery_to_complete = 0; 11603 _scsih_probe_devices(ioc); 11604 } 11605 mpt3sas_base_start_watchdog(ioc); 11606 ioc->is_driver_loading = 0; 11607 return 1; 11608 } 11609 11610 /** 11611 * scsih_map_queues - map reply queues with request queues 11612 * @shost: SCSI host pointer 11613 */ 11614 static int scsih_map_queues(struct Scsi_Host *shost) 11615 { 11616 struct MPT3SAS_ADAPTER *ioc = 11617 (struct MPT3SAS_ADAPTER *)shost->hostdata; 11618 11619 if (ioc->shost->nr_hw_queues == 1) 11620 return 0; 11621 11622 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 11623 ioc->pdev, ioc->high_iops_queues); 11624 } 11625 11626 /* shost template for SAS 2.0 HBA devices */ 11627 static struct scsi_host_template mpt2sas_driver_template = { 11628 .module = THIS_MODULE, 11629 .name = "Fusion MPT SAS Host", 11630 .proc_name = MPT2SAS_DRIVER_NAME, 11631 
.queuecommand = scsih_qcmd, 11632 .target_alloc = scsih_target_alloc, 11633 .slave_alloc = scsih_slave_alloc, 11634 .slave_configure = scsih_slave_configure, 11635 .target_destroy = scsih_target_destroy, 11636 .slave_destroy = scsih_slave_destroy, 11637 .scan_finished = scsih_scan_finished, 11638 .scan_start = scsih_scan_start, 11639 .change_queue_depth = scsih_change_queue_depth, 11640 .eh_abort_handler = scsih_abort, 11641 .eh_device_reset_handler = scsih_dev_reset, 11642 .eh_target_reset_handler = scsih_target_reset, 11643 .eh_host_reset_handler = scsih_host_reset, 11644 .bios_param = scsih_bios_param, 11645 .can_queue = 1, 11646 .this_id = -1, 11647 .sg_tablesize = MPT2SAS_SG_DEPTH, 11648 .max_sectors = 32767, 11649 .cmd_per_lun = 7, 11650 .shost_attrs = mpt3sas_host_attrs, 11651 .sdev_attrs = mpt3sas_dev_attrs, 11652 .track_queue_depth = 1, 11653 .cmd_size = sizeof(struct scsiio_tracker), 11654 }; 11655 11656 /* raid transport support for SAS 2.0 HBA devices */ 11657 static struct raid_function_template mpt2sas_raid_functions = { 11658 .cookie = &mpt2sas_driver_template, 11659 .is_raid = scsih_is_raid, 11660 .get_resync = scsih_get_resync, 11661 .get_state = scsih_get_state, 11662 }; 11663 11664 /* shost template for SAS 3.0 HBA devices */ 11665 static struct scsi_host_template mpt3sas_driver_template = { 11666 .module = THIS_MODULE, 11667 .name = "Fusion MPT SAS Host", 11668 .proc_name = MPT3SAS_DRIVER_NAME, 11669 .queuecommand = scsih_qcmd, 11670 .target_alloc = scsih_target_alloc, 11671 .slave_alloc = scsih_slave_alloc, 11672 .slave_configure = scsih_slave_configure, 11673 .target_destroy = scsih_target_destroy, 11674 .slave_destroy = scsih_slave_destroy, 11675 .scan_finished = scsih_scan_finished, 11676 .scan_start = scsih_scan_start, 11677 .change_queue_depth = scsih_change_queue_depth, 11678 .eh_abort_handler = scsih_abort, 11679 .eh_device_reset_handler = scsih_dev_reset, 11680 .eh_target_reset_handler = scsih_target_reset, 11681 .eh_host_reset_handler 
= scsih_host_reset, 11682 .bios_param = scsih_bios_param, 11683 .can_queue = 1, 11684 .this_id = -1, 11685 .sg_tablesize = MPT3SAS_SG_DEPTH, 11686 .max_sectors = 32767, 11687 .max_segment_size = 0xffffffff, 11688 .cmd_per_lun = 7, 11689 .shost_attrs = mpt3sas_host_attrs, 11690 .sdev_attrs = mpt3sas_dev_attrs, 11691 .track_queue_depth = 1, 11692 .cmd_size = sizeof(struct scsiio_tracker), 11693 .map_queues = scsih_map_queues, 11694 }; 11695 11696 /* raid transport support for SAS 3.0 HBA devices */ 11697 static struct raid_function_template mpt3sas_raid_functions = { 11698 .cookie = &mpt3sas_driver_template, 11699 .is_raid = scsih_is_raid, 11700 .get_resync = scsih_get_resync, 11701 .get_state = scsih_get_state, 11702 }; 11703 11704 /** 11705 * _scsih_determine_hba_mpi_version - determine in which MPI version class 11706 * this device belongs to. 11707 * @pdev: PCI device struct 11708 * 11709 * return MPI2_VERSION for SAS 2.0 HBA devices, 11710 * MPI25_VERSION for SAS 3.0 HBA devices, and 11711 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices 11712 */ 11713 static u16 11714 _scsih_determine_hba_mpi_version(struct pci_dev *pdev) 11715 { 11716 11717 switch (pdev->device) { 11718 case MPI2_MFGPAGE_DEVID_SSS6200: 11719 case MPI2_MFGPAGE_DEVID_SAS2004: 11720 case MPI2_MFGPAGE_DEVID_SAS2008: 11721 case MPI2_MFGPAGE_DEVID_SAS2108_1: 11722 case MPI2_MFGPAGE_DEVID_SAS2108_2: 11723 case MPI2_MFGPAGE_DEVID_SAS2108_3: 11724 case MPI2_MFGPAGE_DEVID_SAS2116_1: 11725 case MPI2_MFGPAGE_DEVID_SAS2116_2: 11726 case MPI2_MFGPAGE_DEVID_SAS2208_1: 11727 case MPI2_MFGPAGE_DEVID_SAS2208_2: 11728 case MPI2_MFGPAGE_DEVID_SAS2208_3: 11729 case MPI2_MFGPAGE_DEVID_SAS2208_4: 11730 case MPI2_MFGPAGE_DEVID_SAS2208_5: 11731 case MPI2_MFGPAGE_DEVID_SAS2208_6: 11732 case MPI2_MFGPAGE_DEVID_SAS2308_1: 11733 case MPI2_MFGPAGE_DEVID_SAS2308_2: 11734 case MPI2_MFGPAGE_DEVID_SAS2308_3: 11735 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 11736 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 11737 return 
MPI2_VERSION; 11738 case MPI25_MFGPAGE_DEVID_SAS3004: 11739 case MPI25_MFGPAGE_DEVID_SAS3008: 11740 case MPI25_MFGPAGE_DEVID_SAS3108_1: 11741 case MPI25_MFGPAGE_DEVID_SAS3108_2: 11742 case MPI25_MFGPAGE_DEVID_SAS3108_5: 11743 case MPI25_MFGPAGE_DEVID_SAS3108_6: 11744 return MPI25_VERSION; 11745 case MPI26_MFGPAGE_DEVID_SAS3216: 11746 case MPI26_MFGPAGE_DEVID_SAS3224: 11747 case MPI26_MFGPAGE_DEVID_SAS3316_1: 11748 case MPI26_MFGPAGE_DEVID_SAS3316_2: 11749 case MPI26_MFGPAGE_DEVID_SAS3316_3: 11750 case MPI26_MFGPAGE_DEVID_SAS3316_4: 11751 case MPI26_MFGPAGE_DEVID_SAS3324_1: 11752 case MPI26_MFGPAGE_DEVID_SAS3324_2: 11753 case MPI26_MFGPAGE_DEVID_SAS3324_3: 11754 case MPI26_MFGPAGE_DEVID_SAS3324_4: 11755 case MPI26_MFGPAGE_DEVID_SAS3508: 11756 case MPI26_MFGPAGE_DEVID_SAS3508_1: 11757 case MPI26_MFGPAGE_DEVID_SAS3408: 11758 case MPI26_MFGPAGE_DEVID_SAS3516: 11759 case MPI26_MFGPAGE_DEVID_SAS3516_1: 11760 case MPI26_MFGPAGE_DEVID_SAS3416: 11761 case MPI26_MFGPAGE_DEVID_SAS3616: 11762 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 11763 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 11764 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 11765 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 11766 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 11767 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 11768 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 11769 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 11770 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 11771 return MPI26_VERSION; 11772 } 11773 return 0; 11774 } 11775 11776 /** 11777 * _scsih_probe - attach and add scsi host 11778 * @pdev: PCI device struct 11779 * @id: pci device id 11780 * 11781 * Return: 0 success, anything else error. 
11782 */ 11783 static int 11784 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 11785 { 11786 struct MPT3SAS_ADAPTER *ioc; 11787 struct Scsi_Host *shost = NULL; 11788 int rv; 11789 u16 hba_mpi_version; 11790 11791 /* Determine in which MPI version class this pci device belongs */ 11792 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); 11793 if (hba_mpi_version == 0) 11794 return -ENODEV; 11795 11796 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, 11797 * for other generation HBA's return with -ENODEV 11798 */ 11799 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) 11800 return -ENODEV; 11801 11802 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, 11803 * for other generation HBA's return with -ENODEV 11804 */ 11805 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION 11806 || hba_mpi_version == MPI26_VERSION))) 11807 return -ENODEV; 11808 11809 switch (hba_mpi_version) { 11810 case MPI2_VERSION: 11811 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 11812 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 11813 /* Use mpt2sas driver host template for SAS 2.0 HBA's */ 11814 shost = scsi_host_alloc(&mpt2sas_driver_template, 11815 sizeof(struct MPT3SAS_ADAPTER)); 11816 if (!shost) 11817 return -ENODEV; 11818 ioc = shost_priv(shost); 11819 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 11820 ioc->hba_mpi_version_belonged = hba_mpi_version; 11821 ioc->id = mpt2_ids++; 11822 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); 11823 switch (pdev->device) { 11824 case MPI2_MFGPAGE_DEVID_SSS6200: 11825 ioc->is_warpdrive = 1; 11826 ioc->hide_ir_msg = 1; 11827 break; 11828 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: 11829 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: 11830 ioc->is_mcpu_endpoint = 1; 11831 break; 11832 default: 11833 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; 11834 break; 11835 } 11836 11837 if (multipath_on_hba == -1 || multipath_on_hba == 0) 11838 ioc->multipath_on_hba = 0; 
11839 else 11840 ioc->multipath_on_hba = 1; 11841 11842 break; 11843 case MPI25_VERSION: 11844 case MPI26_VERSION: 11845 /* Use mpt3sas driver host template for SAS 3.0 HBA's */ 11846 shost = scsi_host_alloc(&mpt3sas_driver_template, 11847 sizeof(struct MPT3SAS_ADAPTER)); 11848 if (!shost) 11849 return -ENODEV; 11850 ioc = shost_priv(shost); 11851 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); 11852 ioc->hba_mpi_version_belonged = hba_mpi_version; 11853 ioc->id = mpt3_ids++; 11854 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); 11855 switch (pdev->device) { 11856 case MPI26_MFGPAGE_DEVID_SAS3508: 11857 case MPI26_MFGPAGE_DEVID_SAS3508_1: 11858 case MPI26_MFGPAGE_DEVID_SAS3408: 11859 case MPI26_MFGPAGE_DEVID_SAS3516: 11860 case MPI26_MFGPAGE_DEVID_SAS3516_1: 11861 case MPI26_MFGPAGE_DEVID_SAS3416: 11862 case MPI26_MFGPAGE_DEVID_SAS3616: 11863 case MPI26_ATLAS_PCIe_SWITCH_DEVID: 11864 ioc->is_gen35_ioc = 1; 11865 break; 11866 case MPI26_MFGPAGE_DEVID_INVALID0_3816: 11867 case MPI26_MFGPAGE_DEVID_INVALID0_3916: 11868 dev_err(&pdev->dev, 11869 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid", 11870 pdev->device, pdev->subsystem_vendor, 11871 pdev->subsystem_device); 11872 return 1; 11873 case MPI26_MFGPAGE_DEVID_INVALID1_3816: 11874 case MPI26_MFGPAGE_DEVID_INVALID1_3916: 11875 dev_err(&pdev->dev, 11876 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered", 11877 pdev->device, pdev->subsystem_vendor, 11878 pdev->subsystem_device); 11879 return 1; 11880 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: 11881 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: 11882 dev_info(&pdev->dev, 11883 "HBA is in Configurable Secure mode\n"); 11884 fallthrough; 11885 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816: 11886 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: 11887 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1; 11888 break; 11889 default: 11890 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0; 11891 } 11892 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION && 
11893 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) || 11894 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) { 11895 ioc->combined_reply_queue = 1; 11896 if (ioc->is_gen35_ioc) 11897 ioc->combined_reply_index_count = 11898 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35; 11899 else 11900 ioc->combined_reply_index_count = 11901 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3; 11902 } 11903 11904 switch (ioc->is_gen35_ioc) { 11905 case 0: 11906 if (multipath_on_hba == -1 || multipath_on_hba == 0) 11907 ioc->multipath_on_hba = 0; 11908 else 11909 ioc->multipath_on_hba = 1; 11910 break; 11911 case 1: 11912 if (multipath_on_hba == -1 || multipath_on_hba > 0) 11913 ioc->multipath_on_hba = 1; 11914 else 11915 ioc->multipath_on_hba = 0; 11916 default: 11917 break; 11918 } 11919 11920 break; 11921 default: 11922 return -ENODEV; 11923 } 11924 11925 INIT_LIST_HEAD(&ioc->list); 11926 spin_lock(&gioc_lock); 11927 list_add_tail(&ioc->list, &mpt3sas_ioc_list); 11928 spin_unlock(&gioc_lock); 11929 ioc->shost = shost; 11930 ioc->pdev = pdev; 11931 ioc->scsi_io_cb_idx = scsi_io_cb_idx; 11932 ioc->tm_cb_idx = tm_cb_idx; 11933 ioc->ctl_cb_idx = ctl_cb_idx; 11934 ioc->base_cb_idx = base_cb_idx; 11935 ioc->port_enable_cb_idx = port_enable_cb_idx; 11936 ioc->transport_cb_idx = transport_cb_idx; 11937 ioc->scsih_cb_idx = scsih_cb_idx; 11938 ioc->config_cb_idx = config_cb_idx; 11939 ioc->tm_tr_cb_idx = tm_tr_cb_idx; 11940 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; 11941 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; 11942 ioc->logging_level = logging_level; 11943 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; 11944 /* Host waits for minimum of six seconds */ 11945 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; 11946 /* 11947 * Enable MEMORY MOVE support flag. 11948 */ 11949 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; 11950 /* Enable ADDITIONAL QUERY support flag. 
*/ 11951 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY; 11952 11953 ioc->enable_sdev_max_qd = enable_sdev_max_qd; 11954 11955 /* misc semaphores and spin locks */ 11956 mutex_init(&ioc->reset_in_progress_mutex); 11957 /* initializing pci_access_mutex lock */ 11958 mutex_init(&ioc->pci_access_mutex); 11959 spin_lock_init(&ioc->ioc_reset_in_progress_lock); 11960 spin_lock_init(&ioc->scsi_lookup_lock); 11961 spin_lock_init(&ioc->sas_device_lock); 11962 spin_lock_init(&ioc->sas_node_lock); 11963 spin_lock_init(&ioc->fw_event_lock); 11964 spin_lock_init(&ioc->raid_device_lock); 11965 spin_lock_init(&ioc->pcie_device_lock); 11966 spin_lock_init(&ioc->diag_trigger_lock); 11967 11968 INIT_LIST_HEAD(&ioc->sas_device_list); 11969 INIT_LIST_HEAD(&ioc->sas_device_init_list); 11970 INIT_LIST_HEAD(&ioc->sas_expander_list); 11971 INIT_LIST_HEAD(&ioc->enclosure_list); 11972 INIT_LIST_HEAD(&ioc->pcie_device_list); 11973 INIT_LIST_HEAD(&ioc->pcie_device_init_list); 11974 INIT_LIST_HEAD(&ioc->fw_event_list); 11975 INIT_LIST_HEAD(&ioc->raid_device_list); 11976 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); 11977 INIT_LIST_HEAD(&ioc->delayed_tr_list); 11978 INIT_LIST_HEAD(&ioc->delayed_sc_list); 11979 INIT_LIST_HEAD(&ioc->delayed_event_ack_list); 11980 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); 11981 INIT_LIST_HEAD(&ioc->reply_queue_list); 11982 INIT_LIST_HEAD(&ioc->port_table_list); 11983 11984 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); 11985 11986 /* init shost parameters */ 11987 shost->max_cmd_len = 32; 11988 shost->max_lun = max_lun; 11989 shost->transportt = mpt3sas_transport_template; 11990 shost->unique_id = ioc->id; 11991 11992 if (ioc->is_mcpu_endpoint) { 11993 /* mCPU MPI support 64K max IO */ 11994 shost->max_sectors = 128; 11995 ioc_info(ioc, "The max_sectors value is set to %d\n", 11996 shost->max_sectors); 11997 } else { 11998 if (max_sectors != 0xFFFF) { 11999 if (max_sectors < 64) { 12000 shost->max_sectors = 64; 12001 ioc_warn(ioc, 
"Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n", 12002 max_sectors); 12003 } else if (max_sectors > 32767) { 12004 shost->max_sectors = 32767; 12005 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n", 12006 max_sectors); 12007 } else { 12008 shost->max_sectors = max_sectors & 0xFFFE; 12009 ioc_info(ioc, "The max_sectors value is set to %d\n", 12010 shost->max_sectors); 12011 } 12012 } 12013 } 12014 /* register EEDP capabilities with SCSI layer */ 12015 if (prot_mask >= 0) 12016 scsi_host_set_prot(shost, (prot_mask & 0x07)); 12017 else 12018 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION 12019 | SHOST_DIF_TYPE2_PROTECTION 12020 | SHOST_DIF_TYPE3_PROTECTION); 12021 12022 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 12023 12024 /* event thread */ 12025 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 12026 "fw_event_%s%d", ioc->driver_name, ioc->id); 12027 ioc->firmware_event_thread = alloc_ordered_workqueue( 12028 ioc->firmware_event_name, 0); 12029 if (!ioc->firmware_event_thread) { 12030 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12031 __FILE__, __LINE__, __func__); 12032 rv = -ENODEV; 12033 goto out_thread_fail; 12034 } 12035 12036 ioc->is_driver_loading = 1; 12037 if ((mpt3sas_base_attach(ioc))) { 12038 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12039 __FILE__, __LINE__, __func__); 12040 rv = -ENODEV; 12041 goto out_attach_fail; 12042 } 12043 12044 if (ioc->is_warpdrive) { 12045 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) 12046 ioc->hide_drives = 0; 12047 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) 12048 ioc->hide_drives = 1; 12049 else { 12050 if (mpt3sas_get_num_volumes(ioc)) 12051 ioc->hide_drives = 1; 12052 else 12053 ioc->hide_drives = 0; 12054 } 12055 } else 12056 ioc->hide_drives = 0; 12057 12058 shost->host_tagset = 0; 12059 shost->nr_hw_queues = 1; 12060 12061 if (ioc->is_gen35_ioc && 
ioc->reply_queue_count > 1 && 12062 host_tagset_enable && ioc->smp_affinity_enable) { 12063 12064 shost->host_tagset = 1; 12065 shost->nr_hw_queues = 12066 ioc->reply_queue_count - ioc->high_iops_queues; 12067 12068 dev_info(&ioc->pdev->dev, 12069 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", 12070 shost->can_queue, shost->nr_hw_queues); 12071 } 12072 12073 rv = scsi_add_host(shost, &pdev->dev); 12074 if (rv) { 12075 ioc_err(ioc, "failure at %s:%d/%s()!\n", 12076 __FILE__, __LINE__, __func__); 12077 goto out_add_shost_fail; 12078 } 12079 12080 scsi_scan_host(shost); 12081 mpt3sas_setup_debugfs(ioc); 12082 return 0; 12083 out_add_shost_fail: 12084 mpt3sas_base_detach(ioc); 12085 out_attach_fail: 12086 destroy_workqueue(ioc->firmware_event_thread); 12087 out_thread_fail: 12088 spin_lock(&gioc_lock); 12089 list_del(&ioc->list); 12090 spin_unlock(&gioc_lock); 12091 scsi_host_put(shost); 12092 return rv; 12093 } 12094 12095 /** 12096 * scsih_suspend - power management suspend main entry point 12097 * @dev: Device struct 12098 * 12099 * Return: 0 success, anything else error. 12100 */ 12101 static int __maybe_unused 12102 scsih_suspend(struct device *dev) 12103 { 12104 struct pci_dev *pdev = to_pci_dev(dev); 12105 struct Scsi_Host *shost; 12106 struct MPT3SAS_ADAPTER *ioc; 12107 int rc; 12108 12109 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); 12110 if (rc) 12111 return rc; 12112 12113 mpt3sas_base_stop_watchdog(ioc); 12114 flush_scheduled_work(); 12115 scsi_block_requests(shost); 12116 _scsih_nvme_shutdown(ioc); 12117 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n", 12118 pdev, pci_name(pdev)); 12119 12120 mpt3sas_base_free_resources(ioc); 12121 return 0; 12122 } 12123 12124 /** 12125 * scsih_resume - power management resume main entry point 12126 * @dev: Device struct 12127 * 12128 * Return: 0 success, anything else error. 
12129 */ 12130 static int __maybe_unused 12131 scsih_resume(struct device *dev) 12132 { 12133 struct pci_dev *pdev = to_pci_dev(dev); 12134 struct Scsi_Host *shost; 12135 struct MPT3SAS_ADAPTER *ioc; 12136 pci_power_t device_state = pdev->current_state; 12137 int r; 12138 12139 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc); 12140 if (r) 12141 return r; 12142 12143 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 12144 pdev, pci_name(pdev), device_state); 12145 12146 ioc->pdev = pdev; 12147 r = mpt3sas_base_map_resources(ioc); 12148 if (r) 12149 return r; 12150 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); 12151 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); 12152 scsi_unblock_requests(shost); 12153 mpt3sas_base_start_watchdog(ioc); 12154 return 0; 12155 } 12156 12157 /** 12158 * scsih_pci_error_detected - Called when a PCI error is detected. 12159 * @pdev: PCI device struct 12160 * @state: PCI channel state 12161 * 12162 * Description: Called when a PCI error is detected. 12163 * 12164 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. 
12165 */ 12166 static pci_ers_result_t 12167 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 12168 { 12169 struct Scsi_Host *shost; 12170 struct MPT3SAS_ADAPTER *ioc; 12171 12172 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12173 return PCI_ERS_RESULT_DISCONNECT; 12174 12175 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state); 12176 12177 switch (state) { 12178 case pci_channel_io_normal: 12179 return PCI_ERS_RESULT_CAN_RECOVER; 12180 case pci_channel_io_frozen: 12181 /* Fatal error, prepare for slot reset */ 12182 ioc->pci_error_recovery = 1; 12183 scsi_block_requests(ioc->shost); 12184 mpt3sas_base_stop_watchdog(ioc); 12185 mpt3sas_base_free_resources(ioc); 12186 return PCI_ERS_RESULT_NEED_RESET; 12187 case pci_channel_io_perm_failure: 12188 /* Permanent error, prepare for device removal */ 12189 ioc->pci_error_recovery = 1; 12190 mpt3sas_base_stop_watchdog(ioc); 12191 _scsih_flush_running_cmds(ioc); 12192 return PCI_ERS_RESULT_DISCONNECT; 12193 } 12194 return PCI_ERS_RESULT_NEED_RESET; 12195 } 12196 12197 /** 12198 * scsih_pci_slot_reset - Called when PCI slot has been reset. 12199 * @pdev: PCI device struct 12200 * 12201 * Description: This routine is called by the pci error recovery 12202 * code after the PCI slot has been reset, just before we 12203 * should resume normal operations. 
 *
 * Return: PCI_ERS_RESULT_RECOVERED if the controller reset succeeded,
 * PCI_ERS_RESULT_DISCONNECT otherwise.
 */
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	int rc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "PCI error: slot reset callback!!\n");

	/* Recovery path is over the error-detected phase: clear the flag,
	 * restore config space and re-map the resources freed in
	 * scsih_pci_error_detected() for the frozen case.
	 */
	ioc->pci_error_recovery = 0;
	ioc->pdev = pdev;
	pci_restore_state(pdev);
	rc = mpt3sas_base_map_resources(ioc);
	if (rc)
		return PCI_ERS_RESULT_DISCONNECT;

	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
	/* FORCE_BIG_HAMMER: full diagnostic reset, firmware state unknown */
	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

	ioc_warn(ioc, "hard reset: %s\n",
		 (rc == 0) ? "success" : "failed");

	if (!rc)
		return PCI_ERS_RESULT_RECOVERED;
	else
		return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * scsih_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation. Use completion to allow
 * halted scsi ops to resume.
12243 */ 12244 static void 12245 scsih_pci_resume(struct pci_dev *pdev) 12246 { 12247 struct Scsi_Host *shost; 12248 struct MPT3SAS_ADAPTER *ioc; 12249 12250 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12251 return; 12252 12253 ioc_info(ioc, "PCI error: resume callback!!\n"); 12254 12255 mpt3sas_base_start_watchdog(ioc); 12256 scsi_unblock_requests(ioc->shost); 12257 } 12258 12259 /** 12260 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers 12261 * @pdev: pointer to PCI device 12262 */ 12263 static pci_ers_result_t 12264 scsih_pci_mmio_enabled(struct pci_dev *pdev) 12265 { 12266 struct Scsi_Host *shost; 12267 struct MPT3SAS_ADAPTER *ioc; 12268 12269 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc)) 12270 return PCI_ERS_RESULT_DISCONNECT; 12271 12272 ioc_info(ioc, "PCI error: mmio enabled callback!!\n"); 12273 12274 /* TODO - dump whatever for debugging purposes */ 12275 12276 /* This called only if scsih_pci_error_detected returns 12277 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still 12278 * works, no need to reset slot. 12279 */ 12280 return PCI_ERS_RESULT_RECOVERED; 12281 } 12282 12283 /** 12284 * scsih__ncq_prio_supp - Check for NCQ command priority support 12285 * @sdev: scsi device struct 12286 * 12287 * This is called when a user indicates they would like to enable 12288 * ncq command priorities. This works only on SATA devices. 12289 */ 12290 bool scsih_ncq_prio_supp(struct scsi_device *sdev) 12291 { 12292 unsigned char *buf; 12293 bool ncq_prio_supp = false; 12294 12295 if (!scsi_device_supports_vpd(sdev)) 12296 return ncq_prio_supp; 12297 12298 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL); 12299 if (!buf) 12300 return ncq_prio_supp; 12301 12302 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN)) 12303 ncq_prio_supp = (buf[213] >> 4) & 1; 12304 12305 kfree(buf); 12306 return ncq_prio_supp; 12307 } 12308 /* 12309 * The pci device ids are defined in mpi/mpi2_cnfg.h. 
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Aero SI 0x00E1 Configurable Secure
	 * 0x00E2 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Atlas PCIe Switch Management Port */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
		PCI_ANY_ID, PCI_ANY_ID },

	/* Sea SI 0x00E5 Configurable Secure
	 * 0x00E6 Hard Secure
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	/*
	 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
	 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
		PCI_ANY_ID, PCI_ANY_ID },

	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);

/* PCI error-recovery callbacks (see Documentation/PCI/pci-error-recovery.rst) */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected = scsih_pci_error_detected,
	.mmio_enabled = scsih_pci_mmio_enabled,
	.slot_reset = scsih_pci_slot_reset,
	.resume = scsih_pci_resume,
};

static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);

static struct pci_driver mpt3sas_driver = {
	.name = MPT3SAS_DRIVER_NAME,
	.id_table = mpt3sas_pci_table,
	.probe = _scsih_probe,
	.remove = scsih_remove,
	.shutdown = scsih_shutdown,
	.err_handler = &_mpt3sas_err_handler,
	.driver.pm = &scsih_pm_ops,
};

/**
 * scsih_init - main entry point for this driver.
 *
 * Registers all of the driver's MPI reply callback handlers with the
 * base layer; the returned indexes are stored in file-scope variables
 * and released again in scsih_exit().
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* queuecommand callback hander */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
		mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
		mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
		mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	/* device/volume removal (target reset) completion handlers */
	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
		_scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
		_scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
		_scsih_sas_control_complete);

	mpt3sas_init_debugfs();
	return 0;
}

/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases every callback handler registered by scsih_init(), drops
 * the raid class template(s) that were attached for the enabled HBA
 * generations, and tears down the sas transport and debugfs state.
 */
static void
scsih_exit(void)
{

	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
	mpt3sas_base_release_callback_handler(tm_cb_idx);
	mpt3sas_base_release_callback_handler(base_cb_idx);
	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
	mpt3sas_base_release_callback_handler(transport_cb_idx);
	mpt3sas_base_release_callback_handler(scsih_cb_idx);
	mpt3sas_base_release_callback_handler(config_cb_idx);
	mpt3sas_base_release_callback_handler(ctl_cb_idx);

	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);

	/* raid transport support: only the templates that were attached
	 * in _mpt3sas_init() (per hbas_to_enumerate) are released here
	 */
	if (hbas_to_enumerate != 1)
		raid_class_release(mpt3sas_raid_template);
	if (hbas_to_enumerate != 2)
		raid_class_release(mpt2sas_raid_template);
	sas_release_transport(mpt3sas_transport_template);
	mpt3sas_exit_debugfs();
}

/**
 * _mpt3sas_init - main entry point for this driver.
 *
 * Return: 0 success, anything else error.
12555 */ 12556 static int __init 12557 _mpt3sas_init(void) 12558 { 12559 int error; 12560 12561 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, 12562 MPT3SAS_DRIVER_VERSION); 12563 12564 mpt3sas_transport_template = 12565 sas_attach_transport(&mpt3sas_transport_functions); 12566 if (!mpt3sas_transport_template) 12567 return -ENODEV; 12568 12569 /* No need attach mpt3sas raid functions template 12570 * if hbas_to_enumarate value is one. 12571 */ 12572 if (hbas_to_enumerate != 1) { 12573 mpt3sas_raid_template = 12574 raid_class_attach(&mpt3sas_raid_functions); 12575 if (!mpt3sas_raid_template) { 12576 sas_release_transport(mpt3sas_transport_template); 12577 return -ENODEV; 12578 } 12579 } 12580 12581 /* No need to attach mpt2sas raid functions template 12582 * if hbas_to_enumarate value is two 12583 */ 12584 if (hbas_to_enumerate != 2) { 12585 mpt2sas_raid_template = 12586 raid_class_attach(&mpt2sas_raid_functions); 12587 if (!mpt2sas_raid_template) { 12588 sas_release_transport(mpt3sas_transport_template); 12589 return -ENODEV; 12590 } 12591 } 12592 12593 error = scsih_init(); 12594 if (error) { 12595 scsih_exit(); 12596 return error; 12597 } 12598 12599 mpt3sas_ctl_init(hbas_to_enumerate); 12600 12601 error = pci_register_driver(&mpt3sas_driver); 12602 if (error) 12603 scsih_exit(); 12604 12605 return error; 12606 } 12607 12608 /** 12609 * _mpt3sas_exit - exit point for this driver (when it is a module). 12610 * 12611 */ 12612 static void __exit 12613 _mpt3sas_exit(void) 12614 { 12615 pr_info("mpt3sas version %s unloading\n", 12616 MPT3SAS_DRIVER_VERSION); 12617 12618 mpt3sas_ctl_exit(hbas_to_enumerate); 12619 12620 pci_unregister_driver(&mpt3sas_driver); 12621 12622 scsih_exit(); 12623 } 12624 12625 module_init(_mpt3sas_init); 12626 module_exit(_mpt3sas_exit); 12627