/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

/* virtual SCSI channels used to multiplex non-SAS devices onto the host */
#define RAID_CHANNEL 1

#define PCIE_CHANNEL 2

/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
/* callback indexes registered with the base driver; stored in u8, so the
 * -1 initializer is really 0xFF and acts as the "unused" sentinel
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
static u8 tm_sas_control_cb_idx = -1;

/* command line options */
/* NOTE: the module_param entry for logging_level is registered further down
 * in this file via module_param_call() so that writes to the parameter
 * propagate to every live adapter.
 */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");


/* NOTE(review): the description says default=32767 but the initializer is
 * 0xFFFF (65535) -- confirm which is the intended default
 */
static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");


static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0);
MODULE_PARM_DESC(hbas_to_enumerate,
	" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
	1 - enumerates only SAS 2.0 generation HBAs\n \
	2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0);
MODULE_PARM_DESC(diag_buffer_enable,
	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");


/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");


/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;


/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};

/* driver-internal pseudo firmware-event codes; values chosen at the top of
 * the u16 range so they cannot clash with real MPI2_EVENT_XXX codes
 */
#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object stored on ioc->fw_event_list.
195 */ 196 struct fw_event_work { 197 struct list_head list; 198 struct work_struct work; 199 200 struct MPT3SAS_ADAPTER *ioc; 201 u16 device_handle; 202 u8 VF_ID; 203 u8 VP_ID; 204 u8 ignore; 205 u16 event; 206 struct kref refcount; 207 char event_data[0] __aligned(4); 208 }; 209 210 static void fw_event_work_free(struct kref *r) 211 { 212 kfree(container_of(r, struct fw_event_work, refcount)); 213 } 214 215 static void fw_event_work_get(struct fw_event_work *fw_work) 216 { 217 kref_get(&fw_work->refcount); 218 } 219 220 static void fw_event_work_put(struct fw_event_work *fw_work) 221 { 222 kref_put(&fw_work->refcount, fw_event_work_free); 223 } 224 225 static struct fw_event_work *alloc_fw_event_work(int len) 226 { 227 struct fw_event_work *fw_event; 228 229 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); 230 if (!fw_event) 231 return NULL; 232 233 kref_init(&fw_event->refcount); 234 return fw_event; 235 } 236 237 /** 238 * struct _scsi_io_transfer - scsi io transfer 239 * @handle: sas device handle (assigned by firmware) 240 * @is_raid: flag set for hidden raid components 241 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE, 242 * @data_length: data transfer length 243 * @data_dma: dma pointer to data 244 * @sense: sense data 245 * @lun: lun number 246 * @cdb_length: cdb length 247 * @cdb: cdb contents 248 * @timeout: timeout for this command 249 * @VF_ID: virtual function id 250 * @VP_ID: virtual port id 251 * @valid_reply: flag set for reply message 252 * @sense_length: sense length 253 * @ioc_status: ioc status 254 * @scsi_state: scsi state 255 * @scsi_status: scsi staus 256 * @log_info: log information 257 * @transfer_length: data length transfer when there is a reply message 258 * 259 * Used for sending internal scsi commands to devices within this module. 260 * Refer to _scsi_send_scsi_io(). 
261 */ 262 struct _scsi_io_transfer { 263 u16 handle; 264 u8 is_raid; 265 enum dma_data_direction dir; 266 u32 data_length; 267 dma_addr_t data_dma; 268 u8 sense[SCSI_SENSE_BUFFERSIZE]; 269 u32 lun; 270 u8 cdb_length; 271 u8 cdb[32]; 272 u8 timeout; 273 u8 VF_ID; 274 u8 VP_ID; 275 u8 valid_reply; 276 /* the following bits are only valid when 'valid_reply = 1' */ 277 u32 sense_length; 278 u16 ioc_status; 279 u8 scsi_state; 280 u8 scsi_status; 281 u32 log_info; 282 u32 transfer_length; 283 }; 284 285 /** 286 * _scsih_set_debug_level - global setting of ioc->logging_level. 287 * @val: ? 288 * @kp: ? 289 * 290 * Note: The logging levels are defined in mpt3sas_debug.h. 291 */ 292 static int 293 _scsih_set_debug_level(const char *val, const struct kernel_param *kp) 294 { 295 int ret = param_set_int(val, kp); 296 struct MPT3SAS_ADAPTER *ioc; 297 298 if (ret) 299 return ret; 300 301 pr_info("setting logging_level(0x%08x)\n", logging_level); 302 spin_lock(&gioc_lock); 303 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) 304 ioc->logging_level = logging_level; 305 spin_unlock(&gioc_lock); 306 return 0; 307 } 308 module_param_call(logging_level, _scsih_set_debug_level, param_get_int, 309 &logging_level, 0644); 310 311 /** 312 * _scsih_srch_boot_sas_address - search based on sas_address 313 * @sas_address: sas address 314 * @boot_device: boot device object from bios page 2 315 * 316 * Return: 1 when there's a match, 0 means no match. 317 */ 318 static inline int 319 _scsih_srch_boot_sas_address(u64 sas_address, 320 Mpi2BootDeviceSasWwid_t *boot_device) 321 { 322 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; 323 } 324 325 /** 326 * _scsih_srch_boot_device_name - search based on device name 327 * @device_name: device name specified in INDENTIFY fram 328 * @boot_device: boot device object from bios page 2 329 * 330 * Return: 1 when there's a match, 0 means no match. 
331 */ 332 static inline int 333 _scsih_srch_boot_device_name(u64 device_name, 334 Mpi2BootDeviceDeviceName_t *boot_device) 335 { 336 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; 337 } 338 339 /** 340 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot 341 * @enclosure_logical_id: enclosure logical id 342 * @slot_number: slot number 343 * @boot_device: boot device object from bios page 2 344 * 345 * Return: 1 when there's a match, 0 means no match. 346 */ 347 static inline int 348 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, 349 Mpi2BootDeviceEnclosureSlot_t *boot_device) 350 { 351 return (enclosure_logical_id == le64_to_cpu(boot_device-> 352 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> 353 SlotNumber)) ? 1 : 0; 354 } 355 356 /** 357 * _scsih_is_boot_device - search for matching boot device. 358 * @sas_address: sas address 359 * @device_name: device name specified in INDENTIFY fram 360 * @enclosure_logical_id: enclosure logical id 361 * @slot: slot number 362 * @form: specifies boot device form 363 * @boot_device: boot device object from bios page 2 364 * 365 * Return: 1 when there's a match, 0 means no match. 
366 */ 367 static int 368 _scsih_is_boot_device(u64 sas_address, u64 device_name, 369 u64 enclosure_logical_id, u16 slot, u8 form, 370 Mpi2BiosPage2BootDevice_t *boot_device) 371 { 372 int rc = 0; 373 374 switch (form) { 375 case MPI2_BIOSPAGE2_FORM_SAS_WWID: 376 if (!sas_address) 377 break; 378 rc = _scsih_srch_boot_sas_address( 379 sas_address, &boot_device->SasWwid); 380 break; 381 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT: 382 if (!enclosure_logical_id) 383 break; 384 rc = _scsih_srch_boot_encl_slot( 385 enclosure_logical_id, 386 slot, &boot_device->EnclosureSlot); 387 break; 388 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME: 389 if (!device_name) 390 break; 391 rc = _scsih_srch_boot_device_name( 392 device_name, &boot_device->DeviceName); 393 break; 394 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: 395 break; 396 } 397 398 return rc; 399 } 400 401 /** 402 * _scsih_get_sas_address - set the sas_address for given device handle 403 * @ioc: ? 404 * @handle: device handle 405 * @sas_address: sas address 406 * 407 * Return: 0 success, non-zero when failure 408 */ 409 static int 410 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, 411 u64 *sas_address) 412 { 413 Mpi2SasDevicePage0_t sas_device_pg0; 414 Mpi2ConfigReply_t mpi_reply; 415 u32 ioc_status; 416 417 *sas_address = 0; 418 419 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 420 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 421 ioc_err(ioc, "failure at %s:%d/%s()!\n", 422 __FILE__, __LINE__, __func__); 423 return -ENXIO; 424 } 425 426 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 427 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 428 /* For HBA, vSES doesn't return HBA SAS address. Instead return 429 * vSES's sas address. 
430 */ 431 if ((handle <= ioc->sas_hba.num_phys) && 432 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & 433 MPI2_SAS_DEVICE_INFO_SEP))) 434 *sas_address = ioc->sas_hba.sas_address; 435 else 436 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 437 return 0; 438 } 439 440 /* we hit this because the given parent handle doesn't exist */ 441 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) 442 return -ENXIO; 443 444 /* else error case */ 445 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", 446 handle, ioc_status, __FILE__, __LINE__, __func__); 447 return -EIO; 448 } 449 450 /** 451 * _scsih_determine_boot_device - determine boot device. 452 * @ioc: per adapter object 453 * @device: sas_device or pcie_device object 454 * @channel: SAS or PCIe channel 455 * 456 * Determines whether this device should be first reported device to 457 * to scsi-ml or sas transport, this purpose is for persistent boot device. 458 * There are primary, alternate, and current entries in bios page 2. The order 459 * priority is primary, alternate, then current. This routine saves 460 * the corresponding device object. 461 * The saved data to be used later in _scsih_probe_boot_devices(). 
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	/* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/* extract the matching keys for the channel type; RAID volumes and
	 * PCIe (NVMe) devices only carry a wwid, so the remaining keys are
	 * zeroed and will not match the name/enclosure forms
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* requested (primary) boot device -- only the first match is kept */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					 __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	/* requested alternate boot device */
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					 __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	/* current boot device */
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					 __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}

/*
 * __mpt3sas_get_sdev_from_target - return the target's cached sas_device
 * with an elevated reference count (caller must put it).
 * Context: caller must hold ioc->sas_device_lock.
 */
static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;

	assert_spin_locked(&ioc->sas_device_lock);

	ret = tgt_priv->sas_dev;
	if (ret)
		sas_device_get(ret);

	return ret;
}

/*
 * mpt3sas_get_sdev_from_target - locked wrapper around
 * __mpt3sas_get_sdev_from_target(); acquires ioc->sas_device_lock itself.
 */
static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _sas_device *ret;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return ret;
}

/*
 * __mpt3sas_get_pdev_from_target - return the target's cached pcie_device
 * with an elevated reference count (caller must put it).
 * Context: caller must hold ioc->pcie_device_lock.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
	struct MPT3SAS_TARGET *tgt_priv)
{
	struct _pcie_device *ret;

	assert_spin_locked(&ioc->pcie_device_lock);

	ret = tgt_priv->pcie_dev;
	if (ret)
		pcie_device_get(ret);

	return ret;
}

/**
 * mpt3sas_get_pdev_from_target - pcie device search
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device from target, then return pcie_device object.
599 */ 600 static struct _pcie_device * 601 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc, 602 struct MPT3SAS_TARGET *tgt_priv) 603 { 604 struct _pcie_device *ret; 605 unsigned long flags; 606 607 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 608 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv); 609 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 610 611 return ret; 612 } 613 614 struct _sas_device * 615 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, 616 u64 sas_address) 617 { 618 struct _sas_device *sas_device; 619 620 assert_spin_locked(&ioc->sas_device_lock); 621 622 list_for_each_entry(sas_device, &ioc->sas_device_list, list) 623 if (sas_device->sas_address == sas_address) 624 goto found_device; 625 626 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 627 if (sas_device->sas_address == sas_address) 628 goto found_device; 629 630 return NULL; 631 632 found_device: 633 sas_device_get(sas_device); 634 return sas_device; 635 } 636 637 /** 638 * mpt3sas_get_sdev_by_addr - sas device search 639 * @ioc: per adapter object 640 * @sas_address: sas address 641 * Context: Calling function should acquire ioc->sas_device_lock 642 * 643 * This searches for sas_device based on sas_address, then return sas_device 644 * object. 
645 */ 646 struct _sas_device * 647 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, 648 u64 sas_address) 649 { 650 struct _sas_device *sas_device; 651 unsigned long flags; 652 653 spin_lock_irqsave(&ioc->sas_device_lock, flags); 654 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 655 sas_address); 656 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 657 658 return sas_device; 659 } 660 661 static struct _sas_device * 662 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 663 { 664 struct _sas_device *sas_device; 665 666 assert_spin_locked(&ioc->sas_device_lock); 667 668 list_for_each_entry(sas_device, &ioc->sas_device_list, list) 669 if (sas_device->handle == handle) 670 goto found_device; 671 672 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) 673 if (sas_device->handle == handle) 674 goto found_device; 675 676 return NULL; 677 678 found_device: 679 sas_device_get(sas_device); 680 return sas_device; 681 } 682 683 /** 684 * mpt3sas_get_sdev_by_handle - sas device search 685 * @ioc: per adapter object 686 * @handle: sas device handle (assigned by firmware) 687 * Context: Calling function should acquire ioc->sas_device_lock 688 * 689 * This searches for sas_device based on sas_address, then return sas_device 690 * object. 
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 *
 * Prints through @sdev when given, else through @starget, else through
 * the adapter; only fields that are actually populated are printed.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}

/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and decrement its reference count.
 */
static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	if (!sas_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_device->handle, (u64)sas_device->sas_address);

	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);

	/*
	 * The lock serializes access to the list, but we still need to verify
	 * that nobody removed the entry while we were waiting on the lock.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		/* drop the reference the list was holding */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	/* unlink from the list under the lock; the lookup took its own
	 * reference, and the list's reference is dropped here
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	/* tear down outside the lock, then drop the lookup reference */
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * mpt3sas_device_remove_by_sas_address - removing device object by sas address
 * @ioc: per adapter object
 * @sas_address: device sas_address
 */
void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	if (ioc->shost_recovery)
		return;

	/* same two-phase removal as _scsih_device_remove_by_handle() */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
	if (sas_device) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (sas_device) {
		_scsih_remove_device(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object to the ioc->sas_device_list.
 */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			 __func__, sas_device->handle,
			 (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* the list holds its own reference */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* hidden raid component: do not expose through the transport layer */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	     sas_device->sas_address_parent)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When asyn scanning is enabled, its not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}

/**
 * _scsih_sas_device_init_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adding new object at driver load time to the ioc->sas_device_init_list.
 */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	dewtprintk(ioc,
		ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			 __func__, sas_device->handle,
			 (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* the init list holds its own reference */
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	/* boot-device bookkeeping runs under the same lock; channel 0
	 * denotes a plain SAS device (not RAID_CHANNEL / PCIE_CHANNEL)
	 */
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/*
 * __mpt3sas_get_pdev_by_wwid - look up a pcie_device by wwid on the live
 * list first, then on the init-time list; returns a referenced object
 * (caller must drop the reference) or NULL when not found.
 * Context: caller must hold ioc->pcie_device_lock.
 */
static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
{
	struct _pcie_device *pcie_device;

	assert_spin_locked(&ioc->pcie_device_lock);

	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
		if (pcie_device->wwid == wwid)
			goto found_device;

	return NULL;

found_device:
	pcie_device_get(pcie_device);
	return pcie_device;
}


/**
 * mpt3sas_get_pdev_by_wwid - pcie device search
 * @ioc: per adapter object
 * @wwid: wwid
 *
 * Context: This function will acquire ioc->pcie_device_lock and will release
 * before returning the pcie_device object.
 *
 * This searches for pcie_device based on wwid, then return pcie_device object.
964 */ 965 static struct _pcie_device * 966 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 967 { 968 struct _pcie_device *pcie_device; 969 unsigned long flags; 970 971 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 972 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 973 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 974 975 return pcie_device; 976 } 977 978 979 static struct _pcie_device * 980 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id, 981 int channel) 982 { 983 struct _pcie_device *pcie_device; 984 985 assert_spin_locked(&ioc->pcie_device_lock); 986 987 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 988 if (pcie_device->id == id && pcie_device->channel == channel) 989 goto found_device; 990 991 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 992 if (pcie_device->id == id && pcie_device->channel == channel) 993 goto found_device; 994 995 return NULL; 996 997 found_device: 998 pcie_device_get(pcie_device); 999 return pcie_device; 1000 } 1001 1002 static struct _pcie_device * 1003 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1004 { 1005 struct _pcie_device *pcie_device; 1006 1007 assert_spin_locked(&ioc->pcie_device_lock); 1008 1009 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) 1010 if (pcie_device->handle == handle) 1011 goto found_device; 1012 1013 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list) 1014 if (pcie_device->handle == handle) 1015 goto found_device; 1016 1017 return NULL; 1018 1019 found_device: 1020 pcie_device_get(pcie_device); 1021 return pcie_device; 1022 } 1023 1024 1025 /** 1026 * mpt3sas_get_pdev_by_handle - pcie device search 1027 * @ioc: per adapter object 1028 * @handle: Firmware device handle 1029 * 1030 * Context: This function will acquire ioc->pcie_device_lock and will release 1031 * before returning the pcie_device object. 
1032 * 1033 * This searches for pcie_device based on handle, then return pcie_device 1034 * object. 1035 */ 1036 struct _pcie_device * 1037 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1038 { 1039 struct _pcie_device *pcie_device; 1040 unsigned long flags; 1041 1042 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1043 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle); 1044 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1045 1046 return pcie_device; 1047 } 1048 1049 /** 1050 * _scsih_pcie_device_remove - remove pcie_device from list. 1051 * @ioc: per adapter object 1052 * @pcie_device: the pcie_device object 1053 * Context: This function will acquire ioc->pcie_device_lock. 1054 * 1055 * If pcie_device is on the list, remove it and decrement its reference count. 1056 */ 1057 static void 1058 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, 1059 struct _pcie_device *pcie_device) 1060 { 1061 unsigned long flags; 1062 int was_on_pcie_device_list = 0; 1063 1064 if (!pcie_device) 1065 return; 1066 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 1067 pcie_device->handle, (u64)pcie_device->wwid); 1068 if (pcie_device->enclosure_handle != 0) 1069 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n", 1070 (u64)pcie_device->enclosure_logical_id, 1071 pcie_device->slot); 1072 if (pcie_device->connector_name[0] != '\0') 1073 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n", 1074 pcie_device->enclosure_level, 1075 pcie_device->connector_name); 1076 1077 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1078 if (!list_empty(&pcie_device->list)) { 1079 list_del_init(&pcie_device->list); 1080 was_on_pcie_device_list = 1; 1081 } 1082 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1083 if (was_on_pcie_device_list) { 1084 kfree(pcie_device->serial_number); 1085 pcie_device_put(pcie_device); 1086 } 1087 } 1088 1089 1090 /** 1091 * _scsih_pcie_device_remove_by_handle - removing pcie 
device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;

	/* Skip while a host reset is in progress; topology is rebuilt after. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* drop the reference held by the list */
			pcie_device_put(pcie_device);
		}
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		/* drop the lookup reference taken above */
		pcie_device_put(pcie_device);
	}
}

/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * This is added to the pcie_device_list link list.
1127 */ 1128 static void 1129 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc, 1130 struct _pcie_device *pcie_device) 1131 { 1132 unsigned long flags; 1133 1134 dewtprintk(ioc, 1135 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", 1136 __func__, 1137 pcie_device->handle, (u64)pcie_device->wwid)); 1138 if (pcie_device->enclosure_handle != 0) 1139 dewtprintk(ioc, 1140 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", 1141 __func__, 1142 (u64)pcie_device->enclosure_logical_id, 1143 pcie_device->slot)); 1144 if (pcie_device->connector_name[0] != '\0') 1145 dewtprintk(ioc, 1146 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", 1147 __func__, pcie_device->enclosure_level, 1148 pcie_device->connector_name)); 1149 1150 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1151 pcie_device_get(pcie_device); 1152 list_add_tail(&pcie_device->list, &ioc->pcie_device_list); 1153 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1154 1155 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) { 1156 _scsih_pcie_device_remove(ioc, pcie_device); 1157 } else if (!pcie_device->starget) { 1158 if (!ioc->is_driver_loading) { 1159 /*TODO-- Need to find out whether this condition will occur or not*/ 1160 clear_bit(pcie_device->handle, ioc->pend_os_device_add); 1161 } 1162 } else 1163 clear_bit(pcie_device->handle, ioc->pend_os_device_add); 1164 } 1165 1166 /* 1167 * _scsih_pcie_device_init_add - insert pcie_device to the init list. 1168 * @ioc: per adapter object 1169 * @pcie_device: the pcie_device object 1170 * Context: This function will acquire ioc->pcie_device_lock. 1171 * 1172 * Adding new object at driver load time to the ioc->pcie_device_init_list. 
1173 */ 1174 static void 1175 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc, 1176 struct _pcie_device *pcie_device) 1177 { 1178 unsigned long flags; 1179 1180 dewtprintk(ioc, 1181 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n", 1182 __func__, 1183 pcie_device->handle, (u64)pcie_device->wwid)); 1184 if (pcie_device->enclosure_handle != 0) 1185 dewtprintk(ioc, 1186 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n", 1187 __func__, 1188 (u64)pcie_device->enclosure_logical_id, 1189 pcie_device->slot)); 1190 if (pcie_device->connector_name[0] != '\0') 1191 dewtprintk(ioc, 1192 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n", 1193 __func__, pcie_device->enclosure_level, 1194 pcie_device->connector_name)); 1195 1196 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1197 pcie_device_get(pcie_device); 1198 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list); 1199 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); 1200 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1201 } 1202 /** 1203 * _scsih_raid_device_find_by_id - raid device search 1204 * @ioc: per adapter object 1205 * @id: sas device target id 1206 * @channel: sas device channel 1207 * Context: Calling function should acquire ioc->raid_device_lock 1208 * 1209 * This searches for raid_device based on target id, then return raid_device 1210 * object. 
1211 */ 1212 static struct _raid_device * 1213 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) 1214 { 1215 struct _raid_device *raid_device, *r; 1216 1217 r = NULL; 1218 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1219 if (raid_device->id == id && raid_device->channel == channel) { 1220 r = raid_device; 1221 goto out; 1222 } 1223 } 1224 1225 out: 1226 return r; 1227 } 1228 1229 /** 1230 * mpt3sas_raid_device_find_by_handle - raid device search 1231 * @ioc: per adapter object 1232 * @handle: sas device handle (assigned by firmware) 1233 * Context: Calling function should acquire ioc->raid_device_lock 1234 * 1235 * This searches for raid_device based on handle, then return raid_device 1236 * object. 1237 */ 1238 struct _raid_device * 1239 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1240 { 1241 struct _raid_device *raid_device, *r; 1242 1243 r = NULL; 1244 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1245 if (raid_device->handle != handle) 1246 continue; 1247 r = raid_device; 1248 goto out; 1249 } 1250 1251 out: 1252 return r; 1253 } 1254 1255 /** 1256 * _scsih_raid_device_find_by_wwid - raid device search 1257 * @ioc: per adapter object 1258 * @wwid: ? 1259 * Context: Calling function should acquire ioc->raid_device_lock 1260 * 1261 * This searches for raid_device based on wwid, then return raid_device 1262 * object. 
1263 */ 1264 static struct _raid_device * 1265 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) 1266 { 1267 struct _raid_device *raid_device, *r; 1268 1269 r = NULL; 1270 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1271 if (raid_device->wwid != wwid) 1272 continue; 1273 r = raid_device; 1274 goto out; 1275 } 1276 1277 out: 1278 return r; 1279 } 1280 1281 /** 1282 * _scsih_raid_device_add - add raid_device object 1283 * @ioc: per adapter object 1284 * @raid_device: raid_device object 1285 * 1286 * This is added to the raid_device_list link list. 1287 */ 1288 static void 1289 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, 1290 struct _raid_device *raid_device) 1291 { 1292 unsigned long flags; 1293 1294 dewtprintk(ioc, 1295 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n", 1296 __func__, 1297 raid_device->handle, (u64)raid_device->wwid)); 1298 1299 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1300 list_add_tail(&raid_device->list, &ioc->raid_device_list); 1301 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1302 } 1303 1304 /** 1305 * _scsih_raid_device_remove - delete raid_device object 1306 * @ioc: per adapter object 1307 * @raid_device: raid_device object 1308 * 1309 */ 1310 static void 1311 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, 1312 struct _raid_device *raid_device) 1313 { 1314 unsigned long flags; 1315 1316 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1317 list_del(&raid_device->list); 1318 kfree(raid_device); 1319 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1320 } 1321 1322 /** 1323 * mpt3sas_scsih_expander_find_by_handle - expander device search 1324 * @ioc: per adapter object 1325 * @handle: expander handle (assigned by firmware) 1326 * Context: Calling function should acquire ioc->sas_device_lock 1327 * 1328 * This searches for expander device based on handle, then returns the 1329 * sas_node object. 
1330 */ 1331 struct _sas_node * 1332 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1333 { 1334 struct _sas_node *sas_expander, *r; 1335 1336 r = NULL; 1337 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1338 if (sas_expander->handle != handle) 1339 continue; 1340 r = sas_expander; 1341 goto out; 1342 } 1343 out: 1344 return r; 1345 } 1346 1347 /** 1348 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search 1349 * @ioc: per adapter object 1350 * @handle: enclosure handle (assigned by firmware) 1351 * Context: Calling function should acquire ioc->sas_device_lock 1352 * 1353 * This searches for enclosure device based on handle, then returns the 1354 * enclosure object. 1355 */ 1356 static struct _enclosure_node * 1357 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 1358 { 1359 struct _enclosure_node *enclosure_dev, *r; 1360 1361 r = NULL; 1362 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { 1363 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) 1364 continue; 1365 r = enclosure_dev; 1366 goto out; 1367 } 1368 out: 1369 return r; 1370 } 1371 /** 1372 * mpt3sas_scsih_expander_find_by_sas_address - expander device search 1373 * @ioc: per adapter object 1374 * @sas_address: sas address 1375 * Context: Calling function should acquire ioc->sas_node_lock. 1376 * 1377 * This searches for expander device based on sas_address, then returns the 1378 * sas_node object. 
1379 */ 1380 struct _sas_node * 1381 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, 1382 u64 sas_address) 1383 { 1384 struct _sas_node *sas_expander, *r; 1385 1386 r = NULL; 1387 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { 1388 if (sas_expander->sas_address != sas_address) 1389 continue; 1390 r = sas_expander; 1391 goto out; 1392 } 1393 out: 1394 return r; 1395 } 1396 1397 /** 1398 * _scsih_expander_node_add - insert expander device to the list. 1399 * @ioc: per adapter object 1400 * @sas_expander: the sas_device object 1401 * Context: This function will acquire ioc->sas_node_lock. 1402 * 1403 * Adding new object to the ioc->sas_expander_list. 1404 */ 1405 static void 1406 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, 1407 struct _sas_node *sas_expander) 1408 { 1409 unsigned long flags; 1410 1411 spin_lock_irqsave(&ioc->sas_node_lock, flags); 1412 list_add_tail(&sas_expander->list, &ioc->sas_expander_list); 1413 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 1414 } 1415 1416 /** 1417 * _scsih_is_end_device - determines if device is an end device 1418 * @device_info: bitfield providing information about the device. 1419 * Context: none 1420 * 1421 * Return: 1 if end device. 1422 */ 1423 static int 1424 _scsih_is_end_device(u32 device_info) 1425 { 1426 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && 1427 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | 1428 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | 1429 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) 1430 return 1; 1431 else 1432 return 0; 1433 } 1434 1435 /** 1436 * _scsih_is_nvme_device - determines if device is an nvme device 1437 * @device_info: bitfield providing information about the device. 1438 * Context: none 1439 * 1440 * Return: 1 if nvme device. 
1441 */ 1442 static int 1443 _scsih_is_nvme_device(u32 device_info) 1444 { 1445 if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) 1446 == MPI26_PCIE_DEVINFO_NVME) 1447 return 1; 1448 else 1449 return 0; 1450 } 1451 1452 /** 1453 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry 1454 * @ioc: per adapter object 1455 * @smid: system request message index 1456 * 1457 * Return: the smid stored scmd pointer. 1458 * Then will dereference the stored scmd pointer. 1459 */ 1460 struct scsi_cmnd * 1461 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) 1462 { 1463 struct scsi_cmnd *scmd = NULL; 1464 struct scsiio_tracker *st; 1465 1466 if (smid > 0 && 1467 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { 1468 u32 unique_tag = smid - 1; 1469 1470 scmd = scsi_host_find_tag(ioc->shost, unique_tag); 1471 if (scmd) { 1472 st = scsi_cmd_priv(scmd); 1473 if (st->cb_idx == 0xFF || st->smid == 0) 1474 scmd = NULL; 1475 } 1476 } 1477 return scmd; 1478 } 1479 1480 /** 1481 * scsih_change_queue_depth - setting device queue depth 1482 * @sdev: scsi device struct 1483 * @qdepth: requested queue depth 1484 * 1485 * Return: queue depth. 
1486 */ 1487 static int 1488 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1489 { 1490 struct Scsi_Host *shost = sdev->host; 1491 int max_depth; 1492 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 1493 struct MPT3SAS_DEVICE *sas_device_priv_data; 1494 struct MPT3SAS_TARGET *sas_target_priv_data; 1495 struct _sas_device *sas_device; 1496 unsigned long flags; 1497 1498 max_depth = shost->can_queue; 1499 1500 /* limit max device queue for SATA to 32 */ 1501 sas_device_priv_data = sdev->hostdata; 1502 if (!sas_device_priv_data) 1503 goto not_sata; 1504 sas_target_priv_data = sas_device_priv_data->sas_target; 1505 if (!sas_target_priv_data) 1506 goto not_sata; 1507 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) 1508 goto not_sata; 1509 1510 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1511 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); 1512 if (sas_device) { 1513 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) 1514 max_depth = MPT3SAS_SATA_QUEUE_DEPTH; 1515 1516 sas_device_put(sas_device); 1517 } 1518 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1519 1520 not_sata: 1521 1522 if (!sdev->tagged_supported) 1523 max_depth = 1; 1524 if (qdepth > max_depth) 1525 qdepth = max_depth; 1526 return scsi_change_queue_depth(sdev, qdepth); 1527 } 1528 1529 /** 1530 * scsih_target_alloc - target add routine 1531 * @starget: scsi target struct 1532 * 1533 * Return: 0 if ok. Any other return is assumed to be an error and 1534 * the device is ignored. 
1535 */ 1536 static int 1537 scsih_target_alloc(struct scsi_target *starget) 1538 { 1539 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1540 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 1541 struct MPT3SAS_TARGET *sas_target_priv_data; 1542 struct _sas_device *sas_device; 1543 struct _raid_device *raid_device; 1544 struct _pcie_device *pcie_device; 1545 unsigned long flags; 1546 struct sas_rphy *rphy; 1547 1548 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), 1549 GFP_KERNEL); 1550 if (!sas_target_priv_data) 1551 return -ENOMEM; 1552 1553 starget->hostdata = sas_target_priv_data; 1554 sas_target_priv_data->starget = starget; 1555 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; 1556 1557 /* RAID volumes */ 1558 if (starget->channel == RAID_CHANNEL) { 1559 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1560 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, 1561 starget->channel); 1562 if (raid_device) { 1563 sas_target_priv_data->handle = raid_device->handle; 1564 sas_target_priv_data->sas_address = raid_device->wwid; 1565 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; 1566 if (ioc->is_warpdrive) 1567 sas_target_priv_data->raid_device = raid_device; 1568 raid_device->starget = starget; 1569 } 1570 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1571 return 0; 1572 } 1573 1574 /* PCIe devices */ 1575 if (starget->channel == PCIE_CHANNEL) { 1576 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1577 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id, 1578 starget->channel); 1579 if (pcie_device) { 1580 sas_target_priv_data->handle = pcie_device->handle; 1581 sas_target_priv_data->sas_address = pcie_device->wwid; 1582 sas_target_priv_data->pcie_dev = pcie_device; 1583 pcie_device->starget = starget; 1584 pcie_device->id = starget->id; 1585 pcie_device->channel = starget->channel; 1586 sas_target_priv_data->flags |= 1587 MPT_TARGET_FLAGS_PCIE_DEVICE; 1588 if (pcie_device->fast_path) 1589 
sas_target_priv_data->flags |= 1590 MPT_TARGET_FASTPATH_IO; 1591 } 1592 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1593 return 0; 1594 } 1595 1596 /* sas/sata devices */ 1597 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1598 rphy = dev_to_rphy(starget->dev.parent); 1599 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 1600 rphy->identify.sas_address); 1601 1602 if (sas_device) { 1603 sas_target_priv_data->handle = sas_device->handle; 1604 sas_target_priv_data->sas_address = sas_device->sas_address; 1605 sas_target_priv_data->sas_dev = sas_device; 1606 sas_device->starget = starget; 1607 sas_device->id = starget->id; 1608 sas_device->channel = starget->channel; 1609 if (test_bit(sas_device->handle, ioc->pd_handles)) 1610 sas_target_priv_data->flags |= 1611 MPT_TARGET_FLAGS_RAID_COMPONENT; 1612 if (sas_device->fast_path) 1613 sas_target_priv_data->flags |= 1614 MPT_TARGET_FASTPATH_IO; 1615 } 1616 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1617 1618 return 0; 1619 } 1620 1621 /** 1622 * scsih_target_destroy - target destroy routine 1623 * @starget: scsi target struct 1624 */ 1625 static void 1626 scsih_target_destroy(struct scsi_target *starget) 1627 { 1628 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1629 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 1630 struct MPT3SAS_TARGET *sas_target_priv_data; 1631 struct _sas_device *sas_device; 1632 struct _raid_device *raid_device; 1633 struct _pcie_device *pcie_device; 1634 unsigned long flags; 1635 1636 sas_target_priv_data = starget->hostdata; 1637 if (!sas_target_priv_data) 1638 return; 1639 1640 if (starget->channel == RAID_CHANNEL) { 1641 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1642 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, 1643 starget->channel); 1644 if (raid_device) { 1645 raid_device->starget = NULL; 1646 raid_device->sdev = NULL; 1647 } 1648 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1649 goto out; 1650 } 1651 1652 if (starget->channel 
== PCIE_CHANNEL) { 1653 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1654 pcie_device = __mpt3sas_get_pdev_from_target(ioc, 1655 sas_target_priv_data); 1656 if (pcie_device && (pcie_device->starget == starget) && 1657 (pcie_device->id == starget->id) && 1658 (pcie_device->channel == starget->channel)) 1659 pcie_device->starget = NULL; 1660 1661 if (pcie_device) { 1662 /* 1663 * Corresponding get() is in _scsih_target_alloc() 1664 */ 1665 sas_target_priv_data->pcie_dev = NULL; 1666 pcie_device_put(pcie_device); 1667 pcie_device_put(pcie_device); 1668 } 1669 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1670 goto out; 1671 } 1672 1673 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1674 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); 1675 if (sas_device && (sas_device->starget == starget) && 1676 (sas_device->id == starget->id) && 1677 (sas_device->channel == starget->channel)) 1678 sas_device->starget = NULL; 1679 1680 if (sas_device) { 1681 /* 1682 * Corresponding get() is in _scsih_target_alloc() 1683 */ 1684 sas_target_priv_data->sas_dev = NULL; 1685 sas_device_put(sas_device); 1686 1687 sas_device_put(sas_device); 1688 } 1689 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1690 1691 out: 1692 kfree(sas_target_priv_data); 1693 starget->hostdata = NULL; 1694 } 1695 1696 /** 1697 * scsih_slave_alloc - device add routine 1698 * @sdev: scsi device struct 1699 * 1700 * Return: 0 if ok. Any other return is assumed to be an error and 1701 * the device is ignored. 
1702 */ 1703 static int 1704 scsih_slave_alloc(struct scsi_device *sdev) 1705 { 1706 struct Scsi_Host *shost; 1707 struct MPT3SAS_ADAPTER *ioc; 1708 struct MPT3SAS_TARGET *sas_target_priv_data; 1709 struct MPT3SAS_DEVICE *sas_device_priv_data; 1710 struct scsi_target *starget; 1711 struct _raid_device *raid_device; 1712 struct _sas_device *sas_device; 1713 struct _pcie_device *pcie_device; 1714 unsigned long flags; 1715 1716 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), 1717 GFP_KERNEL); 1718 if (!sas_device_priv_data) 1719 return -ENOMEM; 1720 1721 sas_device_priv_data->lun = sdev->lun; 1722 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT; 1723 1724 starget = scsi_target(sdev); 1725 sas_target_priv_data = starget->hostdata; 1726 sas_target_priv_data->num_luns++; 1727 sas_device_priv_data->sas_target = sas_target_priv_data; 1728 sdev->hostdata = sas_device_priv_data; 1729 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT)) 1730 sdev->no_uld_attach = 1; 1731 1732 shost = dev_to_shost(&starget->dev); 1733 ioc = shost_priv(shost); 1734 if (starget->channel == RAID_CHANNEL) { 1735 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1736 raid_device = _scsih_raid_device_find_by_id(ioc, 1737 starget->id, starget->channel); 1738 if (raid_device) 1739 raid_device->sdev = sdev; /* raid is single lun */ 1740 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1741 } 1742 if (starget->channel == PCIE_CHANNEL) { 1743 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1744 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, 1745 sas_target_priv_data->sas_address); 1746 if (pcie_device && (pcie_device->starget == NULL)) { 1747 sdev_printk(KERN_INFO, sdev, 1748 "%s : pcie_device->starget set to starget @ %d\n", 1749 __func__, __LINE__); 1750 pcie_device->starget = starget; 1751 } 1752 1753 if (pcie_device) 1754 pcie_device_put(pcie_device); 1755 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1756 1757 } else if (!(sas_target_priv_data->flags 
& MPT_TARGET_FLAGS_VOLUME)) { 1758 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1759 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 1760 sas_target_priv_data->sas_address); 1761 if (sas_device && (sas_device->starget == NULL)) { 1762 sdev_printk(KERN_INFO, sdev, 1763 "%s : sas_device->starget set to starget @ %d\n", 1764 __func__, __LINE__); 1765 sas_device->starget = starget; 1766 } 1767 1768 if (sas_device) 1769 sas_device_put(sas_device); 1770 1771 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1772 } 1773 1774 return 0; 1775 } 1776 1777 /** 1778 * scsih_slave_destroy - device destroy routine 1779 * @sdev: scsi device struct 1780 */ 1781 static void 1782 scsih_slave_destroy(struct scsi_device *sdev) 1783 { 1784 struct MPT3SAS_TARGET *sas_target_priv_data; 1785 struct scsi_target *starget; 1786 struct Scsi_Host *shost; 1787 struct MPT3SAS_ADAPTER *ioc; 1788 struct _sas_device *sas_device; 1789 struct _pcie_device *pcie_device; 1790 unsigned long flags; 1791 1792 if (!sdev->hostdata) 1793 return; 1794 1795 starget = scsi_target(sdev); 1796 sas_target_priv_data = starget->hostdata; 1797 sas_target_priv_data->num_luns--; 1798 1799 shost = dev_to_shost(&starget->dev); 1800 ioc = shost_priv(shost); 1801 1802 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) { 1803 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1804 pcie_device = __mpt3sas_get_pdev_from_target(ioc, 1805 sas_target_priv_data); 1806 if (pcie_device && !sas_target_priv_data->num_luns) 1807 pcie_device->starget = NULL; 1808 1809 if (pcie_device) 1810 pcie_device_put(pcie_device); 1811 1812 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1813 1814 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { 1815 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1816 sas_device = __mpt3sas_get_sdev_from_target(ioc, 1817 sas_target_priv_data); 1818 if (sas_device && !sas_target_priv_data->num_luns) 1819 sas_device->starget = NULL; 1820 1821 if (sas_device) 1822 
sas_device_put(sas_device); 1823 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1824 } 1825 1826 kfree(sdev->hostdata); 1827 sdev->hostdata = NULL; 1828 } 1829 1830 /** 1831 * _scsih_display_sata_capabilities - sata capabilities 1832 * @ioc: per adapter object 1833 * @handle: device handle 1834 * @sdev: scsi device struct 1835 */ 1836 static void 1837 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, 1838 u16 handle, struct scsi_device *sdev) 1839 { 1840 Mpi2ConfigReply_t mpi_reply; 1841 Mpi2SasDevicePage0_t sas_device_pg0; 1842 u32 ioc_status; 1843 u16 flags; 1844 u32 device_info; 1845 1846 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 1847 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 1848 ioc_err(ioc, "failure at %s:%d/%s()!\n", 1849 __FILE__, __LINE__, __func__); 1850 return; 1851 } 1852 1853 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 1854 MPI2_IOCSTATUS_MASK; 1855 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 1856 ioc_err(ioc, "failure at %s:%d/%s()!\n", 1857 __FILE__, __LINE__, __func__); 1858 return; 1859 } 1860 1861 flags = le16_to_cpu(sas_device_pg0.Flags); 1862 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 1863 1864 sdev_printk(KERN_INFO, sdev, 1865 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " 1866 "sw_preserve(%s)\n", 1867 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", 1868 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", 1869 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : 1870 "n", 1871 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", 1872 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", 1873 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? 
"y" : "n"); 1874 } 1875 1876 /* 1877 * raid transport support - 1878 * Enabled for SLES11 and newer, in older kernels the driver will panic when 1879 * unloading the driver followed by a load - I believe that the subroutine 1880 * raid_class_release() is not cleaning up properly. 1881 */ 1882 1883 /** 1884 * scsih_is_raid - return boolean indicating device is raid volume 1885 * @dev: the device struct object 1886 */ 1887 static int 1888 scsih_is_raid(struct device *dev) 1889 { 1890 struct scsi_device *sdev = to_scsi_device(dev); 1891 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 1892 1893 if (ioc->is_warpdrive) 1894 return 0; 1895 return (sdev->channel == RAID_CHANNEL) ? 1 : 0; 1896 } 1897 1898 static int 1899 scsih_is_nvme(struct device *dev) 1900 { 1901 struct scsi_device *sdev = to_scsi_device(dev); 1902 1903 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0; 1904 } 1905 1906 /** 1907 * scsih_get_resync - get raid volume resync percent complete 1908 * @dev: the device struct object 1909 */ 1910 static void 1911 scsih_get_resync(struct device *dev) 1912 { 1913 struct scsi_device *sdev = to_scsi_device(dev); 1914 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 1915 static struct _raid_device *raid_device; 1916 unsigned long flags; 1917 Mpi2RaidVolPage0_t vol_pg0; 1918 Mpi2ConfigReply_t mpi_reply; 1919 u32 volume_status_flags; 1920 u8 percent_complete; 1921 u16 handle; 1922 1923 percent_complete = 0; 1924 handle = 0; 1925 if (ioc->is_warpdrive) 1926 goto out; 1927 1928 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1929 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 1930 sdev->channel); 1931 if (raid_device) { 1932 handle = raid_device->handle; 1933 percent_complete = raid_device->percent_complete; 1934 } 1935 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1936 1937 if (!handle) 1938 goto out; 1939 1940 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1941 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 1942 
sizeof(Mpi2RaidVolPage0_t))) { 1943 ioc_err(ioc, "failure at %s:%d/%s()!\n", 1944 __FILE__, __LINE__, __func__); 1945 percent_complete = 0; 1946 goto out; 1947 } 1948 1949 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); 1950 if (!(volume_status_flags & 1951 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) 1952 percent_complete = 0; 1953 1954 out: 1955 1956 switch (ioc->hba_mpi_version_belonged) { 1957 case MPI2_VERSION: 1958 raid_set_resync(mpt2sas_raid_template, dev, percent_complete); 1959 break; 1960 case MPI25_VERSION: 1961 case MPI26_VERSION: 1962 raid_set_resync(mpt3sas_raid_template, dev, percent_complete); 1963 break; 1964 } 1965 } 1966 1967 /** 1968 * scsih_get_state - get raid volume level 1969 * @dev: the device struct object 1970 */ 1971 static void 1972 scsih_get_state(struct device *dev) 1973 { 1974 struct scsi_device *sdev = to_scsi_device(dev); 1975 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); 1976 static struct _raid_device *raid_device; 1977 unsigned long flags; 1978 Mpi2RaidVolPage0_t vol_pg0; 1979 Mpi2ConfigReply_t mpi_reply; 1980 u32 volstate; 1981 enum raid_state state = RAID_STATE_UNKNOWN; 1982 u16 handle = 0; 1983 1984 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1985 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, 1986 sdev->channel); 1987 if (raid_device) 1988 handle = raid_device->handle; 1989 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1990 1991 if (!raid_device) 1992 goto out; 1993 1994 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, 1995 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 1996 sizeof(Mpi2RaidVolPage0_t))) { 1997 ioc_err(ioc, "failure at %s:%d/%s()!\n", 1998 __FILE__, __LINE__, __func__); 1999 goto out; 2000 } 2001 2002 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); 2003 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { 2004 state = RAID_STATE_RESYNCING; 2005 goto out; 2006 } 2007 2008 switch (vol_pg0.VolumeState) { 2009 case 
MPI2_RAID_VOL_STATE_OPTIMAL: 2010 case MPI2_RAID_VOL_STATE_ONLINE: 2011 state = RAID_STATE_ACTIVE; 2012 break; 2013 case MPI2_RAID_VOL_STATE_DEGRADED: 2014 state = RAID_STATE_DEGRADED; 2015 break; 2016 case MPI2_RAID_VOL_STATE_FAILED: 2017 case MPI2_RAID_VOL_STATE_MISSING: 2018 state = RAID_STATE_OFFLINE; 2019 break; 2020 } 2021 out: 2022 switch (ioc->hba_mpi_version_belonged) { 2023 case MPI2_VERSION: 2024 raid_set_state(mpt2sas_raid_template, dev, state); 2025 break; 2026 case MPI25_VERSION: 2027 case MPI26_VERSION: 2028 raid_set_state(mpt3sas_raid_template, dev, state); 2029 break; 2030 } 2031 } 2032 2033 /** 2034 * _scsih_set_level - set raid level 2035 * @ioc: ? 2036 * @sdev: scsi device struct 2037 * @volume_type: volume type 2038 */ 2039 static void 2040 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, 2041 struct scsi_device *sdev, u8 volume_type) 2042 { 2043 enum raid_level level = RAID_LEVEL_UNKNOWN; 2044 2045 switch (volume_type) { 2046 case MPI2_RAID_VOL_TYPE_RAID0: 2047 level = RAID_LEVEL_0; 2048 break; 2049 case MPI2_RAID_VOL_TYPE_RAID10: 2050 level = RAID_LEVEL_10; 2051 break; 2052 case MPI2_RAID_VOL_TYPE_RAID1E: 2053 level = RAID_LEVEL_1E; 2054 break; 2055 case MPI2_RAID_VOL_TYPE_RAID1: 2056 level = RAID_LEVEL_1; 2057 break; 2058 } 2059 2060 switch (ioc->hba_mpi_version_belonged) { 2061 case MPI2_VERSION: 2062 raid_set_level(mpt2sas_raid_template, 2063 &sdev->sdev_gendev, level); 2064 break; 2065 case MPI25_VERSION: 2066 case MPI26_VERSION: 2067 raid_set_level(mpt3sas_raid_template, 2068 &sdev->sdev_gendev, level); 2069 break; 2070 } 2071 } 2072 2073 2074 /** 2075 * _scsih_get_volume_capabilities - volume capabilities 2076 * @ioc: per adapter object 2077 * @raid_device: the raid_device object 2078 * 2079 * Return: 0 for success, else 1 2080 */ 2081 static int 2082 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, 2083 struct _raid_device *raid_device) 2084 { 2085 Mpi2RaidVolPage0_t *vol_pg0; 2086 Mpi2RaidPhysDiskPage0_t pd_pg0; 2087 
Mpi2SasDevicePage0_t sas_device_pg0; 2088 Mpi2ConfigReply_t mpi_reply; 2089 u16 sz; 2090 u8 num_pds; 2091 2092 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, 2093 &num_pds)) || !num_pds) { 2094 dfailprintk(ioc, 2095 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2096 __FILE__, __LINE__, __func__)); 2097 return 1; 2098 } 2099 2100 raid_device->num_pds = num_pds; 2101 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * 2102 sizeof(Mpi2RaidVol0PhysDisk_t)); 2103 vol_pg0 = kzalloc(sz, GFP_KERNEL); 2104 if (!vol_pg0) { 2105 dfailprintk(ioc, 2106 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2107 __FILE__, __LINE__, __func__)); 2108 return 1; 2109 } 2110 2111 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, 2112 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { 2113 dfailprintk(ioc, 2114 ioc_warn(ioc, "failure at %s:%d/%s()!\n", 2115 __FILE__, __LINE__, __func__)); 2116 kfree(vol_pg0); 2117 return 1; 2118 } 2119 2120 raid_device->volume_type = vol_pg0->VolumeType; 2121 2122 /* figure out what the underlying devices are by 2123 * obtaining the device_info bits for the 1st device 2124 */ 2125 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 2126 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, 2127 vol_pg0->PhysDisk[0].PhysDiskNum))) { 2128 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 2129 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 2130 le16_to_cpu(pd_pg0.DevHandle)))) { 2131 raid_device->device_info = 2132 le32_to_cpu(sas_device_pg0.DeviceInfo); 2133 } 2134 } 2135 2136 kfree(vol_pg0); 2137 return 0; 2138 } 2139 2140 /** 2141 * _scsih_enable_tlr - setting TLR flags 2142 * @ioc: per adapter object 2143 * @sdev: scsi device struct 2144 * 2145 * Enabling Transaction Layer Retries for tape devices when 2146 * vpd page 0x90 is present 2147 * 2148 */ 2149 static void 2150 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) 2151 { 2152 2153 /* only for TAPE */ 2154 if (sdev->type != TYPE_TAPE) 
		return;

	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
		return;

	sas_enable_tlr(sdev);
	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
	return;

}

/**
 * scsih_slave_configure - device configure routine.
 * @sdev: scsi device struct
 *
 * Sets queue depth, max sectors and transport attributes depending on
 * whether @sdev is a raid volume, a hidden raid component, an NVMe
 * (PCIe) device or a plain SAS/SATA end device.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		/* the volume type may override the qdepth picked above */
		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/* an even-disk RAID1E may be displayed as RAID10
			 * when the OEM flag asks for it
			 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			    "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    MPT3SAS_RAID_MAX_SECTORS);
			sdev_printk(KERN_INFO, sdev,
			    "Set queue's max_sector to: %u\n",
			    MPT3SAS_RAID_MAX_SECTORS);
		}

		scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
		    sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			ds, handle, (unsigned long long)pcie_device->wwid,
			pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
			ds,
			(unsigned long long)pcie_device->enclosure_logical_id,
			pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x),"
				"connector name( %s)\n", ds,
				pcie_device->enclosure_level,
				pcie_device->connector_name);

		/* nvme_mdts is in bytes; convert to 512-byte sectors */
		if (pcie_device->nvme_mdts)
			blk_queue_max_hw_sectors(sdev->request_queue,
					pcie_device->nvme_mdts/512);

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		scsih_change_queue_depth(sdev, qdepth);
		/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
		 ** merged and can eliminate holes created during merging
		 ** operation.
		 **/
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
				sdev->request_queue);
		blk_queue_virt_boundary(sdev->request_queue,
				ioc->page_size - 1);
		return 0;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_device_priv_data->sas_target->sas_address);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
		ssp_target = 1;
		if (sas_device->device_info &
				MPI2_SAS_DEVICE_INFO_SEP) {
			sdev_printk(KERN_WARNING, sdev,
			"set ignore_delay_remove for handle(0x%04x)\n",
			sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);


	scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}

/**
 * scsih_bios_param - fetch head, sector, cylinder info for a disk
 * @sdev: scsi device struct
 * @bdev: pointer to block device context
 * @capacity: device size (in 512 byte sectors)
 * @params: three element array to place output:
 *	params[0] number of heads (max 255)
 *	params[1] number of sectors (max 63)
 *	params[2] number of cylinders
 */
static int
scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
	sector_t capacity, int params[])
{
	int		heads;
	int		sectors;
	sector_t	cylinders;
	ulong		dummy;

	heads = 64;
	sectors = 32;

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	/*
	 * Handle extended translation size for logical drives
	 * > 1Gb
	 */
	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}

	/* return result */
	params[0] = heads;
	params[1] = sectors;
	params[2] = cylinders;

	return 0;
}

/**
 * _scsih_response_code - translation of device response code
 * @ioc: per adapter object
 * @response_code: response code returned by the device
 *
 * Logs a human readable description of a task management response code.
 */
static void
_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
{
	char *desc;

	switch (response_code) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
}

/**
 * _scsih_tm_done - tm completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using scsih_issue_tm.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore stale or mismatched completions */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}

/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During taskmanagement request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	/* once the first matching device is flagged, the remaining
	 * iterations are skipped (handle is per target)
	 */
	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}

/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During taskmanagement request, we need to freeze the device queue.
 */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;

	shost_for_each_device(sdev, ioc->shost) {
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}

/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 * Context: user
 *
 * A generic API for sending task management requests to firmware.
 *
 * The callback index is set inside `ioc->tm_cb_idx`.
 * The caller is responsible to check for outstanding commands.
 *
 * Return: SUCCESS or FAILED.
 */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	u16 smid = 0;
	u32 ioc_state;
	int rc;

	/* caller must hold tm_cmds.mutex (see issue_locked_tm wrapper) */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/* an active doorbell or a faulted IOC is escalated straight to a
	 * hard reset instead of sending the TM request
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		if (mpt3sas_base_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}
	rc = SUCCESS;

 out:
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_scsih_issue_locked_tm - issue_tm with tm_cmds.mutex held
 *
 * Serializing wrapper around mpt3sas_scsih_issue_tm(); same parameters
 * and return values.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		u64 lun, u8 type, u16 smid_task, u16 msix_task,
		u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
			msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}

/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by task management callback handlers.
 */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}

/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device,
		"attempting task abort! scmd(%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
			"device been deleted! scmd(%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	/* NVMe devices without custom TM handling use the nvme abort
	 * timeout instead of the default 30s
	 */
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device && (!ioc->tm_custom_handling))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
	    ((r == SUCCESS) ?
	    "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}

/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
		"attempting device reset! scmd(%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		sdev_printk(KERN_INFO, scmd->device,
			"device been deleted! scmd(%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset with a device
	 * specific timeout; everything else uses a link reset
	 */
	if (pcie_device && (!ioc->tm_custom_handling)) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}

/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16 handle;
	u8 tr_method = 0;
	u8 tr_timeout = 30;
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
		scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
			scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	if (pcie_device && (!ioc->tm_custom_handling)) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}


/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if command aborted else FAILED
 */
static int
scsih_host_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	int r, retval;

	ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	if (ioc->is_driver_loading || ioc->remove_host) {
		ioc_info(ioc, "Blocking the host reset\n");
		r = FAILED;
		goto out;
	}

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	r = (retval < 0) ? FAILED : SUCCESS;
 out:
	ioc_info(ioc, "host reset: %s scmd(%p)\n",
		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);

	return r;
}

/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * This adds the firmware event object into link list, then queues it up to
 * be processed from user context.
 */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* one reference for the list, one for the queued work item */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * If the fw_event is on the fw_event_list, remove it and do a put.
 */
static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
        *fw_event)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->fw_event_lock, flags);
        if (!list_empty(&fw_event->list)) {
                list_del_init(&fw_event->list);
                /* drop the reference the fw_event_list held on this event */
                fw_event_work_put(fw_event);
        }
        spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}


/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 *
 * Queues a MPT3SAS_PROCESS_TRIGGER_DIAG firmware event carrying a copy of
 * @event_data.  Silently dropped while the driver is still loading.
 */
void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
        struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
{
        struct fw_event_work *fw_event;
        u16 sz;

        if (ioc->is_driver_loading)
                return;
        sz = sizeof(*event_data);
        fw_event = alloc_fw_event_work(sz);
        if (!fw_event)
                return;
        fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
        fw_event->ioc = ioc;
        memcpy(fw_event->event_data, event_data, sizeof(*event_data));
        _scsih_fw_event_add(ioc, fw_event);
        /* drop our local reference; _scsih_fw_event_add() took its own */
        fw_event_work_put(fw_event);
}

/**
 * _scsih_error_recovery_delete_devices - remove devices not responding
 * @ioc: per adapter object
 *
 * Queues a MPT3SAS_REMOVE_UNRESPONDING_DEVICES firmware event; dropped
 * while the driver is still loading.
 */
static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
{
        struct fw_event_work *fw_event;

        if (ioc->is_driver_loading)
                return;
        fw_event = alloc_fw_event_work(0);
        if (!fw_event)
                return;
        fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
        fw_event->ioc = ioc;
        _scsih_fw_event_add(ioc, fw_event);
        fw_event_work_put(fw_event);
}

/**
 * mpt3sas_port_enable_complete - port enable completed (fake event)
 * @ioc: per adapter object
 *
 * Queues a MPT3SAS_PORT_ENABLE_COMPLETE pseudo firmware event.
 */
void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
{
        struct fw_event_work *fw_event;

        fw_event = alloc_fw_event_work(0);
        if (!fw_event)
                return;
        fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
        fw_event->ioc = ioc;
        _scsih_fw_event_add(ioc, fw_event);
        fw_event_work_put(fw_event);
}

/*
 * dequeue_next_fw_event - pop the oldest queued firmware event, or NULL.
 * The caller inherits the reference the fw_event_list held on the event.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct fw_event_work *fw_event = NULL;

        spin_lock_irqsave(&ioc->fw_event_lock, flags);
        if (!list_empty(&ioc->fw_event_list)) {
                fw_event = list_first_entry(&ioc->fw_event_list,
                                struct fw_event_work, list);
                list_del_init(&fw_event->list);
        }
        spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

        return fw_event;
}

/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
        struct fw_event_work *fw_event;

        /* NOTE(review): unlocked list_empty() is an opportunistic early-out;
         * the in_interrupt() check presumably exists because
         * cancel_work_sync() below may sleep — confirm against workqueue API.
         */
        if (list_empty(&ioc->fw_event_list) ||
             !ioc->firmware_event_thread || in_interrupt())
                return;

        while ((fw_event = dequeue_next_fw_event(ioc))) {
                /*
                 * Wait on the fw_event to complete. If this returns 1, then
                 * the event was never executed, and we need a put for the
                 * reference the work had on the fw_event.
                 *
                 * If it did execute, we wait for it to finish, and the put will
                 * happen from _firmware_event_work()
                 */
                if (cancel_work_sync(&fw_event->work))
                        fw_event_work_put(fw_event);

                /* put for the reference inherited from the list */
                fw_event_work_put(fw_event);
        }
}

/**
 * _scsih_internal_device_block - block the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 *
 * make sure device is blocked without error, if not
 * print an error
 */
static void
_scsih_internal_device_block(struct scsi_device *sdev,
        struct MPT3SAS_DEVICE *sas_device_priv_data)
{
        int r = 0;

        sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
            sas_device_priv_data->sas_target->handle);
        /* mark blocked before asking the midlayer to transition the sdev */
        sas_device_priv_data->block = 1;

        r = scsi_internal_device_block_nowait(sdev);
        if (r == -EINVAL)
                sdev_printk(KERN_WARNING, sdev,
                    "device_block failed with return(%d) for handle(0x%04x)\n",
                    r, sas_device_priv_data->sas_target->handle);
}

/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
        struct MPT3SAS_DEVICE *sas_device_priv_data)
{
        int r = 0;

        sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
            "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
        sas_device_priv_data->block = 0;
        r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
        if (r == -EINVAL) {
                /* The device has been set to SDEV_RUNNING by SD layer during
                 * device addition but the request queue is still stopped by
                 * our earlier block call. We need to perform a block again
                 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

                sdev_printk(KERN_WARNING, sdev,
                    "device_unblock failed with return(%d) for handle(0x%04x) "
                    "performing a block followed by an unblock\n",
                    r, sas_device_priv_data->sas_target->handle);
                sas_device_priv_data->block = 1;
                r = scsi_internal_device_block_nowait(sdev);
                if (r)
                        sdev_printk(KERN_WARNING, sdev, "retried device_block "
                            "failed with return(%d) for handle(0x%04x)\n",
                            r, sas_device_priv_data->sas_target->handle);

                sas_device_priv_data->block = 0;
                r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
                if (r)
                        sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
                            " failed with return(%d) for handle(0x%04x)\n",
                            r, sas_device_priv_data->sas_target->handle);
        }
}

/**
 * _scsih_ublock_io_all_device - unblock every device
 * @ioc: per adapter object
 *
 * change the device state from block to running
 */
static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct scsi_device *sdev;

        shost_for_each_device(sdev, ioc->shost) {
                sas_device_priv_data = sdev->hostdata;
                if (!sas_device_priv_data)
                        continue;
                /* only devices previously blocked by this driver */
                if (!sas_device_priv_data->block)
                        continue;

                dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
                        "device_running, handle(0x%04x)\n",
                    sas_device_priv_data->sas_target->handle));
                _scsih_internal_device_unblock(sdev, sas_device_priv_data);
        }
}


/**
 * _scsih_ublock_io_device - prepare device to be deleted
 * @ioc: per adapter object
 * @sas_address: sas address
 *
 * unblock then put device in offline state
 */
static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
{
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct scsi_device *sdev;

        shost_for_each_device(sdev, ioc->shost) {
                sas_device_priv_data = sdev->hostdata;
                if (!sas_device_priv_data)
                        continue;
                /* match every LUN of the target with this sas address */
                if (sas_device_priv_data->sas_target->sas_address
                    != sas_address)
                        continue;
                if (sas_device_priv_data->block)
                        _scsih_internal_device_unblock(sdev,
                                sas_device_priv_data);
        }
}

/**
 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * During device pull we need to appropriately set the sdev state.
 */
static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
{
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct scsi_device *sdev;

        shost_for_each_device(sdev, ioc->shost) {
                sas_device_priv_data = sdev->hostdata;
                if (!sas_device_priv_data)
                        continue;
                if (sas_device_priv_data->block)
                        continue;
                /* SES devices are exempted from blocking */
                if (sas_device_priv_data->ignore_delay_remove) {
                        sdev_printk(KERN_INFO, sdev,
                            "%s skip device_block for SES handle(0x%04x)\n",
                            __func__, sas_device_priv_data->sas_target->handle);
                        continue;
                }
                _scsih_internal_device_block(sdev, sas_device_priv_data);
        }
}

/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * During device pull we need to appropriately set the sdev state.
3381 */ 3382 static void 3383 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 3384 { 3385 struct MPT3SAS_DEVICE *sas_device_priv_data; 3386 struct scsi_device *sdev; 3387 struct _sas_device *sas_device; 3388 3389 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 3390 3391 shost_for_each_device(sdev, ioc->shost) { 3392 sas_device_priv_data = sdev->hostdata; 3393 if (!sas_device_priv_data) 3394 continue; 3395 if (sas_device_priv_data->sas_target->handle != handle) 3396 continue; 3397 if (sas_device_priv_data->block) 3398 continue; 3399 if (sas_device && sas_device->pend_sas_rphy_add) 3400 continue; 3401 if (sas_device_priv_data->ignore_delay_remove) { 3402 sdev_printk(KERN_INFO, sdev, 3403 "%s skip device_block for SES handle(0x%04x)\n", 3404 __func__, sas_device_priv_data->sas_target->handle); 3405 continue; 3406 } 3407 _scsih_internal_device_block(sdev, sas_device_priv_data); 3408 } 3409 3410 if (sas_device) 3411 sas_device_put(sas_device); 3412 } 3413 3414 /** 3415 * _scsih_block_io_to_children_attached_to_ex 3416 * @ioc: per adapter object 3417 * @sas_expander: the sas_device object 3418 * 3419 * This routine set sdev state to SDEV_BLOCK for all devices 3420 * attached to this expander. This function called when expander is 3421 * pulled. 
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
        struct _sas_node *sas_expander)
{
        struct _sas_port *mpt3sas_port;
        struct _sas_device *sas_device;
        struct _sas_node *expander_sibling;
        unsigned long flags;

        if (!sas_expander)
                return;

        /* pass 1: record every directly attached end device in the
         * blocking_handles bitmap (the caller blocks them afterwards) */
        list_for_each_entry(mpt3sas_port,
           &sas_expander->sas_port_list, port_list) {
                if (mpt3sas_port->remote_identify.device_type ==
                    SAS_END_DEVICE) {
                        spin_lock_irqsave(&ioc->sas_device_lock, flags);
                        sas_device = __mpt3sas_get_sdev_by_addr(ioc,
                            mpt3sas_port->remote_identify.sas_address);
                        if (sas_device) {
                                set_bit(sas_device->handle,
                                    ioc->blocking_handles);
                                sas_device_put(sas_device);
                        }
                        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
                }
        }

        /* pass 2: recurse into any child expanders */
        list_for_each_entry(mpt3sas_port,
           &sas_expander->sas_port_list, port_list) {

                if (mpt3sas_port->remote_identify.device_type ==
                    SAS_EDGE_EXPANDER_DEVICE ||
                    mpt3sas_port->remote_identify.device_type ==
                    SAS_FANOUT_EXPANDER_DEVICE) {
                        expander_sibling =
                            mpt3sas_scsih_expander_find_by_sas_address(
                            ioc, mpt3sas_port->remote_identify.sas_address);
                        _scsih_block_io_to_children_attached_to_ex(ioc,
                            expander_sibling);
                }
        }
}

/**
 * _scsih_block_io_to_children_attached_directly
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * direct attached during device pull.
3474 */ 3475 static void 3476 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, 3477 Mpi2EventDataSasTopologyChangeList_t *event_data) 3478 { 3479 int i; 3480 u16 handle; 3481 u16 reason_code; 3482 3483 for (i = 0; i < event_data->NumEntries; i++) { 3484 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); 3485 if (!handle) 3486 continue; 3487 reason_code = event_data->PHY[i].PhyStatus & 3488 MPI2_EVENT_SAS_TOPO_RC_MASK; 3489 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) 3490 _scsih_block_io_device(ioc, handle); 3491 } 3492 } 3493 3494 /** 3495 * _scsih_block_io_to_pcie_children_attached_directly 3496 * @ioc: per adapter object 3497 * @event_data: topology change event data 3498 * 3499 * This routine set sdev state to SDEV_BLOCK for all devices 3500 * direct attached during device pull/reconnect. 3501 */ 3502 static void 3503 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, 3504 Mpi26EventDataPCIeTopologyChangeList_t *event_data) 3505 { 3506 int i; 3507 u16 handle; 3508 u16 reason_code; 3509 3510 for (i = 0; i < event_data->NumEntries; i++) { 3511 handle = 3512 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle); 3513 if (!handle) 3514 continue; 3515 reason_code = event_data->PortEntry[i].PortStatus; 3516 if (reason_code == 3517 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING) 3518 _scsih_block_io_device(ioc, handle); 3519 } 3520 } 3521 /** 3522 * _scsih_tm_tr_send - send task management request 3523 * @ioc: per adapter object 3524 * @handle: device handle 3525 * Context: interrupt time. 3526 * 3527 * This code is to initiate the device removal handshake protocol 3528 * with controller firmware. This function will issue target reset 3529 * using high priority request queue. It will send a sas iounit 3530 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion. 3531 * 3532 * This is designed to send muliple task management request at the same 3533 * time to the fifo. 
 * If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        u16 smid;
        struct _sas_device *sas_device = NULL;
        struct _pcie_device *pcie_device = NULL;
        struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
        u64 sas_address = 0;
        unsigned long flags;
        struct _tr_list *delayed_tr;
        u32 ioc_state;
        u8 tr_method = 0;

        if (ioc->pci_error_recovery) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
                                    __func__, handle));
                return;
        }
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
                                    __func__, handle));
                return;
        }

        /* if PD, then return */
        if (test_bit(handle, ioc->pd_handles))
                return;

        clear_bit(handle, ioc->pend_os_device_add);

        /* look the handle up as a SAS device first; fall back to NVMe */
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
        sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
        if (sas_device && sas_device->starget &&
            sas_device->starget->hostdata) {
                sas_target_priv_data = sas_device->starget->hostdata;
                sas_target_priv_data->deleted = 1;
                sas_address = sas_device->sas_address;
        }
        spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
        if (!sas_device) {
                spin_lock_irqsave(&ioc->pcie_device_lock, flags);
                pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
                if (pcie_device && pcie_device->starget &&
                    pcie_device->starget->hostdata) {
                        sas_target_priv_data = pcie_device->starget->hostdata;
                        sas_target_priv_data->deleted = 1;
                        /* for NVMe devices the wwid stands in for the
                         * sas address */
                        sas_address = pcie_device->wwid;
                }
                spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
                if (pcie_device && (!ioc->tm_custom_handling))
                        tr_method =
                            MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
                else
                        tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
        }
        if (sas_target_priv_data) {
                dewtprintk(ioc,
                           ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
                                    handle, (u64)sas_address));
                if (sas_device) {
                        if (sas_device->enclosure_handle != 0)
                                dewtprintk(ioc,
                                           ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
                                                    (u64)sas_device->enclosure_logical_id,
                                                    sas_device->slot));
                        if (sas_device->connector_name[0] != '\0')
                                dewtprintk(ioc,
                                           ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
                                                    sas_device->enclosure_level,
                                                    sas_device->connector_name));
                } else if (pcie_device) {
                        if (pcie_device->enclosure_handle != 0)
                                dewtprintk(ioc,
                                           ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
                                                    (u64)pcie_device->enclosure_logical_id,
                                                    pcie_device->slot));
                        if (pcie_device->connector_name[0] != '\0')
                                dewtprintk(ioc,
                                           ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
                                                    pcie_device->enclosure_level,
                                                    pcie_device->connector_name));
                }
                /* unblock any sdevs so the deleted flag can fail their I/O */
                _scsih_ublock_io_device(ioc, sas_address);
                sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
        }

        smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
        if (!smid) {
                /* fifo full: park the handle and retry from a future
                 * completion (see _scsih_check_for_pending_tm) */
                delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
                if (!delayed_tr)
                        goto out;
                INIT_LIST_HEAD(&delayed_tr->list);
                delayed_tr->handle = handle;
                list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
                dewtprintk(ioc,
                           ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
                                    handle));
                goto out;
        }

        dewtprintk(ioc,
                   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
                            handle, smid, ioc->tm_tr_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
        mpi_request->MsgFlags = tr_method;
        set_bit(handle, ioc->device_remove_in_progress);
        mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

 out:
        if (sas_device)
                sas_device_put(sas_device);
        if (pcie_device)
                pcie_device_put(pcie_device);
}

/**
 * _scsih_tm_tr_complete -
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        u32 reply)
{
        u16 handle;
        Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
        Mpi2SCSITaskManagementReply_t *mpi_reply =
            mpt3sas_base_get_reply_virt_addr(ioc, reply);
        Mpi2SasIoUnitControlRequest_t *mpi_request;
        u16 smid_sas_ctrl;
        u32 ioc_state;
        struct _sc_list *delayed_sc;

        if (ioc->pci_error_recovery) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host in pci error recovery\n",
                                    __func__));
                return 1;
        }
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host is not operational\n",
                                    __func__));
                return 1;
        }
        if (unlikely(!mpi_reply)) {
                ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return 1;
        }
        /* sanity: reply must belong to the request frame at this smid */
        mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
        handle = le16_to_cpu(mpi_request_tm->DevHandle);
        if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
                dewtprintk(ioc,
                           ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
                                   handle,
                                   le16_to_cpu(mpi_reply->DevHandle), smid));
                return 0;
        }

        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
        dewtprintk(ioc,
                   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
                            handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
                            le32_to_cpu(mpi_reply->IOCLogInfo),
                            le32_to_cpu(mpi_reply->TerminationCount)));

        /* second half of the handshake: send MPI2_SAS_OP_REMOVE_DEVICE */
        smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
        if (!smid_sas_ctrl) {
                /* no free smid: park the handle and retry from a future
                 * completion (see mpt3sas_check_for_pending_internal_cmds) */
                delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
                if (!delayed_sc)
                        return _scsih_check_for_pending_tm(ioc, smid);
                INIT_LIST_HEAD(&delayed_sc->list);
                delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
                list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
                dewtprintk(ioc,
                           ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
                                    handle));
                return _scsih_check_for_pending_tm(ioc, smid);
        }

        dewtprintk(ioc,
                   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
                            handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
        memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
        mpi_request->DevHandle = mpi_request_tm->DevHandle;
        mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);

        return _scsih_check_for_pending_tm(ioc, smid);
}


/**
 * _scsih_sas_control_complete - completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the sas iounit control completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
3766 */ 3767 static u8 3768 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, 3769 u8 msix_index, u32 reply) 3770 { 3771 Mpi2SasIoUnitControlReply_t *mpi_reply = 3772 mpt3sas_base_get_reply_virt_addr(ioc, reply); 3773 3774 if (likely(mpi_reply)) { 3775 dewtprintk(ioc, 3776 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", 3777 le16_to_cpu(mpi_reply->DevHandle), smid, 3778 le16_to_cpu(mpi_reply->IOCStatus), 3779 le32_to_cpu(mpi_reply->IOCLogInfo))); 3780 if (le16_to_cpu(mpi_reply->IOCStatus) == 3781 MPI2_IOCSTATUS_SUCCESS) { 3782 clear_bit(le16_to_cpu(mpi_reply->DevHandle), 3783 ioc->device_remove_in_progress); 3784 } 3785 } else { 3786 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n", 3787 __FILE__, __LINE__, __func__); 3788 } 3789 return mpt3sas_check_for_pending_internal_cmds(ioc, smid); 3790 } 3791 3792 /** 3793 * _scsih_tm_tr_volume_send - send target reset request for volumes 3794 * @ioc: per adapter object 3795 * @handle: device handle 3796 * Context: interrupt time. 3797 * 3798 * This is designed to send muliple task management request at the same 3799 * time to the fifo. If the fifo is full, we will append the request, 3800 * and process it in a future completion. 
3801 */ 3802 static void 3803 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) 3804 { 3805 Mpi2SCSITaskManagementRequest_t *mpi_request; 3806 u16 smid; 3807 struct _tr_list *delayed_tr; 3808 3809 if (ioc->pci_error_recovery) { 3810 dewtprintk(ioc, 3811 ioc_info(ioc, "%s: host reset in progress!\n", 3812 __func__)); 3813 return; 3814 } 3815 3816 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); 3817 if (!smid) { 3818 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); 3819 if (!delayed_tr) 3820 return; 3821 INIT_LIST_HEAD(&delayed_tr->list); 3822 delayed_tr->handle = handle; 3823 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); 3824 dewtprintk(ioc, 3825 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n", 3826 handle)); 3827 return; 3828 } 3829 3830 dewtprintk(ioc, 3831 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", 3832 handle, smid, ioc->tm_tr_volume_cb_idx)); 3833 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 3834 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); 3835 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 3836 mpi_request->DevHandle = cpu_to_le16(handle); 3837 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 3838 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); 3839 } 3840 3841 /** 3842 * _scsih_tm_volume_tr_complete - target reset completion 3843 * @ioc: per adapter object 3844 * @smid: system request message index 3845 * @msix_index: MSIX table index supplied by the OS 3846 * @reply: reply message frame(lower 32bit addr) 3847 * Context: interrupt time. 3848 * 3849 * Return: 1 meaning mf should be freed from _base_interrupt 3850 * 0 means the mf is freed from this function. 
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        u8 msix_index, u32 reply)
{
        u16 handle;
        Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
        Mpi2SCSITaskManagementReply_t *mpi_reply =
            mpt3sas_base_get_reply_virt_addr(ioc, reply);

        if (ioc->shost_recovery || ioc->pci_error_recovery) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host reset in progress!\n",
                                    __func__));
                return 1;
        }
        if (unlikely(!mpi_reply)) {
                ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
                        __FILE__, __LINE__, __func__);
                return 1;
        }

        /* sanity: reply must belong to the request frame at this smid */
        mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
        handle = le16_to_cpu(mpi_request_tm->DevHandle);
        if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
                dewtprintk(ioc,
                           ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
                                   handle, le16_to_cpu(mpi_reply->DevHandle),
                                   smid));
                return 0;
        }

        dewtprintk(ioc,
                   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
                            handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
                            le32_to_cpu(mpi_reply->IOCLogInfo),
                            le32_to_cpu(mpi_reply->TerminationCount)));

        /* feed the next delayed target reset, if any */
        return _scsih_check_for_pending_tm(ioc, smid);
}

/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
        U32 event_context)
{
        Mpi2EventAckRequest_t *ack_request;
        int i = smid - ioc->internal_smid;
        unsigned long flags;

        /* Without releasing the smid just update the
         * call back index and reuse the same smid for
         * processing this delayed request
         */
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

        dewtprintk(ioc,
                   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
                            le16_to_cpu(event), smid, ioc->base_cb_idx));
        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
        /* NOTE(review): @event/@event_context appear to already be in
         * little-endian wire format (stored unswapped, printed via
         * le16_to_cpu) — confirm against the callers. */
        ack_request->Event = event;
        ack_request->EventContext = event_context;
        ack_request->VF_ID = 0;  /* TODO */
        ack_request->VP_ID = 0;
        mpt3sas_base_put_smid_default(ioc, smid);
}

/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *    sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
        u16 smid, u16 handle)
{
        Mpi2SasIoUnitControlRequest_t *mpi_request;
        u32 ioc_state;
        int i = smid - ioc->internal_smid;
        unsigned long flags;

        if (ioc->remove_host) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host has been removed\n",
                                    __func__));
                return;
        } else if (ioc->pci_error_recovery) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host in pci error recovery\n",
                                    __func__));
                return;
        }
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                dewtprintk(ioc,
                           ioc_info(ioc, "%s: host is not operational\n",
                                    __func__));
                return;
        }

        /* Without releasing the smid just update the
         * call back index and reuse the same smid for
         * processing this delayed request
         */
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

        dewtprintk(ioc,
                   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
                            handle, smid, ioc->tm_sas_control_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
        mpi_request->DevHandle = cpu_to_le16(handle);
        mpt3sas_base_put_smid_default(ioc, smid);
}

/**
 * _scsih_check_for_pending_internal_cmds - check for pending internal messages
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Context: Executed in interrupt context
 *
 * This will check delayed internal messages list, and process the
 * next request.
3995 * 3996 * Return: 1 meaning mf should be freed from _base_interrupt 3997 * 0 means the mf is freed from this function. 3998 */ 3999 u8 4000 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4001 { 4002 struct _sc_list *delayed_sc; 4003 struct _event_ack_list *delayed_event_ack; 4004 4005 if (!list_empty(&ioc->delayed_event_ack_list)) { 4006 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next, 4007 struct _event_ack_list, list); 4008 _scsih_issue_delayed_event_ack(ioc, smid, 4009 delayed_event_ack->Event, delayed_event_ack->EventContext); 4010 list_del(&delayed_event_ack->list); 4011 kfree(delayed_event_ack); 4012 return 0; 4013 } 4014 4015 if (!list_empty(&ioc->delayed_sc_list)) { 4016 delayed_sc = list_entry(ioc->delayed_sc_list.next, 4017 struct _sc_list, list); 4018 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid, 4019 delayed_sc->handle); 4020 list_del(&delayed_sc->list); 4021 kfree(delayed_sc); 4022 return 0; 4023 } 4024 return 1; 4025 } 4026 4027 /** 4028 * _scsih_check_for_pending_tm - check for pending task management 4029 * @ioc: per adapter object 4030 * @smid: system request message index 4031 * 4032 * This will check delayed target reset list, and feed the 4033 * next reqeust. 4034 * 4035 * Return: 1 meaning mf should be freed from _base_interrupt 4036 * 0 means the mf is freed from this function. 
4037 */ 4038 static u8 4039 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) 4040 { 4041 struct _tr_list *delayed_tr; 4042 4043 if (!list_empty(&ioc->delayed_tr_volume_list)) { 4044 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, 4045 struct _tr_list, list); 4046 mpt3sas_base_free_smid(ioc, smid); 4047 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); 4048 list_del(&delayed_tr->list); 4049 kfree(delayed_tr); 4050 return 0; 4051 } 4052 4053 if (!list_empty(&ioc->delayed_tr_list)) { 4054 delayed_tr = list_entry(ioc->delayed_tr_list.next, 4055 struct _tr_list, list); 4056 mpt3sas_base_free_smid(ioc, smid); 4057 _scsih_tm_tr_send(ioc, delayed_tr->handle); 4058 list_del(&delayed_tr->list); 4059 kfree(delayed_tr); 4060 return 0; 4061 } 4062 4063 return 1; 4064 } 4065 4066 /** 4067 * _scsih_check_topo_delete_events - sanity check on topo events 4068 * @ioc: per adapter object 4069 * @event_data: the event data payload 4070 * 4071 * This routine added to better handle cable breaker. 4072 * 4073 * This handles the case where driver receives multiple expander 4074 * add and delete events in a single shot. When there is a delete event 4075 * the routine will void any pending add events waiting in the event queue. 
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Fire an immediate target reset for every attached device this
	 * event reports as not responding.
	 */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* A handle below the HBA's phy count means the devices are attached
	 * directly to the controller, not behind an expander.
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* Drain the blocking_handles bitmap populated above; the
		 * loop terminates when test_and_clear_bit() sees a clear
		 * bit (including when find_first_bit() found nothing and
		 * returned MaxDevHandle).
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* Only a full "not responding" (delete) event voids pending adds. */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
		    fw_event->event_data;
		/* Void queued add/responding events for the expander that
		 * this delete event just reported gone.
		 */
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 * events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot. When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* Fire an immediate target reset for every attached PCIe device
	 * reported as not responding in this event.
	 */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
		    le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* No switch handle: devices hang directly off the controller. */
	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);
		return;
	}
	/* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
	    == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
	    (event_data->SwitchStatus ==
	    MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
		    ioc, event_data);

	/* NOTE(review): the SwitchStatus comparisons below use the SAS
	 * topology constants (MPI2_EVENT_SAS_TOPO_ES_*) against a PCIe
	 * SwitchStatus field, while the checks above use the PCIe
	 * MPI26_EVENT_PCIE_TOPO_SS_* constants — confirm against the MPI
	 * 2.6 headers that the numeric values coincide.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data =
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    fw_event->event_data;
		/* Void queued add/responding events for the switch that this
		 * delete event just reported gone.
		 */
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
			    switch_handle) {
				dewtprintk(ioc,
				    ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/**
 * _scsih_set_volume_delete_flag - setting volume delete flag
 * @ioc: per adapter object
 * @handle: device handle
 *
 * This returns nothing.
 */
static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	/* Only flag volumes that are still exposed to the SCSI midlayer. */
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data =
		    raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc,
		    ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
			     handle, (u64)raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
 * @handle: input handle
 * @a: handle for volume a
 * @b: handle for volume b
 *
 * IR firmware only supports two raid volumes. The purpose of this
 * routine is to set the volume handle in either a or b. When the given
 * input handle is non-zero, or when a and b have not been set before.
 */
static void
_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
{
	/* Ignore the null handle and duplicates of either stored slot. */
	if (!handle || handle == *a || handle == *b)
		return;
	if (!*a)
		*a = handle;
	else if (!*b)
		*b = handle;
}

/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs.
 * This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	a = 0;
	b = 0;

	/* WarpDrive hides IR functionality; nothing to do. */
	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			/* IR firmware supports at most two volumes; record
			 * the handle in slot a or b for the resets below.
			 */
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Send the volume resets before queuing any PD resets so the PD
	 * resets for those volumes can be delayed until completion.
	 */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* PD belongs to a volume being reset above: queue
			 * its reset to run after the volume reset completes.
			 * GFP_ATOMIC because we are in interrupt context.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
			    ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				     handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}


/**
 * _scsih_check_volume_delete_events - set delete flag for volumes
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This will handle the case when the cable connected to entire volume is
 * pulled. We will take care of setting the deleted flag so normal IO will
 * not be sent.
 */
static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrVolume_t *event_data)
{
	u32 state;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;
	state = le32_to_cpu(event_data->NewValue);
	/* Missing or failed volumes are as good as deleted for IO purposes. */
	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
	    MPI2_RAID_VOL_STATE_FAILED)
		_scsih_set_volume_delete_flag(ioc,
		    le16_to_cpu(event_data->VolDevHandle));
}

/**
 * _scsih_temp_threshold_events - display temperature threshold exceeded events
 * @ioc: per adapter object
 * @event_data: the temp threshold event data
 * Context: interrupt time.
4383 */ 4384 static void 4385 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, 4386 Mpi2EventDataTemperature_t *event_data) 4387 { 4388 if (ioc->temp_sensors_count >= event_data->SensorNum) { 4389 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", 4390 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ", 4391 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ", 4392 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ", 4393 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ", 4394 event_data->SensorNum); 4395 ioc_err(ioc, "Current Temp In Celsius: %d\n", 4396 event_data->CurrentTemperature); 4397 } 4398 } 4399 4400 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) 4401 { 4402 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; 4403 4404 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) 4405 return 0; 4406 4407 if (pending) 4408 return test_and_set_bit(0, &priv->ata_command_pending); 4409 4410 clear_bit(0, &priv->ata_command_pending); 4411 return 0; 4412 } 4413 4414 /** 4415 * _scsih_flush_running_cmds - completing outstanding commands. 4416 * @ioc: per adapter object 4417 * 4418 * The flushing out of all pending scmd commands following host reset, 4419 * where all IO is dropped to the floor. 
4420 */ 4421 static void 4422 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) 4423 { 4424 struct scsi_cmnd *scmd; 4425 struct scsiio_tracker *st; 4426 u16 smid; 4427 int count = 0; 4428 4429 for (smid = 1; smid <= ioc->scsiio_depth; smid++) { 4430 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid); 4431 if (!scmd) 4432 continue; 4433 count++; 4434 _scsih_set_satl_pending(scmd, false); 4435 st = scsi_cmd_priv(scmd); 4436 mpt3sas_base_clear_st(ioc, st); 4437 scsi_dma_unmap(scmd); 4438 if (ioc->pci_error_recovery || ioc->remove_host) 4439 scmd->result = DID_NO_CONNECT << 16; 4440 else 4441 scmd->result = DID_RESET << 16; 4442 scmd->scsi_done(scmd); 4443 } 4444 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count)); 4445 } 4446 4447 /** 4448 * _scsih_setup_eedp - setup MPI request for EEDP transfer 4449 * @ioc: per adapter object 4450 * @scmd: pointer to scsi command object 4451 * @mpi_request: pointer to the SCSI_IO request message frame 4452 * 4453 * Supporting protection 1 and 3. 
4454 */ 4455 static void 4456 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, 4457 Mpi25SCSIIORequest_t *mpi_request) 4458 { 4459 u16 eedp_flags; 4460 unsigned char prot_op = scsi_get_prot_op(scmd); 4461 unsigned char prot_type = scsi_get_prot_type(scmd); 4462 Mpi25SCSIIORequest_t *mpi_request_3v = 4463 (Mpi25SCSIIORequest_t *)mpi_request; 4464 4465 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) 4466 return; 4467 4468 if (prot_op == SCSI_PROT_READ_STRIP) 4469 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; 4470 else if (prot_op == SCSI_PROT_WRITE_INSERT) 4471 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; 4472 else 4473 return; 4474 4475 switch (prot_type) { 4476 case SCSI_PROT_DIF_TYPE1: 4477 case SCSI_PROT_DIF_TYPE2: 4478 4479 /* 4480 * enable ref/guard checking 4481 * auto increment ref tag 4482 */ 4483 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 4484 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 4485 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 4486 mpi_request->CDB.EEDP32.PrimaryReferenceTag = 4487 cpu_to_be32(t10_pi_ref_tag(scmd->request)); 4488 break; 4489 4490 case SCSI_PROT_DIF_TYPE3: 4491 4492 /* 4493 * enable guard checking 4494 */ 4495 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; 4496 4497 break; 4498 } 4499 4500 mpi_request_3v->EEDPBlockSize = 4501 cpu_to_le16(scmd->device->sector_size); 4502 4503 if (ioc->is_gen35_ioc) 4504 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE; 4505 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); 4506 } 4507 4508 /** 4509 * _scsih_eedp_error_handling - return sense code for EEDP errors 4510 * @scmd: pointer to scsi command object 4511 * @ioc_status: ioc status 4512 */ 4513 static void 4514 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) 4515 { 4516 u8 ascq; 4517 4518 switch (ioc_status) { 4519 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: 4520 ascq = 0x01; 4521 break; 4522 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: 4523 ascq = 0x02; 4524 break; 4525 case 
MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: 4526 ascq = 0x03; 4527 break; 4528 default: 4529 ascq = 0x00; 4530 break; 4531 } 4532 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, 4533 ascq); 4534 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) | 4535 SAM_STAT_CHECK_CONDITION; 4536 } 4537 4538 /** 4539 * scsih_qcmd - main scsi request entry point 4540 * @shost: SCSI host pointer 4541 * @scmd: pointer to scsi command object 4542 * 4543 * The callback index is set inside `ioc->scsi_io_cb_idx`. 4544 * 4545 * Return: 0 on success. If there's a failure, return either: 4546 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or 4547 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 4548 */ 4549 static int 4550 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 4551 { 4552 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 4553 struct MPT3SAS_DEVICE *sas_device_priv_data; 4554 struct MPT3SAS_TARGET *sas_target_priv_data; 4555 struct _raid_device *raid_device; 4556 struct request *rq = scmd->request; 4557 int class; 4558 Mpi25SCSIIORequest_t *mpi_request; 4559 struct _pcie_device *pcie_device = NULL; 4560 u32 mpi_control; 4561 u16 smid; 4562 u16 handle; 4563 4564 if (ioc->logging_level & MPT_DEBUG_SCSI) 4565 scsi_print_command(scmd); 4566 4567 sas_device_priv_data = scmd->device->hostdata; 4568 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 4569 scmd->result = DID_NO_CONNECT << 16; 4570 scmd->scsi_done(scmd); 4571 return 0; 4572 } 4573 4574 if (ioc->pci_error_recovery || ioc->remove_host) { 4575 scmd->result = DID_NO_CONNECT << 16; 4576 scmd->scsi_done(scmd); 4577 return 0; 4578 } 4579 4580 sas_target_priv_data = sas_device_priv_data->sas_target; 4581 4582 /* invalid device handle */ 4583 handle = sas_target_priv_data->handle; 4584 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { 4585 scmd->result = DID_NO_CONNECT << 16; 4586 scmd->scsi_done(scmd); 4587 return 0; 4588 } 4589 4590 4591 if (ioc->shost_recovery || 
ioc->ioc_link_reset_in_progress) { 4592 /* host recovery or link resets sent via IOCTLs */ 4593 return SCSI_MLQUEUE_HOST_BUSY; 4594 } else if (sas_target_priv_data->deleted) { 4595 /* device has been deleted */ 4596 scmd->result = DID_NO_CONNECT << 16; 4597 scmd->scsi_done(scmd); 4598 return 0; 4599 } else if (sas_target_priv_data->tm_busy || 4600 sas_device_priv_data->block) { 4601 /* device busy with task management */ 4602 return SCSI_MLQUEUE_DEVICE_BUSY; 4603 } 4604 4605 /* 4606 * Bug work around for firmware SATL handling. The loop 4607 * is based on atomic operations and ensures consistency 4608 * since we're lockless at this point 4609 */ 4610 do { 4611 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { 4612 scmd->result = SAM_STAT_BUSY; 4613 scmd->scsi_done(scmd); 4614 return 0; 4615 } 4616 } while (_scsih_set_satl_pending(scmd, true)); 4617 4618 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4619 mpi_control = MPI2_SCSIIO_CONTROL_READ; 4620 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4621 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 4622 else 4623 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 4624 4625 /* set tags */ 4626 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 4627 /* NCQ Prio supported, make sure control indicated high priority */ 4628 if (sas_device_priv_data->ncq_prio_enable) { 4629 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4630 if (class == IOPRIO_CLASS_RT) 4631 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT; 4632 } 4633 /* Make sure Device is not raid volume. 4634 * We do not expose raid functionality to upper layer for warpdrive. 
4635 */ 4636 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)) 4637 && !scsih_is_nvme(&scmd->device->sdev_gendev)) 4638 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) 4639 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; 4640 4641 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); 4642 if (!smid) { 4643 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 4644 _scsih_set_satl_pending(scmd, false); 4645 goto out; 4646 } 4647 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4648 memset(mpi_request, 0, ioc->request_sz); 4649 _scsih_setup_eedp(ioc, scmd, mpi_request); 4650 4651 if (scmd->cmd_len == 32) 4652 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 4653 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 4654 if (sas_device_priv_data->sas_target->flags & 4655 MPT_TARGET_FLAGS_RAID_COMPONENT) 4656 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 4657 else 4658 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 4659 mpi_request->DevHandle = cpu_to_le16(handle); 4660 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 4661 mpi_request->Control = cpu_to_le32(mpi_control); 4662 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); 4663 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; 4664 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 4665 mpi_request->SenseBufferLowAddress = 4666 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 4667 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4; 4668 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) 4669 mpi_request->LUN); 4670 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); 4671 4672 if (mpi_request->DataLength) { 4673 pcie_device = sas_target_priv_data->pcie_dev; 4674 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) { 4675 mpt3sas_base_free_smid(ioc, smid); 4676 _scsih_set_satl_pending(scmd, false); 4677 goto out; 4678 } 4679 } else 4680 ioc->build_zero_len_sge(ioc, 
&mpi_request->SGL); 4681 4682 raid_device = sas_target_priv_data->raid_device; 4683 if (raid_device && raid_device->direct_io_enabled) 4684 mpt3sas_setup_direct_io(ioc, scmd, 4685 raid_device, mpi_request); 4686 4687 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { 4688 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { 4689 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 4690 MPI25_SCSIIO_IOFLAGS_FAST_PATH); 4691 mpt3sas_base_put_smid_fast_path(ioc, smid, handle); 4692 } else 4693 ioc->put_smid_scsi_io(ioc, smid, 4694 le16_to_cpu(mpi_request->DevHandle)); 4695 } else 4696 mpt3sas_base_put_smid_default(ioc, smid); 4697 return 0; 4698 4699 out: 4700 return SCSI_MLQUEUE_HOST_BUSY; 4701 } 4702 4703 /** 4704 * _scsih_normalize_sense - normalize descriptor and fixed format sense data 4705 * @sense_buffer: sense data returned by target 4706 * @data: normalized skey/asc/ascq 4707 */ 4708 static void 4709 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) 4710 { 4711 if ((sense_buffer[0] & 0x7F) >= 0x72) { 4712 /* descriptor format */ 4713 data->skey = sense_buffer[1] & 0x0F; 4714 data->asc = sense_buffer[2]; 4715 data->ascq = sense_buffer[3]; 4716 } else { 4717 /* fixed format */ 4718 data->skey = sense_buffer[2] & 0x0F; 4719 data->asc = sense_buffer[12]; 4720 data->ascq = sense_buffer[13]; 4721 } 4722 } 4723 4724 /** 4725 * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request 4726 * @ioc: per adapter object 4727 * @scmd: pointer to scsi command object 4728 * @mpi_reply: reply mf payload returned from firmware 4729 * @smid: ? 
 *
 * scsi_status - SCSI Status code returned from target device
 * scsi_state - state info associated with SCSI_IO determined by ioc
 * ioc_status - ioc supplied status info
 */
static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* scsi_state description is built by strcat into ioc->tmp_string. */
	char *desc_scsi_state = ioc->tmp_string;
	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	char *device_str = NULL;

	if (!priv_target)
		return;
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	/* Loginfo 0x31170000 is suppressed from the log. */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc_ioc_state = "insufficient power";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	/* Accumulate one token per set scsi_state bit; fall back to a
	 * single space when no bits are set.
	 */
	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
		strcat(desc_scsi_state, "no status ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
		strcat(desc_scsi_state, "autosense failed ");
	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
		strcat(desc_scsi_state, "autosense valid ");

	scsi_print_command(scmd);

	/* Identify the target: RAID volume, NVMe (PCIe) device, or SAS. */
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
			 device_str, (u64)priv_target->sas_address);
	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
				 (u64)pcie_device->wwid, pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
					 (u64)pcie_device->enclosure_logical_id,
					 pcie_device->slot);
			if (pcie_device->connector_name[0])
				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
					 pcie_device->enclosure_level,
					 pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
	} else {
		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
				 (u64)sas_device->sas_address, sas_device->phy);

			_scsih_display_enclosure_chassis_info(ioc, sas_device,
			    NULL, NULL);

			sas_device_put(sas_device);
		}
	}

	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
		 le16_to_cpu(mpi_reply->DevHandle),
		 desc_ioc_state, ioc_status, smid);
	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
		 le16_to_cpu(mpi_reply->TaskTag),
		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
			 data.skey, data.asc, data.ascq,
			 le32_to_cpu(mpi_reply->SenseCount));
	}
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
		response_bytes = (u8 *)&response_info;
		_scsih_response_code(ioc, response_bytes[0]);
	}
}

/**
 * _scsih_turn_on_pfa_led - illuminate PFA LED
 * @ioc: per adapter object
 * @handle: device handle
 * Context: process
 */
static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	/* Address the slot directly by device handle. */
	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus =
	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
	mpi_request.DevHandle = cpu_to_le16(handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	/* Remember the LED state so it can be turned off at device removal;
	 * recorded once the SEP request was issued, even if the reply below
	 * carries a non-zero IOCStatus.
	 */
	sas_device->pfa_led_on = 1;

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
		    ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
			     le16_to_cpu(mpi_reply.IOCStatus),
			     le32_to_cpu(mpi_reply.IOCLogInfo)));
		goto out;
	}
 out:
	sas_device_put(sas_device);
}

/**
 * _scsih_turn_off_pfa_led - turn off Fault LED
 * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to turned off
 * Context: process
 */
static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	Mpi2SepReply_t mpi_reply;
	Mpi2SepRequest_t mpi_request;

	/* Address the slot via enclosure handle + slot number; DevHandle is
	 * left 0 here (enclosure/slot addressing mode).
	 */
	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
	mpi_request.SlotStatus = 0;
	mpi_request.Slot = cpu_to_le16(sas_device->slot);
	mpi_request.DevHandle = 0;
	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
	    &mpi_request)) != 0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
		dewtprintk(ioc,
		    ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
			     le16_to_cpu(mpi_reply.IOCStatus),
			     le32_to_cpu(mpi_reply.IOCLogInfo)));
		return;
	}
}

/**
 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
 * @ioc:
 *	per adapter object
 * @handle: device handle
 * Context: interrupt.
 */
static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct fw_event_work *fw_event;

	/* Runs in interrupt context: the SEP request must sleep, so queue
	 * a MPT3SAS_TURN_ON_PFA_LED event for the firmware-event worker.
	 */
	fw_event = alloc_fw_event_work(0);
	if (!fw_event)
		return;
	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
	fw_event->device_handle = handle;
	fw_event->ioc = ioc;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
}

/**
 * _scsih_smart_predicted_fault - process smart errors
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt.
 *
 * Called when a command completed with SMART predictive-failure sense
 * (ASC 0x5D): optionally fires the PFA LED event (IBM subsystems only)
 * and inserts a synthesized SAS_DEVICE_STATUS_CHANGE event into the
 * driver event log.  RAID members/volumes are skipped.
 */
static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	Mpi2EventNotificationReply_t *event_reply;
	Mpi2EventDataSasDeviceStatusChange_t *event_data;
	struct _sas_device *sas_device;
	ssize_t sz;
	unsigned long flags;

	/* only handle non-raid devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	/* __mpt3sas_get_sdev_by_handle takes a reference (lock held) */
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		goto out_unlock;

	starget = sas_device->starget;
	sas_target_priv_data = starget->hostdata;

	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
		goto out_unlock;

	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);

	/* insert into event log */
	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
	    sizeof(Mpi2EventDataSasDeviceStatusChange_t);
	/* NOTE(review): GFP_KERNEL in interrupt context looks suspect —
	 * confirm against caller context; upstream history may differ.
	 */
	event_reply = kzalloc(sz, GFP_KERNEL);
	if (!event_reply) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	event_reply->Event =
	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	event_reply->MsgLength = sz/4;
	event_reply->EventDataLength =
	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
	    event_reply->EventData;
	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
	event_data->ASC = 0x5D;	/* failure prediction threshold exceeded */
	event_data->DevHandle = cpu_to_le16(handle);
	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
	kfree(event_reply);
 out:
	/* sas_device may be NULL when arriving via out_unlock */
	if (sas_device)
		sas_device_put(sas_device);
	return;

 out_unlock:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	goto out;
}

/**
 * _scsih_io_done - scsi request callback
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Callback handler when using _scsih_qcmd.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	Mpi25SCSIIORequest_t *mpi_request;
	Mpi2SCSIIOReply_t *mpi_reply;
	struct scsi_cmnd *scmd;
	struct scsiio_tracker *st;
	u16 ioc_status;
	u32 xfer_cnt;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd == NULL)
		return 1;

	_scsih_set_satl_pending(scmd, false);

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* no reply frame: treat the command as completed successfully */
	if (mpi_reply == NULL) {
		scmd->result = DID_OK << 16;
		goto out;
	}

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	     sas_device_priv_data->sas_target->deleted) {
		scmd->result = DID_NO_CONNECT << 16;
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	/*
	 * WARPDRIVE: If direct_io is set then it is directIO,
	 * the failed direct I/O should be redirected to volume
	 */
	st = scsi_cmd_priv(scmd);
	if (st->direct_io &&
	    ((ioc_status & MPI2_IOCSTATUS_MASK)
	     != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
		/* resubmit the same smid to the volume handle; the smid is
		 * NOT freed here, hence return 0
		 */
		st->direct_io = 0;
		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
		mpi_request->DevHandle =
		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
		ioc->put_smid_scsi_io(ioc, smid,
		    sas_device_priv_data->sas_target->handle);
		return 0;
	}
	/* turning off TLR */
	scsi_state = mpi_reply->SCSIState;
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		response_code =
		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
	/* one-shot check on the first completion for this device */
	if (!sas_device_priv_data->tlr_snoop_check) {
		sas_device_priv_data->tlr_snoop_check++;
		if ((!ioc->is_warpdrive &&
		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
		    !scsih_is_nvme(&scmd->device->sdev_gendev))
		    && sas_is_tlr_enabled(scmd->device) &&
		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
			sas_disable_tlr(scmd->device);
			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
		}
	}

	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	/* log_info must be read before ioc_status is masked below */
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
	else
		log_info = 0;
	ioc_status &= MPI2_IOCSTATUS_MASK;
	scsi_status = mpi_reply->SCSIStatus;

	/* a zero-length underrun carrying one of these SCSI statuses is
	 * really a plain status-only completion: report it as success so
	 * the SCSI status reaches the midlayer
	 */
	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
		ioc_status = MPI2_IOCSTATUS_SUCCESS;
	}

	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
		struct sense_info data;
		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
		    smid);
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
		    le32_to_cpu(mpi_reply->SenseCount));
		memcpy(scmd->sense_buffer, sense_data, sz);
		_scsih_normalize_sense(scmd->sense_buffer, &data);
		/* failure prediction threshold exceeded */
		if (data.asc == 0x5D)
			_scsih_smart_predicted_fault(ioc,
			    le16_to_cpu(mpi_reply->DevHandle));
		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);

		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
		    ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
		    (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
		    (scmd->sense_buffer[2] == HARDWARE_ERROR)))
			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
	}
	/* map firmware IOCStatus to a midlayer result */
	switch (ioc_status) {
	case MPI2_IOCSTATUS_BUSY:
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;

	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		if (sas_device_priv_data->block) {
			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
			goto out;
		}
		/* 0x31110630: loginfo for a wrong-entity error; offline the
		 * device after a few retries, otherwise retry with an
		 * expected unit-attention
		 */
		if (log_info == 0x31110630) {
			if (scmd->retries > 2) {
				scmd->result = DID_NO_CONNECT << 16;
				scsi_device_set_state(scmd->device,
				    SDEV_OFFLINE);
			} else {
				scmd->result = DID_SOFT_ERROR << 16;
				scmd->device->expecting_cc_ua = 1;
			}
			break;
		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
			scmd->result = DID_RESET << 16;
			break;
		} else if ((scmd->device->channel == RAID_CHANNEL) &&
		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
			scmd->result = DID_RESET << 16;
			break;
		}
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;

		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
			break;

		if (xfer_cnt < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
			/* synthesize an ILLEGAL REQUEST / invalid-command
			 * check condition for a zero-byte REPORT LUNS
			 */
			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
			scmd->result = (DRIVER_SENSE << 24) |
			    SAM_STAT_CHECK_CONDITION;
			scmd->sense_buffer[0] = 0x70;
			scmd->sense_buffer[2] = ILLEGAL_REQUEST;
			scmd->sense_buffer[12] = 0x20;
			scmd->sense_buffer[13] = 0;
		}
		break;

	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		/* fall through */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (response_code ==
		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		_scsih_eedp_error_handling(scmd, ioc_status);
		break;

	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
	case MPI2_IOCSTATUS_INVALID_SGL:
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
	case MPI2_IOCSTATUS_INVALID_FIELD:
	case MPI2_IOCSTATUS_INVALID_STATE:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;

	}

	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);

 out:

	scsi_dma_unmap(scmd);
	mpt3sas_base_free_smid(ioc, smid);
	scmd->scsi_done(scmd);
	return 0;
}

/**
 * _scsih_sas_host_refresh - refreshing sas host object contents
 * @ioc: per adapter object
 * Context: user
 *
 * During port enable, fw will send topology events for every device.
Its 5356 * possible that the handles may change from the previous setting, so this 5357 * code keeping handles updating if changed. 5358 */ 5359 static void 5360 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) 5361 { 5362 u16 sz; 5363 u16 ioc_status; 5364 int i; 5365 Mpi2ConfigReply_t mpi_reply; 5366 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 5367 u16 attached_handle; 5368 u8 link_rate; 5369 5370 dtmprintk(ioc, 5371 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n", 5372 (u64)ioc->sas_hba.sas_address)); 5373 5374 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys 5375 * sizeof(Mpi2SasIOUnit0PhyData_t)); 5376 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 5377 if (!sas_iounit_pg0) { 5378 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5379 __FILE__, __LINE__, __func__); 5380 return; 5381 } 5382 5383 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 5384 sas_iounit_pg0, sz)) != 0) 5385 goto out; 5386 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 5387 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 5388 goto out; 5389 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 5390 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; 5391 if (i == 0) 5392 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> 5393 PhyData[0].ControllerDevHandle); 5394 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; 5395 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. 
5396 AttachedDevHandle); 5397 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) 5398 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; 5399 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, 5400 attached_handle, i, link_rate); 5401 } 5402 out: 5403 kfree(sas_iounit_pg0); 5404 } 5405 5406 /** 5407 * _scsih_sas_host_add - create sas host object 5408 * @ioc: per adapter object 5409 * 5410 * Creating host side data object, stored in ioc->sas_hba 5411 */ 5412 static void 5413 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) 5414 { 5415 int i; 5416 Mpi2ConfigReply_t mpi_reply; 5417 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; 5418 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; 5419 Mpi2SasPhyPage0_t phy_pg0; 5420 Mpi2SasDevicePage0_t sas_device_pg0; 5421 Mpi2SasEnclosurePage0_t enclosure_pg0; 5422 u16 ioc_status; 5423 u16 sz; 5424 u8 device_missing_delay; 5425 u8 num_phys; 5426 5427 mpt3sas_config_get_number_hba_phys(ioc, &num_phys); 5428 if (!num_phys) { 5429 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5430 __FILE__, __LINE__, __func__); 5431 return; 5432 } 5433 ioc->sas_hba.phy = kcalloc(num_phys, 5434 sizeof(struct _sas_phy), GFP_KERNEL); 5435 if (!ioc->sas_hba.phy) { 5436 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5437 __FILE__, __LINE__, __func__); 5438 goto out; 5439 } 5440 ioc->sas_hba.num_phys = num_phys; 5441 5442 /* sas_iounit page 0 */ 5443 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * 5444 sizeof(Mpi2SasIOUnit0PhyData_t)); 5445 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); 5446 if (!sas_iounit_pg0) { 5447 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5448 __FILE__, __LINE__, __func__); 5449 return; 5450 } 5451 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, 5452 sas_iounit_pg0, sz))) { 5453 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5454 __FILE__, __LINE__, __func__); 5455 goto out; 5456 } 5457 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5458 MPI2_IOCSTATUS_MASK; 5459 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5460 
ioc_err(ioc, "failure at %s:%d/%s()!\n", 5461 __FILE__, __LINE__, __func__); 5462 goto out; 5463 } 5464 5465 /* sas_iounit page 1 */ 5466 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * 5467 sizeof(Mpi2SasIOUnit1PhyData_t)); 5468 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); 5469 if (!sas_iounit_pg1) { 5470 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5471 __FILE__, __LINE__, __func__); 5472 goto out; 5473 } 5474 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, 5475 sas_iounit_pg1, sz))) { 5476 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5477 __FILE__, __LINE__, __func__); 5478 goto out; 5479 } 5480 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5481 MPI2_IOCSTATUS_MASK; 5482 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5483 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5484 __FILE__, __LINE__, __func__); 5485 goto out; 5486 } 5487 5488 ioc->io_missing_delay = 5489 sas_iounit_pg1->IODeviceMissingDelay; 5490 device_missing_delay = 5491 sas_iounit_pg1->ReportDeviceMissingDelay; 5492 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) 5493 ioc->device_missing_delay = (device_missing_delay & 5494 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; 5495 else 5496 ioc->device_missing_delay = device_missing_delay & 5497 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; 5498 5499 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; 5500 for (i = 0; i < ioc->sas_hba.num_phys ; i++) { 5501 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, 5502 i))) { 5503 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5504 __FILE__, __LINE__, __func__); 5505 goto out; 5506 } 5507 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5508 MPI2_IOCSTATUS_MASK; 5509 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5510 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5511 __FILE__, __LINE__, __func__); 5512 goto out; 5513 } 5514 5515 if (i == 0) 5516 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> 5517 PhyData[0].ControllerDevHandle); 5518 ioc->sas_hba.phy[i].handle = 
ioc->sas_hba.handle; 5519 ioc->sas_hba.phy[i].phy_id = i; 5520 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], 5521 phy_pg0, ioc->sas_hba.parent_dev); 5522 } 5523 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5524 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { 5525 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5526 __FILE__, __LINE__, __func__); 5527 goto out; 5528 } 5529 ioc->sas_hba.enclosure_handle = 5530 le16_to_cpu(sas_device_pg0.EnclosureHandle); 5531 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 5532 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 5533 ioc->sas_hba.handle, 5534 (u64)ioc->sas_hba.sas_address, 5535 ioc->sas_hba.num_phys); 5536 5537 if (ioc->sas_hba.enclosure_handle) { 5538 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, 5539 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 5540 ioc->sas_hba.enclosure_handle))) 5541 ioc->sas_hba.enclosure_logical_id = 5542 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 5543 } 5544 5545 out: 5546 kfree(sas_iounit_pg1); 5547 kfree(sas_iounit_pg0); 5548 } 5549 5550 /** 5551 * _scsih_expander_add - creating expander object 5552 * @ioc: per adapter object 5553 * @handle: expander handle 5554 * 5555 * Creating expander object, stored in ioc->sas_expander_list. 5556 * 5557 * Return: 0 for success, else error. 
5558 */ 5559 static int 5560 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) 5561 { 5562 struct _sas_node *sas_expander; 5563 struct _enclosure_node *enclosure_dev; 5564 Mpi2ConfigReply_t mpi_reply; 5565 Mpi2ExpanderPage0_t expander_pg0; 5566 Mpi2ExpanderPage1_t expander_pg1; 5567 u32 ioc_status; 5568 u16 parent_handle; 5569 u64 sas_address, sas_address_parent = 0; 5570 int i; 5571 unsigned long flags; 5572 struct _sas_port *mpt3sas_port = NULL; 5573 5574 int rc = 0; 5575 5576 if (!handle) 5577 return -1; 5578 5579 if (ioc->shost_recovery || ioc->pci_error_recovery) 5580 return -1; 5581 5582 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 5583 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { 5584 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5585 __FILE__, __LINE__, __func__); 5586 return -1; 5587 } 5588 5589 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5590 MPI2_IOCSTATUS_MASK; 5591 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5592 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5593 __FILE__, __LINE__, __func__); 5594 return -1; 5595 } 5596 5597 /* handle out of order topology events */ 5598 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); 5599 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) 5600 != 0) { 5601 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5602 __FILE__, __LINE__, __func__); 5603 return -1; 5604 } 5605 if (sas_address_parent != ioc->sas_hba.sas_address) { 5606 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5607 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 5608 sas_address_parent); 5609 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5610 if (!sas_expander) { 5611 rc = _scsih_expander_add(ioc, parent_handle); 5612 if (rc != 0) 5613 return rc; 5614 } 5615 } 5616 5617 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5618 sas_address = le64_to_cpu(expander_pg0.SASAddress); 5619 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 5620 sas_address); 5621 
spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5622 5623 if (sas_expander) 5624 return 0; 5625 5626 sas_expander = kzalloc(sizeof(struct _sas_node), 5627 GFP_KERNEL); 5628 if (!sas_expander) { 5629 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5630 __FILE__, __LINE__, __func__); 5631 return -1; 5632 } 5633 5634 sas_expander->handle = handle; 5635 sas_expander->num_phys = expander_pg0.NumPhys; 5636 sas_expander->sas_address_parent = sas_address_parent; 5637 sas_expander->sas_address = sas_address; 5638 5639 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", 5640 handle, parent_handle, 5641 (u64)sas_expander->sas_address, sas_expander->num_phys); 5642 5643 if (!sas_expander->num_phys) 5644 goto out_fail; 5645 sas_expander->phy = kcalloc(sas_expander->num_phys, 5646 sizeof(struct _sas_phy), GFP_KERNEL); 5647 if (!sas_expander->phy) { 5648 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5649 __FILE__, __LINE__, __func__); 5650 rc = -1; 5651 goto out_fail; 5652 } 5653 5654 INIT_LIST_HEAD(&sas_expander->sas_port_list); 5655 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, 5656 sas_address_parent); 5657 if (!mpt3sas_port) { 5658 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5659 __FILE__, __LINE__, __func__); 5660 rc = -1; 5661 goto out_fail; 5662 } 5663 sas_expander->parent_dev = &mpt3sas_port->rphy->dev; 5664 5665 for (i = 0 ; i < sas_expander->num_phys ; i++) { 5666 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 5667 &expander_pg1, i, handle))) { 5668 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5669 __FILE__, __LINE__, __func__); 5670 rc = -1; 5671 goto out_fail; 5672 } 5673 sas_expander->phy[i].handle = handle; 5674 sas_expander->phy[i].phy_id = i; 5675 5676 if ((mpt3sas_transport_add_expander_phy(ioc, 5677 &sas_expander->phy[i], expander_pg1, 5678 sas_expander->parent_dev))) { 5679 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5680 __FILE__, __LINE__, __func__); 5681 rc = -1; 5682 goto out_fail; 5683 } 5684 } 5685 5686 if 
(sas_expander->enclosure_handle) { 5687 enclosure_dev = 5688 mpt3sas_scsih_enclosure_find_by_handle(ioc, 5689 sas_expander->enclosure_handle); 5690 if (enclosure_dev) 5691 sas_expander->enclosure_logical_id = 5692 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 5693 } 5694 5695 _scsih_expander_node_add(ioc, sas_expander); 5696 return 0; 5697 5698 out_fail: 5699 5700 if (mpt3sas_port) 5701 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, 5702 sas_address_parent); 5703 kfree(sas_expander); 5704 return rc; 5705 } 5706 5707 /** 5708 * mpt3sas_expander_remove - removing expander object 5709 * @ioc: per adapter object 5710 * @sas_address: expander sas_address 5711 */ 5712 void 5713 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) 5714 { 5715 struct _sas_node *sas_expander; 5716 unsigned long flags; 5717 5718 if (ioc->shost_recovery) 5719 return; 5720 5721 spin_lock_irqsave(&ioc->sas_node_lock, flags); 5722 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, 5723 sas_address); 5724 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 5725 if (sas_expander) 5726 _scsih_expander_node_remove(ioc, sas_expander); 5727 } 5728 5729 /** 5730 * _scsih_done - internal SCSI_IO callback handler. 5731 * @ioc: per adapter object 5732 * @smid: system request message index 5733 * @msix_index: MSIX table index supplied by the OS 5734 * @reply: reply message frame(lower 32bit addr) 5735 * 5736 * Callback handler when sending internal generated SCSI_IO. 5737 * The callback index passed is `ioc->scsih_cb_idx` 5738 * 5739 * Return: 1 meaning mf should be freed from _base_interrupt 5740 * 0 means the mf is freed from this function. 
5741 */ 5742 static u8 5743 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) 5744 { 5745 MPI2DefaultReply_t *mpi_reply; 5746 5747 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 5748 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED) 5749 return 1; 5750 if (ioc->scsih_cmds.smid != smid) 5751 return 1; 5752 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE; 5753 if (mpi_reply) { 5754 memcpy(ioc->scsih_cmds.reply, mpi_reply, 5755 mpi_reply->MsgLength*4); 5756 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID; 5757 } 5758 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING; 5759 complete(&ioc->scsih_cmds.done); 5760 return 1; 5761 } 5762 5763 5764 5765 5766 #define MPT3_MAX_LUNS (255) 5767 5768 5769 /** 5770 * _scsih_check_access_status - check access flags 5771 * @ioc: per adapter object 5772 * @sas_address: sas address 5773 * @handle: sas device handle 5774 * @access_status: errors returned during discovery of the device 5775 * 5776 * Return: 0 for success, else failure 5777 */ 5778 static u8 5779 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, 5780 u16 handle, u8 access_status) 5781 { 5782 u8 rc = 1; 5783 char *desc = NULL; 5784 5785 switch (access_status) { 5786 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS: 5787 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: 5788 rc = 0; 5789 break; 5790 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: 5791 desc = "sata capability failed"; 5792 break; 5793 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: 5794 desc = "sata affiliation conflict"; 5795 break; 5796 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: 5797 desc = "route not addressable"; 5798 break; 5799 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: 5800 desc = "smp error not addressable"; 5801 break; 5802 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: 5803 desc = "device blocked"; 5804 break; 5805 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: 5806 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: 5807 
case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: 5808 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG: 5809 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: 5810 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: 5811 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: 5812 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: 5813 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: 5814 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: 5815 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: 5816 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX: 5817 desc = "sata initialization failed"; 5818 break; 5819 default: 5820 desc = "unknown"; 5821 break; 5822 } 5823 5824 if (!rc) 5825 return 0; 5826 5827 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n", 5828 desc, (u64)sas_address, handle); 5829 return rc; 5830 } 5831 5832 /** 5833 * _scsih_check_device - checking device responsiveness 5834 * @ioc: per adapter object 5835 * @parent_sas_address: sas address of parent expander or sas host 5836 * @handle: attached device handle 5837 * @phy_number: phy number 5838 * @link_rate: new link rate 5839 */ 5840 static void 5841 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, 5842 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) 5843 { 5844 Mpi2ConfigReply_t mpi_reply; 5845 Mpi2SasDevicePage0_t sas_device_pg0; 5846 struct _sas_device *sas_device; 5847 struct _enclosure_node *enclosure_dev = NULL; 5848 u32 ioc_status; 5849 unsigned long flags; 5850 u64 sas_address; 5851 struct scsi_target *starget; 5852 struct MPT3SAS_TARGET *sas_target_priv_data; 5853 u32 device_info; 5854 5855 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5856 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) 5857 return; 5858 5859 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 5860 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 5861 return; 5862 5863 /* wide port handling ~ we need only handle device once for the phy that 5864 * is matched in sas device page zero 5865 */ 5866 
if (phy_number != sas_device_pg0.PhyNum) 5867 return; 5868 5869 /* check if this is end device */ 5870 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 5871 if (!(_scsih_is_end_device(device_info))) 5872 return; 5873 5874 spin_lock_irqsave(&ioc->sas_device_lock, flags); 5875 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 5876 sas_device = __mpt3sas_get_sdev_by_addr(ioc, 5877 sas_address); 5878 5879 if (!sas_device) 5880 goto out_unlock; 5881 5882 if (unlikely(sas_device->handle != handle)) { 5883 starget = sas_device->starget; 5884 sas_target_priv_data = starget->hostdata; 5885 starget_printk(KERN_INFO, starget, 5886 "handle changed from(0x%04x) to (0x%04x)!!!\n", 5887 sas_device->handle, handle); 5888 sas_target_priv_data->handle = handle; 5889 sas_device->handle = handle; 5890 if (le16_to_cpu(sas_device_pg0.Flags) & 5891 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 5892 sas_device->enclosure_level = 5893 sas_device_pg0.EnclosureLevel; 5894 memcpy(sas_device->connector_name, 5895 sas_device_pg0.ConnectorName, 4); 5896 sas_device->connector_name[4] = '\0'; 5897 } else { 5898 sas_device->enclosure_level = 0; 5899 sas_device->connector_name[0] = '\0'; 5900 } 5901 5902 sas_device->enclosure_handle = 5903 le16_to_cpu(sas_device_pg0.EnclosureHandle); 5904 sas_device->is_chassis_slot_valid = 0; 5905 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc, 5906 sas_device->enclosure_handle); 5907 if (enclosure_dev) { 5908 sas_device->enclosure_logical_id = 5909 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 5910 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 5911 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 5912 sas_device->is_chassis_slot_valid = 1; 5913 sas_device->chassis_slot = 5914 enclosure_dev->pg0.ChassisSlot; 5915 } 5916 } 5917 } 5918 5919 /* check if device is present */ 5920 if (!(le16_to_cpu(sas_device_pg0.Flags) & 5921 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 5922 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n", 5923 handle); 
5924 goto out_unlock; 5925 } 5926 5927 /* check if there were any issues with discovery */ 5928 if (_scsih_check_access_status(ioc, sas_address, handle, 5929 sas_device_pg0.AccessStatus)) 5930 goto out_unlock; 5931 5932 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5933 _scsih_ublock_io_device(ioc, sas_address); 5934 5935 if (sas_device) 5936 sas_device_put(sas_device); 5937 return; 5938 5939 out_unlock: 5940 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 5941 if (sas_device) 5942 sas_device_put(sas_device); 5943 } 5944 5945 /** 5946 * _scsih_add_device - creating sas device object 5947 * @ioc: per adapter object 5948 * @handle: sas device handle 5949 * @phy_num: phy number end device attached to 5950 * @is_pd: is this hidden raid component 5951 * 5952 * Creating end device object, stored in ioc->sas_device_list. 5953 * 5954 * Return: 0 for success, non-zero for failure. 5955 */ 5956 static int 5957 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, 5958 u8 is_pd) 5959 { 5960 Mpi2ConfigReply_t mpi_reply; 5961 Mpi2SasDevicePage0_t sas_device_pg0; 5962 struct _sas_device *sas_device; 5963 struct _enclosure_node *enclosure_dev = NULL; 5964 u32 ioc_status; 5965 u64 sas_address; 5966 u32 device_info; 5967 5968 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 5969 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 5970 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5971 __FILE__, __LINE__, __func__); 5972 return -1; 5973 } 5974 5975 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 5976 MPI2_IOCSTATUS_MASK; 5977 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 5978 ioc_err(ioc, "failure at %s:%d/%s()!\n", 5979 __FILE__, __LINE__, __func__); 5980 return -1; 5981 } 5982 5983 /* check if this is end device */ 5984 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 5985 if (!(_scsih_is_end_device(device_info))) 5986 return -1; 5987 set_bit(handle, ioc->pend_os_device_add); 5988 sas_address = le64_to_cpu(sas_device_pg0.SASAddress); 
5989 5990 /* check if device is present */ 5991 if (!(le16_to_cpu(sas_device_pg0.Flags) & 5992 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { 5993 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 5994 handle); 5995 return -1; 5996 } 5997 5998 /* check if there were any issues with discovery */ 5999 if (_scsih_check_access_status(ioc, sas_address, handle, 6000 sas_device_pg0.AccessStatus)) 6001 return -1; 6002 6003 sas_device = mpt3sas_get_sdev_by_addr(ioc, 6004 sas_address); 6005 if (sas_device) { 6006 clear_bit(handle, ioc->pend_os_device_add); 6007 sas_device_put(sas_device); 6008 return -1; 6009 } 6010 6011 if (sas_device_pg0.EnclosureHandle) { 6012 enclosure_dev = 6013 mpt3sas_scsih_enclosure_find_by_handle(ioc, 6014 le16_to_cpu(sas_device_pg0.EnclosureHandle)); 6015 if (enclosure_dev == NULL) 6016 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n", 6017 sas_device_pg0.EnclosureHandle); 6018 } 6019 6020 sas_device = kzalloc(sizeof(struct _sas_device), 6021 GFP_KERNEL); 6022 if (!sas_device) { 6023 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6024 __FILE__, __LINE__, __func__); 6025 return 0; 6026 } 6027 6028 kref_init(&sas_device->refcount); 6029 sas_device->handle = handle; 6030 if (_scsih_get_sas_address(ioc, 6031 le16_to_cpu(sas_device_pg0.ParentDevHandle), 6032 &sas_device->sas_address_parent) != 0) 6033 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6034 __FILE__, __LINE__, __func__); 6035 sas_device->enclosure_handle = 6036 le16_to_cpu(sas_device_pg0.EnclosureHandle); 6037 if (sas_device->enclosure_handle != 0) 6038 sas_device->slot = 6039 le16_to_cpu(sas_device_pg0.Slot); 6040 sas_device->device_info = device_info; 6041 sas_device->sas_address = sas_address; 6042 sas_device->phy = sas_device_pg0.PhyNum; 6043 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & 6044 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
1 : 0; 6045 6046 if (le16_to_cpu(sas_device_pg0.Flags) 6047 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 6048 sas_device->enclosure_level = 6049 sas_device_pg0.EnclosureLevel; 6050 memcpy(sas_device->connector_name, 6051 sas_device_pg0.ConnectorName, 4); 6052 sas_device->connector_name[4] = '\0'; 6053 } else { 6054 sas_device->enclosure_level = 0; 6055 sas_device->connector_name[0] = '\0'; 6056 } 6057 /* get enclosure_logical_id & chassis_slot*/ 6058 sas_device->is_chassis_slot_valid = 0; 6059 if (enclosure_dev) { 6060 sas_device->enclosure_logical_id = 6061 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 6062 if (le16_to_cpu(enclosure_dev->pg0.Flags) & 6063 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { 6064 sas_device->is_chassis_slot_valid = 1; 6065 sas_device->chassis_slot = 6066 enclosure_dev->pg0.ChassisSlot; 6067 } 6068 } 6069 6070 /* get device name */ 6071 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 6072 6073 if (ioc->wait_for_discovery_to_complete) 6074 _scsih_sas_device_init_add(ioc, sas_device); 6075 else 6076 _scsih_sas_device_add(ioc, sas_device); 6077 6078 sas_device_put(sas_device); 6079 return 0; 6080 } 6081 6082 /** 6083 * _scsih_remove_device - removing sas device object 6084 * @ioc: per adapter object 6085 * @sas_device: the sas_device object 6086 */ 6087 static void 6088 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, 6089 struct _sas_device *sas_device) 6090 { 6091 struct MPT3SAS_TARGET *sas_target_priv_data; 6092 6093 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && 6094 (sas_device->pfa_led_on)) { 6095 _scsih_turn_off_pfa_led(ioc, sas_device); 6096 sas_device->pfa_led_on = 0; 6097 } 6098 6099 dewtprintk(ioc, 6100 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", 6101 __func__, 6102 sas_device->handle, (u64)sas_device->sas_address)); 6103 6104 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 6105 NULL, NULL)); 6106 6107 if (sas_device->starget && 
sas_device->starget->hostdata) { 6108 sas_target_priv_data = sas_device->starget->hostdata; 6109 sas_target_priv_data->deleted = 1; 6110 _scsih_ublock_io_device(ioc, sas_device->sas_address); 6111 sas_target_priv_data->handle = 6112 MPT3SAS_INVALID_DEVICE_HANDLE; 6113 } 6114 6115 if (!ioc->hide_drives) 6116 mpt3sas_transport_port_remove(ioc, 6117 sas_device->sas_address, 6118 sas_device->sas_address_parent); 6119 6120 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n", 6121 sas_device->handle, (u64)sas_device->sas_address); 6122 6123 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); 6124 6125 dewtprintk(ioc, 6126 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", 6127 __func__, 6128 sas_device->handle, (u64)sas_device->sas_address)); 6129 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device, 6130 NULL, NULL)); 6131 } 6132 6133 /** 6134 * _scsih_sas_topology_change_event_debug - debug for topology event 6135 * @ioc: per adapter object 6136 * @event_data: event data payload 6137 * Context: user. 
 */
static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	/* decode the expander-level status for the summary line */
	switch (event_data->ExpStatus) {
	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
		status_str = "add";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
	case 0:
		status_str = "responding";
		break;
	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
	    "start_phy(%02d), count(%d)\n",
	    le16_to_cpu(event_data->ExpanderDevHandle),
	    le16_to_cpu(event_data->EnclosureHandle),
	    event_data->StartPhyNum, event_data->NumEntries);
	/* one line per phy entry carried in the event */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		phy_number = event_data->StartPhyNum + i;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
			status_str = "target add";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
			status_str = "link rate change";
			break;
		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* LinkRate packs current rate in the high nibble and the
		 * previous rate in the low nibble
		 */
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
		    handle, status_str, link_rate, prev_link_rate);

	}
}

/**
 * _scsih_sas_topology_change_event - handle topology changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 */
static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 parent_handle, handle;
	u16 reason_code;
	u8 phy_number, max_phys;
	struct _sas_node *sas_expander;
	u64 sas_address;
	unsigned long flags;
	u8 link_rate, prev_link_rate;
	Mpi2EventDataSasTopologyChangeList_t *event_data =
		(Mpi2EventDataSasTopologyChangeList_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the host is resetting or being removed */
	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
		return 0;

	if (!ioc->sas_hba.num_phys)
		_scsih_sas_host_add(ioc);
	else
		_scsih_sas_host_refresh(ioc);

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
		return 0;
	}

	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);

	/* handle expander add */
	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
		if (_scsih_expander_add(ioc, parent_handle) != 0)
			return 0;

	/* resolve the parent (expander or direct-attached HBA phy) under
	 * the node lock; snapshot sas_address/max_phys for use after the
	 * lock is dropped
	 */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
	    parent_handle);
	if (sas_expander) {
		sas_address = sas_expander->sas_address;
		max_phys = sas_expander->num_phys;
	} else if (parent_handle < ioc->sas_hba.num_phys) {
		sas_address = ioc->sas_hba.sas_address;
		max_phys =
		    ioc->sas_hba.num_phys;
	} else {
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check abort conditions on every iteration; the ignore
		 * flag may be set asynchronously by a later event
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring expander event\n"));
			return 0;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return 0;
		phy_number = event_data->StartPhyNum + i;
		if (phy_number >= max_phys)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		/* vacant phys are skipped except for target-removal */
		if ((event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
			continue;
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		link_rate = event_data->PHY[i].LinkRate >> 4;
		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
		switch (reason_code) {
		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:

			if (ioc->shost_recovery)
				break;

			if (link_rate == prev_link_rate)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate);

			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
				break;

			_scsih_check_device(ioc, sas_address, handle,
			    phy_number, link_rate);

			/* device already exposed to the OS: nothing more to
			 * do; otherwise fall through and treat the phy change
			 * as a target add
			 */
			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			/* fall through */

		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:

			if (ioc->shost_recovery)
				break;

			mpt3sas_transport_update_links(ioc, sas_address,
			    handle, phy_number, link_rate);

			_scsih_add_device(ioc, handle, phy_number, 0);

			break;
		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:

			_scsih_device_remove_by_handle(ioc, handle);
			break;
		}
	}

	/* handle expander removal */
	if
	    (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
	    sas_expander)
		mpt3sas_expander_remove(ioc, sas_address);

	return 0;
}

/**
 * _scsih_sas_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
	char *reason_str = NULL;

	/* map the firmware reason code to a human readable string */
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
		reason_str = "smart data";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
		reason_str = "unsupported device discovered";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
		reason_str = "internal device reset";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
		reason_str = "internal task abort set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
		reason_str = "internal clear task set";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
		reason_str = "internal query task";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
		reason_str = "sata init failure";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
		reason_str = "internal device reset complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
		reason_str = "internal task abort complete";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
		reason_str = "internal async notification";
		break;
	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality";
		break;
	case
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
		reason_str = "expander reduced functionality complete";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}
	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
		 reason_str, le16_to_cpu(event_data->DevHandle),
		 (u64)le64_to_cpu(event_data->SASAddress),
		 le16_to_cpu(event_data->TaskTag));
	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
			event_data->ASC, event_data->ASCQ);
	pr_cont("\n");
}

/**
 * _scsih_sas_device_status_change_event - handle device status change
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct MPT3SAS_TARGET *target_priv_data;
	struct _sas_device *sas_device;
	u64 sas_address;
	unsigned long flags;
	Mpi2EventDataSasDeviceStatusChange_t *event_data =
		(Mpi2EventDataSasDeviceStatusChange_t *)
		fw_event->event_data;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_device_status_change_event_debug(ioc,
		    event_data);

	/* In MPI Revision K (0xC), the internal device reset complete was
	 * implemented, so avoid setting tm_busy flag for older firmware.
	 */
	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
		return;

	/* only the internal-device-reset start/complete pair drives the
	 * tm_busy state; every other reason code is ignored here
	 */
	if (event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
	    event_data->ReasonCode !=
	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
		return;

	/* tm_busy is flipped under the sas_device_lock */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_address = le64_to_cpu(event_data->SASAddress);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address);

	if (!sas_device || !sas_device->starget)
		goto out;

	target_priv_data = sas_device->starget->hostdata;
	if (!target_priv_data)
		goto out;

	/* busy while the internal reset is in flight; clear on complete */
	if (event_data->ReasonCode ==
	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
		target_priv_data->tm_busy = 1;
	else
		target_priv_data->tm_busy = 0;

out:
	if (sas_device)
		sas_device_put(sas_device);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}


/**
 * _scsih_check_pcie_access_status - check access flags
 * @ioc: per adapter object
 * @wwid: wwid
 * @handle: sas device handle
 * @access_status: errors returned during discovery of the device
 *
 * Return: 0 for success, else failure
 */
static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle, u8 access_status)
{
	u8 rc = 1;	/* assume failure unless proven accessible */
	char *desc = NULL;

	switch (access_status) {
	/* the only two access states that allow the device to be used */
	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
		rc = 0;
		break;
	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
		desc = "PCIe device capability failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
		desc = "PCIe device blocked";
		break;
	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
		desc = "PCIe device mem space access failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
		desc = "PCIe device unsupported";
		break;
	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
		desc = "PCIe device MSIx Required";
		break;
	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
		desc = "PCIe device init fail max";
		break;
	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
		desc = "PCIe device status unknown";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
		desc = "nvme ready timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
		desc = "nvme device configuration unsupported";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
		desc = "nvme identify failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
		desc = "nvme qconfig failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
		desc = "nvme qcreation failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
		desc = "nvme eventcfg failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
		desc = "nvme get feature stat failed";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
		desc = "nvme idle timeout";
		break;
	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
		desc = "nvme failure status";
		break;
	default:
		/* unrecognized status: log the raw code and fail */
		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
			access_status, (u64)wwid, handle);
		return rc;
	}

	if (!rc)
		return rc;

	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
		 desc, (u64)wwid, handle);
	return rc;
}

/**
 * _scsih_pcie_device_remove_from_sml - removing pcie device
 * from SML and free up associated memory
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* mark the target deleted and release any pended I/O before the
	 * target is torn down
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	if (pcie_device->starget)
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* the pcie_device object itself is freed by its kref release;
	 * only the driver-allocated serial number string is freed here
	 */
	kfree(pcie_device->serial_number);
}


/**
 * _scsih_pcie_check_device - checking device responsiveness
 * @ioc: per adapter object
 * @handle: attached device handle
 */
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi26PCIeDevicePage0_t pcie_device_pg0;
	u32 ioc_status;
	struct _pcie_device *pcie_device;
	u64 wwid;
	unsigned long flags;
	struct scsi_target *starget;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	u32 device_info;

	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
		return;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return;

	/* check if this is end device */
	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
	if (!(_scsih_is_nvme_device(device_info)))
		return;

	wwid = le64_to_cpu(pcie_device_pg0.WWID);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);

	if (!pcie_device) {
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return;
	}

	/* firmware may re-assign the handle across resets; resync our
	 * cached handle and enclosure info with page 0
	 */
	if (unlikely(pcie_device->handle != handle)) {
		starget = pcie_device->starget;
		sas_target_priv_data = starget->hostdata;
		starget_printk(KERN_INFO, starget,
		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
		    pcie_device->handle, handle);
		sas_target_priv_data->handle = handle;
		pcie_device->handle = handle;

		if (le32_to_cpu(pcie_device_pg0.Flags) &
		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
			pcie_device->enclosure_level =
			    pcie_device_pg0.EnclosureLevel;
memcpy(&pcie_device->connector_name[0], 6676 &pcie_device_pg0.ConnectorName[0], 4); 6677 } else { 6678 pcie_device->enclosure_level = 0; 6679 pcie_device->connector_name[0] = '\0'; 6680 } 6681 } 6682 6683 /* check if device is present */ 6684 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 6685 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 6686 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n", 6687 handle); 6688 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 6689 pcie_device_put(pcie_device); 6690 return; 6691 } 6692 6693 /* check if there were any issues with discovery */ 6694 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 6695 pcie_device_pg0.AccessStatus)) { 6696 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 6697 pcie_device_put(pcie_device); 6698 return; 6699 } 6700 6701 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 6702 pcie_device_put(pcie_device); 6703 6704 _scsih_ublock_io_device(ioc, wwid); 6705 6706 return; 6707 } 6708 6709 /** 6710 * _scsih_pcie_add_device - creating pcie device object 6711 * @ioc: per adapter object 6712 * @handle: pcie device handle 6713 * 6714 * Creating end device object, stored in ioc->pcie_device_list. 
6715 * 6716 * Return: 1 means queue the event later, 0 means complete the event 6717 */ 6718 static int 6719 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) 6720 { 6721 Mpi26PCIeDevicePage0_t pcie_device_pg0; 6722 Mpi26PCIeDevicePage2_t pcie_device_pg2; 6723 Mpi2ConfigReply_t mpi_reply; 6724 struct _pcie_device *pcie_device; 6725 struct _enclosure_node *enclosure_dev; 6726 u32 ioc_status; 6727 u64 wwid; 6728 6729 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 6730 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) { 6731 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6732 __FILE__, __LINE__, __func__); 6733 return 0; 6734 } 6735 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 6736 MPI2_IOCSTATUS_MASK; 6737 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6738 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6739 __FILE__, __LINE__, __func__); 6740 return 0; 6741 } 6742 6743 set_bit(handle, ioc->pend_os_device_add); 6744 wwid = le64_to_cpu(pcie_device_pg0.WWID); 6745 6746 /* check if device is present */ 6747 if (!(le32_to_cpu(pcie_device_pg0.Flags) & 6748 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) { 6749 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n", 6750 handle); 6751 return 0; 6752 } 6753 6754 /* check if there were any issues with discovery */ 6755 if (_scsih_check_pcie_access_status(ioc, wwid, handle, 6756 pcie_device_pg0.AccessStatus)) 6757 return 0; 6758 6759 if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo)))) 6760 return 0; 6761 6762 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); 6763 if (pcie_device) { 6764 clear_bit(handle, ioc->pend_os_device_add); 6765 pcie_device_put(pcie_device); 6766 return 0; 6767 } 6768 6769 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); 6770 if (!pcie_device) { 6771 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6772 __FILE__, __LINE__, __func__); 6773 return 0; 6774 } 6775 6776 kref_init(&pcie_device->refcount); 6777 pcie_device->id = ioc->pcie_target_id++; 6778 
pcie_device->channel = PCIE_CHANNEL; 6779 pcie_device->handle = handle; 6780 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 6781 pcie_device->wwid = wwid; 6782 pcie_device->port_num = pcie_device_pg0.PortNum; 6783 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & 6784 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; 6785 6786 pcie_device->enclosure_handle = 6787 le16_to_cpu(pcie_device_pg0.EnclosureHandle); 6788 if (pcie_device->enclosure_handle != 0) 6789 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot); 6790 6791 if (le32_to_cpu(pcie_device_pg0.Flags) & 6792 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) { 6793 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel; 6794 memcpy(&pcie_device->connector_name[0], 6795 &pcie_device_pg0.ConnectorName[0], 4); 6796 } else { 6797 pcie_device->enclosure_level = 0; 6798 pcie_device->connector_name[0] = '\0'; 6799 } 6800 6801 /* get enclosure_logical_id */ 6802 if (pcie_device->enclosure_handle) { 6803 enclosure_dev = 6804 mpt3sas_scsih_enclosure_find_by_handle(ioc, 6805 pcie_device->enclosure_handle); 6806 if (enclosure_dev) 6807 pcie_device->enclosure_logical_id = 6808 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); 6809 } 6810 /* TODO -- Add device name once FW supports it */ 6811 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, 6812 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) { 6813 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6814 __FILE__, __LINE__, __func__); 6815 kfree(pcie_device); 6816 return 0; 6817 } 6818 6819 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 6820 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 6821 ioc_err(ioc, "failure at %s:%d/%s()!\n", 6822 __FILE__, __LINE__, __func__); 6823 kfree(pcie_device); 6824 return 0; 6825 } 6826 pcie_device->nvme_mdts = 6827 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); 6828 if (pcie_device_pg2.ControllerResetTO) 6829 pcie_device->reset_timeout = 6830 
		    pcie_device_pg2.ControllerResetTO;
	else
		/* firmware reported no reset timeout: use 30 (default) */
		pcie_device->reset_timeout = 30;

	/* during initial discovery, devices are queued on the init list;
	 * afterwards they are added (and exposed to the SML) directly
	 */
	if (ioc->wait_for_discovery_to_complete)
		_scsih_pcie_device_init_add(ioc, pcie_device);
	else
		_scsih_pcie_device_add(ioc, pcie_device);

	pcie_device_put(pcie_device);
	return 0;
}

/**
 * _scsih_pcie_topology_change_event_debug - debug for topology
 * event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	/* decode the switch-level status for the summary line */
	switch (event_data->SwitchStatus) {
	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
		status_str = "add";
		break;
	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
	case 0:
		status_str = "responding";
		break;
	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
		"start_port(%02d), count(%d)\n",
		le16_to_cpu(event_data->SwitchDevHandle),
		le16_to_cpu(event_data->EnclosureHandle),
		event_data->StartPortNum, event_data->NumEntries);
	/* one line per port entry carried in the event */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		port_number = event_data->StartPortNum + i;
		reason_code = event_data->PortEntry[i].PortStatus;
		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			status_str = "target add";
			break;
		case
		    MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link rate change";
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->PortEntry[i].CurrentPortInfo &
			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
			handle, status_str, link_rate, prev_link_rate);
	}
}

/**
 * _scsih_pcie_topology_change_event - handle PCIe topology
 * changes
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* nothing to do while the host is resetting or being removed */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* re-check abort conditions on every iteration; the ignore
		 * flag may be set asynchronously by a later event
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime. Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			/* device already tracked: nothing to convert */
			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* rewrite this entry's status in-place so it reads
			 * as a device-add, then fall into the add path
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			/* fall through */
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}

/**
 * _scsih_pcie_device_status_change_event_debug - debug for device event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
7041 */ 7042 static void 7043 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 7044 Mpi26EventDataPCIeDeviceStatusChange_t *event_data) 7045 { 7046 char *reason_str = NULL; 7047 7048 switch (event_data->ReasonCode) { 7049 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA: 7050 reason_str = "smart data"; 7051 break; 7052 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED: 7053 reason_str = "unsupported device discovered"; 7054 break; 7055 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET: 7056 reason_str = "internal device reset"; 7057 break; 7058 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL: 7059 reason_str = "internal task abort"; 7060 break; 7061 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL: 7062 reason_str = "internal task abort set"; 7063 break; 7064 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: 7065 reason_str = "internal clear task set"; 7066 break; 7067 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL: 7068 reason_str = "internal query task"; 7069 break; 7070 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE: 7071 reason_str = "device init failure"; 7072 break; 7073 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET: 7074 reason_str = "internal device reset complete"; 7075 break; 7076 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: 7077 reason_str = "internal task abort complete"; 7078 break; 7079 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION: 7080 reason_str = "internal async notification"; 7081 break; 7082 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED: 7083 reason_str = "pcie hot reset failed"; 7084 break; 7085 default: 7086 reason_str = "unknown reason"; 7087 break; 7088 } 7089 7090 ioc_info(ioc, "PCIE device status change: (%s)\n" 7091 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)", 7092 reason_str, le16_to_cpu(event_data->DevHandle), 7093 (u64)le64_to_cpu(event_data->WWID), 7094 le16_to_cpu(event_data->TaskTag)); 7095 if (event_data->ReasonCode == 
MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA) 7096 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n", 7097 event_data->ASC, event_data->ASCQ); 7098 pr_cont("\n"); 7099 } 7100 7101 /** 7102 * _scsih_pcie_device_status_change_event - handle device status 7103 * change 7104 * @ioc: per adapter object 7105 * @fw_event: The fw_event_work object 7106 * Context: user. 7107 */ 7108 static void 7109 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, 7110 struct fw_event_work *fw_event) 7111 { 7112 struct MPT3SAS_TARGET *target_priv_data; 7113 struct _pcie_device *pcie_device; 7114 u64 wwid; 7115 unsigned long flags; 7116 Mpi26EventDataPCIeDeviceStatusChange_t *event_data = 7117 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data; 7118 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 7119 _scsih_pcie_device_status_change_event_debug(ioc, 7120 event_data); 7121 7122 if (event_data->ReasonCode != 7123 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET && 7124 event_data->ReasonCode != 7125 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET) 7126 return; 7127 7128 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 7129 wwid = le64_to_cpu(event_data->WWID); 7130 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid); 7131 7132 if (!pcie_device || !pcie_device->starget) 7133 goto out; 7134 7135 target_priv_data = pcie_device->starget->hostdata; 7136 if (!target_priv_data) 7137 goto out; 7138 7139 if (event_data->ReasonCode == 7140 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET) 7141 target_priv_data->tm_busy = 1; 7142 else 7143 target_priv_data->tm_busy = 0; 7144 out: 7145 if (pcie_device) 7146 pcie_device_put(pcie_device); 7147 7148 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 7149 } 7150 7151 /** 7152 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure 7153 * event 7154 * @ioc: per adapter object 7155 * @event_data: event data payload 7156 * Context: user. 
 */
static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		reason_str = "enclosure add";
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		reason_str = "enclosure remove";
		break;
	default:
		reason_str = "unknown reason";
		break;
	}

	ioc_info(ioc, "enclosure status change: (%s)\n"
		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->EnclosureHandle),
		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
		 le16_to_cpu(event_data->StartSlot));
}

/**
 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2ConfigReply_t mpi_reply;
	struct _enclosure_node *enclosure_dev = NULL;
	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
	int rc;
	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
			(Mpi2EventDataSasEnclDevStatusChange_t *)
			fw_event->event_data);
	if (ioc->shost_recovery)
		return;

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
				enclosure_handle);
	switch (event_data->ReasonCode) {
	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
		/* Only allocate and cache the enclosure page if we do not
		 * already track this enclosure handle.
		 */
		if (!enclosure_dev) {
			enclosure_dev =
				kzalloc(sizeof(struct _enclosure_node),
					GFP_KERNEL);
			if (!enclosure_dev) {
				ioc_info(ioc, "failure at %s:%d/%s()!\n",
					 __FILE__, __LINE__, __func__);
				return;
			}
			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
				enclosure_handle);

			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK)) {
				kfree(enclosure_dev);
				return;
			}

			list_add_tail(&enclosure_dev->list,
				      &ioc->enclosure_list);
		}
		break;
	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
		if (enclosure_dev) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		}
		break;
	default:
		break;
	}
}

/**
 * _scsih_sas_broadcast_primitive_event - handle broadcast events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 *
 * Walks every outstanding SCSI IO, issues QUERY_TASK to see whether the
 * IOC/target still owns it, and aborts IOs that are no longer owned.
 * The scsi_lookup_lock is dropped around every TM (it sleeps) and
 * re-taken before continuing the scan.
 */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		/* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		/* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		/* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* issue_tm sleeps; drop the lookup lock around it */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
				    "mpt3sas_scsih_issue_tm: FAILED when sending "
				    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
			& MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				    "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				    ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		    MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		    mpi_reply->ResponseCode ==
		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
			st->msix_io, 30, 0);
		/* cb_idx still set means the IO has not completed yet */
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
				    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
				    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
				    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
				    " scmd(%p)\n",
				    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* another broadcast AEN arrived while we were scanning; rescan */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		ioc->broadcast_aen_pending = 0;
		goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}

/**
 * _scsih_sas_discovery_event - handle discovery events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
7419 */ 7420 static void 7421 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, 7422 struct fw_event_work *fw_event) 7423 { 7424 Mpi2EventDataSasDiscovery_t *event_data = 7425 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; 7426 7427 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { 7428 ioc_info(ioc, "discovery event: (%s)", 7429 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ? 7430 "start" : "stop"); 7431 if (event_data->DiscoveryStatus) 7432 pr_cont("discovery_status(0x%08x)", 7433 le32_to_cpu(event_data->DiscoveryStatus)); 7434 pr_cont("\n"); 7435 } 7436 7437 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && 7438 !ioc->sas_hba.num_phys) { 7439 if (disable_discovery > 0 && ioc->shost_recovery) { 7440 /* Wait for the reset to complete */ 7441 while (ioc->shost_recovery) 7442 ssleep(1); 7443 } 7444 _scsih_sas_host_add(ioc); 7445 } 7446 } 7447 7448 /** 7449 * _scsih_sas_device_discovery_error_event - display SAS device discovery error 7450 * events 7451 * @ioc: per adapter object 7452 * @fw_event: The fw_event_work object 7453 * Context: user. 
 */
static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;

	switch (event_data->ReasonCode) {
	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
			 le16_to_cpu(event_data->DevHandle),
			 (u64)le64_to_cpu(event_data->SASAddress),
			 event_data->PhysicalPort);
		break;
	default:
		break;
	}
}

/**
 * _scsih_pcie_enumeration_event - handle enumeration events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi26EventDataPCIeEnumeration_t *event_data =
		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;

	/* This event is informational only; log it when requested. */
	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
		return;

	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		 "started" : "completed",
		 event_data->Flags);
	if (event_data->EnumerationStatus)
		pr_cont("enumeration_status(0x%08x)",
			le32_to_cpu(event_data->EnumerationStatus));
	pr_cont("\n");
}

/**
 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
 * @ioc: per adapter object
 * @handle: device handle for physical disk
 * @phys_disk_num: physical disk number
 *
 * Sends a RAID_ACTION PHYSDISK_HIDDEN request through the internal
 * scsih command slot and waits (up to 10s) for its completion.
 *
 * Return: 0 for success, else failure.
 */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* Not applicable to MPI 2.0 (SAS2) controllers. */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	init_completion(&ioc->scsih_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	/* Timed out: decide whether a hard reset is needed. */
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		issue_reset =
			mpt3sas_base_check_cmd_timeout(ioc,
				ioc->scsih_cmds.status, mpi_request,
				sizeof(Mpi2RaidActionRequest_t)/4);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}

/**
 * _scsih_reprobe_lun - reprobing lun
 * @sdev: scsi device struct
 * @no_uld_attach: sdev->no_uld_attach flag setting
 */
static void
_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
{
	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
		    sdev->no_uld_attach ? "hiding" : "exposing");
	WARN_ON(scsi_device_reprobe(sdev));
}

/**
 * _scsih_sas_volume_add - add new volume
 * @ioc: per adapter object
 * @element: IR config element data
 * Context: user.
7618 */ 7619 static void 7620 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, 7621 Mpi2EventIrConfigElement_t *element) 7622 { 7623 struct _raid_device *raid_device; 7624 unsigned long flags; 7625 u64 wwid; 7626 u16 handle = le16_to_cpu(element->VolDevHandle); 7627 int rc; 7628 7629 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); 7630 if (!wwid) { 7631 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7632 __FILE__, __LINE__, __func__); 7633 return; 7634 } 7635 7636 spin_lock_irqsave(&ioc->raid_device_lock, flags); 7637 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); 7638 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 7639 7640 if (raid_device) 7641 return; 7642 7643 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); 7644 if (!raid_device) { 7645 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7646 __FILE__, __LINE__, __func__); 7647 return; 7648 } 7649 7650 raid_device->id = ioc->sas_id++; 7651 raid_device->channel = RAID_CHANNEL; 7652 raid_device->handle = handle; 7653 raid_device->wwid = wwid; 7654 _scsih_raid_device_add(ioc, raid_device); 7655 if (!ioc->wait_for_discovery_to_complete) { 7656 rc = scsi_add_device(ioc->shost, RAID_CHANNEL, 7657 raid_device->id, 0); 7658 if (rc) 7659 _scsih_raid_device_remove(ioc, raid_device); 7660 } else { 7661 spin_lock_irqsave(&ioc->raid_device_lock, flags); 7662 _scsih_determine_boot_device(ioc, raid_device, 1); 7663 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 7664 } 7665 } 7666 7667 /** 7668 * _scsih_sas_volume_delete - delete volume 7669 * @ioc: per adapter object 7670 * @handle: volume device handle 7671 * Context: user. 
7672 */ 7673 static void 7674 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) 7675 { 7676 struct _raid_device *raid_device; 7677 unsigned long flags; 7678 struct MPT3SAS_TARGET *sas_target_priv_data; 7679 struct scsi_target *starget = NULL; 7680 7681 spin_lock_irqsave(&ioc->raid_device_lock, flags); 7682 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 7683 if (raid_device) { 7684 if (raid_device->starget) { 7685 starget = raid_device->starget; 7686 sas_target_priv_data = starget->hostdata; 7687 sas_target_priv_data->deleted = 1; 7688 } 7689 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n", 7690 raid_device->handle, (u64)raid_device->wwid); 7691 list_del(&raid_device->list); 7692 kfree(raid_device); 7693 } 7694 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 7695 if (starget) 7696 scsi_remove_target(&starget->dev); 7697 } 7698 7699 /** 7700 * _scsih_sas_pd_expose - expose pd component to /dev/sdX 7701 * @ioc: per adapter object 7702 * @element: IR config element data 7703 * Context: user. 
7704 */ 7705 static void 7706 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, 7707 Mpi2EventIrConfigElement_t *element) 7708 { 7709 struct _sas_device *sas_device; 7710 struct scsi_target *starget = NULL; 7711 struct MPT3SAS_TARGET *sas_target_priv_data; 7712 unsigned long flags; 7713 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 7714 7715 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7716 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 7717 if (sas_device) { 7718 sas_device->volume_handle = 0; 7719 sas_device->volume_wwid = 0; 7720 clear_bit(handle, ioc->pd_handles); 7721 if (sas_device->starget && sas_device->starget->hostdata) { 7722 starget = sas_device->starget; 7723 sas_target_priv_data = starget->hostdata; 7724 sas_target_priv_data->flags &= 7725 ~MPT_TARGET_FLAGS_RAID_COMPONENT; 7726 } 7727 } 7728 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7729 if (!sas_device) 7730 return; 7731 7732 /* exposing raid component */ 7733 if (starget) 7734 starget_for_each_device(starget, NULL, _scsih_reprobe_lun); 7735 7736 sas_device_put(sas_device); 7737 } 7738 7739 /** 7740 * _scsih_sas_pd_hide - hide pd component from /dev/sdX 7741 * @ioc: per adapter object 7742 * @element: IR config element data 7743 * Context: user. 
7744 */ 7745 static void 7746 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, 7747 Mpi2EventIrConfigElement_t *element) 7748 { 7749 struct _sas_device *sas_device; 7750 struct scsi_target *starget = NULL; 7751 struct MPT3SAS_TARGET *sas_target_priv_data; 7752 unsigned long flags; 7753 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 7754 u16 volume_handle = 0; 7755 u64 volume_wwid = 0; 7756 7757 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle); 7758 if (volume_handle) 7759 mpt3sas_config_get_volume_wwid(ioc, volume_handle, 7760 &volume_wwid); 7761 7762 spin_lock_irqsave(&ioc->sas_device_lock, flags); 7763 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); 7764 if (sas_device) { 7765 set_bit(handle, ioc->pd_handles); 7766 if (sas_device->starget && sas_device->starget->hostdata) { 7767 starget = sas_device->starget; 7768 sas_target_priv_data = starget->hostdata; 7769 sas_target_priv_data->flags |= 7770 MPT_TARGET_FLAGS_RAID_COMPONENT; 7771 sas_device->volume_handle = volume_handle; 7772 sas_device->volume_wwid = volume_wwid; 7773 } 7774 } 7775 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 7776 if (!sas_device) 7777 return; 7778 7779 /* hiding raid component */ 7780 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 7781 7782 if (starget) 7783 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun); 7784 7785 sas_device_put(sas_device); 7786 } 7787 7788 /** 7789 * _scsih_sas_pd_delete - delete pd component 7790 * @ioc: per adapter object 7791 * @element: IR config element data 7792 * Context: user. 7793 */ 7794 static void 7795 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, 7796 Mpi2EventIrConfigElement_t *element) 7797 { 7798 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 7799 7800 _scsih_device_remove_by_handle(ioc, handle); 7801 } 7802 7803 /** 7804 * _scsih_sas_pd_add - remove pd component 7805 * @ioc: per adapter object 7806 * @element: IR config element data 7807 * Context: user. 
7808 */ 7809 static void 7810 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, 7811 Mpi2EventIrConfigElement_t *element) 7812 { 7813 struct _sas_device *sas_device; 7814 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 7815 Mpi2ConfigReply_t mpi_reply; 7816 Mpi2SasDevicePage0_t sas_device_pg0; 7817 u32 ioc_status; 7818 u64 sas_address; 7819 u16 parent_handle; 7820 7821 set_bit(handle, ioc->pd_handles); 7822 7823 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 7824 if (sas_device) { 7825 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 7826 sas_device_put(sas_device); 7827 return; 7828 } 7829 7830 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, 7831 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { 7832 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7833 __FILE__, __LINE__, __func__); 7834 return; 7835 } 7836 7837 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 7838 MPI2_IOCSTATUS_MASK; 7839 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 7840 ioc_err(ioc, "failure at %s:%d/%s()!\n", 7841 __FILE__, __LINE__, __func__); 7842 return; 7843 } 7844 7845 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 7846 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) 7847 mpt3sas_transport_update_links(ioc, sas_address, handle, 7848 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 7849 7850 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); 7851 _scsih_add_device(ioc, handle, 0, 1); 7852 } 7853 7854 /** 7855 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events 7856 * @ioc: per adapter object 7857 * @event_data: event data payload 7858 * Context: user. 
7859 */ 7860 static void 7861 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, 7862 Mpi2EventDataIrConfigChangeList_t *event_data) 7863 { 7864 Mpi2EventIrConfigElement_t *element; 7865 u8 element_type; 7866 int i; 7867 char *reason_str = NULL, *element_str = NULL; 7868 7869 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 7870 7871 ioc_info(ioc, "raid config change: (%s), elements(%d)\n", 7872 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ? 7873 "foreign" : "native", 7874 event_data->NumElements); 7875 for (i = 0; i < event_data->NumElements; i++, element++) { 7876 switch (element->ReasonCode) { 7877 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 7878 reason_str = "add"; 7879 break; 7880 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 7881 reason_str = "remove"; 7882 break; 7883 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: 7884 reason_str = "no change"; 7885 break; 7886 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 7887 reason_str = "hide"; 7888 break; 7889 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 7890 reason_str = "unhide"; 7891 break; 7892 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 7893 reason_str = "volume_created"; 7894 break; 7895 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 7896 reason_str = "volume_deleted"; 7897 break; 7898 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 7899 reason_str = "pd_created"; 7900 break; 7901 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 7902 reason_str = "pd_deleted"; 7903 break; 7904 default: 7905 reason_str = "unknown reason"; 7906 break; 7907 } 7908 element_type = le16_to_cpu(element->ElementFlags) & 7909 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; 7910 switch (element_type) { 7911 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: 7912 element_str = "volume"; 7913 break; 7914 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: 7915 element_str = "phys disk"; 7916 break; 7917 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: 7918 element_str = "hot spare"; 7919 break; 7920 default: 7921 element_str = 
"unknown element"; 7922 break; 7923 } 7924 pr_info("\t(%s:%s), vol handle(0x%04x), " \ 7925 "pd handle(0x%04x), pd num(0x%02x)\n", element_str, 7926 reason_str, le16_to_cpu(element->VolDevHandle), 7927 le16_to_cpu(element->PhysDiskDevHandle), 7928 element->PhysDiskNum); 7929 } 7930 } 7931 7932 /** 7933 * _scsih_sas_ir_config_change_event - handle ir configuration change events 7934 * @ioc: per adapter object 7935 * @fw_event: The fw_event_work object 7936 * Context: user. 7937 */ 7938 static void 7939 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, 7940 struct fw_event_work *fw_event) 7941 { 7942 Mpi2EventIrConfigElement_t *element; 7943 int i; 7944 u8 foreign_config; 7945 Mpi2EventDataIrConfigChangeList_t *event_data = 7946 (Mpi2EventDataIrConfigChangeList_t *) 7947 fw_event->event_data; 7948 7949 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 7950 (!ioc->hide_ir_msg)) 7951 _scsih_sas_ir_config_change_event_debug(ioc, event_data); 7952 7953 foreign_config = (le32_to_cpu(event_data->Flags) & 7954 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; 7955 7956 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 7957 if (ioc->shost_recovery && 7958 ioc->hba_mpi_version_belonged != MPI2_VERSION) { 7959 for (i = 0; i < event_data->NumElements; i++, element++) { 7960 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) 7961 _scsih_ir_fastpath(ioc, 7962 le16_to_cpu(element->PhysDiskDevHandle), 7963 element->PhysDiskNum); 7964 } 7965 return; 7966 } 7967 7968 for (i = 0; i < event_data->NumElements; i++, element++) { 7969 7970 switch (element->ReasonCode) { 7971 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 7972 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 7973 if (!foreign_config) 7974 _scsih_sas_volume_add(ioc, element); 7975 break; 7976 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 7977 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 7978 if (!foreign_config) 7979 _scsih_sas_volume_delete(ioc, 7980 le16_to_cpu(element->VolDevHandle)); 7981 break; 7982 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 7983 if (!ioc->is_warpdrive) 7984 _scsih_sas_pd_hide(ioc, element); 7985 break; 7986 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: 7987 if (!ioc->is_warpdrive) 7988 _scsih_sas_pd_expose(ioc, element); 7989 break; 7990 case MPI2_EVENT_IR_CHANGE_RC_HIDE: 7991 if (!ioc->is_warpdrive) 7992 _scsih_sas_pd_add(ioc, element); 7993 break; 7994 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: 7995 if (!ioc->is_warpdrive) 7996 _scsih_sas_pd_delete(ioc, element); 7997 break; 7998 } 7999 } 8000 } 8001 8002 /** 8003 * _scsih_sas_ir_volume_event - IR volume event 8004 * @ioc: per adapter object 8005 * @fw_event: The fw_event_work object 8006 * Context: user. 
 */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	/* Ignore volume events during host reset; the post-reset
	 * rescan re-discovers volumes from scratch.
	 */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		/* volume is gone - tear it down */
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		/* already tracked - nothing to add */
		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		/* register with the SCSI midlayer; undo tracking on failure */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}

/**
 * _scsih_sas_ir_physical_disk_event - PD event
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
 */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	/* Ignore PD events during host reset; rescan handles recovery. */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		/* track this handle as a RAID member (except warpdrive) */
		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* already known - drop the lookup reference and bail */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}

/**
 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
 * @ioc: per adapter object
 * @event_data: event data payload
 * Context: user.
 */
static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrOperationStatus_t *event_data)
{
	char *reason_str = NULL;

	switch (event_data->RAIDOperation) {
	case MPI2_EVENT_IR_RAIDOP_RESYNC:
		reason_str = "resync";
		break;
	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
		reason_str = "online capacity expansion";
		break;
	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
		reason_str = "consistency check";
		break;
	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
		reason_str = "background init";
		break;
	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
		reason_str = "make data consistent";
		break;
	}

	/* unrecognized operations are silently skipped */
	if (!reason_str)
		return;

	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
		 reason_str,
		 le16_to_cpu(event_data->VolDevHandle),
		 event_data->PercentComplete);
}

/**
 * _scsih_sas_ir_operation_status_event - handle RAID operation events
 * @ioc: per adapter object
 * @fw_event: The fw_event_work object
 * Context: user.
8212 */ 8213 static void 8214 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, 8215 struct fw_event_work *fw_event) 8216 { 8217 Mpi2EventDataIrOperationStatus_t *event_data = 8218 (Mpi2EventDataIrOperationStatus_t *) 8219 fw_event->event_data; 8220 static struct _raid_device *raid_device; 8221 unsigned long flags; 8222 u16 handle; 8223 8224 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && 8225 (!ioc->hide_ir_msg)) 8226 _scsih_sas_ir_operation_status_event_debug(ioc, 8227 event_data); 8228 8229 /* code added for raid transport support */ 8230 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { 8231 8232 spin_lock_irqsave(&ioc->raid_device_lock, flags); 8233 handle = le16_to_cpu(event_data->VolDevHandle); 8234 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); 8235 if (raid_device) 8236 raid_device->percent_complete = 8237 event_data->PercentComplete; 8238 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 8239 } 8240 } 8241 8242 /** 8243 * _scsih_prep_device_scan - initialize parameters prior to device scan 8244 * @ioc: per adapter object 8245 * 8246 * Set the deleted flag prior to device scan. If the device is found during 8247 * the scan, then we clear the deleted flag. 8248 */ 8249 static void 8250 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) 8251 { 8252 struct MPT3SAS_DEVICE *sas_device_priv_data; 8253 struct scsi_device *sdev; 8254 8255 shost_for_each_device(sdev, ioc->shost) { 8256 sas_device_priv_data = sdev->hostdata; 8257 if (sas_device_priv_data && sas_device_priv_data->sas_target) 8258 sas_device_priv_data->sas_target->deleted = 1; 8259 } 8260 } 8261 8262 /** 8263 * _scsih_mark_responding_sas_device - mark a sas_devices as responding 8264 * @ioc: per adapter object 8265 * @sas_device_pg0: SAS Device page 0 8266 * 8267 * After host reset, find out whether devices are still responding. 8268 * Used in _scsih_remove_unresponsive_sas_devices. 
 */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;

	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
			    le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* match on SAS address + slot; the handle may have changed
		 * across the reset and is refreshed below
		 */
		if ((sas_device->sas_address == le64_to_cpu(
		    sas_device_pg0->SASAddress)) && (sas_device->slot ==
		    le16_to_cpu(sas_device_pg0->Slot))) {
			sas_device->responding = 1;
			starget = sas_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), sas_addr(0x%016llx)\n",
				    le16_to_cpu(sas_device_pg0->DevHandle),
				    (unsigned long long)
				    sas_device->sas_address);

				if (sas_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx),"
					    " slot(%d)\n",
					    (unsigned long long)
					    sas_device->enclosure_logical_id,
					    sas_device->slot);
			}
			if (le16_to_cpu(sas_device_pg0->Flags) &
			    MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
				sas_device->enclosure_level =
				    sas_device_pg0->EnclosureLevel;
				memcpy(&sas_device->connector_name[0],
					&sas_device_pg0->ConnectorName[0], 4);
			} else {
				sas_device->enclosure_level = 0;
				sas_device->connector_name[0] = '\0';
			}

			sas_device->enclosure_handle =
				le16_to_cpu(sas_device_pg0->EnclosureHandle);
			sas_device->is_chassis_slot_valid = 0;
			if (enclosure_dev) {
				sas_device->enclosure_logical_id = le64_to_cpu(
					enclosure_dev->pg0.EnclosureLogicalID);
				if (le16_to_cpu(enclosure_dev->pg0.Flags) &
				    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
					sas_device->is_chassis_slot_valid = 1;
					sas_device->chassis_slot =
						enclosure_dev->pg0.ChassisSlot;
				}
			}

			/* refresh the handle if firmware reassigned it */
			if (sas_device->handle == le16_to_cpu(
			    sas_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
				sas_device->handle);
			sas_device->handle = le16_to_cpu(
			    sas_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
					le16_to_cpu(sas_device_pg0->DevHandle);
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_create_enclosure_list_after_reset - Free Existing list,
 *	And create enclosure list by scanning all Enclosure Page(0)s
 * @ioc: per adapter object
 */
static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev;
	Mpi2ConfigReply_t mpi_reply;
	u16 enclosure_handle;
	int rc;

	/* Free existing enclosure list */
	mpt3sas_free_enclosure_list(ioc);

	/* Re constructing enclosure list after reset*/
	enclosure_handle = 0xFFFF;
	do {
		enclosure_dev =
			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
		if (!enclosure_dev) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}
		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
				&enclosure_dev->pg0,
				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
				enclosure_handle);

		/* loop terminates when firmware has no next enclosure page */
		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
						MPI2_IOCSTATUS_MASK)) {
			kfree(enclosure_dev);
			return;
		}
		list_add_tail(&enclosure_dev->list,
						&ioc->enclosure_list);
		enclosure_handle =
			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
	} while (1);
}

/**
 * _scsih_search_responding_sas_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u32 device_info;

	ioc_info(ioc, "search for end-devices: start\n");

	if (list_empty(&ioc->sas_device_list))
		goto out;

	/* walk all device pages, marking known end-devices responding */
	handle = 0xFFFF;
	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
	    handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(sas_device_pg0.DevHandle);
		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
		if (!(_scsih_is_end_device(device_info)))
			continue;
		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
	}

 out:
	ioc_info(ioc, "search for end-devices: complete\n");
}

/**
 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
 * @ioc: per adapter object
 * @pcie_device_pg0: PCIe Device page 0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
8448 */ 8449 static void 8450 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, 8451 Mpi26PCIeDevicePage0_t *pcie_device_pg0) 8452 { 8453 struct MPT3SAS_TARGET *sas_target_priv_data = NULL; 8454 struct scsi_target *starget; 8455 struct _pcie_device *pcie_device; 8456 unsigned long flags; 8457 8458 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 8459 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 8460 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) 8461 && (pcie_device->slot == le16_to_cpu( 8462 pcie_device_pg0->Slot))) { 8463 pcie_device->responding = 1; 8464 starget = pcie_device->starget; 8465 if (starget && starget->hostdata) { 8466 sas_target_priv_data = starget->hostdata; 8467 sas_target_priv_data->tm_busy = 0; 8468 sas_target_priv_data->deleted = 0; 8469 } else 8470 sas_target_priv_data = NULL; 8471 if (starget) { 8472 starget_printk(KERN_INFO, starget, 8473 "handle(0x%04x), wwid(0x%016llx) ", 8474 pcie_device->handle, 8475 (unsigned long long)pcie_device->wwid); 8476 if (pcie_device->enclosure_handle != 0) 8477 starget_printk(KERN_INFO, starget, 8478 "enclosure logical id(0x%016llx), " 8479 "slot(%d)\n", 8480 (unsigned long long) 8481 pcie_device->enclosure_logical_id, 8482 pcie_device->slot); 8483 } 8484 8485 if (((le32_to_cpu(pcie_device_pg0->Flags)) & 8486 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) && 8487 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) { 8488 pcie_device->enclosure_level = 8489 pcie_device_pg0->EnclosureLevel; 8490 memcpy(&pcie_device->connector_name[0], 8491 &pcie_device_pg0->ConnectorName[0], 4); 8492 } else { 8493 pcie_device->enclosure_level = 0; 8494 pcie_device->connector_name[0] = '\0'; 8495 } 8496 8497 if (pcie_device->handle == le16_to_cpu( 8498 pcie_device_pg0->DevHandle)) 8499 goto out; 8500 pr_info("\thandle changed from(0x%04x)!!!\n", 8501 pcie_device->handle); 8502 pcie_device->handle = le16_to_cpu( 8503 pcie_device_pg0->DevHandle); 8504 if (sas_target_priv_data) 8505 
sas_target_priv_data->handle = 8506 le16_to_cpu(pcie_device_pg0->DevHandle); 8507 goto out; 8508 } 8509 } 8510 8511 out: 8512 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8513 } 8514 8515 /** 8516 * _scsih_search_responding_pcie_devices - 8517 * @ioc: per adapter object 8518 * 8519 * After host reset, find out whether devices are still responding. 8520 * If not remove. 8521 */ 8522 static void 8523 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) 8524 { 8525 Mpi26PCIeDevicePage0_t pcie_device_pg0; 8526 Mpi2ConfigReply_t mpi_reply; 8527 u16 ioc_status; 8528 u16 handle; 8529 u32 device_info; 8530 8531 ioc_info(ioc, "search for end-devices: start\n"); 8532 8533 if (list_empty(&ioc->pcie_device_list)) 8534 goto out; 8535 8536 handle = 0xFFFF; 8537 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 8538 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 8539 handle))) { 8540 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8541 MPI2_IOCSTATUS_MASK; 8542 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8543 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", 8544 __func__, ioc_status, 8545 le32_to_cpu(mpi_reply.IOCLogInfo)); 8546 break; 8547 } 8548 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 8549 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); 8550 if (!(_scsih_is_nvme_device(device_info))) 8551 continue; 8552 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); 8553 } 8554 out: 8555 ioc_info(ioc, "search for PCIe end-devices: complete\n"); 8556 } 8557 8558 /** 8559 * _scsih_mark_responding_raid_device - mark a raid_device as responding 8560 * @ioc: per adapter object 8561 * @wwid: world wide identifier for raid volume 8562 * @handle: device handle 8563 * 8564 * After host reset, find out whether devices are still responding. 8565 * Used in _scsih_remove_unresponsive_raid_devices. 
 */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* lock must be dropped around printing and the
			 * warpdrive re-init below, then retaken before
			 * touching the handle
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
				raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/**
 * _scsih_search_responding_raid_devices -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
 */
static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidVolPage1_t volume_pg1;
	Mpi2RaidVolPage0_t volume_pg0;
	Mpi2RaidPhysDiskPage0_t pd_pg0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;
	u16 handle;
	u8 phys_disk_num;

	/* nothing to do when the controller has no IR firmware */
	if (!ioc->ir_firmware)
		return;

	ioc_info(ioc, "search for raid volumes: start\n");

	if (list_empty(&ioc->raid_device_list))
		goto out;

	handle = 0xFFFF;
	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
			break;
		handle = le16_to_cpu(volume_pg1.DevHandle);

		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
		    sizeof(Mpi2RaidVolPage0_t)))
			continue;

		/* only healthy/usable volumes are marked responding */
		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
			_scsih_mark_responding_raid_device(ioc,
			    le64_to_cpu(volume_pg1.WWID), handle);
	}

	/* refresh the pd_handles */
	if (!ioc->is_warpdrive) {
		phys_disk_num = 0xFF;
		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
		    phys_disk_num))) {
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
				break;
			phys_disk_num = pd_pg0.PhysDiskNum;
			handle = le16_to_cpu(pd_pg0.DevHandle);
			set_bit(handle, ioc->pd_handles);
		}
	}
 out:
	ioc_info(ioc, "search for responding raid volumes: complete\n");
}

/**
 * _scsih_mark_responding_expander - mark a expander as responding
 * @ioc: per adapter object
 * @expander_pg0:SAS Expander Config Page0
 *
 * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponsive_expanders.
 */
static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
	Mpi2ExpanderPage0_t *expander_pg0)
{
	struct _sas_node *sas_expander = NULL;
	unsigned long flags;
	int i;
	struct _enclosure_node *enclosure_dev = NULL;
	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);

	if (enclosure_handle)
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
							enclosure_handle);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
		/* match on SAS address; handle may have changed across reset */
		if (sas_expander->sas_address != sas_address)
			continue;
		sas_expander->responding = 1;

		if (enclosure_dev) {
			sas_expander->enclosure_logical_id =
			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
			sas_expander->enclosure_handle =
			    le16_to_cpu(expander_pg0->EnclosureHandle);
		}

		if (sas_expander->handle == handle)
			goto out;
		pr_info("\texpander(0x%016llx): handle changed" \
		    " from(0x%04x) to (0x%04x)!!!\n",
		    (unsigned long long)sas_expander->sas_address,
		    sas_expander->handle, handle);
		sas_expander->handle = handle;
		/* propagate the new handle to every phy of the expander */
		for (i = 0 ; i < sas_expander->num_phys ; i++)
			sas_expander->phy[i].handle = handle;
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}

/**
 * _scsih_search_responding_expanders -
 * @ioc: per adapter object
 *
 * After host reset, find out whether devices are still responding.
 * If not remove.
8741 */ 8742 static void 8743 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) 8744 { 8745 Mpi2ExpanderPage0_t expander_pg0; 8746 Mpi2ConfigReply_t mpi_reply; 8747 u16 ioc_status; 8748 u64 sas_address; 8749 u16 handle; 8750 8751 ioc_info(ioc, "search for expanders: start\n"); 8752 8753 if (list_empty(&ioc->sas_expander_list)) 8754 goto out; 8755 8756 handle = 0xFFFF; 8757 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 8758 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { 8759 8760 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8761 MPI2_IOCSTATUS_MASK; 8762 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) 8763 break; 8764 8765 handle = le16_to_cpu(expander_pg0.DevHandle); 8766 sas_address = le64_to_cpu(expander_pg0.SASAddress); 8767 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n", 8768 handle, 8769 (unsigned long long)sas_address); 8770 _scsih_mark_responding_expander(ioc, &expander_pg0); 8771 } 8772 8773 out: 8774 ioc_info(ioc, "search for expanders: complete\n"); 8775 } 8776 8777 /** 8778 * _scsih_remove_unresponding_devices - removing unresponding devices 8779 * @ioc: per adapter object 8780 */ 8781 static void 8782 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) 8783 { 8784 struct _sas_device *sas_device, *sas_device_next; 8785 struct _sas_node *sas_expander, *sas_expander_next; 8786 struct _raid_device *raid_device, *raid_device_next; 8787 struct _pcie_device *pcie_device, *pcie_device_next; 8788 struct list_head tmp_list; 8789 unsigned long flags; 8790 LIST_HEAD(head); 8791 8792 ioc_info(ioc, "removing unresponding devices: start\n"); 8793 8794 /* removing unresponding end devices */ 8795 ioc_info(ioc, "removing unresponding devices: end-devices\n"); 8796 /* 8797 * Iterate, pulling off devices marked as non-responding. We become the 8798 * owner for the reference the list had on any object we prune. 
8799 */ 8800 spin_lock_irqsave(&ioc->sas_device_lock, flags); 8801 list_for_each_entry_safe(sas_device, sas_device_next, 8802 &ioc->sas_device_list, list) { 8803 if (!sas_device->responding) 8804 list_move_tail(&sas_device->list, &head); 8805 else 8806 sas_device->responding = 0; 8807 } 8808 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 8809 8810 /* 8811 * Now, uninitialize and remove the unresponding devices we pruned. 8812 */ 8813 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { 8814 _scsih_remove_device(ioc, sas_device); 8815 list_del_init(&sas_device->list); 8816 sas_device_put(sas_device); 8817 } 8818 8819 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n"); 8820 INIT_LIST_HEAD(&head); 8821 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 8822 list_for_each_entry_safe(pcie_device, pcie_device_next, 8823 &ioc->pcie_device_list, list) { 8824 if (!pcie_device->responding) 8825 list_move_tail(&pcie_device->list, &head); 8826 else 8827 pcie_device->responding = 0; 8828 } 8829 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 8830 8831 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) { 8832 _scsih_pcie_device_remove_from_sml(ioc, pcie_device); 8833 list_del_init(&pcie_device->list); 8834 pcie_device_put(pcie_device); 8835 } 8836 8837 /* removing unresponding volumes */ 8838 if (ioc->ir_firmware) { 8839 ioc_info(ioc, "removing unresponding devices: volumes\n"); 8840 list_for_each_entry_safe(raid_device, raid_device_next, 8841 &ioc->raid_device_list, list) { 8842 if (!raid_device->responding) 8843 _scsih_sas_volume_delete(ioc, 8844 raid_device->handle); 8845 else 8846 raid_device->responding = 0; 8847 } 8848 } 8849 8850 /* removing unresponding expanders */ 8851 ioc_info(ioc, "removing unresponding devices: expanders\n"); 8852 spin_lock_irqsave(&ioc->sas_node_lock, flags); 8853 INIT_LIST_HEAD(&tmp_list); 8854 list_for_each_entry_safe(sas_expander, sas_expander_next, 8855 &ioc->sas_expander_list, 
list) { 8856 if (!sas_expander->responding) 8857 list_move_tail(&sas_expander->list, &tmp_list); 8858 else 8859 sas_expander->responding = 0; 8860 } 8861 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 8862 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, 8863 list) { 8864 _scsih_expander_node_remove(ioc, sas_expander); 8865 } 8866 8867 ioc_info(ioc, "removing unresponding devices: complete\n"); 8868 8869 /* unblock devices */ 8870 _scsih_ublock_io_all_device(ioc); 8871 } 8872 8873 static void 8874 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, 8875 struct _sas_node *sas_expander, u16 handle) 8876 { 8877 Mpi2ExpanderPage1_t expander_pg1; 8878 Mpi2ConfigReply_t mpi_reply; 8879 int i; 8880 8881 for (i = 0 ; i < sas_expander->num_phys ; i++) { 8882 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, 8883 &expander_pg1, i, handle))) { 8884 ioc_err(ioc, "failure at %s:%d/%s()!\n", 8885 __FILE__, __LINE__, __func__); 8886 return; 8887 } 8888 8889 mpt3sas_transport_update_links(ioc, sas_expander->sas_address, 8890 le16_to_cpu(expander_pg1.AttachedDevHandle), i, 8891 expander_pg1.NegotiatedLinkRate >> 4); 8892 } 8893 } 8894 8895 /** 8896 * _scsih_scan_for_devices_after_reset - scan for devices after host reset 8897 * @ioc: per adapter object 8898 */ 8899 static void 8900 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) 8901 { 8902 Mpi2ExpanderPage0_t expander_pg0; 8903 Mpi2SasDevicePage0_t sas_device_pg0; 8904 Mpi26PCIeDevicePage0_t pcie_device_pg0; 8905 Mpi2RaidVolPage1_t volume_pg1; 8906 Mpi2RaidVolPage0_t volume_pg0; 8907 Mpi2RaidPhysDiskPage0_t pd_pg0; 8908 Mpi2EventIrConfigElement_t element; 8909 Mpi2ConfigReply_t mpi_reply; 8910 u8 phys_disk_num; 8911 u16 ioc_status; 8912 u16 handle, parent_handle; 8913 u64 sas_address; 8914 struct _sas_device *sas_device; 8915 struct _pcie_device *pcie_device; 8916 struct _sas_node *expander_device; 8917 static struct _raid_device *raid_device; 8918 u8 retry_count; 8919 unsigned 
long flags; 8920 8921 ioc_info(ioc, "scan devices: start\n"); 8922 8923 _scsih_sas_host_refresh(ioc); 8924 8925 ioc_info(ioc, "\tscan devices: expanders start\n"); 8926 8927 /* expanders */ 8928 handle = 0xFFFF; 8929 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, 8930 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { 8931 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8932 MPI2_IOCSTATUS_MASK; 8933 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8934 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 8935 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 8936 break; 8937 } 8938 handle = le16_to_cpu(expander_pg0.DevHandle); 8939 spin_lock_irqsave(&ioc->sas_node_lock, flags); 8940 expander_device = mpt3sas_scsih_expander_find_by_sas_address( 8941 ioc, le64_to_cpu(expander_pg0.SASAddress)); 8942 spin_unlock_irqrestore(&ioc->sas_node_lock, flags); 8943 if (expander_device) 8944 _scsih_refresh_expander_links(ioc, expander_device, 8945 handle); 8946 else { 8947 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 8948 handle, 8949 (u64)le64_to_cpu(expander_pg0.SASAddress)); 8950 _scsih_expander_add(ioc, handle); 8951 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n", 8952 handle, 8953 (u64)le64_to_cpu(expander_pg0.SASAddress)); 8954 } 8955 } 8956 8957 ioc_info(ioc, "\tscan devices: expanders complete\n"); 8958 8959 if (!ioc->ir_firmware) 8960 goto skip_to_sas; 8961 8962 ioc_info(ioc, "\tscan devices: phys disk start\n"); 8963 8964 /* phys disk */ 8965 phys_disk_num = 0xFF; 8966 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, 8967 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, 8968 phys_disk_num))) { 8969 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8970 MPI2_IOCSTATUS_MASK; 8971 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8972 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 8973 ioc_status, 
le32_to_cpu(mpi_reply.IOCLogInfo)); 8974 break; 8975 } 8976 phys_disk_num = pd_pg0.PhysDiskNum; 8977 handle = le16_to_cpu(pd_pg0.DevHandle); 8978 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); 8979 if (sas_device) { 8980 sas_device_put(sas_device); 8981 continue; 8982 } 8983 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 8984 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, 8985 handle) != 0) 8986 continue; 8987 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 8988 MPI2_IOCSTATUS_MASK; 8989 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 8990 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", 8991 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 8992 break; 8993 } 8994 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 8995 if (!_scsih_get_sas_address(ioc, parent_handle, 8996 &sas_address)) { 8997 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", 8998 handle, 8999 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 9000 mpt3sas_transport_update_links(ioc, sas_address, 9001 handle, sas_device_pg0.PhyNum, 9002 MPI2_SAS_NEG_LINK_RATE_1_5); 9003 set_bit(handle, ioc->pd_handles); 9004 retry_count = 0; 9005 /* This will retry adding the end device. 
9006 * _scsih_add_device() will decide on retries and 9007 * return "1" when it should be retried 9008 */ 9009 while (_scsih_add_device(ioc, handle, retry_count++, 9010 1)) { 9011 ssleep(1); 9012 } 9013 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n", 9014 handle, 9015 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 9016 } 9017 } 9018 9019 ioc_info(ioc, "\tscan devices: phys disk complete\n"); 9020 9021 ioc_info(ioc, "\tscan devices: volumes start\n"); 9022 9023 /* volumes */ 9024 handle = 0xFFFF; 9025 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, 9026 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { 9027 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9028 MPI2_IOCSTATUS_MASK; 9029 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9030 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 9031 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 9032 break; 9033 } 9034 handle = le16_to_cpu(volume_pg1.DevHandle); 9035 spin_lock_irqsave(&ioc->raid_device_lock, flags); 9036 raid_device = _scsih_raid_device_find_by_wwid(ioc, 9037 le64_to_cpu(volume_pg1.WWID)); 9038 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 9039 if (raid_device) 9040 continue; 9041 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, 9042 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, 9043 sizeof(Mpi2RaidVolPage0_t))) 9044 continue; 9045 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9046 MPI2_IOCSTATUS_MASK; 9047 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9048 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 9049 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 9050 break; 9051 } 9052 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || 9053 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || 9054 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { 9055 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); 9056 element.ReasonCode = 
MPI2_EVENT_IR_CHANGE_RC_ADDED; 9057 element.VolDevHandle = volume_pg1.DevHandle; 9058 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", 9059 volume_pg1.DevHandle); 9060 _scsih_sas_volume_add(ioc, &element); 9061 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", 9062 volume_pg1.DevHandle); 9063 } 9064 } 9065 9066 ioc_info(ioc, "\tscan devices: volumes complete\n"); 9067 9068 skip_to_sas: 9069 9070 ioc_info(ioc, "\tscan devices: end devices start\n"); 9071 9072 /* sas devices */ 9073 handle = 0xFFFF; 9074 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, 9075 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9076 handle))) { 9077 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & 9078 MPI2_IOCSTATUS_MASK; 9079 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9080 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 9081 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 9082 break; 9083 } 9084 handle = le16_to_cpu(sas_device_pg0.DevHandle); 9085 if (!(_scsih_is_end_device( 9086 le32_to_cpu(sas_device_pg0.DeviceInfo)))) 9087 continue; 9088 sas_device = mpt3sas_get_sdev_by_addr(ioc, 9089 le64_to_cpu(sas_device_pg0.SASAddress)); 9090 if (sas_device) { 9091 sas_device_put(sas_device); 9092 continue; 9093 } 9094 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); 9095 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { 9096 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", 9097 handle, 9098 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 9099 mpt3sas_transport_update_links(ioc, sas_address, handle, 9100 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); 9101 retry_count = 0; 9102 /* This will retry adding the end device. 
9103 * _scsih_add_device() will decide on retries and 9104 * return "1" when it should be retried 9105 */ 9106 while (_scsih_add_device(ioc, handle, retry_count++, 9107 0)) { 9108 ssleep(1); 9109 } 9110 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n", 9111 handle, 9112 (u64)le64_to_cpu(sas_device_pg0.SASAddress)); 9113 } 9114 } 9115 ioc_info(ioc, "\tscan devices: end devices complete\n"); 9116 ioc_info(ioc, "\tscan devices: pcie end devices start\n"); 9117 9118 /* pcie devices */ 9119 handle = 0xFFFF; 9120 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply, 9121 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, 9122 handle))) { 9123 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) 9124 & MPI2_IOCSTATUS_MASK; 9125 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 9126 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", 9127 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); 9128 break; 9129 } 9130 handle = le16_to_cpu(pcie_device_pg0.DevHandle); 9131 if (!(_scsih_is_nvme_device( 9132 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) 9133 continue; 9134 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, 9135 le64_to_cpu(pcie_device_pg0.WWID)); 9136 if (pcie_device) { 9137 pcie_device_put(pcie_device); 9138 continue; 9139 } 9140 retry_count = 0; 9141 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle); 9142 _scsih_pcie_add_device(ioc, handle); 9143 9144 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", 9145 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); 9146 } 9147 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); 9148 ioc_info(ioc, "scan devices: complete\n"); 9149 } 9150 9151 /** 9152 * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) 9153 * @ioc: per adapter object 9154 * 9155 * The handler for doing any required cleanup or initialization. 
 */
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}

/**
 * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void
mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
	/* Fail back the internal scsih command outstanding across the reset */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Likewise for any outstanding task management command */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* device handles tracked in these bitmaps are stale after a reset */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	    ioc->device_remove_in_progress_sz);
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}

/**
 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
9195 */ 9196 void 9197 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) 9198 { 9199 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__)); 9200 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && 9201 !ioc->sas_hba.num_phys)) { 9202 _scsih_prep_device_scan(ioc); 9203 _scsih_create_enclosure_list_after_reset(ioc); 9204 _scsih_search_responding_sas_devices(ioc); 9205 _scsih_search_responding_pcie_devices(ioc); 9206 _scsih_search_responding_raid_devices(ioc); 9207 _scsih_search_responding_expanders(ioc); 9208 _scsih_error_recovery_delete_devices(ioc); 9209 } 9210 } 9211 9212 /** 9213 * _mpt3sas_fw_work - delayed task for processing firmware events 9214 * @ioc: per adapter object 9215 * @fw_event: The fw_event_work object 9216 * Context: user. 9217 */ 9218 static void 9219 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) 9220 { 9221 _scsih_fw_event_del_from_list(ioc, fw_event); 9222 9223 /* the queue is being flushed so ignore this event */ 9224 if (ioc->remove_host || ioc->pci_error_recovery) { 9225 fw_event_work_put(fw_event); 9226 return; 9227 } 9228 9229 switch (fw_event->event) { 9230 case MPT3SAS_PROCESS_TRIGGER_DIAG: 9231 mpt3sas_process_trigger_data(ioc, 9232 (struct SL_WH_TRIGGERS_EVENT_DATA_T *) 9233 fw_event->event_data); 9234 break; 9235 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: 9236 while (scsi_host_in_recovery(ioc->shost) || 9237 ioc->shost_recovery) { 9238 /* 9239 * If we're unloading, bail. Otherwise, this can become 9240 * an infinite loop. 
9241 */ 9242 if (ioc->remove_host) 9243 goto out; 9244 ssleep(1); 9245 } 9246 _scsih_remove_unresponding_devices(ioc); 9247 _scsih_scan_for_devices_after_reset(ioc); 9248 break; 9249 case MPT3SAS_PORT_ENABLE_COMPLETE: 9250 ioc->start_scan = 0; 9251 if (missing_delay[0] != -1 && missing_delay[1] != -1) 9252 mpt3sas_base_update_missing_delay(ioc, missing_delay[0], 9253 missing_delay[1]); 9254 dewtprintk(ioc, 9255 ioc_info(ioc, "port enable: complete from worker thread\n")); 9256 break; 9257 case MPT3SAS_TURN_ON_PFA_LED: 9258 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); 9259 break; 9260 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 9261 _scsih_sas_topology_change_event(ioc, fw_event); 9262 break; 9263 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: 9264 _scsih_sas_device_status_change_event(ioc, fw_event); 9265 break; 9266 case MPI2_EVENT_SAS_DISCOVERY: 9267 _scsih_sas_discovery_event(ioc, fw_event); 9268 break; 9269 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 9270 _scsih_sas_device_discovery_error_event(ioc, fw_event); 9271 break; 9272 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: 9273 _scsih_sas_broadcast_primitive_event(ioc, fw_event); 9274 break; 9275 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: 9276 _scsih_sas_enclosure_dev_status_change_event(ioc, 9277 fw_event); 9278 break; 9279 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: 9280 _scsih_sas_ir_config_change_event(ioc, fw_event); 9281 break; 9282 case MPI2_EVENT_IR_VOLUME: 9283 _scsih_sas_ir_volume_event(ioc, fw_event); 9284 break; 9285 case MPI2_EVENT_IR_PHYSICAL_DISK: 9286 _scsih_sas_ir_physical_disk_event(ioc, fw_event); 9287 break; 9288 case MPI2_EVENT_IR_OPERATION_STATUS: 9289 _scsih_sas_ir_operation_status_event(ioc, fw_event); 9290 break; 9291 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE: 9292 _scsih_pcie_device_status_change_event(ioc, fw_event); 9293 break; 9294 case MPI2_EVENT_PCIE_ENUMERATION: 9295 _scsih_pcie_enumeration_event(ioc, fw_event); 9296 break; 9297 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 9298 
_scsih_pcie_topology_change_event(ioc, fw_event); 9299 return; 9300 break; 9301 } 9302 out: 9303 fw_event_work_put(fw_event); 9304 } 9305 9306 /** 9307 * _firmware_event_work 9308 * @work: The fw_event_work object 9309 * Context: user. 9310 * 9311 * wrappers for the work thread handling firmware events 9312 */ 9313 9314 static void 9315 _firmware_event_work(struct work_struct *work) 9316 { 9317 struct fw_event_work *fw_event = container_of(work, 9318 struct fw_event_work, work); 9319 9320 _mpt3sas_fw_work(fw_event->ioc, fw_event); 9321 } 9322 9323 /** 9324 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) 9325 * @ioc: per adapter object 9326 * @msix_index: MSIX table index supplied by the OS 9327 * @reply: reply message frame(lower 32bit addr) 9328 * Context: interrupt. 9329 * 9330 * This function merely adds a new work task into ioc->firmware_event_thread. 9331 * The tasks are worked from _firmware_event_work in user context. 9332 * 9333 * Return: 1 meaning mf should be freed from _base_interrupt 9334 * 0 means the mf is freed from this function. 
 */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* feed all events except log entries to the diag-trigger engine */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* only one broadcast AEN is worked at a time; count the
		 * rest so the worker can process them afterwards
		 */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 *log_code;

		/* WarpDrive-specific log entries only */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = (u32 *)log_entry->LogData;

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (le32_to_cpu(*log_code)) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	/* these are queued to the worker with no pre-processing at ISR time */
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
			(Mpi2EventDataTemperature_t *)
			mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
				  ActiveCableEventData->ActiveCablePowerRequirement);
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in 32-bit words */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
	return 1;
}

/**
 * _scsih_expander_node_remove - removing expander device from list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Removing object and freeing associated memory from the
 * ioc->sas_expander_list.
 */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	    &sas_expander->sas_port_list, port_list) {
		/* bail mid-walk if a host reset started */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* recurses through mpt3sas_expander_remove */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address);
	}

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent);

	ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
		 sas_expander->handle, (unsigned long long)
		 sas_expander->sas_address);

	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}

/**
 * _scsih_ir_shutdown - IR shutdown notification
 * @ioc: per adapter object
 *
 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
 * the host system is shutting down.
 */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	/* best-effort: wait up to 10 seconds for the firmware to reply */
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * scsih_remove - detach and remove add host
 * @pdev: PCI device struct
 *
 *
 * Routine called when unloading the driver.
 */
static void scsih_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct _sas_port *mpt3sas_port, *next_port;
	struct _raid_device *raid_device, *next;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _pcie_device *pcie_device, *pcienext;
	struct workqueue_struct *wq;
	unsigned long flags;

	ioc->remove_host = 1;

	/* quiesce outstanding I/O before tearing anything down */
	mpt3sas_wait_for_commands_to_complete(ioc);
	_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the event workqueue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	/* release all the volumes */
	_scsih_ir_shutdown(ioc);
	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
	    list) {
		if (raid_device->starget) {
			sas_target_priv_data =
			    raid_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			scsi_remove_target(&raid_device->starget->dev);
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		_scsih_raid_device_remove(ioc, raid_device);
	}
	/* release NVMe devices and the list's references to them */
	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
	    list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* free ports attached to the sas_host */
	list_for_each_entry_safe(mpt3sas_port, next_port,
	    &ioc->sas_hba.sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address);
	}

	/* free phys attached to the sas_host */
	if (ioc->sas_hba.num_phys) {
		kfree(ioc->sas_hba.phy);
		ioc->sas_hba.phy = NULL;
		ioc->sas_hba.num_phys = 0;
	}

	sas_remove_host(shost);
	mpt3sas_base_detach(ioc);
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
}

/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct workqueue_struct *wq;
	unsigned long flags;

	ioc->remove_host = 1;

	mpt3sas_wait_for_commands_to_complete(ioc);
	_scsih_flush_running_cmds(ioc);

	_scsih_fw_event_cleanup_queue(ioc);

	/* detach the event workqueue under the lock, destroy it outside */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	/* notify IR firmware of the shutdown, then detach from the IOC */
	_scsih_ir_shutdown(ioc);
	mpt3sas_base_detach(ioc);
}


/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes.
 * Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;

	/* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	device = NULL;
	/* precedence: requested, then alternate, then current boot device */
	if (ioc->req_boot_device.device) {
		device = ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device = ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device = ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* the channel identifies which _device struct "device" points at */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = device;
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = device;
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		if (ioc->hide_drives)
			return;
		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}

/**
 * _scsih_probe_raid - reporting raid volumes to scsi-ml
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
{
	struct _raid_device *raid_device, *raid_next;
	int rc;

	list_for_each_entry_safe(raid_device, raid_next,
	    &ioc->raid_device_list, list) {
		/* skip volumes already registered with scsi-ml */
		if (raid_device->starget)
			continue;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	}
}

/* Pop the next device (with a reference held) off sas_device_init_list;
 * returns NULL when the list is empty.
 */
static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	if (!list_empty(&ioc->sas_device_init_list)) {
		sas_device = list_first_entry(&ioc->sas_device_init_list,
		    struct _sas_device, list);
		sas_device_get(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}

/* Move a registered sas device onto ioc->sas_device_list, transferring
 * the list reference accordingly.
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}

/**
 * _scsih_probe_sas - reporting sas devices to sas transport
 * @ioc: per adapter object
 *
 * Called during initial loading of the driver.
 */
static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device;

	if (ioc->hide_drives)
		return;

	while ((sas_device = get_next_sas_device(ioc))) {
		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
		    sas_device->sas_address_parent)) {
			_scsih_sas_device_remove(ioc, sas_device);
			sas_device_put(sas_device);
			continue;
		} else if (!sas_device->starget) {
			/*
			 * When asyn scanning is enabled, its not possible to
			 * remove devices while scanning is turned on due to an
			 * oops in scsi_sysfs_add_sdev()->add_device()->
			 * sysfs_addrm_start()
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_device->sas_address,
				    sas_device->sas_address_parent);
				_scsih_sas_device_remove(ioc, sas_device);
				sas_device_put(sas_device);
				continue;
			}
		}
		sas_device_make_active(ioc, sas_device);
		sas_device_put(sas_device);
	}
}

/**
 * get_next_pcie_device - Get the next pcie device
 * @ioc: per adapter object
 *
 * Get the next pcie device from pcie_device_init_list list.
9917 * 9918 * Return: pcie device structure if pcie_device_init_list list is not empty 9919 * otherwise returns NULL 9920 */ 9921 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) 9922 { 9923 struct _pcie_device *pcie_device = NULL; 9924 unsigned long flags; 9925 9926 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9927 if (!list_empty(&ioc->pcie_device_init_list)) { 9928 pcie_device = list_first_entry(&ioc->pcie_device_init_list, 9929 struct _pcie_device, list); 9930 pcie_device_get(pcie_device); 9931 } 9932 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9933 9934 return pcie_device; 9935 } 9936 9937 /** 9938 * pcie_device_make_active - Add pcie device to pcie_device_list list 9939 * @ioc: per adapter object 9940 * @pcie_device: pcie device object 9941 * 9942 * Add the pcie device which has registered with SCSI Transport Later to 9943 * pcie_device_list list 9944 */ 9945 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc, 9946 struct _pcie_device *pcie_device) 9947 { 9948 unsigned long flags; 9949 9950 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 9951 9952 if (!list_empty(&pcie_device->list)) { 9953 list_del_init(&pcie_device->list); 9954 pcie_device_put(pcie_device); 9955 } 9956 pcie_device_get(pcie_device); 9957 list_add_tail(&pcie_device->list, &ioc->pcie_device_list); 9958 9959 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 9960 } 9961 9962 /** 9963 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml 9964 * @ioc: per adapter object 9965 * 9966 * Called during initial loading of the driver. 
9967 */ 9968 static void 9969 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) 9970 { 9971 struct _pcie_device *pcie_device; 9972 int rc; 9973 9974 /* PCIe Device List */ 9975 while ((pcie_device = get_next_pcie_device(ioc))) { 9976 if (pcie_device->starget) { 9977 pcie_device_put(pcie_device); 9978 continue; 9979 } 9980 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, 9981 pcie_device->id, 0); 9982 if (rc) { 9983 _scsih_pcie_device_remove(ioc, pcie_device); 9984 pcie_device_put(pcie_device); 9985 continue; 9986 } else if (!pcie_device->starget) { 9987 /* 9988 * When async scanning is enabled, its not possible to 9989 * remove devices while scanning is turned on due to an 9990 * oops in scsi_sysfs_add_sdev()->add_device()-> 9991 * sysfs_addrm_start() 9992 */ 9993 if (!ioc->is_driver_loading) { 9994 /* TODO-- Need to find out whether this condition will 9995 * occur or not 9996 */ 9997 _scsih_pcie_device_remove(ioc, pcie_device); 9998 pcie_device_put(pcie_device); 9999 continue; 10000 } 10001 } 10002 pcie_device_make_active(ioc, pcie_device); 10003 pcie_device_put(pcie_device); 10004 } 10005 } 10006 10007 /** 10008 * _scsih_probe_devices - probing for devices 10009 * @ioc: per adapter object 10010 * 10011 * Called during initial loading of the driver. 
 */
static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u16 volume_mapping_flags;

	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
		return;  /* return when IOC doesn't support initiator mode */

	_scsih_probe_boot_devices(ioc);

	if (ioc->ir_firmware) {
		/* report volumes and end devices in the order dictated by
		 * the IR volume-mapping mode from IOC page 8
		 */
		volume_mapping_flags =
		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
		if (volume_mapping_flags ==
		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
			_scsih_probe_raid(ioc);
			_scsih_probe_sas(ioc);
		} else {
			_scsih_probe_sas(ioc);
			_scsih_probe_raid(ioc);
		}
	} else {
		_scsih_probe_sas(ioc);
		_scsih_probe_pcie(ioc);
	}
}

/**
 * scsih_scan_start - scsi lld callback for .scan_start
 * @shost: SCSI host pointer
 *
 * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus. In our implementation, we will kick off
 * firmware discovery.
 */
static void
scsih_scan_start(struct Scsi_Host *shost)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int rc;
	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);

	if (disable_discovery > 0)
		return;

	/* cleared by the PORT_ENABLE_COMPLETE event worker */
	ioc->start_scan = 1;
	rc = mpt3sas_port_enable(ioc);

	if (rc != 0)
		ioc_info(ioc, "port enable: FAILED\n");
}

/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our
 * implementation, we wait for firmware discovery to complete, then return 1.
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* Give firmware discovery at most 300 seconds. */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/* Port enable still in flight; poll again later. */
	if (ioc->start_scan)
		return 0;

	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
		    ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	if (ioc->wait_for_discovery_to_complete) {
		ioc->wait_for_discovery_to_complete = 0;
		_scsih_probe_devices(ioc);
	}
	mpt3sas_base_start_watchdog(ioc);
	ioc->is_driver_loading = 0;
	return 1;
}

/* shost template for SAS 2.0 HBA devices */
static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};

/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/* shost template for SAS 3.0 HBA devices */
static struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.slave_configure		= scsih_slave_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= mpt3sas_host_attrs,
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
};

/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template
mpt3sas_raid_functions = {
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};

/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 *					this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
 *	(0 if the device id is not recognized)
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
		return MPI2_VERSION;
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
		return MPI26_VERSION;
	}
	return 0;
}

/**
 * _scsih_probe - attach and add scsi host
 * @pdev: PCI device struct
 * @id: pci device id
 *
 * Allocates the Scsi_Host and per-adapter object, initializes all of the
 * adapter's lists/locks/callback indices, attaches to the hardware via
 * mpt3sas_base_attach(), and registers the host with the SCSI mid-layer.
 *
 * Return: 0 success, anything else error.
 */
static int
_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct MPT3SAS_ADAPTER *ioc;
	struct Scsi_Host *shost = NULL;
	int rv;
	u16 hba_mpi_version;

	/* Determine in which MPI version class this pci device belongs */
	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
	if (hba_mpi_version == 0)
		return -ENODEV;

	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
	 * for other generation HBA's return with -ENODEV
	 */
	if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
		return -ENODEV;

	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
	 * for other generation HBA's return with -ENODEV
	 */
	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
		|| hba_mpi_version == MPI26_VERSION)))
		return -ENODEV;

	switch (hba_mpi_version) {
	case MPI2_VERSION:
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
		shost = scsi_host_alloc(&mpt2sas_driver_template,
		    sizeof(struct MPT3SAS_ADAPTER));
		if (!shost)
			return -ENODEV;
		ioc = shost_priv(shost);
		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
		ioc->hba_mpi_version_belonged = hba_mpi_version;
		ioc->id = mpt2_ids++;
		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
		switch (pdev->device) {
		case MPI2_MFGPAGE_DEVID_SSS6200:
			ioc->is_warpdrive = 1;
			ioc->hide_ir_msg = 1;
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
			ioc->is_mcpu_endpoint = 1;
			break;
		default:
			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
			break;
		}
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
		shost = scsi_host_alloc(&mpt3sas_driver_template,
		    sizeof(struct MPT3SAS_ADAPTER));
		if (!shost)
			return -ENODEV;
		ioc = shost_priv(shost);
		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
		ioc->hba_mpi_version_belonged = hba_mpi_version;
		ioc->id = mpt3_ids++;
		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
		switch (pdev->device) {
		case MPI26_MFGPAGE_DEVID_SAS3508:
		case MPI26_MFGPAGE_DEVID_SAS3508_1:
		case MPI26_MFGPAGE_DEVID_SAS3408:
		case MPI26_MFGPAGE_DEVID_SAS3516:
		case MPI26_MFGPAGE_DEVID_SAS3516_1:
		case MPI26_MFGPAGE_DEVID_SAS3416:
		case MPI26_MFGPAGE_DEVID_SAS3616:
			ioc->is_gen35_ioc = 1;
			break;
		default:
			ioc->is_gen35_ioc = 0;
		}
		/* Combined reply queues: MPI25 C0-or-later silicon and all
		 * MPI26 parts; the per-generation register count differs.
		 */
		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
			ioc->combined_reply_queue = 1;
			if (ioc->is_gen35_ioc)
				ioc->combined_reply_index_count =
				    MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
			else
				ioc->combined_reply_index_count =
				    MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
		}
		break;
	default:
		return -ENODEV;
	}

	INIT_LIST_HEAD(&ioc->list);
	spin_lock(&gioc_lock);
	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
	spin_unlock(&gioc_lock);
	ioc->shost = shost;
	ioc->pdev = pdev;
	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
	ioc->tm_cb_idx = tm_cb_idx;
	ioc->ctl_cb_idx = ctl_cb_idx;
	ioc->base_cb_idx = base_cb_idx;
	ioc->port_enable_cb_idx = port_enable_cb_idx;
	ioc->transport_cb_idx = transport_cb_idx;
	ioc->scsih_cb_idx = scsih_cb_idx;
	ioc->config_cb_idx = config_cb_idx;
	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
	ioc->logging_level = logging_level;
	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
	/* misc semaphores and spin locks */
	mutex_init(&ioc->reset_in_progress_mutex);
	/* initializing pci_access_mutex lock */
	mutex_init(&ioc->pci_access_mutex);
	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
	spin_lock_init(&ioc->scsi_lookup_lock);
	spin_lock_init(&ioc->sas_device_lock);
	spin_lock_init(&ioc->sas_node_lock);
	spin_lock_init(&ioc->fw_event_lock);
	spin_lock_init(&ioc->raid_device_lock);
	spin_lock_init(&ioc->pcie_device_lock);
	spin_lock_init(&ioc->diag_trigger_lock);

	INIT_LIST_HEAD(&ioc->sas_device_list);
	INIT_LIST_HEAD(&ioc->sas_device_init_list);
	INIT_LIST_HEAD(&ioc->sas_expander_list);
	INIT_LIST_HEAD(&ioc->enclosure_list);
	INIT_LIST_HEAD(&ioc->pcie_device_list);
	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
	INIT_LIST_HEAD(&ioc->fw_event_list);
	INIT_LIST_HEAD(&ioc->raid_device_list);
	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
	INIT_LIST_HEAD(&ioc->delayed_tr_list);
	INIT_LIST_HEAD(&ioc->delayed_sc_list);
	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
	INIT_LIST_HEAD(&ioc->reply_queue_list);

	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);

	/* init shost parameters */
	shost->max_cmd_len = 32;
	shost->max_lun = max_lun;
	shost->transportt = mpt3sas_transport_template;
	shost->unique_id = ioc->id;

	if (ioc->is_mcpu_endpoint) {
		/* mCPU MPI support 64K max IO */
		shost->max_sectors = 128;
		ioc_info(ioc, "The max_sectors value is set to %d\n",
		    shost->max_sectors);
	} else {
		/* Clamp the module parameter to [64, 32767]; 0xFFFF means
		 * "not set" and leaves the template default in place.
		 */
		if (max_sectors != 0xFFFF) {
			if (max_sectors < 64) {
				shost->max_sectors = 64;
				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
					 max_sectors);
			} else if (max_sectors > 32767) {
				shost->max_sectors = 32767;
				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
					 max_sectors);
			} else {
				shost->max_sectors = max_sectors & 0xFFFE;
				ioc_info(ioc, "The max_sectors value is set to %d\n",
					 shost->max_sectors);
			}
		}
	}
	/* register EEDP capabilities with SCSI layer */
	if (prot_mask > 0)
		scsi_host_set_prot(shost, prot_mask);
	else
		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
				   | SHOST_DIF_TYPE2_PROTECTION
				   | SHOST_DIF_TYPE3_PROTECTION);

	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	/* event thread */
	snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
	    "fw_event_%s%d", ioc->driver_name, ioc->id);
	ioc->firmware_event_thread = alloc_ordered_workqueue(
	    ioc->firmware_event_name, 0);
	if (!ioc->firmware_event_thread) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		rv = -ENODEV;
		goto out_thread_fail;
	}

	ioc->is_driver_loading = 1;
	if ((mpt3sas_base_attach(ioc))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		rv = -ENODEV;
		goto out_attach_fail;
	}

	if (ioc->is_warpdrive) {
		if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
			ioc->hide_drives = 0;
		else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
			ioc->hide_drives = 1;
		else {
			/* Auto mode: hide member drives only when volumes
			 * actually exist.
			 */
			if (mpt3sas_get_num_volumes(ioc))
				ioc->hide_drives = 1;
			else
				ioc->hide_drives = 0;
		}
	} else
		ioc->hide_drives = 0;

	rv = scsi_add_host(shost, &pdev->dev);
	if (rv) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out_add_shost_fail;
	}

	scsi_scan_host(shost);
	return 0;
	/* Error unwind: reverse order of the setup above. */
out_add_shost_fail:
	mpt3sas_base_detach(ioc);
out_attach_fail:
	destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
	spin_lock(&gioc_lock);
	list_del(&ioc->list);
	spin_unlock(&gioc_lock);
	scsi_host_put(shost);
	return rv;
}

#ifdef CONFIG_PM
/**
 * scsih_suspend - power management suspend main entry point
 * @pdev: PCI device struct
 * @state: PM state change to (usually PCI_D3)
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	pci_power_t device_state;

	/* Quiesce: stop the watchdog and pending work before blocking
	 * new requests and releasing adapter resources.
	 */
	mpt3sas_base_stop_watchdog(ioc);
	flush_scheduled_work();
	scsi_block_requests(shost);
	device_state = pci_choose_state(pdev, state);
	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	pci_save_state(pdev);
	mpt3sas_base_free_resources(ioc);
	pci_set_power_state(pdev, device_state);
	return 0;
}

/**
 * scsih_resume - power management resume main entry point
 * @pdev: PCI device struct
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	pci_power_t device_state = pdev->current_state;
	int r;

	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
		 pdev, pci_name(pdev), device_state);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	ioc->pdev = pdev;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		return r;

	/* Soft reset brings the IOC back to an operational state after
	 * the power transition; then unblock I/O and restart the watchdog.
	 */
	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
	scsi_unblock_requests(shost);
	mpt3sas_base_start_watchdog(ioc);
	return 0;
}
#endif /* CONFIG_PM */

/**
 * scsih_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		ioc->pci_error_recovery = 1;
		scsi_block_requests(ioc->shost);
		mpt3sas_base_stop_watchdog(ioc);
		mpt3sas_base_free_resources(ioc);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		ioc->pci_error_recovery = 1;
		mpt3sas_base_stop_watchdog(ioc);
		_scsih_flush_running_cmds(ioc);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	/* Unknown channel state: ask for a reset as the safe default. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * scsih_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int rc;

	ioc_info(ioc, "PCI error: slot reset callback!!\n");

	ioc->pci_error_recovery = 0;
	ioc->pdev = pdev;
	pci_restore_state(pdev);
	rc = mpt3sas_base_map_resources(ioc);
	if (rc)
		return PCI_ERS_RESULT_DISCONNECT;

	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

	ioc_warn(ioc, "hard reset: %s\n",
		 (rc == 0) ? "success" : "failed");

	if (!rc)
		return PCI_ERS_RESULT_RECOVERED;
	else
		return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * scsih_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation. Use completion to allow
 * halted scsi ops to resume.
 */
static void
scsih_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	ioc_info(ioc, "PCI error: resume callback!!\n");

	mpt3sas_base_start_watchdog(ioc);
	scsi_unblock_requests(ioc->shost);
}

/**
 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This called only if scsih_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
 * @sdev: scsi device struct
 *
 * This is called when a user indicates they would like to enable
 * ncq command priorities. This works only on SATA devices.
 */
bool scsih_ncq_prio_supp(struct scsi_device *sdev)
{
	unsigned char *buf;
	bool ncq_prio_supp = false;

	if (!scsi_device_supports_vpd(sdev))
		return ncq_prio_supp;

	buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
	if (!buf)
		return ncq_prio_supp;

	/* VPD page 0x89 is ATA Information; buf[213] bit 4 maps to
	 * IDENTIFY DEVICE word 76 bit 12 (NCQ priority) per the SAT
	 * page layout -- NOTE(review): verify offset against SAT spec.
	 */
	if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
		ncq_prio_supp = (buf[213] >> 4) & 1;

	kfree(buf);
	return ncq_prio_supp;
}
/*
 * The pci device ids are defined in mpi/mpi2_cnfg.h.
 */
static const struct pci_device_id mpt3sas_pci_table[] = {
	/* Spitfire ~ 2004 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Falcon ~ 2008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Liberator ~ 2108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Meteor ~ 2116 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Thunderbolt ~ 2208 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mustang ~ 2308 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
		PCI_ANY_ID, PCI_ANY_ID },
	/* SSS6200 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Fury ~ 3004 and 3008 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Invader ~ 3108 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Cutlass ~ 3216 and 3224 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Intruder ~ 3316 and 3324 */
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
		PCI_ANY_ID, PCI_ANY_ID },
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
		PCI_ANY_ID, PCI_ANY_ID },
	/* Mercator ~ 3616*/
	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
		PCI_ANY_ID, PCI_ANY_ID },
	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);

/* PCI AER (Advanced Error Reporting) recovery callbacks */
static struct pci_error_handlers _mpt3sas_err_handler = {
	.error_detected	= scsih_pci_error_detected,
	.mmio_enabled	= scsih_pci_mmio_enabled,
	.slot_reset	= scsih_pci_slot_reset,
	.resume		= scsih_pci_resume,
};

static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
#ifdef CONFIG_PM
	.suspend	= scsih_suspend,
	.resume		= scsih_resume,
#endif
};

/**
 * scsih_init - main entry point for this driver.
 *
 * Return: 0 success, anything else error.
 */
static int
scsih_init(void)
{
	mpt2_ids = 0;
	mpt3_ids = 0;

	mpt3sas_base_initialize_callback_handler();

	/* NOTE: registration order matters — each call hands out the next
	 * callback index, and those indices are copied into each adapter
	 * in _scsih_probe().
	 */

	/* queuecommand callback handler */
	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);

	/* task management callback handler */
	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);

	/* base internal commands callback handler */
	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_port_enable_done);

	/* transport internal commands callback handler */
	transport_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_transport_done);

	/* scsih internal commands callback handler */
	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);

	/* configuration page API internal commands callback handler */
	config_cb_idx = mpt3sas_base_register_callback_handler(
	    mpt3sas_config_done);

	/* ctl module callback handler */
	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);

	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_tr_complete);

	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_tm_volume_tr_complete);

	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
	    _scsih_sas_control_complete);

	return 0;
}

/**
 * scsih_exit - exit point for this driver (when it is a module).
 *
 * Releases all registered callback handlers, the raid class templates
 * and the SAS transport template.
10876 */ 10877 static void 10878 scsih_exit(void) 10879 { 10880 10881 mpt3sas_base_release_callback_handler(scsi_io_cb_idx); 10882 mpt3sas_base_release_callback_handler(tm_cb_idx); 10883 mpt3sas_base_release_callback_handler(base_cb_idx); 10884 mpt3sas_base_release_callback_handler(port_enable_cb_idx); 10885 mpt3sas_base_release_callback_handler(transport_cb_idx); 10886 mpt3sas_base_release_callback_handler(scsih_cb_idx); 10887 mpt3sas_base_release_callback_handler(config_cb_idx); 10888 mpt3sas_base_release_callback_handler(ctl_cb_idx); 10889 10890 mpt3sas_base_release_callback_handler(tm_tr_cb_idx); 10891 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); 10892 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); 10893 10894 /* raid transport support */ 10895 if (hbas_to_enumerate != 1) 10896 raid_class_release(mpt3sas_raid_template); 10897 if (hbas_to_enumerate != 2) 10898 raid_class_release(mpt2sas_raid_template); 10899 sas_release_transport(mpt3sas_transport_template); 10900 } 10901 10902 /** 10903 * _mpt3sas_init - main entry point for this driver. 10904 * 10905 * Return: 0 success, anything else error. 10906 */ 10907 static int __init 10908 _mpt3sas_init(void) 10909 { 10910 int error; 10911 10912 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, 10913 MPT3SAS_DRIVER_VERSION); 10914 10915 mpt3sas_transport_template = 10916 sas_attach_transport(&mpt3sas_transport_functions); 10917 if (!mpt3sas_transport_template) 10918 return -ENODEV; 10919 10920 /* No need attach mpt3sas raid functions template 10921 * if hbas_to_enumarate value is one. 
10922 */ 10923 if (hbas_to_enumerate != 1) { 10924 mpt3sas_raid_template = 10925 raid_class_attach(&mpt3sas_raid_functions); 10926 if (!mpt3sas_raid_template) { 10927 sas_release_transport(mpt3sas_transport_template); 10928 return -ENODEV; 10929 } 10930 } 10931 10932 /* No need to attach mpt2sas raid functions template 10933 * if hbas_to_enumarate value is two 10934 */ 10935 if (hbas_to_enumerate != 2) { 10936 mpt2sas_raid_template = 10937 raid_class_attach(&mpt2sas_raid_functions); 10938 if (!mpt2sas_raid_template) { 10939 sas_release_transport(mpt3sas_transport_template); 10940 return -ENODEV; 10941 } 10942 } 10943 10944 error = scsih_init(); 10945 if (error) { 10946 scsih_exit(); 10947 return error; 10948 } 10949 10950 mpt3sas_ctl_init(hbas_to_enumerate); 10951 10952 error = pci_register_driver(&mpt3sas_driver); 10953 if (error) 10954 scsih_exit(); 10955 10956 return error; 10957 } 10958 10959 /** 10960 * _mpt3sas_exit - exit point for this driver (when it is a module). 10961 * 10962 */ 10963 static void __exit 10964 _mpt3sas_exit(void) 10965 { 10966 pr_info("mpt3sas version %s unloading\n", 10967 MPT3SAS_DRIVER_VERSION); 10968 10969 mpt3sas_ctl_exit(hbas_to_enumerate); 10970 10971 pci_unregister_driver(&mpt3sas_driver); 10972 10973 scsih_exit(); 10974 } 10975 10976 module_init(_mpt3sas_init); 10977 module_exit(_mpt3sas_exit); 10978